/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/bootinfo.h>
#include <asm/lmb.h>
#include <asm/sections.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif
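
/*
 * When a sub-architecture does not define these feature bits, the zero
 * values make cpu_has_feature() report them as absent, so the i-cache
 * is treated as neither coherent nor no-execute and the flush paths
 * below are used.
 */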

int init_bootmem_done;
int mem_init_done;
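
/*
 * Early-boot code elsewhere in the tree tests these flags to pick an
 * allocator: lmb before bootmem is initialized, bootmem until
 * mem_init() has run, and the regular page/slab allocators afterwards.
 */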

/*
 * This is called by /dev/mem to determine whether a given address
 * has to be mapped non-cacheable or not.
 */
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}
EXPORT_SYMBOL(page_is_ram);
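
/*
 * Note: the 64-bit path walks the lmb region list because physical
 * memory may be discontiguous there; the 32-bit shortcut relies on RAM
 * being a single contiguous block below high_memory.
 */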

pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);

	if (!page_is_ram(addr >> PAGE_SHIFT))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
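
/*
 * Sketch (assumed caller, not part of this file): a driver mmap()
 * implementation could apply the helper above before remapping, e.g.
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file,
 *			vma->vm_pgoff << PAGE_SHIFT,
 *			vma->vm_end - vma->vm_start,
 *			vma->vm_page_prot);
 *
 * so that mappings of non-RAM (e.g. I/O) space come out guarded and
 * non-cacheable.
 */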

void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
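	/*
	 * Worked example with 4KB pages: 512MB of memory is 131072 pages,
	 * needing 131072 bits = 16KB of bitmap, which occupies 4 pages.
	 */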
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
	BUG_ON(!start);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base = lmb.memory.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
#ifdef CONFIG_HIGHMEM
		if (base >= total_lowmem)
			continue;
		if (base + size > total_lowmem)
			size = total_lowmem - base;
#endif
		free_bootmem(base, size);
	}

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

	/* XXX need to clip this if using highmem? */
	for (i = 0; i < lmb.memory.cnt; i++)
		memory_present(0, lmb_start_pfn(&lmb.memory, i),
			       lmb_end_pfn(&lmb.memory, i));
	init_bootmem_done = 1;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;

#ifdef CONFIG_HIGHMEM
	/* With highmem, the DMA zone covers lowmem only and the rest
	 * of RAM goes in the highmem zone.
	 */
	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
	zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */

	free_area_init_node(0, NODE_DATA(0), zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
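
/*
 * With CONFIG_NEED_MULTIPLE_NODES the NUMA code supplies its own
 * do_init_bootmem() and paging_init(); the flat versions above put all
 * of memory on node 0.
 */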

void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = max_pfn;	/* RAM is assumed contiguous */
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %x\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = num_physpages;
	totalram_pages += free_all_bootmem();
#endif
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

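	/*
	 * With CONFIG_HIGHMEM, pages above total_lowmem were never handed
	 * to bootmem (see do_init_bootmem()), so release them into the
	 * buddy allocator by hand: clear PG_reserved, give each page a
	 * reference count of one, then free it.
	 */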
#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			set_page_count(page, 1);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_INFO "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       bsssize >> 10,
	       initsize >> 10);

	mem_init_done = 1;

#ifdef CONFIG_PPC64
	/* Initialize the vDSO */
	vdso_init();
#endif
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
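
/*
 * Convention used throughout this file: PG_arch_1 set on a page means
 * its i-cache image is clean.  update_mmu_cache() below flushes a page
 * and sets the bit; flush_dcache_page() above and the clear/copy
 * helpers below clear it whenever the page contents may have changed.
 */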

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
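
/*
 * Note: the default path flushes by physical address and so never needs
 * a temporary mapping of the page; Book-E parts flush through a kernel
 * virtual mapping instead, hence the kmap_atomic() above.
 */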

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
#ifdef CONFIG_PPC32
	pmd_t *pmd;
#else
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	int local = 0;
	cpumask_t tmp;
	unsigned long flags;
#endif

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
#ifdef CONFIG_8xx
				/* On 8xx, cache control instructions
				 * (particularly "dcbst" from
				 * flush_dcache_icache) fault as write
				 * operations if there is an unpopulated
				 * TLB entry for the address in question.
				 * To work around that, we invalidate the
				 * TLB here, thus avoiding dcbst
				 * misbehaviour.
				 */
				_tlbie(address);
#endif
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

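	/*
	 * On hash-table MMUs, preload the HPTE for this PTE so the
	 * process does not immediately take a hash-table miss on the
	 * address it just faulted on.
	 */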
#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;
#ifdef CONFIG_PPC32
	if (Hash == 0)
		return;
	pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
	if (!pmd_none(*pmd))
		add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
#else
	pgdir = vma->vm_mm->pgd;
	if (pgdir == NULL)
		return;

	ptep = find_linux_pte(pgdir, address);
	if (!ptep)
		return;

	vsid = get_vsid(vma->vm_mm->context.id, address);

	local_irq_save(flags);
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

	__hash_page(address, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid,
		    ptep, 0x300, local);
	local_irq_restore(flags);
#endif /* CONFIG_PPC32 */
#endif /* CONFIG_PPC_STD_MMU */
}