/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/abs_addr.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

int mem_init_done;
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;

extern pgd_t swapper_pg_dir[];
extern struct task_struct *current_set[NR_CPUS];

extern pgd_t ioremap_dir[];
pgd_t *ioremap_pgd = (pgd_t *)&ioremap_dir;

unsigned long klimit = (unsigned long)_end;

unsigned long _SDR1 = 0;
unsigned long _ASR = 0;

/* max amount of RAM to use */
unsigned long __max_memory;

/* info on what we think the IO hole is */
unsigned long io_hole_start;
unsigned long io_hole_size;

void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat->node_mem_map + i;
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%ld pages of RAM\n", total);
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

#ifdef CONFIG_PPC_ISERIES
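
/*
 * On iSeries there is nothing for us to map: IO accesses are mediated
 * by the hypervisor, which already owns the translations, so these
 * stubs simply hand the caller's address (really a firmware cookie)
 * straight back.  This note summarizes the intent of the stubs below;
 * the exact firmware interface lives elsewhere.
 */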

void __iomem *ioremap(unsigned long addr, unsigned long size)
{
	return (void __iomem *)addr;
}

void __iomem *__ioremap(unsigned long addr, unsigned long size,
			unsigned long flags)
{
	return (void __iomem *)addr;
}

void iounmap(volatile void __iomem *addr)
{
	return;
}

#else

static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr,
			      unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr,
				     unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		unmap_im_area_pte(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr,
				     unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		unmap_im_area_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void unmap_im_area(unsigned long addr, unsigned long end)
{
	struct mm_struct *mm = &ioremap_mm;
	unsigned long start = addr;
	unsigned long next;
	pgd_t *pgd;

	spin_lock(&mm->page_table_lock);

	pgd = pgd_offset_i(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		unmap_im_area_pud(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);

	spin_unlock(&mm->page_table_lock);
}
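
/*
 * Note on the walkers above: the pgd -> pud -> pmd -> pte descent
 * follows the same shape as unmap_vm_area() in mm/vmalloc.c (which the
 * iounmap() comment below cites as its model), except that it walks
 * ioremap_mm's page tables instead of init_mm's.
 */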

/*
 * map_io_page currently only called by __ioremap
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long vsid;

	if (mem_init_done) {
		spin_lock(&ioremap_mm.page_table_lock);
		pgdp = pgd_offset_i(ea);
		pudp = pud_alloc(&ioremap_mm, pgdp, ea);
		if (!pudp) {
			spin_unlock(&ioremap_mm.page_table_lock);
			return -ENOMEM;
		}
		pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
		if (!pmdp) {
			spin_unlock(&ioremap_mm.page_table_lock);
			return -ENOMEM;
		}
		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
		if (!ptep) {
			spin_unlock(&ioremap_mm.page_table_lock);
			return -ENOMEM;
		}
		pa = abs_to_phys(pa);
		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
		spin_unlock(&ioremap_mm.page_table_lock);
	} else {
		unsigned long va, vpn, hash, hpteg;

		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
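		/*
		 * Address geometry, for orientation: a segment on ppc64
		 * covers 2^28 bytes (256MB), so the virtual address is the
		 * segment's VSID shifted up 28 bits plus the 28-bit offset
		 * within the segment, and the virtual page number is that
		 * address divided by the page size.
		 */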
		vsid = get_kernel_vsid(ea);
		va = (vsid << 28) | (ea & 0xFFFFFFF);
		vpn = va >> PAGE_SHIFT;

		hash = hpt_hash(vpn, 0);

		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		/* Panic if a pte group is full */
		if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0,
				       _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX,
				       1, 0) == -1) {
			panic("map_io_page: could not insert mapping");
		}
	}
	return 0;
}


static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
				    unsigned long ea, unsigned long size,
				    unsigned long flags)
{
	unsigned long i;

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_io_page(ea+i, pa+i, flags))
			goto failure;

	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 failure:
	if (mem_init_done)
		unmap_im_area(ea, ea + size);
	return NULL;
}


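/*
 * Typical driver usage, as a rough sketch (the physical address,
 * register offset and size are made up for illustration):
 *
 *	void __iomem *regs = ioremap(0xf8000000ul, 0x1000);
 *	if (regs) {
 *		u32 status = in_be32(regs + 0x10);
 *		...
 *		iounmap(regs);
 *	}
 */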
void __iomem *
ioremap(unsigned long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
}

void __iomem * __ioremap(unsigned long addr, unsigned long size,
			 unsigned long flags)
{
	unsigned long pa, ea;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END (0xE000001fffffffff)
	 *
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if (size == 0)
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;
		area = im_get_free_area(size);
		if (area == NULL)
			return NULL;
		ea = (unsigned long)(area->addr);
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (!ret)
			im_free(area->addr);
	} else {
		ea = ioremap_bot;
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (ret)
			ioremap_bot += size;
	}
	return ret;
}

#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))

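/*
 * Map a region at a caller-chosen virtual address instead of one picked
 * by imalloc.  Judging by the error handling below, the expected caller
 * is PHB dynamic reconfiguration (dlpar), which has to re-create
 * mappings at known addresses.
 */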
int __ioremap_explicit(unsigned long pa, unsigned long ea,
		       unsigned long size, unsigned long flags)
{
	struct vm_struct *area;
	void __iomem *ret;

	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
		return 1;
	}

	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    of 1)
		 */
		;
	} else {
		area = im_get_area(ea, size,
			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
		if (area == NULL) {
			/* Expected when PHB-dlpar is in play */
			return 1;
		}
		if (ea != (unsigned long) area->addr) {
			printk(KERN_ERR "unexpected addr return from "
			       "im_get_area\n");
			return 1;
		}
	}

	ret = __ioremap_com(pa, pa, ea, size, flags);
	if (ret == NULL) {
		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
		return 1;
	}
	if (ret != (void __iomem *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
		return 1;
	}

	return 0;
}

/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 * This code is modeled after vmalloc code - unmap_vm_area()
 *
 * XXX	what about calls before mem_init_done (ie python_countermeasures())
 */
void iounmap(volatile void __iomem *token)
{
	unsigned long address, size;
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

	if ((size = im_free(addr)) == 0)
		return;

	address = (unsigned long)addr;
	unmap_im_area(address, address + size);
}

static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;

	/* Check whether subsets of this region exist */
	area = im_get_area(addr, size, IM_REGION_SUPERSET);
	if (area == NULL)
		return 1;

	while (area) {
		iounmap((void __iomem *) area->addr);
		area = im_get_area(addr, size,
				   IM_REGION_SUPERSET);
	}

	return 0;
}

int iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;
	int rc;

	addr = (unsigned long __force) start & PAGE_MASK;

	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create
	 * the exact region
	 */
	area = im_get_area(addr, size,
			   IM_REGION_EXISTS | IM_REGION_SUBSET);
	if (area == NULL) {
		/* Determine whether subset regions exist.  If so, unmap */
		rc = iounmap_subset_regions(addr, size);
		if (rc) {
			printk(KERN_ERR
			       "%s() cannot unmap nonexistent range 0x%lx\n",
			       __FUNCTION__, addr);
			return 1;
		}
	} else {
		iounmap((void __iomem *) area->addr);
	}
	/*
	 * FIXME! This can't be right:
	iounmap(area->addr);
	 * Maybe it should be "iounmap(area);"
	 */
	return 0;
}

#endif

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n",
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %luk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDR(mmu_context_idr);

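/*
 * Context IDs are handed out by the IDR starting at 1, so id 0 is never
 * allocated here; that leaves it free for its traditional role as the
 * kernel's own context.  MAX_CONTEXT bounds what the hash code accepts,
 * and over-large allocations are backed out in init_new_context().
 */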
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;
	int err;

#ifdef CONFIG_HUGETLB_PAGE
	/* We leave htlb_segs as it was, but for a fork, we need to
	 * clear the huge_pgdir. */
	mm->context.huge_pgdir = NULL;
#endif

again:
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		idr_remove(&mmu_context_idr, index);
		return -ENOMEM;
	}

	mm->context.id = index;

	return 0;
}

void destroy_context(struct mm_struct *mm)
{
	spin_lock(&mmu_context_lock);
	idr_remove(&mmu_context_idr, mm->context.id);
	spin_unlock(&mmu_context_lock);

	mm->context.id = NO_CONTEXT;

	hugetlb_mm_free_pgd(mm);
}

/*
 * Do very early mm setup.
 */
void __init mm_init_ppc64(void)
{
#ifndef CONFIG_PPC_ISERIES
	unsigned long i;
#endif

	ppc64_boot_msg(0x100, "MM Init");

	/* This is the story of the IO hole... please, keep seated,
	 * unfortunately, we are out of oxygen masks at the moment.
	 * So we need some rough way to tell where your big IO hole
	 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
	 * that area as well, on POWER4 we don't have one, etc...
	 * We need that as a "hint" when sizing the TCE table on POWER3.
	 * So far, the simplest way that seems to work well enough for
	 * us is to just assume that the first discontinuity in our
	 * physical RAM layout is the IO hole.  That may not be correct
	 * in the future (and isn't on iSeries but then we don't care ;)
	 */

#ifndef CONFIG_PPC_ISERIES
	for (i = 1; i < lmb.memory.cnt; i++) {
		unsigned long base, prevbase, prevsize;

		prevbase = lmb.memory.region[i-1].physbase;
		prevsize = lmb.memory.region[i-1].size;
		base = lmb.memory.region[i].physbase;
		if (base > (prevbase + prevsize)) {
			io_hole_start = prevbase + prevsize;
			io_hole_size = base - (prevbase + prevsize);
			break;
		}
	}
#endif /* CONFIG_PPC_ISERIES */
	if (io_hole_start)
		printk("IO Hole assumed to be %lx -> %lx\n",
		       io_hole_start, io_hole_start + io_hole_size - 1);

	ppc64_boot_msg(0x100, "MM Init Done");
}

/*
 * This is called by /dev/mem to determine whether a given address
 * has to be mapped non-cacheable or not.
 */
int page_is_ram(unsigned long pfn)
{
	int i;
	unsigned long paddr = (pfn << PAGE_SHIFT);

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

#ifdef CONFIG_MSCHUNKS
		base = lmb.memory.region[i].physbase;
#else
		base = lmb.memory.region[i].base;
#endif
		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(page_is_ram);

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
#ifndef CONFIG_DISCONTIGMEM
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
	int boot_mapsize;

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
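	/*
	 * Worked example, with made-up numbers: 1GB of RAM in 4KB pages
	 * is 262144 page frames; at one bit per frame the bitmap needs
	 * 262144 / 8 = 32KB, i.e. 8 pages, plus the one extra page of
	 * alignment slack mentioned above.
	 */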
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = abs_to_phys(lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE));
	BUG_ON(!start);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	max_pfn = max_low_pfn;

	/* add all physical memory to the bootmem map */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long physbase, size;

		physbase = lmb.memory.region[i].physbase;
		size = lmb.memory.region[i].size;
		free_bootmem(physbase, size);
	}

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long physbase = lmb.reserved.region[i].physbase;
		unsigned long size = lmb.reserved.region[i].size;

		reserve_bootmem(physbase, size);
	}
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;

	free_area_init_node(0, &contig_page_data, zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
#endif /* CONFIG_DISCONTIGMEM */

static struct kcore_list kcore_vmem;

static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long physbase, size;
		struct kcore_list *kcore_mem;

		physbase = lmb.memory.region[i].physbase;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("mem_init: kmalloc failed\n");

		kclist_add(kcore_mem, __va(physbase), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END - VMALLOC_START);

	return 0;
}
module_init(setup_kcore);

void __init mem_init(void)
{
#ifdef CONFIG_DISCONTIGMEM
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = max_low_pfn;	/* RAM is assumed contiguous */
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_DISCONTIGMEM
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %x\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = num_physpages;
	totalram_pages += free_all_bootmem();
#endif

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat->node_mem_map + i;
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_etext - (unsigned long)&_stext;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       bsssize >> 10,
	       initsize >> 10);

	mem_init_done = 1;

#ifdef CONFIG_PPC_ISERIES
	iommu_vio_init();
#endif
	/* Initialize the vDSO */
	vdso_init();
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
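
/*
 * Throughout this file PG_arch_1 serves as an "i-cache clean" flag: it
 * is set once a page has had its instruction cache flushed (see
 * update_mmu_cache() below) and cleared whenever the kernel dirties the
 * page, so the real flush can be deferred until the page is next given
 * to a user process.
 */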

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
		      pte_t pte)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	int local = 0;
	cpumask_t tmp;
	unsigned long flags;

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			if (!PageReserved(page)
			    && !test_bit(PG_arch_1, &page->flags)) {
				__flush_dcache_icache(page_address(page));
				set_bit(PG_arch_1, &page->flags);
			}
		}
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte))
		return;

	pgdir = vma->vm_mm->pgd;
	if (pgdir == NULL)
		return;

	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

	vsid = get_vsid(vma->vm_mm->context.id, ea);

	local_irq_save(flags);
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

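	/*
	 * "local" means this mm has only ever been used on the current
	 * CPU (per the cpu_vm_mask test above), in which case the hash
	 * code can use a cheaper CPU-local TLB invalidation instead of a
	 * broadcast one; this reading is inferred from the test above
	 * rather than taken from an external reference.
	 */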
	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
		    0x300, local);
	local_irq_restore(flags);
}

void __iomem *reserve_phb_iospace(unsigned long size)
{
	void __iomem *virt_addr;

	if (phbs_io_bot >= IMALLOC_BASE)
		panic("reserve_phb_iospace(): phb io space overflow\n");

	virt_addr = (void __iomem *) phbs_io_bot;
	phbs_io_bot += size;

	return virt_addr;
}

kmem_cache_t *zero_cache;

static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
{
	memset(pte, 0, PAGE_SIZE);
}

void pgtable_cache_init(void)
{
	zero_cache = kmem_cache_create("zero",
				PAGE_SIZE,
				0,
				SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
				zero_ctor,
				NULL);
	if (!zero_cache)
		panic("pgtable_cache_init(): could not create zero_cache!\n");
}

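/*
 * Used on the /dev/mem mmap path to choose page protections: RAM keeps
 * the caller's protections (or a platform override via ppc_md), while
 * anything outside RAM - i.e. IO space - is made guarded and
 * non-cacheable, matching how ioremap() maps IO above.
 */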
pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);

	if (!page_is_ram(addr >> PAGE_SHIFT))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);