/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>

#include "mmu_decl.h"

unsigned long ioremap_base;
unsigned long ioremap_bot;
int io_bat_index;

#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
#define HAVE_BATS 1
#endif

#if defined(CONFIG_FSL_BOOKE)
#define HAVE_TLBCAM 1
#endif

extern char etext[], _stext[];

#ifdef CONFIG_SMP
extern void hash_page_sync(void);
#endif

#ifdef HAVE_BATS
extern unsigned long v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(unsigned long pa);
void setbat(int index, unsigned long virt, unsigned long phys,
	    unsigned int size, int flags);

#else /* !HAVE_BATS */
#define v_mapped_by_bats(x) (0UL)
#define p_mapped_by_bats(x) (0UL)
#endif /* HAVE_BATS */

#ifdef HAVE_TLBCAM
extern unsigned int tlbcam_index;
extern unsigned long v_mapped_by_tlbcam(unsigned long va);
extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
#else /* !HAVE_TLBCAM */
#define v_mapped_by_tlbcam(x) (0UL)
#define p_mapped_by_tlbcam(x) (0UL)
#endif /* HAVE_TLBCAM */

#ifdef CONFIG_PTE_64BIT
/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
#define PGDIR_ORDER 1
#else
#define PGDIR_ORDER 0
#endif
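
/*
 * Worked numbers behind PGDIR_ORDER above (a sketch assuming the
 * usual ppc32 parameters of 4kB pages and a 32-bit address space,
 * spelled out here for clarity): with 8-byte PTEs a 4kB page holds
 * 4096 / 8 = 512 PTEs, so one pgd entry spans 512 * 4kB = 2MB and
 * covering 4GB takes 4GB / 2MB = 2048 pgd entries; at 4 bytes each
 * that is 8kB, i.e. two pages, hence order 1.
 */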

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret;

	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
	return ret;
}

void pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGDIR_ORDER);
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;
	extern int mem_init_done;
	extern void *early_get_page(void);

	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *ptepage;

#ifdef CONFIG_HIGHPTE
	int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
#else
	int flags = GFP_KERNEL | __GFP_REPEAT;
#endif

	ptepage = alloc_pages(flags, 0);
	if (ptepage)
		clear_highpage(ptepage);
	return ptepage;
}

void pte_free_kernel(pte_t *pte)
{
#ifdef CONFIG_SMP
	hash_page_sync();
#endif
	free_page((unsigned long)pte);
}

void pte_free(struct page *ptepage)
{
#ifdef CONFIG_SMP
	hash_page_sync();
#endif
	__free_page(ptepage);
}

#ifndef CONFIG_PHYS_64BIT
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
#else /* CONFIG_PHYS_64BIT */
void __iomem *
ioremap64(unsigned long long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}

void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
	phys_addr_t addr64 = fixup_bigphys_addr(addr, size);

	return ioremap64(addr64, size);
}
#endif /* CONFIG_PHYS_64BIT */

void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space.
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (mem_init_done && (p < virt_to_phys(high_memory))) {
		printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
		       __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * BAT mapping.  If the whole area is mapped then we're done,
	 * otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped, then the whole range is
	 * mapped to the same contiguous virtual region.
	 * -- Cort
	 */
	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
		goto out;

	if ((v = p_mapped_by_tlbcam(p)))
		goto out;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	/*
	 * Default to a cacheable kernel mapping; uncached mappings
	 * must also be guarded against speculative accesses.
	 */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	/*
	 * Should check if it is a candidate for a BAT mapping.
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v+i, p+i, flags);
	if (err) {
		if (mem_init_done)
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void iounmap(volatile void __iomem *addr)
{
	/*
	 * If mapped by BATs then there is nothing to do.
	 * Calling vfree() generates a benign warning.
	 */
	if (v_mapped_by_bats((unsigned long)addr))
		return;

	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vunmap((void *) (PAGE_MASK & (unsigned long)addr));
}
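
/*
 * A minimal usage sketch for the ioremap()/iounmap() pair above,
 * using this file's own #if 0 convention for illustrative dead code.
 * The device address and register offset are hypothetical.
 */
#if 0
static void example_poke_device(void)
{
	/* Map one uncached page of device registers. */
	void __iomem *regs = ioremap(0xfe000000, PAGE_SIZE);

	if (regs == NULL)
		return;
	writel(0x1, regs + 0x10);	/* hypothetical control register */
	iounmap(regs);
}
#endif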

void __iomem *ioport_map(unsigned long port, unsigned int len)
{
	return (void __iomem *) (port + _IO_BASE);
}

void ioport_unmap(void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);

int
map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(&init_mm, pd, va);
	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg,
			   pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
		if (mem_init_done)
			flush_HPTE(0, va, pmd_val(*pd));
	}
	spin_unlock(&init_mm.page_table_lock);
	return err;
}
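
/*
 * The "upper 10 bits" / "middle 10 bits" comments in map_page() refer
 * to the classic two-level ppc32 layout.  As a worked example (assuming
 * 4kB pages and 4-byte PTEs, i.e. !CONFIG_PTE_64BIT):
 *
 *	va[31:22]  indexes the 1024-entry pgd
 *	va[21:12]  indexes a 1024-entry page of PTEs
 *	va[11:0]   is the byte offset within the 4kB page
 *
 * so va 0xc0123456 uses pgd slot 0x300, PTE slot 0x123, offset 0x456.
 */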

/*
 * Map in all of physical memory starting at KERNELBASE.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	s = mmu_mapin_ram();
	v = KERNELBASE + s;
	p = PPC_MEMSTART + s;
	for (; s < total_lowmem; s += PAGE_SIZE) {
		if ((char *) v >= _stext && (char *) v < etext)
			f = _PAGE_RAM_TEXT;
		else
			f = _PAGE_RAM;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))

/* is x a power of 4? */
#define is_power_of_4(x)	((x) != 0 && (((x) & ((x) - 1)) == 0) && (ffs(x) & 1))
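
/*
 * Worked example for is_power_of_4() above: 16 passes because
 * 16 & 15 == 0 and ffs(16) == 5 is odd, while 8 fails because
 * ffs(8) == 4 is even.  Powers of 4 are exactly the powers of 2
 * whose single set bit lands at an odd ffs() position (1, 3, 5, ...).
 */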

/*
 * Set up a mapping for a block of I/O.
 * virt, phys, size must all be page-aligned.
 * This should only be called before ioremap is called.
 */
void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
			     unsigned int size, int flags)
{
	int i;

	if (virt > KERNELBASE && virt < ioremap_bot)
		ioremap_bot = ioremap_base = virt;

#ifdef HAVE_BATS
	/*
	 * Use a BAT for this if possible...
	 */
	if (io_bat_index < 2 && is_power_of_2(size)
	    && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
		setbat(io_bat_index, virt, phys, size, flags);
		++io_bat_index;
		return;
	}
#endif /* HAVE_BATS */

#ifdef HAVE_TLBCAM
	/*
	 * Use a CAM for this if possible...
	 */
	if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
	    && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
		settlbcam(tlbcam_index, virt, phys, size, flags, 0);
		++tlbcam_index;
		return;
	}
#endif /* HAVE_TLBCAM */

	/* No BATs available, put it in the page tables. */
	for (i = 0; i < size; i += PAGE_SIZE)
		map_page(virt + i, phys + i, flags);
}
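
/*
 * A minimal board-setup sketch for io_block_mapping(), again under
 * #if 0; the addresses are hypothetical and _PAGE_IO is assumed to
 * be the usual uncached/guarded kernel I/O pgprot.  Mapping 16MB of
 * I/O space 1:1 this way works before ioremap() is usable.
 */
#if 0
static void __init example_board_map_io(void)
{
	io_block_mapping(0xfe000000, 0xfe000000, 0x01000000, _PAGE_IO);
}
#endif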

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns 1 if a PTE was found, 0 otherwise.  The PTE pointer
 * handed back through *ptep is unmodified if no PTE is found.
 */
int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_map(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
				/* XXX caller needs to do pte_unmap, yuck */
			}
		}
	}
	return retval;
}

/* Find physical address for this virtual address.  Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;

	/* I don't know why this won't work on PMacs or CHRP.  It
	 * appears there is some bug, or there is some implicit
	 * mapping done not properly represented by BATs or in page
	 * tables... I am actively working on resolving this, but
	 * can't hold up other stuff.  -- Dan
	 */
	pte_t *pte;
	struct mm_struct *mm;

	/* Check the BATs */
	pa = v_mapped_by_bats(addr);
	if (pa)
		return pa;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte)) {
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
		pte_unmap(pte);
	}

	return pa;
}
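
/*
 * Usage sketch for iopa() (hypothetical caller, for illustration
 * only): a driver that must hand a device the physical address of
 * a kernel buffer can do
 *
 *	unsigned long dma_addr = iopa((unsigned long)buf);
 *
 * and gets back the physical address with the in-page offset of
 * buf preserved, or 0 if no mapping was found.
 */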

/* This will find the virtual address for a physical one....
 * Swiped from APUS, could be dangerous :-).
 * This is only a placeholder until I really find a way to make this
 * work. -- Dan
 */
unsigned long
mm_ptov (unsigned long paddr)
{
	unsigned long ret;
#if 0
	if (paddr < 16*1024*1024)
		ret = ZTWO_VADDR(paddr);
	else {
		int i;

		for (i = 0; i < kmap_chunk_count;){
			unsigned long phys = kmap_chunks[i++];
			unsigned long size = kmap_chunks[i++];
			unsigned long virt = kmap_chunks[i++];
			if (paddr >= phys
			    && paddr < (phys + size)){
				ret = virt + paddr - phys;
				goto exit;
			}
		}

		ret = (unsigned long) __va(paddr);
	}
exit:
#ifdef DEBUGPV
	printk ("PTOV(%lx)=%lx\n", paddr, ret);
#endif
#else
	ret = (unsigned long)paddr + KERNELBASE;
#endif
	return ret;
}
469