#ifdef __KERNEL__
#ifndef _PPC_PGTABLE_H
#define _PPC_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#include <linux/config.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;
#endif /* __ASSEMBLY__ */

/*
 * The PowerPC MMU uses a hash table containing PTEs, together with
 * a set of 16 segment registers (on 32-bit implementations), to define
 * the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings. We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code. Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The PowerPC MPC8xx uses a TLB with hardware-assisted, software tablewalk.
 * We also use the two-level tables, but we can put the real bits in them
 * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
 * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
 * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
 * based upon user/super access. The TLB does not have accessed or write
 * protect bits. We assume that if the TLB gets loaded with an entry it is
 * accessed, and we overload the changed bit for write protect. We use
 * two bits in the software pte that are supposed to be set to zero in
 * the TLB entry (24 and 25) for these indicators. Although the level 1
 * descriptor contains the guarded and writethrough/copyback bits, we can
 * set these at the page level since they get copied from the Mx_TWC
 * register when the TLB entry is loaded. We will use bit 27 for guard, since
 * that is where it exists in the MD_TWC, and bit 26 for writethrough.
 * These will get masked from the level 2 descriptor at TLB load time, and
 * copied to the MD_TWC before it gets loaded.
 * Large page sizes added. We currently support two sizes, 4K and 8M.
 * This also allows a TLB handler optimization because we can directly
 * load the PMD into MD_TWC. The 8M pages are only used for kernel
 * mapping of well-known areas. The PMD (PGD) entries contain control
 * flags in addition to the address, so care must be taken that the
 * software no longer assumes these are only pointers.
 */


/*
 * At present, all PowerPC 400-class processors share a similar TLB
 * architecture. The instruction and data sides share a unified,
 * 64-entry, fully-associative TLB which is maintained totally under
 * software control. In addition, the instruction side has a
 * hardware-managed, 4-entry, fully-associative TLB which serves as a
 * first level to the shared TLB. These two TLBs are known as the UTLB
 * and ITLB, respectively (see "mmu.h" for definitions).
 */

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two-level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second-level table. The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	PMD_SHIFT
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

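/*
 * Illustrative sketch (not part of the kernel API; the helper name is
 * hypothetical): how a 32-bit virtual address decomposes under this
 * two-level layout, using the shift and mask macros defined above.
 */
#ifndef __ASSEMBLY__
static inline void example_decompose_va(unsigned long va,
					unsigned long *pgd_idx,
					unsigned long *pte_idx,
					unsigned long *offset)
{
	*pgd_idx = va >> PGDIR_SHIFT;			/* top-level index */
	*pte_idx = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); /* PTE-page index */
	*offset  = va & ~PAGE_MASK;			/* byte within page */
}
#endif /* __ASSEMBLY__ */
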
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte "PTE_FMT".\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START). For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system. This really does become a problem for machines with good amounts
 * of RAM. -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef CONFIG_44x
#include <asm/ibm44x.h>
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC44x_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
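
/*
 * Worked example of the arithmetic above (illustrative numbers only):
 * with PAGE_OFFSET at 0xc0000000 and 128MB of lowmem, high_memory is
 * 0xc8000000, so VMALLOC_START = (0xc8000000 + 0x1000000) & ~0xffffff
 * = 0xc9000000, leaving the 16MB guard hole described above.
 */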

/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)

/* There are several potential gotchas here. The 40x hardware TLBLO
   field looks like this:

   0  1  2  3  4  ... 18  19 20 21 22 23 24 25 26 27 28 29 30 31
   RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G

   Where possible we make the Linux PTE bits match up with this:

   - bits 20 and 21 must be cleared, because we use 4k pages (40x can
     support down to 1k pages); this is done in the TLBMiss exception
     handler.
   - We use only zones 0 (for kernel pages) and 1 (for user pages)
     of the 16 available. Bits 24-26 of the TLB are cleared in the TLB
     miss handler. Bit 27 is PAGE_USER, thus selecting the correct
     zone.
   - PRESENT *must* be in the bottom two bits because swap cache
     entries use the top 30 bits. Because 40x doesn't support SMP
     anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
     is cleared in the TLB miss handler before the TLB entry is loaded.
   - All other bits of the PTE are loaded into TLBLO without
     modification, leaving us only bits 20, 21, 24, 25, 26, and 30 for
     software PTE bits. We actually use bits 21, 24, 25, and
     30 respectively for the software bits: ACCESSED, DIRTY, RW, and
     PRESENT.
*/

/* Definitions for 40x embedded chips. */
#define _PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define _PAGE_FILE	0x001	/* when !present: nonlinear file mapping */
#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
#define _PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define _PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define _PAGE_USER	0x010	/* matches one of the zone permission bits */
#define _PAGE_RW	0x040	/* software: Writes permitted */
#define _PAGE_DIRTY	0x080	/* software: dirty page */
#define _PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
#define _PAGE_HWEXEC	0x200	/* hardware: EX permission */
#define _PAGE_ACCESSED	0x400	/* software: R: page referenced */

#define _PMD_PRESENT	0x400	/* PMD points to page of PTEs */
#define _PMD_BAD	0x802
#define _PMD_SIZE	0x0e0	/* size field, != 0 for large-page PMD entry */
#define _PMD_SIZE_4M	0x0c0
#define _PMD_SIZE_16M	0x0e0
#define PMD_PAGE_SIZE(pmdval)	(1024 << (((pmdval) & _PMD_SIZE) >> 4))
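
/*
 * Worked example of PMD_PAGE_SIZE() above: for _PMD_SIZE_4M (0x0c0),
 * 1024 << (0x0c0 >> 4) == 1024 << 12 == 4MB; for _PMD_SIZE_16M
 * (0x0e0), 1024 << 14 == 16MB.
 */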

#elif defined(CONFIG_44x)
/*
 * Definitions for PPC440
 *
 * Because of the 3-word TLB entries to support 36-bit addressing,
 * the attributes are difficult to map in such a fashion that they
 * are easily loaded during exception processing. I decided to
 * organize the entry so the ERPN is the only portion in the
 * upper word of the PTE and the attribute bits below are packed
 * in as sensibly as they can be in the area below a 4KB page size
 * oriented RPN. This at least makes it easy to load the RPN and
 * ERPN fields in the TLB. -Matt
 *
 * Note that these bits preclude future use of a page size
 * smaller than 4KB.
 */
#define _PAGE_PRESENT	0x00000001	/* S: PTE valid */
#define _PAGE_RW	0x00000002	/* S: Write permission */
#define _PAGE_DIRTY	0x00000004	/* S: Page dirty */
#define _PAGE_ACCESSED	0x00000008	/* S: Page referenced */
#define _PAGE_HWWRITE	0x00000010	/* H: Dirty & RW */
#define _PAGE_HWEXEC	0x00000020	/* H: Execute permission */
#define _PAGE_USER	0x00000040	/* S: User page */
#define _PAGE_ENDIAN	0x00000080	/* H: E bit */
#define _PAGE_GUARDED	0x00000100	/* H: G bit */
#define _PAGE_COHERENT	0x00000200	/* H: M bit */
#define _PAGE_FILE	0x00000400	/* S: nonlinear file mapping */
#define _PAGE_NO_CACHE	0x00000400	/* H: I bit */
#define _PAGE_WRITETHRU	0x00000800	/* H: W bit */

/* TODO: Add large page lowmem mapping support */
#define _PMD_PRESENT	0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD	(~PAGE_MASK)

/* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK	0xffffffff00000000ULL
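
/*
 * Example of what the mask above buys us: pte_none() (defined later in
 * this file) tests (pte_val(pte) & ~_PTE_NONE_MASK) == 0, so a 44x PTE
 * whose low word has been cleared still counts as "none" even if stale
 * ERPN bits remain in the high word.
 */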

#elif defined(CONFIG_FSL_BOOKE)
/*
   MMU Assist Register 3:

   32 33 34 35 36  ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
   RPN......................  0  0 U0 U1 U2 U3 UX SX UW SW UR SR

   - PRESENT *must* be in the bottom three bits because swap cache
     entries use the top 29 bits.

   - FILE *must* be in the bottom three bits because swap cache
     entries use the top 29 bits.
*/

/* Definitions for FSL Book-E Cores */
#define _PAGE_PRESENT	0x00001	/* S: PTE contains a translation */
#define _PAGE_USER	0x00002	/* S: User page (maps to UR) */
#define _PAGE_FILE	0x00002	/* S: when !present: nonlinear file mapping */
#define _PAGE_ACCESSED	0x00004	/* S: Page referenced */
#define _PAGE_HWWRITE	0x00008	/* H: Dirty & RW, set in exception */
#define _PAGE_RW	0x00010	/* S: Write permission */
#define _PAGE_HWEXEC	0x00020	/* H: UX permission */

#define _PAGE_ENDIAN	0x00040	/* H: E bit */
#define _PAGE_GUARDED	0x00080	/* H: G bit */
#define _PAGE_COHERENT	0x00100	/* H: M bit */
#define _PAGE_NO_CACHE	0x00200	/* H: I bit */
#define _PAGE_WRITETHRU	0x00400	/* H: W bit */

#ifdef CONFIG_PTE_64BIT
#define _PAGE_DIRTY	0x08000	/* S: Page dirty */

/* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK	0xffffffffffff0000ULL
#else
#define _PAGE_DIRTY	0x00800	/* S: Page dirty */
#endif

#define _PMD_PRESENT	0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD	(~PAGE_MASK)

#elif defined(CONFIG_8xx)
/* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT	0x0001	/* Page is valid */
#define _PAGE_FILE	0x0002	/* when !present: nonlinear file mapping */
#define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
#define _PAGE_SHARED	0x0004	/* No ASID (context) compare */

/* These five software bits must be masked out when the entry is loaded
 * into the TLB.
 */
#define _PAGE_EXEC	0x0008	/* software: i-cache coherency required */
#define _PAGE_GUARDED	0x0010	/* software: guarded access */
#define _PAGE_DIRTY	0x0020	/* software: page changed */
#define _PAGE_RW	0x0040	/* software: user write access allowed */
#define _PAGE_ACCESSED	0x0080	/* software: page referenced */

/* Setting any bits in the nibble with the following two controls will
 * require a TLB exception handler change. It is assumed unused bits
 * are always zero.
 */
#define _PAGE_HWWRITE	0x0100	/* h/w write enable: never set in Linux PTE */
#define _PAGE_USER	0x0800	/* One of the PP bits, the other is USER&~RW */

#define _PMD_PRESENT	0x0001
#define _PMD_BAD	0x0ff0
#define _PMD_PAGE_MASK	0x000c
#define _PMD_PAGE_8M	0x000c

/*
 * The 8xx TLB miss handler allegedly sets _PAGE_ACCESSED in the PTE
 * for an address even if _PAGE_PRESENT is not set, as a performance
 * optimization. This is a bug if you ever want to use swap unless
 * _PAGE_ACCESSED is 2, which it isn't, or unless you have 8xx-specific
 * definitions for __swp_entry etc. below, which would be gross.
 * -- paulus
 */
#define _PTE_NONE_MASK _PAGE_ACCESSED

#else /* CONFIG_6xx */
/* Definitions for 60x, 740/750, etc. */
#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
#define _PAGE_HASHPTE	0x002	/* hash_page has made an HPTE for this pte */
#define _PAGE_FILE	0x004	/* when !present: nonlinear file mapping */
#define _PAGE_USER	0x004	/* usermode access allowed */
#define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
#define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
#define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
#define _PAGE_DIRTY	0x080	/* C: page changed */
#define _PAGE_ACCESSED	0x100	/* R: page referenced */
#define _PAGE_EXEC	0x200	/* software: i-cache coherency required */
#define _PAGE_RW	0x400	/* software: user write access allowed */

#define _PTE_NONE_MASK _PAGE_HASHPTE

#define _PMD_PRESENT	0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD	(~PAGE_MASK)
#endif

/*
 * Some bits are only used on some cpu families...
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE	0
#endif
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif
#ifndef _PMD_PRESENT_MASK
#define _PMD_PRESENT_MASK	_PMD_PRESENT
#endif
#ifndef _PMD_SIZE
#define _PMD_SIZE	0
#define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
#endif

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose. -- paulus.
 */

#ifdef CONFIG_44x
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
#else
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#endif
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define _PAGE_KERNEL	(_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)

#ifdef CONFIG_PPC_STD_MMU
/* On standard PPC MMU, no user access implies kernel read/write access,
 * so to write-protect kernel memory we must turn on user access */
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
#else
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED)
#endif

#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define _PAGE_RAM	(_PAGE_KERNEL | _PAGE_HWEXEC)

#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH)
/* We want the debuggers to be able to set breakpoints anywhere, so
 * don't write protect the kernel text */
#define _PAGE_RAM_TEXT	_PAGE_RAM
#else
#define _PAGE_RAM_TEXT	(_PAGE_KERNEL_RO | _PAGE_HWEXEC)
#endif
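
/*
 * Worked expansion of the composites above for the classic 6xx case
 * (where _PAGE_SHARED and _PAGE_HWWRITE are 0): _PAGE_KERNEL ==
 * _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_RW | _PAGE_DIRTY, i.e. a
 * resident, writable mapping that is born referenced and dirty.
 */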

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL		__pgprot(_PAGE_RAM)
#define PAGE_KERNEL_NOCACHE	__pgprot(_PAGE_IO)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis. So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__
/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
 * kernel without large page PMD support */
extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);

/*
 * Conversions between PTE values and page frame numbers.
 */

/* In some cases we want to additionally adjust where the pfn is in the
 * pte to allow room for more flags */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT + 8)
#else
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)
#endif

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
					pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
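
/*
 * Illustrative sketch (the helper name is hypothetical, not kernel
 * API): a pfn survives the pfn_pte()/pte_pfn() round trip because the
 * protection bits all live below PFN_SHIFT_OFFSET.
 */
static inline int example_pfn_roundtrip(unsigned long pfn)
{
	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);	/* build a kernel mapping */
	return pte_pfn(pte) == pfn;		/* holds for any valid pfn */
}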

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm,addr,ptep)	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)

#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }

static inline void pte_uncache(pte_t pte)	{ pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)		{ pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
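
/*
 * Illustrative use of pte_modify() (hypothetical helper, not kernel
 * API): switch a PTE to a read-only user protection while
 * _PAGE_CHG_MASK preserves the page number and the accessed/dirty bits.
 */
static inline pte_t example_make_readonly(pte_t pte)
{
	return pte_modify(pte, PAGE_READONLY);
}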

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value. In the 64-bit PTE case we lock around the
 * low PTE word since we expect ALL flag bits to be there.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
	return old;
}
#else
static inline unsigned long long pte_update(pte_t *p, unsigned long clr,
					    unsigned long set)
{
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
	return old;
}
#endif
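
/*
 * C-equivalent of the lwarx/stwcx. loops above (illustrative only; the
 * hardware reservation is what makes the real versions safe against
 * concurrent updates, e.g. from the hash-table code):
 *
 *	do {
 *		old = *p;
 *		new = (old & ~clr) | set;
 *	} while (the conditional store of new to *p fails
 *		 because *p changed in the meantime);
 *	return old;
 */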

/*
 * set_pte stores a linux PTE into the linux page table.
 * On machines which use an MMU hash table we avoid changing the
 * _PAGE_HASHPTE bit.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
#if _PAGE_HASHPTE != 0
	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
#else
	*ptep = pte;
#endif
}

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context, __addr, __ptep)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
	pte_update(ptep, 0, bits);
}

#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								  \
		__ptep_set_access_flags(__ptep, __entry, __dirty);	  \
		flush_tlb_page_nohash(__vma, __address);		  \
	} while(0)

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page. The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler). On everything else the pmd contains the physical address
 * of the pte page. -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_kernel(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
#else
#define pmd_page_kernel(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
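
/*
 * Illustrative sketch (hypothetical helper, not kernel API): putting
 * the accessors above together to walk from a kernel virtual address
 * to its PTE. Assumes the mapping exists and the caller holds any
 * locks the context requires.
 */
static inline pte_t *example_lookup_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* top level (pmd is folded in) */
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, addr);
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* no kmap needed for kernel */
}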

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

extern void paging_init(void);

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
 * _PAGE_HASHPTE bit (if used). -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
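
/*
 * Worked example of the encoding above: type 3, offset 0x1234 gives a
 * swap entry value of 3 | (0x1234 << 5) == 0x24683; stored in a PTE it
 * becomes 0x24683 << 3 == 0x123418, keeping the low three PTE bits
 * (_PAGE_PRESENT, _PAGE_HASHPTE, _PAGE_FILE on 6xx) clear.
 */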

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

/* CONFIG_APUS */
/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk (unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but defining them
   prevents compilation errors. */
#define KERNELMAP_FULL_CACHING		0
#define KERNELMAP_NOCACHE_SER		1
#define KERNELMAP_NOCACHE_NONSER	2
#define KERNELMAP_NO_COPYBACK		3

/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp );

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode (unsigned long address, unsigned long size,
				  unsigned int cmode);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
			   unsigned long paddr, unsigned long size, pgprot_t prot);
static inline int io_remap_page_range(struct vm_area_struct *vma,
				      unsigned long vaddr,
				      unsigned long paddr,
				      unsigned long size,
				      pgprot_t prot)
{
	phys_addr_t paddr64 = fixup_bigphys_addr(paddr, size);
	return remap_pfn_range(vma, vaddr, paddr64 >> PAGE_SHIFT, size, prot);
}

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long vaddr,
				     unsigned long pfn,
				     unsigned long size,
				     pgprot_t prot)
{
	phys_addr_t paddr64 = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, paddr64 >> PAGE_SHIFT, size, prot);
}
#else
#define io_remap_page_range(vma, vaddr, paddr, size, prot)	\
	remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
	remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep);

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* _PPC_PGTABLE_H */
#endif /* __KERNEL__ */