/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/config.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 PAGE_CACHABLE_DEFAULT)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 PAGE_CACHABLE_DEFAULT)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | PAGE_CACHABLE_DEFAULT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
				      __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * MIPS can't enforce page protection for execute and treats it the same
 * as read.  Also, write permissions imply read permissions.  This is the
 * closest we can get by reasonable means.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))

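/*
 * mremap() support: empty_zero_page is really a set of zero pages, one per
 * cache colour, selected by zero_page_mask above.  When a PTE mapping the
 * zero page is moved to a new virtual address, move_pte() re-points it at
 * the zero page whose colour matches the new address so the mapping stays
 * alias-free.
 */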
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)				\
({									\
	pte_t newpte = (pte);						\
	if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&		\
			pte_page(pte) == ZERO_PAGE(old_addr))		\
		newpte = mk_pte(ZERO_PAGE(new_addr), (prot));		\
	newpte;								\
})

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
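/*
 * A pmd entry stores the kernel virtual address of the page table it points
 * to: pmd_page_kernel() therefore returns pmd_val() directly and pmd_phys()
 * only needs to subtract PAGE_OFFSET.
 */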
#define pmd_phys(pmd)		(pmd_val(pmd) - PAGE_OFFSET)
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pmd_page_kernel(pmd)	pmd_val(pmd)

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)

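/*
 * With 64-bit physical addresses on a MIPS32 R1 kernel a PTE no longer fits
 * in a single word and is carried in two halves, pte_low and pte_high.  The
 * software status bits live in pte_low, while hardware bits such as
 * _PAGE_SILENT_READ/_PAGE_SILENT_WRITE are mirrored into pte_high, so all
 * of the accessors below have to keep the two halves consistent.
 */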
#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
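	/*
	 * Write pte_high first; the smp_wmb() keeps it ordered before the
	 * pte_low store, so another CPU that sees the present bit in pte_low
	 * can never observe a stale pte_high.
	 */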
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
	//printk("pte_high %x pte_low %x\n", ptep->pte_high, ptep->pte_low);

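	/*
	 * ptep_buddy() returns the other PTE of the even/odd pair that shares
	 * a single TLB entry.  The entry is only treated as global when both
	 * halves have the global bit set, so a global PTE must propagate the
	 * bit to an empty buddy.
	 */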
	if (pte.pte_low & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			buddy->pte_low  |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
		null.pte_low = null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
}
#endif

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifdef CONFIG_64BIT
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

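/*
 * ffz(~x) is the index of the first zero bit in ~x, i.e. the first set bit
 * in x, so for the power-of-two sizeofs below this evaluates to the log2 of
 * the respective table entry size.
 */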
#define PGD_T_LOG2	ffz(~sizeof(pgd_t))
#define PMD_T_LOG2	ffz(~sizeof(pmd_t))
#define PTE_T_LOG2	ffz(~sizeof(pte_t))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)	{ BUG(); return 0; }
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
static inline int pte_read(pte_t pte)	{ return pte.pte_low & _PAGE_READ; }
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte.pte_low & _PAGE_FILE; }

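/*
 * _PAGE_SILENT_READ and _PAGE_SILENT_WRITE are the bits the TLB actually
 * honours (valid and dirty).  The helpers below only set them once both the
 * software permission bit and the accessed/modified bit are present, which
 * is what lets accessed and dirty state be tracked by taking a fault on the
 * first read or write to a page.
 */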
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_READ | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte.pte_low |= _PAGE_READ;
	if (pte.pte_low & _PAGE_ACCESSED) {
		pte.pte_low  |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ) {
		pte.pte_low  |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_read(pte_t pte)	{ return pte_val(pte) & _PAGE_READ; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_READ;
	if (pte_val(pte) & _PAGE_ACCESSED)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

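/*
 * pte_modify() swaps in new protection bits.  In the split pte_low/pte_high
 * layout the low six bits of pte_high apparently carry hardware attribute
 * bits as well, hence the 0x3f masking in the first variant below.
 */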
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= ~0x3f;
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & 0x3f;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

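/*
 * After a PTE has been changed the TLB can hold a stale translation and the
 * (virtually indexed) caches may need attention as well; update_mmu_cache()
 * simply forwards to the two helpers above.
 */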
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t pte)
{
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
#define kern_addr_valid(addr)	(1)
#endif

#ifdef CONFIG_64BIT_PHYS_ADDR
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long pfn, unsigned long size, pgprot_t prot);

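/*
 * With 64-bit physical addresses io_remap_pfn_range() differs from plain
 * remap_pfn_range() only in that it first passes the target through
 * fixup_bigphys_addr(), giving platforms with more than 32 physical address
 * bits a chance to rewrite device addresses before they are mapped.
 */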
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#else
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

#include <asm-generic/pgtable.h>

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */