/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't enforce execute protection
 * separately, so execute permission is treated the same as read
 * permission. Write permissions also imply read permissions. This is
 * the closest we can get by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

extern unsigned long _page_cachable_default;
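
/*
 * Added sketch (hedged; the exact runtime setup is an assumption, not
 * part of this header): early cache setup code is expected to fill the
 * generic protection_map[] with real values once the cache attributes
 * and RI/XI support are known, roughly along the lines of:
 *
 *	protection_map[0]  = PAGE_NONE;
 *	protection_map[1]  = PAGE_READONLY;
 *	...
 *	protection_map[15] = PAGE_SHARED;
 */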

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
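
/*
 * Minimal usage sketch (illustrative, not part of the original header):
 * a read fault on an anonymous mapping can be satisfied with the shared
 * zero page instead of allocating a fresh page:
 *
 *	struct page *zp = ZERO_PAGE(address);
 *
 * zero_page_mask selects among several pre-zeroed pages so the returned
 * page has the right cache colour for 'address' (__HAVE_COLOR_ZERO_PAGE).
 */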

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

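/*
 * Added commentary (hedged): MIPS TLB entries map a pair of adjacent
 * virtual pages, and the entry is only treated as global if both halves
 * have the G bit set. The set_pte()/pte_clear() variants below therefore
 * keep _PAGE_GLOBAL consistent on ptep_buddy(ptep), the other PTE of
 * the pair.
 */
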
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_low & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			buddy->pte_low	|= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
		null.pte_low = null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when PTEs within a
 * page table are directly modified. Thus, the following hook is made
 * available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
}
#endif
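
/*
 * Minimal usage sketch (illustrative, not part of the original header):
 * generic memory-management code establishes and tears down mappings
 * through these hooks, e.g.
 *
 *	set_pte_at(mm, addr, ptep, mk_pte(page, vma->vm_page_prot));
 *	...
 *	pte_clear(mm, addr, ptep);
 */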

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size, but gcc 3.3 and older are
 * not able to see that this expression is a constant, so the size is
 * dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte.pte_low & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ) {
		pte.pte_low  |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (cpu_has_rixi) {
		if (!(pte_val(pte) & _PAGE_NO_READ))
			pte_val(pte) |= _PAGE_SILENT_READ;
	} else {
		if (pte_val(pte) & _PAGE_READ)
			pte_val(pte) |= _PAGE_SILENT_READ;
	}
	return pte;
}

#ifdef _PAGE_HUGE
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* _PAGE_HUGE */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
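
/*
 * Minimal usage sketch (illustrative, not part of the original header):
 * the accessors compose, so a fault handler that wants to mark a present
 * PTE both referenced and dirty can write
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte));
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * The _PAGE_SILENT_* bits shadow the hardware valid/dirty bits so the
 * update avoids a later TLB modified exception.
 */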

/*
 * Macro to mark a page protection value as "uncacheable". Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
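
/*
 * Minimal usage sketch (illustrative, not part of the original header):
 * a driver mapping device registers into userspace would typically strip
 * the cache attribute before remapping:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 */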

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= ~0x3f;
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & 0x3f;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif
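
/*
 * Minimal usage sketch (illustrative, not part of the original header):
 * mprotect()-style code changes the protection of an existing entry
 * while keeping its page frame:
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 */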

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}
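
/*
 * Added commentary (hedged): the generic MM core calls update_mmu_cache()
 * after a PTE has been installed in a fault path, giving the architecture
 * a chance to preload the TLB and, on MIPS, to flush any cache aliases
 * of the newly mapped page.
 */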
373
Ralf Baechle970d0322012-10-18 13:54:15 +0200374static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
375 unsigned long address, pmd_t *pmdp)
376{
377 pte_t pte = *(pte_t *)pmdp;
378
379 __update_tlb(vma, address, pte);
380}
381
Linus Torvalds1da177e2005-04-16 15:20:36 -0700382#define kern_addr_valid(addr) (1)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700383
384#ifdef CONFIG_64BIT_PHYS_ADDR
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);
386
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387static inline int io_remap_pfn_range(struct vm_area_struct *vma,
388 unsigned long vaddr,
389 unsigned long pfn,
390 unsigned long size,
391 pgprot_t prot)
392{
393 phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
Thiemo Seuferac5d8c02005-04-11 12:24:16 +0000394 return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395}
396#else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
398 remap_pfn_range(vma, vaddr, pfn, size, prot)
399#endif
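
/*
 * Minimal usage sketch (illustrative, not part of the original header):
 * a character device's mmap() handler combining the helpers above.
 * 'mydev_mmap' and 'dev_phys' (a device register base) are hypothetical.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start,
 *					  dev_phys >> PAGE_SHIFT,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 */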

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (cpu_has_rixi) {
		if (!(pmd_val(pmd) & _PAGE_NO_READ))
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	} else {
		if (pmd_val(pmd) & _PAGE_READ)
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	}

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_get_and_clear() uses a pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */