/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

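/*
 * Basic page protection encodings.  On CPUs with the RIXI (read/execute
 * inhibit) feature there is no separate read-enable bit, so _PAGE_READ is
 * omitted and execute permission is withheld via _PAGE_NO_EXEC instead.
 */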
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means..
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

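/*
 * empty_zero_page is replicated once per cache colour; zero_page_mask picks
 * the copy whose colour matches the user virtual address so that reads of
 * the zero page never alias in the cache.
 */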
#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

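/*
 * With 64-bit physical addresses on a 32-bit CPU a PTE is carried in the
 * pte_high/pte_low pair.  pte_high is stored first and smp_wmb() orders the
 * two writes so the present bit in pte_low never becomes visible before the
 * matching pte_high.  A MIPS TLB entry maps an even/odd pair of pages, so
 * the buddy PTE must agree on the global bit.
 */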
static inline void set_pte(pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;

        if (pte.pte_low & _PAGE_GLOBAL) {
                pte_t *buddy = ptep_buddy(ptep);
                /*
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
                if (pte_none(*buddy)) {
                        buddy->pte_low	|= _PAGE_GLOBAL;
                        buddy->pte_high |= _PAGE_GLOBAL;
                }
        }
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t null = __pte(0);

        /* Preserve global status for the pair */
        if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
                null.pte_low = null.pte_high = _PAGE_GLOBAL;

        set_pte_at(mm, addr, ptep, null);
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        *ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
        if (pte_val(pteval) & _PAGE_GLOBAL) {
                pte_t *buddy = ptep_buddy(ptep);
                /*
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
                if (pte_none(*buddy))
                        pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
        }
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
        /* Preserve global status for the pair */
        if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
                set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
        else
#endif
                set_pte_at(mm, addr, ptep, __pte(0));
}
#endif

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

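/*
 * log2 of the size of a page table entry at each level; entry sizes are
 * powers of two, so __builtin_ffs(sizeof(x)) - 1 is log2(sizeof(x)).
 */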
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with its size, but gcc 3.3 and older are
 * not able to see that this expression is a constant, so the size is
 * dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte.pte_low & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
        pte.pte_high &= ~_PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
        pte.pte_high &= ~_PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
        pte.pte_high &= ~_PAGE_SILENT_READ;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte.pte_low |= _PAGE_WRITE;
        if (pte.pte_low & _PAGE_MODIFIED) {
                pte.pte_low |= _PAGE_SILENT_WRITE;
                pte.pte_high |= _PAGE_SILENT_WRITE;
        }
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte.pte_low |= _PAGE_MODIFIED;
        if (pte.pte_low & _PAGE_WRITE) {
                pte.pte_low |= _PAGE_SILENT_WRITE;
                pte.pte_high |= _PAGE_SILENT_WRITE;
        }
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte.pte_low |= _PAGE_ACCESSED;
        if (pte.pte_low & _PAGE_READ) {
                pte.pte_low |= _PAGE_SILENT_READ;
                pte.pte_high |= _PAGE_SILENT_READ;
        }
        return pte;
}
#else
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        if (pte_val(pte) & _PAGE_MODIFIED)
                pte_val(pte) |= _PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_MODIFIED;
        if (pte_val(pte) & _PAGE_WRITE)
                pte_val(pte) |= _PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
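        /*
         * With RIXI there is no _PAGE_READ bit: pages are readable unless
         * _PAGE_NO_READ is set, so that is what gates _PAGE_SILENT_READ.
         */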
        if (cpu_has_rixi) {
                if (!(pte_val(pte) & _PAGE_NO_READ))
                        pte_val(pte) |= _PAGE_SILENT_READ;
        } else {
                if (pte_val(pte) & _PAGE_READ)
                        pte_val(pte) |= _PAGE_SILENT_READ;
        }
        return pte;
}

#ifdef _PAGE_HUGE
static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
        pte_val(pte) |= _PAGE_HUGE;
        return pte;
}
#endif /* _PAGE_HUGE */
#endif
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

        return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
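/*
 * In the split pte_high/pte_low layout only the low six bits of pte_high
 * carry attribute bits, so pte_modify() refreshes just those from the new
 * protection and leaves the PFN bits above them untouched.
 */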
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
347{
Sergei Shtylyov79e0bc32006-05-03 22:56:43 +0400348 pte.pte_low &= _PAGE_CHG_MASK;
349 pte.pte_high &= ~0x3f;
350 pte.pte_low |= pgprot_val(newprot);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351 pte.pte_high |= pgprot_val(newprot) & 0x3f;
352 return pte;
353}
354#else
355static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
356{
357 return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
358}
359#endif
360
361
362extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
363 pte_t pte);
364extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
365 pte_t pte);
366
367static inline void update_mmu_cache(struct vm_area_struct *vma,
Russell King4b3073e2009-12-18 16:40:18 +0000368 unsigned long address, pte_t *ptep)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700369{
Russell King4b3073e2009-12-18 16:40:18 +0000370 pte_t pte = *ptep;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371 __update_tlb(vma, address, pte);
372 __update_cache(vma, address, pte);
373}
374
Ralf Baechle970d0322012-10-18 13:54:15 +0200375static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
376 unsigned long address, pmd_t *pmdp)
377{
378 pte_t pte = *(pte_t *)pmdp;
379
380 __update_tlb(vma, address, pte);
381}
382
Linus Torvalds1da177e2005-04-16 15:20:36 -0700383#define kern_addr_valid(addr) (1)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384
385#ifdef CONFIG_64BIT_PHYS_ADDR
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);
387
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388static inline int io_remap_pfn_range(struct vm_area_struct *vma,
389 unsigned long vaddr,
390 unsigned long pfn,
391 unsigned long size,
392 pgprot_t prot)
393{
394 phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
Thiemo Seuferac5d8c02005-04-11 12:24:16 +0000395 return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700396}
Al Viro40d158e2013-05-11 12:13:10 -0400397#define io_remap_pfn_range io_remap_pfn_range
Linus Torvalds1da177e2005-04-16 15:20:36 -0700398#endif
399
Ralf Baechle970d0322012-10-18 13:54:15 +0200400#ifdef CONFIG_TRANSPARENT_HUGEPAGE
401
402extern int has_transparent_hugepage(void);
403
404static inline int pmd_trans_huge(pmd_t pmd)
405{
406 return !!(pmd_val(pmd) & _PAGE_HUGE);
407}
408
409static inline pmd_t pmd_mkhuge(pmd_t pmd)
410{
411 pmd_val(pmd) |= _PAGE_HUGE;
412
413 return pmd;
414}
415
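/*
 * _PAGE_SPLITTING marks a huge pmd that is currently being split back into
 * ordinary PTEs, so concurrent page table walkers know to back off.
 */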
static inline int pmd_trans_splitting(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_SPLITTING;

        return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
        pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
        unsigned long address,
        pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
        return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_WRITE;
        if (pmd_val(pmd) & _PAGE_MODIFIED)
                pmd_val(pmd) |= _PAGE_SILENT_WRITE;

        return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
        return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_MODIFIED;
        if (pmd_val(pmd) & _PAGE_WRITE)
                pmd_val(pmd) |= _PAGE_SILENT_WRITE;

        return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

        return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_ACCESSED;

        if (cpu_has_rixi) {
                if (!(pmd_val(pmd) & _PAGE_NO_READ))
                        pmd_val(pmd) |= _PAGE_SILENT_READ;
        } else {
                if (pmd_val(pmd) & _PAGE_READ)
                        pmd_val(pmd) |= _PAGE_SILENT_READ;
        }

        return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
        if (pmd_trans_huge(pmd))
                return pfn_to_page(pmd_pfn(pmd));

        return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
        return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

        return pmd;
}

/*
 * The generic version of pmdp_get_and_clear uses a version of pmd_clear()
 * with a different prototype.
 */
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
        unsigned long address, pmd_t *pmdp)
{
        pmd_t old = *pmdp;

        pmd_clear(pmdp);

        return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * Uncached accelerated TLB map for video memory access.
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
        unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
        unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */