#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))

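/*
 * Usage sketch (editorial, illustrative only -- not part of this header's
 * API surface): a driver mmap() implementation typically derives its
 * uncached protection from an existing value, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *
 * On 386-class CPUs (family 3) the protection value is simply passed
 * through unchanged, exactly as the conditional above spells out.
 */
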
#ifndef __ASSEMBLY__

#include <asm/x86_init.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

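/*
 * Example (editorial sketch, not part of the kernel API): the pte_mk*()
 * and pte_clr*() helpers above are pure value transformations -- they
 * return a new pte_t rather than touching the page tables.  A hypothetical
 * helper that produces a clean, read-only, not-recently-used copy of a PTE
 * could be composed like this:
 */
static inline pte_t pte_example_mkro_clean(pte_t pte)
{
	/* drop write permission, then the dirty and accessed bits */
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	return pte_mkold(pte);
}
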
/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

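/*
 * Example (editorial sketch, hypothetical helper name): building a PTE
 * from a page frame number.  massage_pgprot() filters out bits the CPU
 * does not support (e.g. NX on hardware without it) before the value is
 * installed, so the resulting entry is always architecturally valid.
 */
static inline pte_t pte_example_for_pfn(unsigned long pfn)
{
	/* PAGE_KERNEL comes from <asm/pgtable_types.h>; the pfn is arbitrary */
	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);

	/*
	 * The round trip is lossless: pte_pfn(pte) == pfn, and
	 * pte_flags(pte) holds the (massaged) protection bits.
	 */
	return pte;
}
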
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

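/*
 * Example (editorial sketch, hypothetical helper name): pte_modify() is the
 * primitive behind mprotect()-style protection changes.  The pfn and the
 * bits in _PAGE_CHG_MASK (accessed/dirty state and the PCD/PWT caching
 * attributes) survive, while the remaining protection bits are replaced:
 */
static inline pte_t pte_example_make_readonly(pte_t pte)
{
	/* PAGE_KERNEL_RO is provided by <asm/pgtable_types.h> */
	return pte_modify(pte, PAGE_KERNEL_RO);
}
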
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}

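/*
 * Worked example (editorial): a request for a write-combining mapping
 * cannot be satisfied by an existing write-back mapping of the same range,
 * so
 *
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_WC, _PAGE_CACHE_WB)
 *
 * returns 0, whereas a WC request answered with UC- is not on the
 * disallowed list above and returns 1.
 */
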
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PRESENT;
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;

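/*
 * Example (editorial sketch, modelled loosely on lookup_address(); the
 * helper name is hypothetical): walking the kernel page tables for a
 * kernel virtual address using pgd_offset_k() and the *_offset() helpers
 * defined above.  A huge (PSE) mapping has no pte level, so this sketch
 * simply stops there; real callers typically return the pmd/pud entry
 * itself in that case.
 */
static inline pte_t *pgtable_example_walk_k(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_large(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_large(*pmd))
		return NULL;

	return pte_offset_kernel(pmd, address);
}
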
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address)

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}


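/*
 * Usage sketch (editorial): this is roughly the pattern used when a new
 * pgd is constructed, sharing the kernel half of the address space by
 * copying the kernel entries from swapper_pg_dir:
 *
 *	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */
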
#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */