#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
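
/*
 * Added commentary (illustrative, not part of the original header):
 * pgprot_noncached() is typically applied before mapping device memory,
 * e.g. in a driver's mmap handler:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * The boot_cpu_data.x86 > 3 test skips the cache-mode bits on a 386,
 * which lacks usable cache-control support; there the protection value
 * is returned unchanged.
 */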

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

#ifdef CONFIG_KAISER
extern int kaiser_enabled;
#else
#define kaiser_enabled 0
#endif

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}


static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}
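
/*
 * Added commentary: the OSPKE checks above ensure RDPKRU/WRPKRU are only
 * executed when protection keys are enabled; otherwise read_pkru()
 * reports 0, i.e. no pkey ever restricts access.
 */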

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}
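
/*
 * Added commentary: the pte_mk*()/pte_clr*() helpers below are thin
 * wrappers around pte_set_flags()/pte_clear_flags().  They take a pte
 * by value and return a new one, so the result must be written back
 * explicitly, e.g. (illustrative):
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte));
 *	set_pte_at(mm, addr, ptep, pte);
 */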

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
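
/*
 * Added commentary: the canonical bit masked off here is _PAGE_NX on
 * CPUs (or kernel configurations) without NX support.  Only present
 * pgprots are masked because non-present encodings (e.g. swap entries)
 * reuse those bit positions for their own purposes.
 */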

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}
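
/*
 * Added commentary: _PAGE_CHG_MASK covers the pfn plus the bits that
 * must survive a protection change (dirty, accessed, soft-dirty, PAT).
 * mprotect() is the classic caller, roughly (illustrative):
 *
 *	ptent = pte_modify(ptent, newprot);
 *	set_pte_at(mm, addr, ptep, ptent);
 */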

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}
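
/*
 * Added commentary: a PROT_NONE pte is "present" to the kernel (see
 * pte_present() above) but not to the hardware.  pte_accessible() also
 * counts such a pte while a TLB flush is still pending, since stale
 * hardware translations may continue to reference it.
 */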

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)		\
	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
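
/*
 * Added commentary: with 4-level paging on x86-64 each table index is
 * 9 bits wide, so a virtual address decomposes as (illustrative):
 *
 *	pgd_index: bits 47..39	pud_index: bits 38..30
 *	pmd_index: bits 29..21	pte_index: bits 20..12
 *
 * pmd_index() extracts its 9-bit slice with a shift and a mask.
 */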

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		\
	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	pgdval_t ignore_flags = _PAGE_USER;
	/*
	 * We set NX on KAISER pgds that map userspace memory so
	 * that userspace can not meaningfully use the kernel
	 * page table by accident; it will fault on the first
	 * instruction it tries to run.  See native_set_pgd().
	 */
	if (kaiser_enabled)
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
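
/*
 * Added commentary: together the *_offset() helpers walk the tables by
 * hand.  A minimal sketch (validation and locking elided):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Real callers must check p??_none()/p??_bad() at each level (and
 * pmd_large() for huge mappings) before descending.
 */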


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
# define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep , pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp , pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
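
/*
 * Added commentary: ptep_set_wrprotect() uses an atomic clear_bit() on
 * the RW bit rather than a read-modify-write of the whole pte, so a
 * dirty or accessed bit set concurrently by hardware on another CPU
 * cannot be lost.
 */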

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_KAISER
	if (kaiser_enabled) {
		/* Clone the shadow pgd part as well */
		memcpy(native_get_shadow_pgd(dst),
		       native_get_shadow_pgd(src),
		       count * sizeof(pgd_t));
	}
#endif
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
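
/*
 * Added commentary: with 4KB pages PTE_SHIFT is 9, so (illustrative)
 * page_level_shift() yields 12 for PG_LEVEL_4K, 21 for PG_LEVEL_2M and
 * 30 for PG_LEVEL_1G, and page_level_size() yields 4KB, 2MB and 1GB
 * respectively.
 */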

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}
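
/*
 * Added commentary: PKRU packs two bits per protection key: key N uses
 * bit 2N for access-disable and bit 2N+1 for write-disable.  For
 * example (illustrative), with pkru == 0x8 and pkey == 1, AD (bit 2) is
 * clear and WD (bit 3) is set: reads are allowed, writes are not.
 */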

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */