#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

#ifdef CONFIG_PAGE_TABLE_ISOLATION
extern int kaiser_enabled;
/*
 * Instead of one PGD, we acquire two PGDs.  Being order-1, it is
 * both 8k in size and 8k-aligned.  That lets us just flip bit 12
 * in a pointer to swap between the two 4k halves.
 */
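/*
 * A sketch of that bit-12 flip (illustrative only; the real accessor
 * is native_get_shadow_pgd(), assumed here to live with the 64-bit
 * page-table code):
 *
 *	shadow = (pgd_t *)((unsigned long)pgdp | (unsigned long)PAGE_SIZE);
 */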
#else
#define kaiser_enabled 0
#endif
#define PGD_ALLOCATION_ORDER kaiser_enabled
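/*
 * Illustrative use of the order above (a sketch, not the actual
 * allocation code): a pgd allocation would look like
 *
 *	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *					PGD_ALLOCATION_ORDER);
 *
 * yielding an 8k-aligned 8k pair when kaiser_enabled, else one 4k page.
 */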

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}


static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);
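/*
 * protnone_mask() returns a mask of all ones when the entry is
 * PROT_NONE (its PFN bits are stored inverted as part of the L1TF
 * mitigation) and zero otherwise, so the "pfn ^= protnone_mask(...)"
 * pattern in the helpers below recovers the true PFN on read.
 */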
171
Hugh Dickins91030ca2008-09-09 16:42:45 +0100172static inline unsigned long pte_pfn(pte_t pte)
173{
Michal Hocko3f0eb662018-06-27 17:46:50 +0200174 phys_addr_t pfn = pte_val(pte);
Andi Kleen33182fe2018-06-13 15:48:24 -0700175 pfn ^= protnone_mask(pfn);
176 return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
Hugh Dickins91030ca2008-09-09 16:42:45 +0100177}
178
Akinobu Mita087975b2009-06-27 15:35:15 +0900179static inline unsigned long pmd_pfn(pmd_t pmd)
180{
Michal Hocko3f0eb662018-06-27 17:46:50 +0200181 phys_addr_t pfn = pmd_val(pmd);
Andi Kleen33182fe2018-06-13 15:48:24 -0700182 pfn ^= protnone_mask(pfn);
183 return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
Akinobu Mita087975b2009-06-27 15:35:15 +0900184}
185
Mel Gorman0ee364e2013-02-11 14:52:36 +0000186static inline unsigned long pud_pfn(pud_t pud)
187{
Michal Hocko3f0eb662018-06-27 17:46:50 +0200188 phys_addr_t pfn = pud_val(pud);
Andi Kleen33182fe2018-06-13 15:48:24 -0700189 pfn ^= protnone_mask(pfn);
190 return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
Mel Gorman0ee364e2013-02-11 14:52:36 +0000191}
192
Hugh Dickins91030ca2008-09-09 16:42:45 +0100193#define pte_page(pte) pfn_to_page(pte_pfn(pte))
194
Joe Perches3cbaeaf2008-03-23 01:03:12 -0700195static inline int pmd_large(pmd_t pte)
196{
Andrea Arcangeli027ef6c2012-10-08 16:33:27 -0700197 return pmd_flags(pte) & _PAGE_PSE;
Joe Perches3cbaeaf2008-03-23 01:03:12 -0700198}
199
Johannes Weinerf2d6bfe2011-01-13 15:47:01 -0800200#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Johannes Weinerf2d6bfe2011-01-13 15:47:01 -0800201static inline int pmd_trans_huge(pmd_t pmd)
202{
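	/* A devmap huge page also has _PAGE_PSE set, but is not THP. */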
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | massage_pgprot(pgprot));
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)		\
	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		\
	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	pgdval_t ignore_flags = _PAGE_USER;
	/*
	 * We set NX on KAISER pgds that map userspace memory so
	 * that userspace can not meaningfully use the kernel
	 * page table by accident; it will fault on the first
	 * instruction it tries to run.  See native_set_pgd().
	 */
	if (kaiser_enabled)
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
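/*
 * Illustrative four-step lookup built from the helpers above (a sketch
 * only: real callers must also take the relevant locks and check
 * p*_none()/p*_bad() at each level before descending):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */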


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (kaiser_enabled) {
		/* Clone the shadow pgd part as well */
		memcpy(native_get_shadow_pgd(dst),
			native_get_shadow_pgd(src),
			count * sizeof(pgd_t));
	}
#endif
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
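/*
 * Worked example (illustrative, assuming PG_LEVEL_2M == 2 and
 * PTRS_PER_PTE == 512 so PTE_SHIFT == 9): page_level_shift() gives
 * (12 - 9) + 2 * 9 = 21, page_level_size() is 2 MiB and
 * page_level_mask() is ~(2 MiB - 1).
 */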

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2
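/*
 * Worked example: for pkey 5, pkru_pkey_bits below is 10, so the
 * access-disable bit is PKRU bit 10 and the write-disable bit is
 * PKRU bit 11.
 */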

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}


#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */