#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot) \
        ((boot_cpu_data.x86 > 3) \
         ? (__pgprot(pgprot_val(prot) | \
                     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS))) \
         : (prot))

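/*
 * Not part of this header's definitions - an illustrative sketch of how
 * pgprot_noncached() is typically consumed by a driver's mmap() handler
 * before remapping device memory:
 *
 *      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *      return remap_pfn_range(vma, vma->vm_start, pfn,
 *                             vma->vm_end - vma->vm_start,
 *                             vma->vm_page_prot);
 */
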
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
        __visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)              native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte) \
        native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)              native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)              native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)                  native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)             native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)                  native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)       native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)                  native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)      do { } while (0)

#define pgd_val(x)      native_pgd_val(x)
#define __pgd(x)        native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)      native_pud_val(x)
#define __pud(x)        native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)      native_pmd_val(x)
#define __pmd(x)        native_make_pmd(x)
#endif

#define pte_val(x)      native_pte_val(x)
#define __pte(x)        native_make_pte(x)

#define arch_end_context_switch(prev)   do {} while (0)

#endif  /* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_DIRTY;
}

static inline u32 read_pkru(void)
{
        if (boot_cpu_has(X86_FEATURE_OSPKE))
                return __read_pkru();
        return 0;
}

static inline void write_pkru(u32 pkru)
{
        if (boot_cpu_has(X86_FEATURE_OSPKE))
                __write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
        return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
        return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
        return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
        return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
        return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
        return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
        return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
        return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v & ~clear);
}

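/*
 * The pte_mk*()/pte_clr*() helpers below are pure value transforms built
 * on pte_set_flags()/pte_clear_flags(), so they compose freely
 * (illustrative):
 *
 *      pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 *
 * No page-table memory is touched until the result is installed with
 * set_pte_at() or a similar setter.
 */
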
static inline pte_t pte_mkclean(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
        pgprotval_t protval = pgprot_val(pgprot);

        if (protval & _PAGE_PRESENT)
                protval &= __supported_pte_mask;

        return protval;
}

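/*
 * Worked example (illustrative): on a CPU without NX,
 * __supported_pte_mask has _PAGE_NX clear, so massage_pgprot() silently
 * strips NX from a present protection such as PAGE_KERNEL, while a
 * non-present protection like PAGE_NONE passes through unmodified.
 */
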
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte);

        /*
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
        val &= _PAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

        return __pte(val);
}

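/*
 * Illustrative use: pte_modify() is the primitive that mprotect()-style
 * protection changes are built on.  For a pte that maps a dirty,
 * accessed page,
 *
 *      pte = pte_modify(pte, PAGE_READONLY);
 *
 * swaps in the new protection bits while _PAGE_CHG_MASK preserves the
 * pfn and the dirty/accessed state.
 */
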
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmdval_t val = pmd_val(pmd);

        val &= _HPAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

        return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);
        return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
                                         enum page_cache_mode pcm,
                                         enum page_cache_mode new_pcm)
{
        /*
         * PAT type is always WB for untracked ranges, so no need to check.
         */
        if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
                return 1;

        /*
         * Certain new memtypes are not allowed with certain
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         * - request is write-through, return cannot be write-back
         * - request is write-through, return cannot be write-combine
         */
        if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
             new_pcm == _PAGE_CACHE_MODE_WB) ||
            (pcm == _PAGE_CACHE_MODE_WC &&
             new_pcm == _PAGE_CACHE_MODE_WB) ||
            (pcm == _PAGE_CACHE_MODE_WT &&
             new_pcm == _PAGE_CACHE_MODE_WB) ||
            (pcm == _PAGE_CACHE_MODE_WT &&
             new_pcm == _PAGE_CACHE_MODE_WC)) {
                return 0;
        }

        return 1;
}

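/*
 * Reading the rules above off the code (illustrative):
 *
 *      is_new_memtype_allowed(paddr, size,
 *                             _PAGE_CACHE_MODE_WC,
 *                             _PAGE_CACHE_MODE_WB);       returns 0
 *      is_new_memtype_allowed(paddr, size,
 *                             _PAGE_CACHE_MODE_UC_MINUS,
 *                             _PAGE_CACHE_MODE_UC_MINUS); returns 1
 *
 * i.e. a WC request must not be silently satisfied with WB, but a
 * request satisfied with exactly what was asked for always passes.
 */
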
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif  /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
        return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
        return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
        if (pte_flags(a) & _PAGE_PRESENT)
                return true;

        if ((pte_flags(a) & _PAGE_PROTNONE) &&
            mm_tlb_flush_pending(mm))
                return true;

        return false;
}

static inline int pte_hidden(pte_t pte)
{
        return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
        /*
         * Checking for _PAGE_PSE is needed too because
         * split_huge_page will temporarily clear the present bit (but
         * the _PAGE_PSE flag will remain set at all times while the
         * _PAGE_PRESENT bit is clear).
         */
        return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
        return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
                == _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
        return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
                == _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
        /*
         * Only check low word on 32-bit platforms, since it might be
         * out of sync with upper half.
         */
        unsigned long val = native_pmd_val(pmd);
        return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd) \
        pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

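/*
 * Worked example (x86-64 with 2 MiB pmd entries: PMD_SHIFT == 21,
 * PTRS_PER_PMD == 512): each pmd slot covers 2 MiB, so an address of
 * 4 MiB gives pmd_index(0x400000) == (0x400000 >> 21) & 511 == 2.
 */
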
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

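/*
 * Putting the lookup helpers together (a sketch, not an interface
 * defined here): a software walk of a kernel virtual address descends
 * one level at a time:
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * A real walker must check *_present() at each level and stop early at
 * a large page (pud_large()/pmd_large()) before descending further.
 */
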
static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud) \
        pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
        return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
        return 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)           pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        pgdval_t ignore_flags = _PAGE_USER;
        /*
         * We set NX on KAISER pgds that map userspace memory so
         * that userspace can not meaningfully use the kernel
         * page table by accident; it will fault on the first
         * instruction it tries to run.  See native_set_pgd().
         */
        if (IS_ENABLED(CONFIG_KAISER))
                ignore_flags |= _PAGE_NX;

        return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        /*
         * There is no need to do a workaround for the KNL stray
         * A/D bit erratum here.  PGDs only point to page tables
         * except on 32-bit non-PAE which is not supported on
         * KNL.
         */
        return !native_pgd_val(pgd);
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#endif  /* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

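/*
 * Worked example (4-level x86-64: PGDIR_SHIFT == 39, PTRS_PER_PGD ==
 * 512): each pgd slot covers 512 GiB, so for the classic direct-map
 * base 0xffff880000000000,
 *
 *      pgd_index(0xffff880000000000)
 *              == (0xffff880000000000 >> 39) & 511 == 272
 *
 * which lands the whole direct mapping in the kernel half of the pgd.
 */
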
#define KERNEL_PGD_BOUNDARY     pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS         (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
        /* Default trampoline pgd value */
        trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
        pmd_t res = *pmdp;

        native_pmd_clear(pmdp);
        return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
                                     pmd_t *pmdp, pmd_t pmd)
{
        native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)              do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
        pte_update(mm, addr, ptep);
}

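/*
 * Illustrative: copy-on-write setup at fork() time uses this to drop
 * write permission; clear_bit() works on the live pte so a racing
 * hardware dirty/accessed update is not lost.  Roughly (hypothetical
 * caller, not defined here):
 *
 *      if (pte_write(pte))
 *              ptep_set_wrprotect(src_mm, addr, src_pte);
 *
 * The next write through either mapping then faults and the page can
 * be copied.
 */
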
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pmd_t *pmdp)
{
        return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_KAISER
        /* Clone the shadow pgd part as well */
        memcpy(native_get_shadow_pgd(dst),
               native_get_shadow_pgd(src),
               count * sizeof(pgd_t));
#endif
}

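/*
 * Typical use (illustrative): seeding a freshly allocated pgd with the
 * kernel's entries only, leaving the user half empty:
 *
 *      clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *                      KERNEL_PGD_PTRS);
 */
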
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
        return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
        return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
        return ~(page_level_size(level) - 1);
}

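/*
 * Worked example (x86-64, 4 KiB base pages: PTE_SHIFT == 9): with
 * PG_LEVEL_4K == 1, PG_LEVEL_2M == 2 and PG_LEVEL_1G == 3, the shifts
 * come out as 12, 21 and 30, so page_level_size() yields 4 KiB, 2 MiB
 * and 1 GiB respectively, and page_level_mask(PG_LEVEL_2M) == ~0x1fffff.
 */
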
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
                unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmd)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
        int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
        return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
        int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
        /*
         * Access-disable disables writes too so we need to check
         * both bits here.
         */
        return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

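/*
 * Worked example: each pkey owns two adjacent PKRU bits.  With
 * pkru == 0x8 (only bit 3, pkey 1's WD bit, set):
 *
 *      __pkru_allows_read(0x8, 1)  == true   (AD bit 2 is clear)
 *      __pkru_allows_write(0x8, 1) == false  (WD bit 3 is set)
 *
 * while pkey 0, whose bits 0-1 are clear, allows both.
 */
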
static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        /* ifdef to avoid doing 59-bit shift on 32-bit values */
        return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
        return 0;
#endif
}

#include <asm-generic/pgtable.h>
#endif  /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */