/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_top_pgt[];

#define swapper_pg_dir init_top_pgt

extern void paging_init(void);

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))

#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e)					\
	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

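/*
 * On SMP the clear must be a single atomic xchg: a plain read followed
 * by a store could lose an Accessed/Dirty bit that the hardware sets
 * concurrently from another CPU.  The old entry value is returned to
 * the caller.  The same pattern repeats for the PMD and PUD variants
 * below.
 */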
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/*
	 * native_local_ptep_get_and_clear(),
	 * but duplicated because of cyclic dependency.
	 */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/*
	 * native_local_pmdp_get_and_clear(),
	 * but duplicated because of cyclic dependency.
	 */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/*
	 * native_local_pudp_get_and_clear(),
	 * but duplicated because of cyclic dependency.
	 */
	pud_t ret = *xp;

	native_pud_clear(xp);
	return ret;
#endif
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k
 * and the user one is in the last 4k.  To switch between them, you
 * just need to flip bit 12 (PAGE_SHIFT) in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT
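
/*
 * Example (illustrative addresses): a kernel PGD page at
 * 0xffff888000004000 has its user counterpart at 0xffff888000005000;
 * OR-ing in BIT(PTI_PGTABLE_SWITCH_BIT) (== 0x1000) converts the
 * kernel address into the user one, and clearing the bit converts it
 * back.
 */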

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}

static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
}
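
/*
 * With 8-byte PGD entries in a 4 KiB page, the check above is
 * equivalent to "PGD index < 256": entries 0-255 map userspace,
 * entries 256-511 map the kernel.
 */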

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user copy of the page tables and returns the resulting
 * PGD that must be set in the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgd(pgdp, pgd);
}
#else
static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif

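/*
 * With 4-level paging the p4d level is folded into the pgd, so writing
 * a p4d entry is really a top-level write and must keep the PTI user
 * copy in sync; with 5-level paging (or without PTI) the entry can be
 * written directly.
 */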
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	pgd_t pgd;

	if (pgtable_l5_enabled || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
		*p4dp = p4d;
		return;
	}

	pgd = native_make_pgd(native_p4d_val(p4d));
	pgd = pti_set_user_pgd((pgd_t *)p4dp, pgd);
	*p4dp = native_make_p4d(native_pgd_val(pgd));
}

static inline void native_p4d_clear(p4d_t *p4d)
{
	native_set_p4d(p4d, native_make_p4d(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pti_set_user_pgd(pgdp, pgd);
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

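/*
 * Propagate changes made to the kernel half of the page tables in
 * [start, end] to every PGD in the system, so that all tasks see new
 * kernel mappings.
 */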
extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
static inline int pgd_large(pgd_t pgd) { return 0; }
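/*
 * Build a present kernel PGD entry (_KERNPG_TABLE flags) from the
 * physical address of the next-level table.
 */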
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))	/* NOP */

/*
 * Encode and de-code a swap entry
 *
 * |     ...      | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...      |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | OFFSET (14->63) | TYPE (9-13)  |0|0|X|X| X| X|X|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * SD (1) in swp entry is used to store soft dirty bit, which helps us
 * remember soft dirty over page migration.
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
 */
#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
#define SWP_TYPE_BITS 5
/* Place the offset above the type: */
#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (SWP_TYPE_FIRST_BIT)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_FIRST_BIT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (SWP_TYPE_FIRST_BIT)) \
					 | ((offset) << SWP_OFFSET_FIRST_BIT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
#define __swp_entry_to_pmd(x)		((pmd_t) { .pmd = (x).val })
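
/*
 * Example (illustrative values): __swp_entry(2, 0x1234) yields
 * (2 << 9) | (0x1234 << 14) == 0x48d0400 -- type in bits 9-13,
 * offset in bits 14-63, and all of the architectural flag bits
 * (0-8) clear.
 */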

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()   do { } while (0)
#define check_pgt_cache()      do { } while (0)

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

#define __HAVE_ARCH_PTE_SAME

#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

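/*
 * Sanity check for the lockless get_user_pages_fast() path: reject
 * ranges that wrap around or that reach beyond the canonical user
 * address width (__VIRTUAL_MASK_SHIFT).
 */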
#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
				      int write)
{
	unsigned long len, end;

	len = (unsigned long)nr_pages << PAGE_SHIFT;
	end = start + len;
	if (end < start)
		return false;
	if (end >> __VIRTUAL_MASK_SHIFT)
		return false;
	return true;
}

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */