#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

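/*
 * Page-table pages for kernel mappings always come from lowmem and are
 * not charged to a memory cgroup, hence the masked-off __GFP_ACCOUNT.
 */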
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}

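/*
 * User page-table pages may come from highmem when CONFIG_HIGHPTE is
 * enabled (see __userpte_alloc_gfp above); pgtable_page_ctor() sets up
 * the split page-table lock and the page-table page accounting.
 */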
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_page(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

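/*
 * The pgd page's otherwise unused struct page::index field is used to
 * record the mm that owns the pgd, so that code walking pgd_list (for
 * example the kernel-mapping sync code) can find each pgd's mm and
 * take its page_table_lock.
 */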
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/*
	 * If the pgd points to a shared pagetable level (either the
	 * ptes in non-PAE, or shared PMD in PAE), then just copy the
	 * references from swapper_pg_dir.
	 */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/*
	 * Note: almost everything apart from _PAGE_PRESENT is
	 * reserved at the pmd (PDPT) level.
	 */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}

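/*
 * Allocate the pmd pages that will be pre-populated into a new pgd.
 * They are charged to the mm via __GFP_ACCOUNT unless the pgd is for
 * init_mm, and each successful allocation is reflected in the mm's pmd
 * counter via mm_inc_nr_pmds().
 */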
static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;
	bool failed = false;
	gfp_t gfp = PGALLOC_GFP;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
			mm_dec_nr_pmds(mm);
		}
	}
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

/*
 * Xen paravirt assumes that the pgd occupies a whole page, and the
 * 64-bit kernel makes the same assumption.
 *
 * A PAE kernel that is not running as a Xen domain, however, only
 * needs 32 bytes for the pgd instead of a whole page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
	/*
	 * When a PAE kernel runs as a Xen domain, it does not use a
	 * shared kernel pmd, which requires a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return 0;

	/*
	 * When a PAE kernel is not running as a Xen domain, it uses a
	 * shared kernel pmd and the pgd then only needs 32 bytes rather
	 * than a whole page. Create a 32-byte slab cache for pgd
	 * allocations at boot time.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
	if (!pgd_cache)
		return -ENOMEM;

	return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * Without SHARED_KERNEL_PMD the PAE kernel is running as a Xen
	 * domain, so allocate a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_page(PGALLOC_GFP);

	/*
	 * Otherwise the PAE kernel is not running as a Xen domain, so a
	 * 32-byte slab object is enough for the pgd and saves memory.
	 */
	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else

static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */

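/*
 * Set up a new pgd for @mm. The kernel portion is cloned from the
 * reference page tables (or covered by the preallocated pmds on PAE),
 * and the work is done under pgd_lock so that walkers of pgd_list
 * never see a partially populated pgd.
 */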
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = _pgd_alloc();

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(mm, pmds);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update(vma->vm_mm, address, ptep);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

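/*
 * Atomically clear the Accessed bit in a pte and report whether it was
 * set. No TLB flush is done here; that is left to the callers (see the
 * rationale in ptep_clear_flush_young() below).
 */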
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

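/* Number of fixmap entries set up so far; checked by reserve_top_address(). */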
int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

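/*
 * The helpers below set up and tear down huge-page (PMD/PUD sized)
 * kernel mappings for CONFIG_HAVE_ARCH_HUGE_VMAP users such as ioremap().
 */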
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK))
		return 0;

	/* Bail out if we are on a populated non-leaf entry: */
	if (pud_present(*pud) && !pud_huge(*pud))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK)) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);
		return 0;
	}

	/* Bail out if we are on a populated non-leaf entry: */
	if (pmd_present(*pmd) && !pmd_huge(*pmd))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pud_free_pmd_page(pud_t *pud)
{
	pmd_t *pmd;
	int i;

	if (pud_none(*pud))
		return 1;

	pmd = (pmd_t *)pud_page_vaddr(*pud);

	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_free_pte_page(&pmd[i]))
			return 0;

	pud_clear(pud);
	free_page((unsigned long)pmd);

	return 1;
}

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd)
{
	pte_t *pte;

	if (pmd_none(*pmd))
		return 1;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);
	free_page((unsigned long)pte);

	return 1;
}
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */