/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

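/*
 * A huge PMD/PUD is a block mapping: the descriptor's table bit is
 * clear. An empty entry has a zero value, so also require the entry
 * to be non-zero before reporting it as huge.
 */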
int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}

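/*
 * Work out how many contiguous page-table entries make up the huge
 * mapping containing @ptep and report the size covered by each entry
 * via @pgsize. Returns 1 for a non-contiguous mapping, CONT_PMDS if
 * @ptep is a PMD entry and CONT_PTES otherwise.
 */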
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pte, size_t *pgsize)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	*pgsize = PAGE_SIZE;
	if (!pte_cont(pte))
		return 1;
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	if ((pte_t *)pmd == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}

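/*
 * Install a huge mapping. For a contiguous range, derive the pgprot
 * bits from the first entry and then write every entry in the range,
 * advancing the pfn and address by one entry's worth per iteration.
 */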
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	size_t pgsize;
	int i;
	int ncontig = find_num_contig(mm, addr, ptep, pte, &pgsize);
	unsigned long pfn;
	pgprot_t hugeprot;

	if (ncontig == 1) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

	pfn = pte_pfn(pte);
	hugeprot = __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
	for (i = 0; i < ncontig; i++) {
		pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
			 pte_val(pfn_pte(pfn, hugeprot)));
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
		ptep++;
		pfn += pgsize >> PAGE_SHIFT;
		addr += pgsize;
	}
}

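/*
 * Allocate the page-table entry backing a huge mapping of size @sz at
 * @addr: the PUD itself for PUD_SIZE, a PMD for PMD_SIZE (possibly
 * shared via huge_pmd_share()), or the first entry of a contiguous
 * PTE/PMD range for the CONT_PTE/CONT_PMD sizes.
 */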
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz);
	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	if (sz == PUD_SIZE) {
		pte = (pte_t *)pud;
	} else if (sz == (PAGE_SIZE * CONT_PTES)) {
		pmd_t *pmd = pmd_alloc(mm, pud, addr);

		WARN_ON(addr & (sz - 1));
		/*
		 * Note that if this code were ever ported to the
		 * 32-bit arm platform then it will cause trouble in
		 * the case where CONFIG_HIGHPTE is set, since there
		 * will be no pte_unmap() to correspond with this
		 * pte_alloc_map().
		 */
		pte = pte_alloc_map(mm, pmd, addr);
	} else if (sz == PMD_SIZE) {
		if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
		    pud_none(*pud))
			pte = huge_pmd_share(mm, addr, pud);
		else
			pte = (pte_t *)pmd_alloc(mm, pud, addr);
	} else if (sz == (PMD_SIZE * CONT_PMDS)) {
		pmd_t *pmd;

		pmd = pmd_alloc(mm, pud, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmd;
	}

	pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr,
	       sz, pte, pte_val(*pte));
	return pte;
}

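/*
 * Walk the page tables and return the entry mapping @addr, or NULL if
 * nothing is present. For a contiguous range the returned pointer is
 * rounded down to the first entry of the range.
 */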
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
	if (!pgd_present(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;

	if (pud_huge(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return NULL;

	if (pte_cont(pmd_pte(*pmd))) {
		pmd = pmd_offset(
			pud, (addr & CONT_PMD_MASK));
		return (pte_t *)pmd;
	}
	if (pmd_huge(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, addr);
	if (pte_present(*pte) && pte_cont(*pte)) {
		pte = pte_offset_kernel(
			pmd, (addr & CONT_PTE_MASK));
		return pte;
	}
	return NULL;
}

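/*
 * Decorate a huge PTE with the contiguous hint when the VMA's huge
 * page size is one of the CONT_PTE/CONT_PMD sizes; plain PMD and PUD
 * block sizes need no extra bits.
 */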
pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writable)
{
	size_t pagesize = huge_page_size(hstate_vma(vma));

	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}

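/*
 * Clear a huge mapping and return its old value. For a contiguous
 * range every entry is cleared; the first entry is returned, marked
 * dirty if the hardware dirtied any entry in the set.
 */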
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	if (pte_cont(*ptep)) {
		int ncontig, i;
		size_t pgsize;
		pte_t *cpte;
		bool is_dirty = false;

		cpte = huge_pte_offset(mm, addr);
		ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
		/* save the 1st pte to return */
		pte = ptep_get_and_clear(mm, addr, cpte);
		for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
			/*
			 * If HW_AFDBM is enabled, then the HW could
			 * turn on the dirty bit for any of the pages
			 * in the set, so check them all.
			 */
			++cpte;
			if (pte_dirty(ptep_get_and_clear(mm, addr, cpte)))
				is_dirty = true;
		}
		if (is_dirty)
			return pte_mkdirty(pte);
		else
			return pte;
	} else {
		return ptep_get_and_clear(mm, addr, ptep);
	}
}

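/*
 * Update the access flags of a huge mapping. For a contiguous range,
 * apply the update to every entry and report whether any of them
 * changed.
 */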
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	pte_t *cpte;

	if (pte_cont(pte)) {
		int ncontig, i, changed = 0;
		size_t pgsize = 0;
		unsigned long pfn = pte_pfn(pte);
		/* Select all bits except the pfn */
		pgprot_t hugeprot =
			__pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^
				 pte_val(pte));

		cpte = huge_pte_offset(vma->vm_mm, addr);
		pfn = pte_pfn(*cpte);
		ncontig = find_num_contig(vma->vm_mm, addr, cpte,
					  *cpte, &pgsize);
		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
			changed |= ptep_set_access_flags(vma, addr, cpte,
							pfn_pte(pfn,
								hugeprot),
							dirty);
			pfn += pgsize >> PAGE_SHIFT;
		}
		return changed;
	} else {
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	}
}

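/*
 * Write-protect a huge mapping, covering every entry of a contiguous
 * range.
 */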
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	if (pte_cont(*ptep)) {
		int ncontig, i;
		pte_t *cpte;
		size_t pgsize = 0;

		cpte = huge_pte_offset(mm, addr);
		ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
			ptep_set_wrprotect(mm, addr, cpte);
	} else {
		ptep_set_wrprotect(mm, addr, ptep);
	}
}

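/*
 * Clear a huge mapping and flush the TLB, covering every entry of a
 * contiguous range.
 */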
void huge_ptep_clear_flush(struct vm_area_struct *vma,
			   unsigned long addr, pte_t *ptep)
{
	if (pte_cont(*ptep)) {
		int ncontig, i;
		pte_t *cpte;
		size_t pgsize = 0;

		cpte = huge_pte_offset(vma->vm_mm, addr);
		ncontig = find_num_contig(vma->vm_mm, addr, cpte,
					  *cpte, &pgsize);
		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
			ptep_clear_flush(vma, addr, cpte);
	} else {
		ptep_clear_flush(vma, addr, ptep);
	}
}

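/*
 * Parse the "hugepagesz=" command-line option and register the
 * requested huge page size: PMD, PUD, contiguous-PTE or
 * contiguous-PMD.
 */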
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else if (ps == (PAGE_SIZE * CONT_PTES)) {
		hugetlb_add_hstate(CONT_PTE_SHIFT);
	} else if (ps == (PMD_SIZE * CONT_PMDS)) {
		hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

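/*
 * With 64K pages, make sure the contiguous-PTE huge page size
 * (CONT_PTES * PAGE_SIZE) is always registered, even when it was not
 * requested on the command line.
 */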
#ifdef CONFIG_ARM64_64K_PAGES
static __init int add_default_hugepagesz(void)
{
	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
		hugetlb_add_hstate(CONT_PMD_SHIFT);
	return 0;
}
arch_initcall(add_default_hugepagesz);
#endif