// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

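/*
 * The vm.allocate_pgste sysctl: when set, new page tables are allocated
 * as full 4K pages with page status table entries (PGSTEs), which KVM
 * needs to back guest address spaces.
 */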
static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

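/*
 * Allocate / free the 16K (order-2, 2048 entry) crst tables used for
 * the region and segment table levels of the address space.
 */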
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

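/*
 * IPI callback for crst_table_upgrade(): CPUs that are currently running
 * the upgraded mm reload their user ASCE and flush the local TLB.
 */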
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}

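/*
 * Add region table levels on top of the current page table until the
 * address space covers "end". The new top level table is installed under
 * mm->page_table_lock and all CPUs are notified afterwards.
 */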
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	if (end >= TASK_SIZE_MAX)
		return -ENOMEM;
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}

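/*
 * Drop the top level (region-third) table and switch to a two level
 * (segment table only) address space; done when a task switches to
 * 31-bit compat mode.
 */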
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

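/* Atomically toggle the given bits in *v and return the new value. */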
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

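/*
 * Allocate a full 4K page table with PGSTEs: 256 pte entries in the first
 * half of the page, 256 page status table entries in the second half.
 */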
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
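/*
 * A 4K page normally holds two 2K page tables (256 entries each).
 * page->_mapcount tracks the state of the two halves: bits 0-1 mark a
 * half as allocated, bits 4-5 mark it as pending a deferred (RCU) free.
 * Pages with a free half are kept on mm->context.pgtable_list. With
 * PGSTEs the whole 4K page is used and _mapcount is set to 3.
 */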
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

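/*
 * Free a page table immediately: return the 2K fragment to the per-mm
 * list and free the whole page once both halves are unused.
 */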
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

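/*
 * Defer freeing a page table until after the TLB has been flushed and an
 * RCU grace period has passed. The fragment to release is encoded in the
 * low bits of the table pointer handed to tlb_remove_table().
 */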
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

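/*
 * Final free after the grace period; the low two bits of the pointer
 * encode what was queued (see page_table_free_rcu).
 */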
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

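/* Queue the current batch for freeing after an RCU-sched grace period. */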
void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

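/*
 * Add a table to the mmu_gather batch. If no batch page can be allocated,
 * fall back to a synchronous flush and an IPI based free.
 */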
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}