/*
 * Page table allocation functions
 *
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

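/*
 * The "vm.allocate_pgste" sysctl forces all processes to allocate full
 * 4K page tables with a PGSTE area for each PTE, as needed to run KVM
 * guests. The value is limited to the range 0..1 via extra1/extra2.
 */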
static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		/* plain proc_dointvec ignores extra1/extra2, so use _minmax */
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

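/*
 * CRST tables (region and segment tables) have 2048 entries of 8 bytes
 * each and therefore span four pages, hence the order-2 allocation.
 */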
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

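/*
 * IPI callback for crst_table_upgrade: reload the user ASCE on each
 * CPU that currently runs the upgraded mm, then flush the local TLB.
 */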
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}

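/*
 * Add page table levels on top of the current pgd until the address
 * space is large enough to map addresses up to @end. Other CPUs are
 * notified with an IPI once at least one new top-level table has been
 * installed.
 */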
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	BUG_ON(mm->context.asce_limit < (1UL << 42));
	if (end >= TASK_SIZE_MAX)
		return -ENOMEM;
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == (1UL << 42)) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}

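/*
 * Replace the 3-level pgd of a 31-bit compat task with the single
 * segment table it points to and shrink the address space to 2 GB.
 */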
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	BUG_ON(mm->context.asce_limit != (1UL << 42));

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = 1UL << 31;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

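/*
 * Atomically toggle @bits in @v with a cmpxchg loop and return the new
 * value. Used to flip the allocation and pending-free markers kept in
 * page->_mapcount for 2K page table fragments.
 */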
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

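/*
 * Allocate a full 4K page table with PGSTEs in the second half of the
 * page, independent of the mm context; used by the gmap code.
 */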
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

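/*
 * A 4K page holds two 2K page table fragments. Their state is tracked
 * in page->_mapcount:
 *   bit 0/1 - the lower/upper 2K fragment is allocated
 *   bit 4/5 - the lower/upper 2K fragment waits for a deferred free
 * Pages with exactly one fragment still available sit on
 * mm->context.pgtable_list. Full 4K tables with PGSTEs set the value
 * to 3 and are never put on the list.
 */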
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.pgtable_lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;	/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.pgtable_lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.pgtable_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.pgtable_lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.pgtable_lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.pgtable_lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

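/*
 * Defer the free of a page table until after the RCU grace period
 * managed by the mmu_gather. 2K fragments are marked pending via
 * bit 4/5 so they cannot be reused before __tlb_remove_table runs;
 * the fragment index (or 3 for a full PGSTE table) is encoded in the
 * low bits of the pointer handed to tlb_remove_table.
 */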
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.pgtable_lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.pgtable_lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

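/*
 * Grace period callback: decode the type tag from the low bits of the
 * table pointer and free the table accordingly.
 */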
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

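/*
 * Queue a table for the RCU-deferred free. If no batch page can be
 * allocated, fall back to an IPI-synchronized immediate free.
 */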
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}
369}