Martin Schwidefsky3610cce2007-10-22 12:52:47 +02001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * Copyright IBM Corp. 2007, 2011
Martin Schwidefsky3610cce2007-10-22 12:52:47 +02003 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
4 */
5
6#include <linux/sched.h>
7#include <linux/kernel.h>
8#include <linux/errno.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +09009#include <linux/gfp.h>
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020010#include <linux/mm.h>
11#include <linux/swap.h>
12#include <linux/smp.h>
13#include <linux/highmem.h>
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020014#include <linux/pagemap.h>
15#include <linux/spinlock.h>
16#include <linux/module.h>
17#include <linux/quicklist.h>
Martin Schwidefsky80217142010-10-25 16:10:11 +020018#include <linux/rcupdate.h>
Martin Schwidefskye5992f22011-07-24 10:48:20 +020019#include <linux/slab.h>
Konstantin Weitzb31288f2013-04-17 17:36:29 +020020#include <linux/swapops.h>
Dominik Dingel3ac8e382014-10-23 12:09:17 +020021#include <linux/ksm.h>
22#include <linux/mman.h>
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020023
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020024#include <asm/pgtable.h>
25#include <asm/pgalloc.h>
26#include <asm/tlb.h>
27#include <asm/tlbflush.h>
Martin Schwidefsky6252d702008-02-09 18:24:37 +010028#include <asm/mmu_context.h>
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020029
30#ifndef CONFIG_64BIT
31#define ALLOC_ORDER 1
Martin Schwidefsky36409f62011-06-06 14:14:41 +020032#define FRAG_MASK 0x0f
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020033#else
34#define ALLOC_ORDER 2
Martin Schwidefsky36409f62011-06-06 14:14:41 +020035#define FRAG_MASK 0x03
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020036#endif
37
Heiko Carstens239a64252009-06-12 10:26:33 +020038
Martin Schwidefsky043d0702011-05-23 10:24:23 +020039unsigned long *crst_table_alloc(struct mm_struct *mm)
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020040{
41 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
42
43 if (!page)
44 return NULL;
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020045 return (unsigned long *) page_to_phys(page);
46}
47
Martin Schwidefsky146e4b32008-02-09 18:24:35 +010048void crst_table_free(struct mm_struct *mm, unsigned long *table)
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020049{
Martin Schwidefsky043d0702011-05-23 10:24:23 +020050 free_pages((unsigned long) table, ALLOC_ORDER);
Martin Schwidefsky80217142010-10-25 16:10:11 +020051}
52
Martin Schwidefsky6252d702008-02-09 18:24:37 +010053#ifdef CONFIG_64BIT
Martin Schwidefsky10607862013-10-28 14:48:30 +010054static void __crst_table_upgrade(void *arg)
55{
56 struct mm_struct *mm = arg;
57
Martin Schwidefskybeef5602014-04-14 15:11:26 +020058 if (current->active_mm == mm) {
59 clear_user_asce();
60 set_user_asce(mm);
61 }
Martin Schwidefsky10607862013-10-28 14:48:30 +010062 __tlb_flush_local();
63}
64
Martin Schwidefsky6252d702008-02-09 18:24:37 +010065int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
66{
67 unsigned long *table, *pgd;
68 unsigned long entry;
Martin Schwidefsky10607862013-10-28 14:48:30 +010069 int flush;
Martin Schwidefsky6252d702008-02-09 18:24:37 +010070
71 BUG_ON(limit > (1UL << 53));
Martin Schwidefsky10607862013-10-28 14:48:30 +010072 flush = 0;
Martin Schwidefsky6252d702008-02-09 18:24:37 +010073repeat:
Martin Schwidefsky043d0702011-05-23 10:24:23 +020074 table = crst_table_alloc(mm);
Martin Schwidefsky6252d702008-02-09 18:24:37 +010075 if (!table)
76 return -ENOMEM;
Martin Schwidefsky80217142010-10-25 16:10:11 +020077 spin_lock_bh(&mm->page_table_lock);
Martin Schwidefsky6252d702008-02-09 18:24:37 +010078 if (mm->context.asce_limit < limit) {
79 pgd = (unsigned long *) mm->pgd;
80 if (mm->context.asce_limit <= (1UL << 31)) {
81 entry = _REGION3_ENTRY_EMPTY;
82 mm->context.asce_limit = 1UL << 42;
83 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
84 _ASCE_USER_BITS |
85 _ASCE_TYPE_REGION3;
86 } else {
87 entry = _REGION2_ENTRY_EMPTY;
88 mm->context.asce_limit = 1UL << 53;
89 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
90 _ASCE_USER_BITS |
91 _ASCE_TYPE_REGION2;
92 }
93 crst_table_init(table, entry);
94 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
95 mm->pgd = (pgd_t *) table;
Martin Schwidefskyf481bfa2009-03-18 13:27:36 +010096 mm->task_size = mm->context.asce_limit;
Martin Schwidefsky6252d702008-02-09 18:24:37 +010097 table = NULL;
Martin Schwidefsky10607862013-10-28 14:48:30 +010098 flush = 1;
Martin Schwidefsky6252d702008-02-09 18:24:37 +010099 }
Martin Schwidefsky80217142010-10-25 16:10:11 +0200100 spin_unlock_bh(&mm->page_table_lock);
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100101 if (table)
102 crst_table_free(mm, table);
103 if (mm->context.asce_limit < limit)
104 goto repeat;
Martin Schwidefsky10607862013-10-28 14:48:30 +0100105 if (flush)
106 on_each_cpu(__crst_table_upgrade, mm, 0);
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100107 return 0;
108}
109
110void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
111{
112 pgd_t *pgd;
113
Martin Schwidefsky02a8f3a2014-04-03 13:54:59 +0200114 if (current->active_mm == mm) {
Martin Schwidefskybeef5602014-04-14 15:11:26 +0200115 clear_user_asce();
Martin Schwidefsky10607862013-10-28 14:48:30 +0100116 __tlb_flush_mm(mm);
Martin Schwidefsky02a8f3a2014-04-03 13:54:59 +0200117 }
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100118 while (mm->context.asce_limit > limit) {
119 pgd = mm->pgd;
120 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
121 case _REGION_ENTRY_TYPE_R2:
122 mm->context.asce_limit = 1UL << 42;
123 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
124 _ASCE_USER_BITS |
125 _ASCE_TYPE_REGION3;
126 break;
127 case _REGION_ENTRY_TYPE_R3:
128 mm->context.asce_limit = 1UL << 31;
129 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
130 _ASCE_USER_BITS |
131 _ASCE_TYPE_SEGMENT;
132 break;
133 default:
134 BUG();
135 }
136 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
Martin Schwidefskyf481bfa2009-03-18 13:27:36 +0100137 mm->task_size = mm->context.asce_limit;
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100138 crst_table_free(mm, (unsigned long *) pgd);
139 }
Martin Schwidefsky10607862013-10-28 14:48:30 +0100140 if (current->active_mm == mm)
Martin Schwidefskybeef5602014-04-14 15:11:26 +0200141 set_user_asce(mm);
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100142}
143#endif
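/*
 * Illustrative sketch (added, not part of the original file): how a 64-bit
 * caller such as the s390 mmap code might decide to grow the address space
 * before mapping above the current limit.  needs_crst_upgrade() is a
 * hypothetical helper; crst_table_upgrade() is the interface defined above
 * and grows the crst tables in steps of 2G, 4T, 8P.
 */
#if 0	/* illustrative only, not compiled */
static int needs_crst_upgrade(struct mm_struct *mm, unsigned long end)
{
	/* Everything below asce_limit is already addressable. */
	if (end <= mm->context.asce_limit)
		return 0;
	/* Pick the next limit that covers 'end'. */
	return crst_table_upgrade(mm, end > (1UL << 42) ? 1UL << 53
							: 1UL << 42);
}
#endif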
144
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200145#ifdef CONFIG_PGSTE
146
147/**
148 * gmap_alloc - allocate a guest address space
149 * @mm: pointer to the parent mm_struct
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200150 * @limit: maximum address of the gmap address space
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200151 *
152 * Returns a guest address space structure.
153 */
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200154struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200155{
156 struct gmap *gmap;
157 struct page *page;
158 unsigned long *table;
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200159 unsigned long etype, atype;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200160
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200161 if (limit < (1UL << 31)) {
162 limit = (1UL << 31) - 1;
163 atype = _ASCE_TYPE_SEGMENT;
164 etype = _SEGMENT_ENTRY_EMPTY;
165 } else if (limit < (1UL << 42)) {
166 limit = (1UL << 42) - 1;
167 atype = _ASCE_TYPE_REGION3;
168 etype = _REGION3_ENTRY_EMPTY;
169 } else if (limit < (1UL << 53)) {
170 limit = (1UL << 53) - 1;
171 atype = _ASCE_TYPE_REGION2;
172 etype = _REGION2_ENTRY_EMPTY;
173 } else {
174 limit = -1UL;
175 atype = _ASCE_TYPE_REGION1;
176 etype = _REGION1_ENTRY_EMPTY;
177 }
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200178 gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
179 if (!gmap)
180 goto out;
181 INIT_LIST_HEAD(&gmap->crst_list);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200182 INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
183 INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
184 spin_lock_init(&gmap->guest_table_lock);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200185 gmap->mm = mm;
186 page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
187 if (!page)
188 goto out_free;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200189 page->index = 0;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200190 list_add(&page->lru, &gmap->crst_list);
191 table = (unsigned long *) page_to_phys(page);
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200192 crst_table_init(table, etype);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200193 gmap->table = table;
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200194 gmap->asce = atype | _ASCE_TABLE_LENGTH |
195 _ASCE_USER_BITS | __pa(table);
196 gmap->asce_end = limit;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200197 down_write(&mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200198 list_add(&gmap->list, &mm->context.gmap_list);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200199 up_write(&mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200200 return gmap;
201
202out_free:
203 kfree(gmap);
204out:
205 return NULL;
206}
207EXPORT_SYMBOL_GPL(gmap_alloc);
208
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200209static void gmap_flush_tlb(struct gmap *gmap)
210{
211 if (MACHINE_HAS_IDTE)
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200212 __tlb_flush_asce(gmap->mm, gmap->asce);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200213 else
214 __tlb_flush_global();
215}
216
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200217static void gmap_radix_tree_free(struct radix_tree_root *root)
218{
219 struct radix_tree_iter iter;
220 unsigned long indices[16];
221 unsigned long index;
222 void **slot;
223 int i, nr;
224
225 /* A radix tree is freed by deleting all of its entries */
226 index = 0;
227 do {
228 nr = 0;
229 radix_tree_for_each_slot(slot, root, &iter, index) {
230 indices[nr] = iter.index;
231 if (++nr == 16)
232 break;
233 }
234 for (i = 0; i < nr; i++) {
235 index = indices[i];
236 radix_tree_delete(root, index);
237 }
238 } while (nr > 0);
239}
240
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200241/**
242 * gmap_free - free a guest address space
243 * @gmap: pointer to the guest address space structure
244 */
245void gmap_free(struct gmap *gmap)
246{
247 struct page *page, *next;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200248
249 /* Flush tlb. */
250 if (MACHINE_HAS_IDTE)
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200251 __tlb_flush_asce(gmap->mm, gmap->asce);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200252 else
253 __tlb_flush_global();
254
255 /* Free all segment & region tables. */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200256 list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200257 __free_pages(page, ALLOC_ORDER);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200258 gmap_radix_tree_free(&gmap->guest_to_host);
259 gmap_radix_tree_free(&gmap->host_to_guest);
260 down_write(&gmap->mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200261 list_del(&gmap->list);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200262 up_write(&gmap->mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200263 kfree(gmap);
264}
265EXPORT_SYMBOL_GPL(gmap_free);
266
267/**
268 * gmap_enable - switch primary space to the guest address space
269 * @gmap: pointer to the guest address space structure
270 */
271void gmap_enable(struct gmap *gmap)
272{
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200273 S390_lowcore.gmap = (unsigned long) gmap;
274}
275EXPORT_SYMBOL_GPL(gmap_enable);
276
277/**
278 * gmap_disable - switch back to the standard primary address space
279 * @gmap: pointer to the guest address space structure
280 */
281void gmap_disable(struct gmap *gmap)
282{
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200283 S390_lowcore.gmap = 0UL;
284}
285EXPORT_SYMBOL_GPL(gmap_disable);
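/*
 * Illustrative sketch (added, not part of the original file): the typical
 * life cycle of a guest address space as a KVM-like caller would drive it.
 * my_vm_example() and the 16T limit are assumptions made for the example;
 * gmap_alloc(), gmap_enable(), gmap_disable() and gmap_free() are the
 * interfaces defined above.
 */
#if 0	/* illustrative only, not compiled */
static void my_vm_example(struct mm_struct *mm)
{
	struct gmap *gmap;

	/* Ask for 16T of guest addresses; gmap_alloc() rounds the limit
	 * up to the covering table type, here a region-second table. */
	gmap = gmap_alloc(mm, (1UL << 44) - 1);
	if (!gmap)
		return;
	gmap_enable(gmap);	/* publish the gmap in the lowcore */
	/* ... enter SIE / run the guest ... */
	gmap_disable(gmap);
	gmap_free(gmap);
}
#endif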
286
Carsten Ottea9162f22011-10-30 15:17:00 +0100287/*
288 * gmap_alloc_table is assumed to be called with mmap_sem held
289 */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200290static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
291 unsigned long init, unsigned long gaddr)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200292{
293 struct page *page;
294 unsigned long *new;
295
Christian Borntraegerc86cce22011-12-27 11:25:47 +0100296	/* since we don't free the gmap table until gmap_free we can unlock */
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200297 page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
298 if (!page)
299 return -ENOMEM;
300 new = (unsigned long *) page_to_phys(page);
301 crst_table_init(new, init);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200302 spin_lock(&gmap->mm->page_table_lock);
Martin Schwidefskye5098612013-07-23 20:57:57 +0200303 if (*table & _REGION_ENTRY_INVALID) {
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200304 list_add(&page->lru, &gmap->crst_list);
305 *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
306 (*table & _REGION_ENTRY_TYPE_MASK);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200307 page->index = gaddr;
308 page = NULL;
309 }
310 spin_unlock(&gmap->mm->page_table_lock);
311 if (page)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200312 __free_pages(page, ALLOC_ORDER);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200313 return 0;
314}
315
316/**
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200317 * __gmap_segment_gaddr - find virtual address from segment pointer
318 * @entry: pointer to a segment table entry in the guest address space
319 *
320 * Returns the virtual address in the guest address space for the segment
321 */
322static unsigned long __gmap_segment_gaddr(unsigned long *entry)
323{
324 struct page *page;
Martin Schwidefskyfbc89c92015-01-07 11:00:02 +0100325 unsigned long offset, mask;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200326
327 offset = (unsigned long) entry / sizeof(unsigned long);
328 offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
Martin Schwidefskyfbc89c92015-01-07 11:00:02 +0100329 mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
330 page = virt_to_page((void *)((unsigned long) entry & mask));
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200331 return page->index + offset;
332}
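/*
 * Worked example (added for illustration): on s390 a segment covers 1M
 * (PMD_SIZE) and a segment table holds PTRS_PER_PMD == 2048 eight-byte
 * entries, i.e. 16K per table.  For an entry pointer at byte offset 0x1238
 * within its 16K-aligned table, offset = ((0x1238 / 8) & 2047) * PMD_SIZE
 * = 0x247 << 20, so the returned guest address is page->index (the guest
 * address stored for the table in gmap_alloc_table()) plus 0x247 segments.
 */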
333
334/**
335 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
336 * @gmap: pointer to the guest address space structure
337 * @vmaddr: address in the host process address space
338 *
339 * Returns 1 if a TLB flush is required
340 */
341static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
342{
343 unsigned long *entry;
344 int flush = 0;
345
346 spin_lock(&gmap->guest_table_lock);
347 entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
348 if (entry) {
349 flush = (*entry != _SEGMENT_ENTRY_INVALID);
350 *entry = _SEGMENT_ENTRY_INVALID;
351 }
352 spin_unlock(&gmap->guest_table_lock);
353 return flush;
354}
355
356/**
357 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
358 * @gmap: pointer to the guest address space structure
359 * @gaddr: address in the guest address space
360 *
361 * Returns 1 if a TLB flush is required
362 */
363static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
364{
365 unsigned long vmaddr;
366
367 vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
368 gaddr >> PMD_SHIFT);
369 return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
370}
371
372/**
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200373 * gmap_unmap_segment - unmap segment from the guest address space
374 * @gmap: pointer to the guest address space structure
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200375 * @to: address in the guest address space
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200376 * @len: length of the memory area to unmap
377 *
Hendrik Bruecknerb4a96012013-12-13 12:53:42 +0100378 * Returns 0 if the unmap succeeded, -EINVAL if not.
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200379 */
380int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
381{
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200382 unsigned long off;
383 int flush;
384
385 if ((to | len) & (PMD_SIZE - 1))
386 return -EINVAL;
387 if (len == 0 || to + len < to)
388 return -EINVAL;
389
390 flush = 0;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200391 down_write(&gmap->mm->mmap_sem);
392 for (off = 0; off < len; off += PMD_SIZE)
393 flush |= __gmap_unmap_by_gaddr(gmap, to + off);
394 up_write(&gmap->mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200395 if (flush)
396 gmap_flush_tlb(gmap);
397 return 0;
398}
399EXPORT_SYMBOL_GPL(gmap_unmap_segment);
400
401/**
402 * gmap_map_segment - map a segment to the guest address space
403 * @gmap: pointer to the guest address space structure
404 * @from: source address in the parent address space
405 * @to: target address in the guest address space
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200406 * @len: length of the memory area to map
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200407 *
Hendrik Bruecknerb4a96012013-12-13 12:53:42 +0100408 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200409 */
410int gmap_map_segment(struct gmap *gmap, unsigned long from,
411 unsigned long to, unsigned long len)
412{
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200413 unsigned long off;
414 int flush;
415
416 if ((from | to | len) & (PMD_SIZE - 1))
417 return -EINVAL;
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200418 if (len == 0 || from + len < from || to + len < to ||
419 from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200420 return -EINVAL;
421
422 flush = 0;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200423 down_write(&gmap->mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200424 for (off = 0; off < len; off += PMD_SIZE) {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200425 /* Remove old translation */
426 flush |= __gmap_unmap_by_gaddr(gmap, to + off);
427 /* Store new translation */
428 if (radix_tree_insert(&gmap->guest_to_host,
429 (to + off) >> PMD_SHIFT,
430 (void *) from + off))
431 break;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200432 }
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200433 up_write(&gmap->mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200434 if (flush)
435 gmap_flush_tlb(gmap);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200436 if (off >= len)
437 return 0;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200438 gmap_unmap_segment(gmap, to, len);
439 return -ENOMEM;
440}
441EXPORT_SYMBOL_GPL(gmap_map_segment);
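/*
 * Illustrative sketch (added, not part of the original file): backing guest
 * real memory with an existing user space mapping.  my_map_guest_ram() and
 * its parameters are assumptions for the example; gmap_map_segment() is the
 * interface above and requires from, to and len to be 1M (PMD_SIZE) aligned.
 */
#if 0	/* illustrative only, not compiled */
static int my_map_guest_ram(struct gmap *gmap, unsigned long userspace_addr,
			    unsigned long guest_size)
{
	/* Guest real addresses 0 .. guest_size-1 are served by the user
	 * mapping starting at userspace_addr. */
	return gmap_map_segment(gmap, userspace_addr, 0, guest_size);
}
#endif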
442
Heiko Carstensc5034942012-09-10 16:14:33 +0200443/**
444 * __gmap_translate - translate a guest address to a user space address
Heiko Carstensc5034942012-09-10 16:14:33 +0200445 * @gmap: pointer to guest mapping meta data structure
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200446 * @gaddr: guest address
Heiko Carstensc5034942012-09-10 16:14:33 +0200447 *
448 * Returns user space address which corresponds to the guest address or
449 * -EFAULT if no such mapping exists.
450 * This function does not establish potentially missing page table entries.
451 * The mmap_sem of the mm that belongs to the address space must be held
452 * when this function gets called.
453 */
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200454unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
Heiko Carstensc5034942012-09-10 16:14:33 +0200455{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200456 unsigned long vmaddr;
Heiko Carstensc5034942012-09-10 16:14:33 +0200457
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200458 vmaddr = (unsigned long)
459 radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
460 return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
Heiko Carstensc5034942012-09-10 16:14:33 +0200461}
462EXPORT_SYMBOL_GPL(__gmap_translate);
463
464/**
465 * gmap_translate - translate a guest address to a user space address
Heiko Carstensc5034942012-09-10 16:14:33 +0200466 * @gmap: pointer to guest mapping meta data structure
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200467 * @gaddr: guest address
Heiko Carstensc5034942012-09-10 16:14:33 +0200468 *
469 * Returns user space address which corresponds to the guest address or
470 * -EFAULT if no such mapping exists.
471 * This function does not establish potentially missing page table entries.
472 */
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200473unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
Heiko Carstensc5034942012-09-10 16:14:33 +0200474{
475 unsigned long rc;
476
477 down_read(&gmap->mm->mmap_sem);
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200478 rc = __gmap_translate(gmap, gaddr);
Heiko Carstensc5034942012-09-10 16:14:33 +0200479 up_read(&gmap->mm->mmap_sem);
480 return rc;
481}
482EXPORT_SYMBOL_GPL(gmap_translate);
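/*
 * Illustrative sketch (added, not part of the original file): resolving a
 * guest address to the backing user space address.  my_guest_to_user() is a
 * hypothetical caller; gmap_translate() is the interface above.  It takes
 * mmap_sem itself, so callers already holding mmap_sem must use
 * __gmap_translate() instead.
 */
#if 0	/* illustrative only, not compiled */
static long my_guest_to_user(struct gmap *gmap, unsigned long gaddr,
			     unsigned long *vmaddr)
{
	unsigned long addr = gmap_translate(gmap, gaddr);

	if (IS_ERR_VALUE(addr))
		return addr;	/* -EFAULT: no segment mapped at gaddr */
	*vmaddr = addr;
	return 0;
}
#endif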
483
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200484/**
485 * gmap_unlink - disconnect a page table from the gmap shadow tables
486 * @mm: pointer to the process mm_struct
487 * @table: pointer to the host page table
488 * @vmaddr: vm address associated with the host page table
489 */
490static void gmap_unlink(struct mm_struct *mm, unsigned long *table,
491 unsigned long vmaddr)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200492{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200493 struct gmap *gmap;
494 int flush;
495
496 list_for_each_entry(gmap, &mm->context.gmap_list, list) {
497 flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
498 if (flush)
499 gmap_flush_tlb(gmap);
500 }
501}
502
503/**
504 * __gmap_link - set up shadow page tables to connect a host to a guest address
505 * @gmap: pointer to guest mapping meta data structure
506 * @gaddr: guest address
507 * @vmaddr: vm address
508 *
509 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
510 * if the vm address is already mapped to a different guest segment.
511 * The mmap_sem of the mm that belongs to the address space must be held
512 * when this function gets called.
513 */
514int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
515{
Heiko Carstensc5034942012-09-10 16:14:33 +0200516 struct mm_struct *mm;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200517 unsigned long *table;
518 spinlock_t *ptl;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200519 pgd_t *pgd;
520 pud_t *pud;
521 pmd_t *pmd;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200522 int rc;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200523
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200524 /* Create higher level tables in the gmap page table */
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200525 table = gmap->table;
526 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
527 table += (gaddr >> 53) & 0x7ff;
528 if ((*table & _REGION_ENTRY_INVALID) &&
529 gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
530 gaddr & 0xffe0000000000000))
531 return -ENOMEM;
532 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
533 }
534 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
535 table += (gaddr >> 42) & 0x7ff;
536 if ((*table & _REGION_ENTRY_INVALID) &&
537 gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
538 gaddr & 0xfffffc0000000000))
539 return -ENOMEM;
540 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
541 }
542 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
543 table += (gaddr >> 31) & 0x7ff;
544 if ((*table & _REGION_ENTRY_INVALID) &&
545 gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
546 gaddr & 0xffffffff80000000))
547 return -ENOMEM;
548 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
549 }
550 table += (gaddr >> 20) & 0x7ff;
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200551 /* Walk the parent mm page table */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200552 mm = gmap->mm;
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200553 pgd = pgd_offset(mm, vmaddr);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200554 VM_BUG_ON(pgd_none(*pgd));
555 pud = pud_offset(pgd, vmaddr);
556 VM_BUG_ON(pud_none(*pud));
557 pmd = pmd_offset(pud, vmaddr);
558 VM_BUG_ON(pmd_none(*pmd));
Alex Thorlton1e1836e2014-04-07 15:37:09 -0700559 /* large pmds cannot yet be handled */
560 if (pmd_large(*pmd))
561 return -EFAULT;
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200562 /* Link gmap segment table entry location to page table. */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200563 rc = radix_tree_preload(GFP_KERNEL);
564 if (rc)
565 return rc;
566 ptl = pmd_lock(mm, pmd);
567 spin_lock(&gmap->guest_table_lock);
568 if (*table == _SEGMENT_ENTRY_INVALID) {
569 rc = radix_tree_insert(&gmap->host_to_guest,
570 vmaddr >> PMD_SHIFT, table);
571 if (!rc)
572 *table = pmd_val(*pmd);
573 } else
574 rc = 0;
575 spin_unlock(&gmap->guest_table_lock);
576 spin_unlock(ptl);
577 radix_tree_preload_end();
578 return rc;
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200579}
580
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200581/**
582 * gmap_fault - resolve a fault on a guest address
583 * @gmap: pointer to guest mapping meta data structure
584 * @gaddr: guest address
585 * @fault_flags: flags to pass down to handle_mm_fault()
586 *
587 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
588 * if the vm address is already mapped to a different guest segment.
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200589 */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200590int gmap_fault(struct gmap *gmap, unsigned long gaddr,
591 unsigned int fault_flags)
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200592{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200593 unsigned long vmaddr;
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200594 int rc;
595
Carsten Otte499069e2011-10-30 15:17:02 +0100596 down_read(&gmap->mm->mmap_sem);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200597 vmaddr = __gmap_translate(gmap, gaddr);
598 if (IS_ERR_VALUE(vmaddr)) {
599 rc = vmaddr;
600 goto out_up;
601 }
602 if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) {
603 rc = -EFAULT;
604 goto out_up;
605 }
606 rc = __gmap_link(gmap, gaddr, vmaddr);
607out_up:
Carsten Otte499069e2011-10-30 15:17:02 +0100608 up_read(&gmap->mm->mmap_sem);
Carsten Otte499069e2011-10-30 15:17:02 +0100609 return rc;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200610}
611EXPORT_SYMBOL_GPL(gmap_fault);
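/*
 * Illustrative sketch (added, not part of the original file): how a host
 * program-interruption handler might resolve a guest page fault, roughly
 * what the KVM fault path does.  my_resolve_guest_fault() and the write
 * flag are assumptions; gmap_fault() is the interface defined above.
 */
#if 0	/* illustrative only, not compiled */
static int my_resolve_guest_fault(struct gmap *gmap, unsigned long gaddr,
				  int is_write)
{
	/* Faults the page in the parent mm and links its page table
	 * into the gmap shadow tables. */
	return gmap_fault(gmap, gaddr, is_write ? FAULT_FLAG_WRITE : 0);
}
#endif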
612
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200613static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
614{
615 if (!non_swap_entry(entry))
616 dec_mm_counter(mm, MM_SWAPENTS);
617 else if (is_migration_entry(entry)) {
618 struct page *page = migration_entry_to_page(entry);
619
620 if (PageAnon(page))
621 dec_mm_counter(mm, MM_ANONPAGES);
622 else
623 dec_mm_counter(mm, MM_FILEPAGES);
624 }
625 free_swap_and_cache(entry);
626}
627
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200628/*
629 * this function is assumed to be called with mmap_sem held
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200630 */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200631void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200632{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200633 unsigned long vmaddr, ptev, pgstev;
634 pte_t *ptep, pte;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200635 spinlock_t *ptl;
636 pgste_t pgste;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200637
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200638 /* Find the vm address for the guest address */
639 vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
640 gaddr >> PMD_SHIFT);
641 if (!vmaddr)
642 return;
643 vmaddr |= gaddr & ~PMD_MASK;
644 /* Get pointer to the page table entry */
645 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200646 if (unlikely(!ptep))
647 return;
648 pte = *ptep;
649 if (!pte_swap(pte))
650 goto out_pte;
651 /* Zap unused and logically-zero pages */
652 pgste = pgste_get_lock(ptep);
653 pgstev = pgste_val(pgste);
654 ptev = pte_val(pte);
655 if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
656 ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200657 gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm);
658 pte_clear(gmap->mm, vmaddr, ptep);
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200659 }
660 pgste_set_unlock(ptep, pgste);
661out_pte:
Dominik Dingel66e9bbd2014-10-06 16:34:44 +0200662 pte_unmap_unlock(ptep, ptl);
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200663}
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200664EXPORT_SYMBOL_GPL(__gmap_zap);
665
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200666void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
Christian Borntraeger388186b2011-10-30 15:17:03 +0100667{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200668 unsigned long gaddr, vmaddr, size;
Christian Borntraeger388186b2011-10-30 15:17:03 +0100669 struct vm_area_struct *vma;
Christian Borntraeger388186b2011-10-30 15:17:03 +0100670
671 down_read(&gmap->mm->mmap_sem);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200672 for (gaddr = from; gaddr < to;
673 gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
674 /* Find the vm address for the guest address */
675 vmaddr = (unsigned long)
676 radix_tree_lookup(&gmap->guest_to_host,
677 gaddr >> PMD_SHIFT);
678 if (!vmaddr)
Christian Borntraeger388186b2011-10-30 15:17:03 +0100679 continue;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200680 vmaddr |= gaddr & ~PMD_MASK;
681 /* Find vma in the parent mm */
682 vma = find_vma(gmap->mm, vmaddr);
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200683 size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200684 zap_page_range(vma, vmaddr, size, NULL);
Christian Borntraeger388186b2011-10-30 15:17:03 +0100685 }
686 up_read(&gmap->mm->mmap_sem);
687}
688EXPORT_SYMBOL_GPL(gmap_discard);
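/*
 * Illustrative sketch (added, not part of the original file): releasing a
 * range of guest memory, e.g. from a DIAG 0x10 ("release pages") handler.
 * my_release_guest_pages() is a hypothetical caller; gmap_discard() above
 * takes guest real addresses and zaps the backing host pages.
 */
#if 0	/* illustrative only, not compiled */
static void my_release_guest_pages(struct gmap *gmap, unsigned long start,
				   unsigned long end)
{
	gmap_discard(gmap, start, end);
}
#endif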
689
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200690static LIST_HEAD(gmap_notifier_list);
691static DEFINE_SPINLOCK(gmap_notifier_lock);
692
693/**
694 * gmap_register_ipte_notifier - register a pte invalidation callback
695 * @nb: pointer to the gmap notifier block
696 */
697void gmap_register_ipte_notifier(struct gmap_notifier *nb)
698{
699 spin_lock(&gmap_notifier_lock);
700 list_add(&nb->list, &gmap_notifier_list);
701 spin_unlock(&gmap_notifier_lock);
702}
703EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
704
705/**
706 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
707 * @nb: pointer to the gmap notifier block
708 */
709void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
710{
711 spin_lock(&gmap_notifier_lock);
712 list_del_init(&nb->list);
713 spin_unlock(&gmap_notifier_lock);
714}
715EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
716
717/**
718 * gmap_ipte_notify - mark a range of ptes for invalidation notification
719 * @gmap: pointer to guest mapping meta data structure
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200720 * @gaddr: virtual address in the guest address space
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200721 * @len: size of area
722 *
723 * Returns 0 if for each page in the given range a gmap mapping exists and
724 * the invalidation notification could be set. If the gmap mapping is missing
725 * for one or more pages -EFAULT is returned. If no memory could be allocated
726 * -ENOMEM is returned. This function establishes missing page table entries.
727 */
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200728int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200729{
730 unsigned long addr;
731 spinlock_t *ptl;
732 pte_t *ptep, entry;
733 pgste_t pgste;
734 int rc = 0;
735
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200736 if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200737 return -EINVAL;
738 down_read(&gmap->mm->mmap_sem);
739 while (len) {
740 /* Convert gmap address and connect the page tables */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200741 addr = __gmap_translate(gmap, gaddr);
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200742 if (IS_ERR_VALUE(addr)) {
743 rc = addr;
744 break;
745 }
746 /* Get the page mapped */
Christian Borntraegerbb4b42c2013-05-08 15:25:38 +0200747 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200748 rc = -EFAULT;
749 break;
750 }
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200751 rc = __gmap_link(gmap, gaddr, addr);
752 if (rc)
753 break;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200754 /* Walk the process page table, lock and get pte pointer */
755 ptep = get_locked_pte(gmap->mm, addr, &ptl);
Dominik Dingel6972cae2014-10-15 15:29:01 +0200756 VM_BUG_ON(!ptep);
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200757 /* Set notification bit in the pgste of the pte */
758 entry = *ptep;
Martin Schwidefskye5098612013-07-23 20:57:57 +0200759 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200760 pgste = pgste_get_lock(ptep);
Martin Schwidefsky0d0dafc2013-05-17 14:41:33 +0200761 pgste_val(pgste) |= PGSTE_IN_BIT;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200762 pgste_set_unlock(ptep, pgste);
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200763 gaddr += PAGE_SIZE;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200764 len -= PAGE_SIZE;
765 }
Martin Schwidefskya697e052014-10-30 10:55:37 +0100766 pte_unmap_unlock(ptep, ptl);
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200767 }
768 up_read(&gmap->mm->mmap_sem);
769 return rc;
770}
771EXPORT_SYMBOL_GPL(gmap_ipte_notify);
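/*
 * Illustrative sketch (added, not part of the original file): receiving
 * invalidation notifications for a guest page.  my_invalidate_cb(),
 * my_notifier and my_watch_guest_page() are assumptions for the example;
 * struct gmap_notifier, gmap_register_ipte_notifier() and gmap_ipte_notify()
 * are the interfaces used above.
 */
#if 0	/* illustrative only, not compiled */
static void my_invalidate_cb(struct gmap *gmap, unsigned long gaddr)
{
	/* Called from gmap_do_ipte_notify() when a marked pte changes. */
}

static struct gmap_notifier my_notifier = {
	.notifier_call = my_invalidate_cb,
};

static int my_watch_guest_page(struct gmap *gmap, unsigned long gaddr)
{
	gmap_register_ipte_notifier(&my_notifier);
	/* Mark a single 4K guest page for invalidation notification. */
	return gmap_ipte_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE);
}
#endif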
772
773/**
774 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
775 * @mm: pointer to the process mm_struct
Martin Schwidefsky9da4e382014-04-30 14:46:26 +0200776 * @vmaddr: virtual address in the process address space
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200777 * @pte: pointer to the page table entry
778 *
779 * This function is assumed to be called with the page table lock held
780 * for the pte to notify.
781 */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200782void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200783{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200784 unsigned long offset, gaddr;
785 unsigned long *table;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200786 struct gmap_notifier *nb;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200787 struct gmap *gmap;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200788
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200789 offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
790 offset = offset * (4096 / sizeof(pte_t));
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200791 spin_lock(&gmap_notifier_lock);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200792 list_for_each_entry(gmap, &mm->context.gmap_list, list) {
793 table = radix_tree_lookup(&gmap->host_to_guest,
794 vmaddr >> PMD_SHIFT);
795 if (!table)
796 continue;
797 gaddr = __gmap_segment_gaddr(table) + offset;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200798 list_for_each_entry(nb, &gmap_notifier_list, list)
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200799 nb->notifier_call(gmap, gaddr);
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200800 }
801 spin_unlock(&gmap_notifier_lock);
802}
Martin Schwidefsky0a61b222013-10-18 12:03:41 +0200803EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200804
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +0200805static inline int page_table_with_pgste(struct page *page)
806{
807 return atomic_read(&page->_mapcount) == 0;
808}
809
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200810static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200811{
812 struct page *page;
813 unsigned long *table;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200814
815 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
816 if (!page)
817 return NULL;
Kirill A. Shutemove89cfa52013-11-14 14:31:39 -0800818 if (!pgtable_page_ctor(page)) {
Kirill A. Shutemove89cfa52013-11-14 14:31:39 -0800819 __free_page(page);
820 return NULL;
821 }
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +0200822 atomic_set(&page->_mapcount, 0);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200823 table = (unsigned long *) page_to_phys(page);
Martin Schwidefskye5098612013-07-23 20:57:57 +0200824 clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
Martin Schwidefsky0a61b222013-10-18 12:03:41 +0200825 clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200826 return table;
827}
828
829static inline void page_table_free_pgste(unsigned long *table)
830{
831 struct page *page;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200832
833 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
Martin Schwidefsky2320c572012-02-17 10:29:21 +0100834 pgtable_page_dtor(page);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200835 atomic_set(&page->_mapcount, -1);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200836 __free_page(page);
837}
838
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200839int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
840 unsigned long key, bool nq)
841{
842 spinlock_t *ptl;
843 pgste_t old, new;
844 pte_t *ptep;
845
846 down_read(&mm->mmap_sem);
Christian Borntraegerab3f2852014-08-19 16:19:35 +0200847retry:
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200848 ptep = get_locked_pte(current->mm, addr, &ptl);
849 if (unlikely(!ptep)) {
850 up_read(&mm->mmap_sem);
851 return -EFAULT;
852 }
Christian Borntraegerab3f2852014-08-19 16:19:35 +0200853 if (!(pte_val(*ptep) & _PAGE_INVALID) &&
854 (pte_val(*ptep) & _PAGE_PROTECT)) {
Dominik Dingel66e9bbd2014-10-06 16:34:44 +0200855 pte_unmap_unlock(ptep, ptl);
Christian Borntraegerdc77d342014-08-27 12:20:02 +0200856 if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
857 up_read(&mm->mmap_sem);
858 return -EFAULT;
Christian Borntraegerab3f2852014-08-19 16:19:35 +0200859 }
Christian Borntraegerdc77d342014-08-27 12:20:02 +0200860 goto retry;
861 }
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200862
863 new = old = pgste_get_lock(ptep);
864 pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
865 PGSTE_ACC_BITS | PGSTE_FP_BIT);
866 pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
867 pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
868 if (!(pte_val(*ptep) & _PAGE_INVALID)) {
Martin Schwidefsky0944fe32013-07-23 22:11:42 +0200869 unsigned long address, bits, skey;
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200870
871 address = pte_val(*ptep) & PAGE_MASK;
Martin Schwidefsky0944fe32013-07-23 22:11:42 +0200872 skey = (unsigned long) page_get_storage_key(address);
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200873 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
Martin Schwidefsky0944fe32013-07-23 22:11:42 +0200874 skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200875 /* Set storage key ACC and FP */
Martin Schwidefsky0944fe32013-07-23 22:11:42 +0200876 page_set_storage_key(address, skey, !nq);
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200877 /* Merge host changed & referenced into pgste */
878 pgste_val(new) |= bits << 52;
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200879 }
880 /* changing the guest storage key is considered a change of the page */
881 if ((pgste_val(new) ^ pgste_val(old)) &
882 (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
Martin Schwidefsky0a61b222013-10-18 12:03:41 +0200883 pgste_val(new) |= PGSTE_UC_BIT;
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200884
885 pgste_set_unlock(ptep, new);
Dominik Dingel66e9bbd2014-10-06 16:34:44 +0200886 pte_unmap_unlock(ptep, ptl);
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200887 up_read(&mm->mmap_sem);
888 return 0;
889}
890EXPORT_SYMBOL(set_guest_storage_key);
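/*
 * Illustrative sketch (added, not part of the original file): setting a
 * guest storage key, as a KVM SSKE intercept handler would after translating
 * the guest absolute address to a user space address.  my_handle_sske() and
 * its parameters are assumptions; set_guest_storage_key() is defined above
 * (nq selects the nonquiescing form of the key update).
 */
#if 0	/* illustrative only, not compiled */
static int my_handle_sske(struct mm_struct *mm, unsigned long vmaddr,
			  unsigned char key, int nq)
{
	return set_guest_storage_key(mm, vmaddr, key, nq);
}
#endif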
891
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200892#else /* CONFIG_PGSTE */
893
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +0200894static inline int page_table_with_pgste(struct page *page)
895{
896 return 0;
897}
898
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200899static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200900{
Jan Glauber944291d2011-08-03 16:44:18 +0200901 return NULL;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200902}
903
904static inline void page_table_free_pgste(unsigned long *table)
905{
906}
907
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200908static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table,
909 unsigned long vmaddr)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200910{
911}
912
913#endif /* CONFIG_PGSTE */
914
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200915static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
916{
917 unsigned int old, new;
918
919 do {
920 old = atomic_read(v);
921 new = old ^ bits;
922 } while (atomic_cmpxchg(v, old, new) != old);
923 return new;
924}
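/*
 * Added note: atomic_xor_bits() lets page_table_alloc()/page_table_free()
 * below toggle fragment bits in page->_mapcount without taking a lock.
 * Worked example for the 64-bit case (FRAG_MASK 0x03, two 2K page table
 * fragments per 4K page): a freshly allocated page starts with mask 0x01
 * (lower fragment handed out); allocating the upper fragment xors in 0x02,
 * giving 0x03 == FRAG_MASK, so the page is removed from pgtable_list;
 * freeing the lower fragment xors 0x01 back out, giving 0x02, and the page
 * is put back on the list for reuse.
 */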
925
Martin Schwidefsky3610cce2007-10-22 12:52:47 +0200926/*
927 * page table entry allocation/free routines.
928 */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200929unsigned long *page_table_alloc(struct mm_struct *mm)
Martin Schwidefsky3610cce2007-10-22 12:52:47 +0200930{
Heiko Carstens41459d32012-09-14 11:09:52 +0200931 unsigned long *uninitialized_var(table);
932 struct page *uninitialized_var(page);
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200933 unsigned int mask, bit;
Martin Schwidefsky3610cce2007-10-22 12:52:47 +0200934
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200935 if (mm_has_pgste(mm))
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200936 return page_table_alloc_pgste(mm);
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200937 /* Allocate fragments of a 4K page as 1K/2K page table */
Martin Schwidefsky80217142010-10-25 16:10:11 +0200938 spin_lock_bh(&mm->context.list_lock);
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200939 mask = FRAG_MASK;
Martin Schwidefsky146e4b32008-02-09 18:24:35 +0100940 if (!list_empty(&mm->context.pgtable_list)) {
941 page = list_first_entry(&mm->context.pgtable_list,
942 struct page, lru);
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200943 table = (unsigned long *) page_to_phys(page);
944 mask = atomic_read(&page->_mapcount);
945 mask = mask | (mask >> 4);
Martin Schwidefsky3610cce2007-10-22 12:52:47 +0200946 }
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200947 if ((mask & FRAG_MASK) == FRAG_MASK) {
Martin Schwidefsky80217142010-10-25 16:10:11 +0200948 spin_unlock_bh(&mm->context.list_lock);
Martin Schwidefsky146e4b32008-02-09 18:24:35 +0100949 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
950 if (!page)
951 return NULL;
Kirill A. Shutemove89cfa52013-11-14 14:31:39 -0800952 if (!pgtable_page_ctor(page)) {
953 __free_page(page);
954 return NULL;
955 }
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200956 atomic_set(&page->_mapcount, 1);
Martin Schwidefsky146e4b32008-02-09 18:24:35 +0100957 table = (unsigned long *) page_to_phys(page);
Martin Schwidefskye5098612013-07-23 20:57:57 +0200958 clear_table(table, _PAGE_INVALID, PAGE_SIZE);
Martin Schwidefsky80217142010-10-25 16:10:11 +0200959 spin_lock_bh(&mm->context.list_lock);
Martin Schwidefsky146e4b32008-02-09 18:24:35 +0100960 list_add(&page->lru, &mm->context.pgtable_list);
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200961 } else {
962 for (bit = 1; mask & bit; bit <<= 1)
963 table += PTRS_PER_PTE;
964 mask = atomic_xor_bits(&page->_mapcount, bit);
965 if ((mask & FRAG_MASK) == FRAG_MASK)
966 list_del(&page->lru);
Martin Schwidefsky146e4b32008-02-09 18:24:35 +0100967 }
Martin Schwidefsky80217142010-10-25 16:10:11 +0200968 spin_unlock_bh(&mm->context.list_lock);
Martin Schwidefsky3610cce2007-10-22 12:52:47 +0200969 return table;
970}
971
Martin Schwidefsky146e4b32008-02-09 18:24:35 +0100972void page_table_free(struct mm_struct *mm, unsigned long *table)
Martin Schwidefsky3610cce2007-10-22 12:52:47 +0200973{
Martin Schwidefsky146e4b32008-02-09 18:24:35 +0100974 struct page *page;
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200975 unsigned int bit, mask;
Martin Schwidefsky3610cce2007-10-22 12:52:47 +0200976
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +0200977 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200978 if (page_table_with_pgste(page))
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200979 return page_table_free_pgste(table);
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200980 /* Free 1K/2K page table fragment of a 4K page */
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200981 bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
Martin Schwidefsky80217142010-10-25 16:10:11 +0200982 spin_lock_bh(&mm->context.list_lock);
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200983 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
Martin Schwidefsky146e4b32008-02-09 18:24:35 +0100984 list_del(&page->lru);
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200985 mask = atomic_xor_bits(&page->_mapcount, bit);
986 if (mask & FRAG_MASK)
987 list_add(&page->lru, &mm->context.pgtable_list);
Martin Schwidefsky80217142010-10-25 16:10:11 +0200988 spin_unlock_bh(&mm->context.list_lock);
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200989 if (mask == 0) {
Martin Schwidefsky146e4b32008-02-09 18:24:35 +0100990 pgtable_page_dtor(page);
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200991 atomic_set(&page->_mapcount, -1);
Martin Schwidefsky146e4b32008-02-09 18:24:35 +0100992 __free_page(page);
993 }
994}
Martin Schwidefsky3610cce2007-10-22 12:52:47 +0200995
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200996static void __page_table_free_rcu(void *table, unsigned bit)
997{
998 struct page *page;
999
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001000 if (bit == FRAG_MASK)
1001 return page_table_free_pgste(table);
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001002 /* Free 1K/2K page table fragment of a 4K page */
1003 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1004 if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
1005 pgtable_page_dtor(page);
1006 atomic_set(&page->_mapcount, -1);
1007 __free_page(page);
Martin Schwidefsky80217142010-10-25 16:10:11 +02001008 }
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001009}
1010
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001011void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
1012 unsigned long vmaddr)
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001013{
1014 struct mm_struct *mm;
1015 struct page *page;
1016 unsigned int bit, mask;
1017
1018 mm = tlb->mm;
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001019 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1020 if (page_table_with_pgste(page)) {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001021 gmap_unlink(mm, table, vmaddr);
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001022 table = (unsigned long *) (__pa(table) | FRAG_MASK);
1023 tlb_remove_table(tlb, table);
1024 return;
Martin Schwidefsky80217142010-10-25 16:10:11 +02001025 }
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001026 bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
Martin Schwidefsky80217142010-10-25 16:10:11 +02001027 spin_lock_bh(&mm->context.list_lock);
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001028 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
1029 list_del(&page->lru);
1030 mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
1031 if (mask & FRAG_MASK)
1032 list_add_tail(&page->lru, &mm->context.pgtable_list);
Martin Schwidefsky80217142010-10-25 16:10:11 +02001033 spin_unlock_bh(&mm->context.list_lock);
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001034 table = (unsigned long *) (__pa(table) | (bit << 4));
1035 tlb_remove_table(tlb, table);
Martin Schwidefsky80217142010-10-25 16:10:11 +02001036}
1037
Heiko Carstens63df41d62013-09-06 19:10:48 +02001038static void __tlb_remove_table(void *_table)
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001039{
Martin Schwidefskye73b7ff2011-10-30 15:16:08 +01001040 const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
1041 void *table = (void *)((unsigned long) _table & ~mask);
1042 unsigned type = (unsigned long) _table & mask;
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001043
1044 if (type)
1045 __page_table_free_rcu(table, type);
1046 else
1047 free_pages((unsigned long) table, ALLOC_ORDER);
1048}
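/*
 * Added note: the low bits of the table pointer handed to tlb_remove_table()
 * encode what __tlb_remove_table() above has to free: FRAG_MASK marks a full
 * 4K page table with pgstes, a value of (fragment bit << 4) marks a 1K/2K
 * fragment (see page_table_free_rcu() below), and 0 means a crst table that
 * is returned with free_pages().
 */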
1049
Martin Schwidefskycd94154cc2012-04-11 14:28:07 +02001050static void tlb_remove_table_smp_sync(void *arg)
1051{
1052 /* Simply deliver the interrupt */
1053}
1054
1055static void tlb_remove_table_one(void *table)
1056{
1057 /*
1058 * This isn't an RCU grace period and hence the page-tables cannot be
1059 * assumed to be actually RCU-freed.
1060 *
1061 * It is however sufficient for software page-table walkers that rely
1062 * on IRQ disabling. See the comment near struct mmu_table_batch.
1063 */
1064 smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
1065 __tlb_remove_table(table);
1066}
1067
1068static void tlb_remove_table_rcu(struct rcu_head *head)
1069{
1070 struct mmu_table_batch *batch;
1071 int i;
1072
1073 batch = container_of(head, struct mmu_table_batch, rcu);
1074
1075 for (i = 0; i < batch->nr; i++)
1076 __tlb_remove_table(batch->tables[i]);
1077
1078 free_page((unsigned long)batch);
1079}
1080
1081void tlb_table_flush(struct mmu_gather *tlb)
1082{
1083 struct mmu_table_batch **batch = &tlb->batch;
1084
1085 if (*batch) {
Martin Schwidefskycd94154cc2012-04-11 14:28:07 +02001086 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
1087 *batch = NULL;
1088 }
1089}
1090
1091void tlb_remove_table(struct mmu_gather *tlb, void *table)
1092{
1093 struct mmu_table_batch **batch = &tlb->batch;
1094
Martin Schwidefsky5c474a12013-08-16 13:31:40 +02001095 tlb->mm->context.flush_mm = 1;
Martin Schwidefskycd94154cc2012-04-11 14:28:07 +02001096 if (*batch == NULL) {
1097 *batch = (struct mmu_table_batch *)
1098 __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
1099 if (*batch == NULL) {
Martin Schwidefsky5c474a12013-08-16 13:31:40 +02001100 __tlb_flush_mm_lazy(tlb->mm);
Martin Schwidefskycd94154cc2012-04-11 14:28:07 +02001101 tlb_remove_table_one(table);
1102 return;
1103 }
1104 (*batch)->nr = 0;
1105 }
1106 (*batch)->tables[(*batch)->nr++] = table;
1107 if ((*batch)->nr == MAX_TABLE_BATCH)
Martin Schwidefsky5c474a12013-08-16 13:31:40 +02001108 tlb_flush_mmu(tlb);
Martin Schwidefskycd94154cc2012-04-11 14:28:07 +02001109}
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001110
Gerald Schaefer274023d2012-10-08 16:30:21 -07001111#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001112static inline void thp_split_vma(struct vm_area_struct *vma)
Gerald Schaefer274023d2012-10-08 16:30:21 -07001113{
1114 unsigned long addr;
Gerald Schaefer274023d2012-10-08 16:30:21 -07001115
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001116 for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
1117 follow_page(vma, addr, FOLL_SPLIT);
Gerald Schaefer274023d2012-10-08 16:30:21 -07001118}
1119
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001120static inline void thp_split_mm(struct mm_struct *mm)
Gerald Schaefer274023d2012-10-08 16:30:21 -07001121{
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001122 struct vm_area_struct *vma;
Gerald Schaefer274023d2012-10-08 16:30:21 -07001123
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001124 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
Gerald Schaefer274023d2012-10-08 16:30:21 -07001125 thp_split_vma(vma);
1126 vma->vm_flags &= ~VM_HUGEPAGE;
1127 vma->vm_flags |= VM_NOHUGEPAGE;
Gerald Schaefer274023d2012-10-08 16:30:21 -07001128 }
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001129 mm->def_flags |= VM_NOHUGEPAGE;
1130}
1131#else
1132static inline void thp_split_mm(struct mm_struct *mm)
1133{
Gerald Schaefer274023d2012-10-08 16:30:21 -07001134}
1135#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1136
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001137static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
1138 struct mm_struct *mm, pud_t *pud,
1139 unsigned long addr, unsigned long end)
1140{
1141 unsigned long next, *table, *new;
1142 struct page *page;
Christian Borntraeger55e42832014-07-25 14:23:29 +02001143 spinlock_t *ptl;
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001144 pmd_t *pmd;
1145
1146 pmd = pmd_offset(pud, addr);
1147 do {
1148 next = pmd_addr_end(addr, end);
1149again:
1150 if (pmd_none_or_clear_bad(pmd))
1151 continue;
1152 table = (unsigned long *) pmd_deref(*pmd);
1153 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1154 if (page_table_with_pgste(page))
1155 continue;
1156 /* Allocate new page table with pgstes */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001157 new = page_table_alloc_pgste(mm);
Dominik Dingelbe39f192013-10-31 10:01:16 +01001158 if (!new)
1159 return -ENOMEM;
1160
Christian Borntraeger55e42832014-07-25 14:23:29 +02001161 ptl = pmd_lock(mm, pmd);
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001162 if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
1163 /* Nuke pmd entry pointing to the "short" page table */
1164 pmdp_flush_lazy(mm, addr, pmd);
1165 pmd_clear(pmd);
1166 /* Copy ptes from old table to new table */
1167 memcpy(new, table, PAGE_SIZE/2);
1168 clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
1169 /* Establish new table */
1170 pmd_populate(mm, pmd, (pte_t *) new);
1171 /* Free old table with rcu, there might be a walker! */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001172 page_table_free_rcu(tlb, table, addr);
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001173 new = NULL;
1174 }
Christian Borntraeger55e42832014-07-25 14:23:29 +02001175 spin_unlock(ptl);
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001176 if (new) {
1177 page_table_free_pgste(new);
1178 goto again;
1179 }
1180 } while (pmd++, addr = next, addr != end);
1181
1182 return addr;
1183}
1184
1185static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
1186 struct mm_struct *mm, pgd_t *pgd,
1187 unsigned long addr, unsigned long end)
1188{
1189 unsigned long next;
1190 pud_t *pud;
1191
1192 pud = pud_offset(pgd, addr);
1193 do {
1194 next = pud_addr_end(addr, end);
1195 if (pud_none_or_clear_bad(pud))
1196 continue;
1197 next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
Dominik Dingelbe39f192013-10-31 10:01:16 +01001198 if (unlikely(IS_ERR_VALUE(next)))
1199 return next;
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001200 } while (pud++, addr = next, addr != end);
1201
1202 return addr;
1203}
1204
Dominik Dingelbe39f192013-10-31 10:01:16 +01001205static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
1206 unsigned long addr, unsigned long end)
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001207{
1208 unsigned long next;
1209 pgd_t *pgd;
1210
1211 pgd = pgd_offset(mm, addr);
1212 do {
1213 next = pgd_addr_end(addr, end);
1214 if (pgd_none_or_clear_bad(pgd))
1215 continue;
1216 next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
Dominik Dingelbe39f192013-10-31 10:01:16 +01001217 if (unlikely(IS_ERR_VALUE(next)))
1218 return next;
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001219 } while (pgd++, addr = next, addr != end);
Dominik Dingelbe39f192013-10-31 10:01:16 +01001220
1221 return 0;
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001222}
1223
Carsten Otte402b0862008-03-25 18:47:10 +01001224/*
1225 * switch on pgstes for the current userspace process (for kvm)
1226 */
1227int s390_enable_sie(void)
1228{
1229 struct task_struct *tsk = current;
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001230 struct mm_struct *mm = tsk->mm;
1231 struct mmu_gather tlb;
Carsten Otte402b0862008-03-25 18:47:10 +01001232
Christian Borntraeger74b6b522008-05-21 13:37:29 +02001233	/* Do we have pgstes? If yes, we are done */
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001234 if (mm_has_pgste(tsk->mm))
Christian Borntraeger74b6b522008-05-21 13:37:29 +02001235 return 0;
Carsten Otte402b0862008-03-25 18:47:10 +01001236
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001237 down_write(&mm->mmap_sem);
Gerald Schaefer274023d2012-10-08 16:30:21 -07001238 /* split thp mappings and disable thp for future mappings */
1239 thp_split_mm(mm);
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001240 /* Reallocate the page tables with pgstes */
Linus Torvaldsae7a8352013-09-04 18:15:06 -07001241 tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
Dominik Dingelbe39f192013-10-31 10:01:16 +01001242 if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
1243 mm->context.has_pgste = 1;
Linus Torvaldsae7a8352013-09-04 18:15:06 -07001244 tlb_finish_mmu(&tlb, 0, TASK_SIZE);
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001245 up_write(&mm->mmap_sem);
1246 return mm->context.has_pgste ? 0 : -ENOMEM;
Carsten Otte402b0862008-03-25 18:47:10 +01001247}
1248EXPORT_SYMBOL_GPL(s390_enable_sie);
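/*
 * Typical use (a sketch, not code from this file): a hypervisor
 * backend such as KVM calls s390_enable_sie() once while creating a
 * virtual machine, before the first SIE entry, so that every page
 * table of the host mm carries pgstes.
 */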
Hans-Joachim Picht7db11a32009-06-16 10:30:26 +02001249
Dominik Dingel934bc132014-01-14 18:10:17 +01001250/*
1251 * Enable storage key handling from now on and initialize the storage
1252 * keys with the default key.
1253 */
Dominik Dingela13cff32014-10-23 12:07:14 +02001254static int __s390_enable_skey(pte_t *pte, unsigned long addr,
1255 unsigned long next, struct mm_walk *walk)
1256{
1257 unsigned long ptev;
1258 pgste_t pgste;
1259
1260 pgste = pgste_get_lock(pte);
Dominik Dingel2faee8f2014-10-23 12:08:38 +02001261 /*
 1262	 * Remove all zero page mappings; once the policy to forbid zero
 1263	 * page mappings is established, subsequent faults for these pages
 1264	 * will get fresh anonymous pages instead.
1265 */
1266 if (is_zero_pfn(pte_pfn(*pte))) {
1267 ptep_flush_direct(walk->mm, addr, pte);
1268 pte_val(*pte) = _PAGE_INVALID;
1269 }
Dominik Dingela13cff32014-10-23 12:07:14 +02001270 /* Clear storage key */
1271 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
1272 PGSTE_GR_BIT | PGSTE_GC_BIT);
1273 ptev = pte_val(*pte);
1274 if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
1275 page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
1276 pgste_set_unlock(pte, pgste);
1277 return 0;
1278}
1279
Dominik Dingel3ac8e382014-10-23 12:09:17 +02001280int s390_enable_skey(void)
Dominik Dingel934bc132014-01-14 18:10:17 +01001281{
Dominik Dingela13cff32014-10-23 12:07:14 +02001282 struct mm_walk walk = { .pte_entry = __s390_enable_skey };
1283 struct mm_struct *mm = current->mm;
Dominik Dingel3ac8e382014-10-23 12:09:17 +02001284 struct vm_area_struct *vma;
1285 int rc = 0;
Dominik Dingela13cff32014-10-23 12:07:14 +02001286
1287 down_write(&mm->mmap_sem);
1288 if (mm_use_skey(mm))
1289 goto out_up;
Dominik Dingel2faee8f2014-10-23 12:08:38 +02001290
1291 mm->context.use_skey = 1;
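	/*
	 * Storage keys are kept per physical page. Pages merged by KSM
	 * are shared between address spaces and thus cannot carry
	 * guest-specific keys, so unmerge all vmas and forbid merging
	 * for future mappings.
	 */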
Dominik Dingel3ac8e382014-10-23 12:09:17 +02001292 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1293 if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
1294 MADV_UNMERGEABLE, &vma->vm_flags)) {
1295 mm->context.use_skey = 0;
1296 rc = -ENOMEM;
1297 goto out_up;
1298 }
1299 }
1300 mm->def_flags &= ~VM_MERGEABLE;
Dominik Dingel2faee8f2014-10-23 12:08:38 +02001301
Dominik Dingela13cff32014-10-23 12:07:14 +02001302 walk.mm = mm;
1303 walk_page_range(0, TASK_SIZE, &walk);
Dominik Dingela13cff32014-10-23 12:07:14 +02001304
1305out_up:
1306 up_write(&mm->mmap_sem);
Dominik Dingel3ac8e382014-10-23 12:09:17 +02001307 return rc;
Dominik Dingel934bc132014-01-14 18:10:17 +01001308}
1309EXPORT_SYMBOL_GPL(s390_enable_skey);
1310
Dominik Dingela0bf4f12014-03-24 14:27:58 +01001311/*
Dominik Dingela13cff32014-10-23 12:07:14 +02001312 * Reset CMMA state, make all pages stable again.
1313 */
1314static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
1315 unsigned long next, struct mm_walk *walk)
1316{
1317 pgste_t pgste;
1318
1319 pgste = pgste_get_lock(pte);
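	/* Clearing the usage state bits makes the page "stable" again */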
1320 pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
1321 pgste_set_unlock(pte, pgste);
1322 return 0;
1323}
1324
1325void s390_reset_cmma(struct mm_struct *mm)
1326{
1327 struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
1328
1329 down_write(&mm->mmap_sem);
1330 walk.mm = mm;
1331 walk_page_range(0, TASK_SIZE, &walk);
1332 up_write(&mm->mmap_sem);
1333}
1334EXPORT_SYMBOL_GPL(s390_reset_cmma);
1335
1336/*
Dominik Dingela0bf4f12014-03-24 14:27:58 +01001337 * Test and reset whether a guest page is dirty
1338 */
1339bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
1340{
1341 pte_t *pte;
1342 spinlock_t *ptl;
1343 bool dirty = false;
1344
1345 pte = get_locked_pte(gmap->mm, address, &ptl);
1346 if (unlikely(!pte))
1347 return false;
1348
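	/*
	 * The dirty state of guest pages is tracked separately from the
	 * pte dirty bit; ptep_test_and_clear_user_dirty() queries and
	 * clears that per-page user dirty state.
	 */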
1349 if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
1350 dirty = true;
1351
1352 spin_unlock(ptl);
1353 return dirty;
1354}
1355EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);
1356
Gerald Schaefer75077af2012-10-08 16:30:15 -07001357#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001358int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
1359 pmd_t *pmdp)
1360{
1361 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 1362	/* No need to flush the TLB: on s390 the reference bits live in
 1363	 * the storage key and are never kept in the TLB. */
1364 return pmdp_test_and_clear_young(vma, address, pmdp);
1365}
1366
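/*
 * Update a huge pmd after an access fault: mark the entry young and,
 * if requested, dirty. Only if the entry actually changed is the old
 * entry invalidated and the new one installed; returns 1 in that
 * case, 0 otherwise.
 */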
1367int pmdp_set_access_flags(struct vm_area_struct *vma,
1368 unsigned long address, pmd_t *pmdp,
1369 pmd_t entry, int dirty)
1370{
1371 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1372
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001373 entry = pmd_mkyoung(entry);
1374 if (dirty)
1375 entry = pmd_mkdirty(entry);
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001376 if (pmd_same(*pmdp, entry))
1377 return 0;
1378 pmdp_invalidate(vma, address, pmdp);
1379 set_pmd_at(vma->vm_mm, address, pmdp, entry);
1380 return 1;
1381}
1382
Gerald Schaefer75077af2012-10-08 16:30:15 -07001383static void pmdp_splitting_flush_sync(void *arg)
1384{
1385 /* Simply deliver the interrupt */
1386}
1387
1388void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
1389 pmd_t *pmdp)
1390{
1391 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1392 if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
1393 (unsigned long *) pmdp)) {
1394 /* need to serialize against gup-fast (IRQ disabled) */
1395 smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
1396 }
1397}
Gerald Schaefer9501d092012-10-08 16:30:18 -07001398
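/*
 * Deposit/withdraw of pte page tables for huge pmds: the preallocated
 * pte tables are kept on a FIFO list threaded through the (currently
 * unused) pte pages themselves, anchored at pmd_huge_pte(), so they
 * can be handed back later, e.g. when the huge mapping is split.
 */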
Aneesh Kumar K.V6b0b50b2013-06-05 17:14:02 -07001399void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1400 pgtable_t pgtable)
Gerald Schaefer9501d092012-10-08 16:30:18 -07001401{
1402 struct list_head *lh = (struct list_head *) pgtable;
1403
Martin Schwidefskyec66ad62014-02-12 14:16:18 +01001404 assert_spin_locked(pmd_lockptr(mm, pmdp));
Gerald Schaefer9501d092012-10-08 16:30:18 -07001405
1406 /* FIFO */
Kirill A. Shutemovc389a252013-11-14 14:30:59 -08001407 if (!pmd_huge_pte(mm, pmdp))
Gerald Schaefer9501d092012-10-08 16:30:18 -07001408 INIT_LIST_HEAD(lh);
1409 else
Kirill A. Shutemovc389a252013-11-14 14:30:59 -08001410 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1411 pmd_huge_pte(mm, pmdp) = pgtable;
Gerald Schaefer9501d092012-10-08 16:30:18 -07001412}
1413
Aneesh Kumar K.V6b0b50b2013-06-05 17:14:02 -07001414pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
Gerald Schaefer9501d092012-10-08 16:30:18 -07001415{
1416 struct list_head *lh;
1417 pgtable_t pgtable;
1418 pte_t *ptep;
1419
Martin Schwidefskyec66ad62014-02-12 14:16:18 +01001420 assert_spin_locked(pmd_lockptr(mm, pmdp));
Gerald Schaefer9501d092012-10-08 16:30:18 -07001421
1422 /* FIFO */
Kirill A. Shutemovc389a252013-11-14 14:30:59 -08001423 pgtable = pmd_huge_pte(mm, pmdp);
Gerald Schaefer9501d092012-10-08 16:30:18 -07001424 lh = (struct list_head *) pgtable;
1425 if (list_empty(lh))
Kirill A. Shutemovc389a252013-11-14 14:30:59 -08001426 pmd_huge_pte(mm, pmdp) = NULL;
Gerald Schaefer9501d092012-10-08 16:30:18 -07001427 else {
Kirill A. Shutemovc389a252013-11-14 14:30:59 -08001428 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
Gerald Schaefer9501d092012-10-08 16:30:18 -07001429 list_del(lh);
1430 }
1431 ptep = (pte_t *) pgtable;
Martin Schwidefskye5098612013-07-23 20:57:57 +02001432 pte_val(*ptep) = _PAGE_INVALID;
Gerald Schaefer9501d092012-10-08 16:30:18 -07001433 ptep++;
Martin Schwidefskye5098612013-07-23 20:57:57 +02001434 pte_val(*ptep) = _PAGE_INVALID;
Gerald Schaefer9501d092012-10-08 16:30:18 -07001435 return pgtable;
1436}
Gerald Schaefer75077af2012-10-08 16:30:15 -07001437#endif /* CONFIG_TRANSPARENT_HUGEPAGE */