/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);

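/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a typical user such as KVM on s390 creates one gmap per guest and
 * removes it again when the guest goes away. The limit value below is
 * only an example.
 *
 *	struct gmap *guest_gmap;
 *
 *	guest_gmap = gmap_create(current->mm, (1UL << 44) - 1);
 *	if (!guest_gmap)
 *		return -ENOMEM;
 *	...
 *	gmap_remove(guest_gmap);
 */
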
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, 2);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, 2);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

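/*
 * Usage sketch (editorial illustration, not part of the original file):
 * backing the first guest segment with host memory. "vm_start" is an
 * assumed, segment-aligned address inside the host process, "guest_gmap"
 * a gmap obtained from gmap_create().
 *
 *	int rc;
 *
 *	rc = gmap_map_segment(guest_gmap, vm_start, 0x0UL, PMD_SIZE);
 *	if (rc)
 *		return rc;
 *	...
 *	gmap_unmap_segment(guest_gmap, 0x0UL, PMD_SIZE);
 */
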
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In the case that fixup_user_fault unlocked the mmap_sem during
	 * fault-in, redo __gmap_translate to not race with a map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

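/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a host fault handler resolving a guest access to "gaddr" before the
 * faulting instruction is retried.
 *
 *	rc = gmap_fault(guest_gmap, gaddr, FAULT_FLAG_WRITE);
 *	if (rc)
 *		handle the -EFAULT/-ENOMEM case, e.g. inject an exception
 */
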
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);

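/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a subsystem that wants pte invalidation callbacks supplies a
 * gmap_notifier and registers it once; the callback name is hypothetical.
 *
 *	static void my_pte_notifier(struct gmap *gmap, unsigned long start,
 *				    unsigned long end)
 *	{
 *		invalidate any state derived from [start, end] of this gmap
 *	}
 *
 *	static struct gmap_notifier my_notifier = {
 *		.notifier_call = my_pte_notifier,
 *	};
 *
 *	gmap_register_pte_notifier(&my_notifier);
 */
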
/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr >> 53) & 0x7ff;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr >> 42) & 0x7ff;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr >> 31) & 0x7ff;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr >> 20) & 0x7ff;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr >> 12) & 0xff;
	}
	return table;
}

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 * and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr)
{
	struct mm_struct *mm = gmap->mm;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	if (fixup_user_fault(current, mm, vmaddr, FAULT_FLAG_WRITE, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 * call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);

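/*
 * Usage sketch (editorial illustration, not part of the original file):
 * with a notifier registered (see gmap_register_pte_notifier() above),
 * a caller arms notification for a single guest page like this; any
 * later change to the backing pte then invokes the notifier_call.
 *
 *	rc = gmap_mprotect_notify(guest_gmap, gaddr & PAGE_MASK,
 *				  PAGE_SIZE, PROT_READ);
 */
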
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 * absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < 256; i++, raddr += 1UL << 12)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || *ste & _SEGMENT_ENTRY_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
	sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
		if (sgt[i] & _SEGMENT_ENTRY_INVALID)
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || *r3e & _REGION_ENTRY_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
	r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
		if (r3t[i] & _REGION_ENTRY_INVALID)
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || *r2e & _REGION_ENTRY_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
	r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
		if (r2t[i] & _REGION_ENTRY_INVALID)
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || *r1e & _REGION_ENTRY_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
	r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}

/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
		if (r1t[i] & _REGION_ENTRY_INVALID)
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}

/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->removed)
			continue;
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, NULL if out of memory or if
 * anything goes wrong while protecting the top level pages.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	new = gmap_alloc(limit);
	if (!new)
		return NULL;
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	if (!rc) {
		atomic_set(&new->ref_count, 2);
		spin_lock(&parent->shadow_lock);
		/* Recheck if another CPU created the same shadow */
		sg = gmap_find_shadow(parent, asce);
		if (!sg) {
			list_add(&new->list, &parent->children);
			sg = new;
			new = NULL;
		}
		spin_unlock(&parent->shadow_lock);
	}
	if (new)
		gmap_free(new);
	return sg;
}
EXPORT_SYMBOL_GPL(gmap_shadow);

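/*
 * Usage sketch (editorial illustration, not part of the original file):
 * for a nested (vSIE) guest the asce taken from the guest's state
 * description is turned into a shadow gmap; "guest3_asce" is an assumed
 * variable, "guest_gmap" the parent gmap of the virtual machine.
 *
 *	struct gmap *sg;
 *
 *	sg = gmap_shadow(guest_gmap, guest3_asce);
 *	if (!sg)
 *		return -ENOMEM;
 *	...
 *	gmap_put(sg);
 */
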
/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	*table = (unsigned long) s_r2t |
		 _REGION_ENTRY_LENGTH | _REGION_ENTRY_TYPE_R1;
	list_add(&page->lru, &sg->crst_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	if (rc) {
		spin_lock(&sg->guest_table_lock);
		gmap_unshadow_r2t(sg, raddr);
		spin_unlock(&sg->guest_table_lock);
	}
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);

/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	*table = (unsigned long) s_r3t |
		 _REGION_ENTRY_LENGTH | _REGION_ENTRY_TYPE_R2;
	list_add(&page->lru, &sg->crst_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	if (rc) {
		spin_lock(&sg->guest_table_lock);
		gmap_unshadow_r3t(sg, raddr);
		spin_unlock(&sg->guest_table_lock);
	}
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);

/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	s_sgt = (unsigned long *) page_to_phys(page);
1611 /* Install shadow segment table */
1612 spin_lock(&sg->guest_table_lock);
1613 table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
1614 if (!table) {
1615 rc = -EAGAIN; /* Race with unshadow */
1616 goto out_free;
1617 }
1618 if (!(*table & _REGION_ENTRY_INVALID)) {
1619 rc = 0; /* Already established */
1620 goto out_free;
1621 }
1622 crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
1623 *table = (unsigned long) s_sgt |
1624 _REGION_ENTRY_LENGTH | _REGION_ENTRY_TYPE_R3;
1625 list_add(&page->lru, &sg->crst_list);
1626 spin_unlock(&sg->guest_table_lock);
1627 /* Make sgt read-only in parent gmap page table */
1628 raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
1629 origin = sgt & _REGION_ENTRY_ORIGIN;
1630 offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
1631 len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
1632 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
1633 if (rc) {
1634 spin_lock(&sg->guest_table_lock);
1635 gmap_unshadow_sgt(sg, raddr);
1636 spin_unlock(&sg->guest_table_lock);
1637 }
1638 return rc;
1639out_free:
1640 spin_unlock(&sg->guest_table_lock);
1641 __free_pages(page, 2);
1642 return rc;
1643}
1644EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
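
/*
 * Illustrative sketch, not part of the kernel build: a caller resolving a
 * shadow fault creates the missing shadow tables top-down before the page
 * itself can be mapped. The r3t_origin/sgt_origin values are assumptions -
 * a real caller reads them from the guest's own region table entries for
 * @saddr - and error handling is reduced to propagating the return codes
 * documented above.
 */
static int __maybe_unused example_shadow_crst_tables(struct gmap *sg,
						     unsigned long saddr,
						     unsigned long r3t_origin,
						     unsigned long sgt_origin)
{
	int rc;

	/* region-2 entry for saddr points to the guest's region-3 table */
	rc = gmap_shadow_r3t(sg, saddr, r3t_origin);
	if (rc)
		return rc;	/* -EAGAIN, -ENOMEM or -EFAULT */
	/* region-3 entry for saddr points to the guest's segment table */
	return gmap_shadow_sgt(sg, saddr, sgt_origin);
}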
1645
1646/**
1647 * gmap_shadow_pgt_lookup - find a shadow page table
1648 * @sg: pointer to the shadow guest address space structure
1649 * @saddr: the address in the shadow guest address space
1650 * @pgt: parent gmap address of the page table to get shadowed
1651 * @dat_protection: set to 1 if the page table is marked DAT-protected
1652 *
1653 * Returns 0 if the shadow page table was found and -EAGAIN if the page
1654 * table was not found.
1655 *
1656 * Called with sg->mm->mmap_sem in read.
1657 */
1658int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
1659 unsigned long *pgt, int *dat_protection)
1660{
1661 unsigned long *table;
1662 struct page *page;
1663 int rc;
1664
1665 BUG_ON(!gmap_is_shadow(sg));
1666 spin_lock(&sg->guest_table_lock);
1667 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1668 if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
1669 /* Shadow page tables are full pages (pte+pgste) */
1670 page = pfn_to_page(*table >> PAGE_SHIFT);
1671 *pgt = page->index;
1672 *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
1673 rc = 0;
1674 } else {
1675 rc = -EAGAIN;
1676 }
1677 spin_unlock(&sg->guest_table_lock);
1678 return rc;
1679
1680}
1681EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
1682
1683/**
1684 * gmap_shadow_pgt - instantiate a shadow page table
1685 * @sg: pointer to the shadow guest address space structure
1686 * @saddr: faulting address in the shadow gmap
1687 * @pgt: parent gmap address of the page table to get shadowed
1688 *
1689 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1690 * shadow table structure is incomplete, -ENOMEM if out of memory,
1691 * -EFAULT if an address in the parent gmap could not be resolved.
1692 *
1693 * Called with sg->mm->mmap_sem in read.
1694 */
1695int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt)
1696{
1697 unsigned long raddr, origin;
1698 unsigned long *s_pgt, *table;
1699 struct page *page;
1700 int rc;
1701
1702 BUG_ON(!gmap_is_shadow(sg));
1703 /* Allocate a shadow page table */
1704 page = page_table_alloc_pgste(sg->mm);
1705 if (!page)
1706 return -ENOMEM;
1707 page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
1708 s_pgt = (unsigned long *) page_to_phys(page);
1709 /* Install shadow page table */
1710 spin_lock(&sg->guest_table_lock);
1711 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1712 if (!table) {
1713 rc = -EAGAIN; /* Race with unshadow */
1714 goto out_free;
1715 }
1716 if (!(*table & _SEGMENT_ENTRY_INVALID)) {
1717 rc = 0; /* Already established */
1718 goto out_free;
1719 }
1720 *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
1721 (pgt & _SEGMENT_ENTRY_PROTECT);
1722 list_add(&page->lru, &sg->pt_list);
1723 spin_unlock(&sg->guest_table_lock);
1724 /* Make pgt read-only in parent gmap page table (not the pgste) */
1725 raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
1726 origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
1727 rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
1728 if (rc) {
1729 spin_lock(&sg->guest_table_lock);
1730 gmap_unshadow_pgt(sg, raddr);
1731 spin_unlock(&sg->guest_table_lock);
1732 }
1733 return rc;
1734out_free:
1735 spin_unlock(&sg->guest_table_lock);
1736 page_table_free_pgste(page);
1737 return rc;
1738
1739}
1740EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
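
/*
 * Illustrative sketch, not part of the kernel build: how a caller might
 * combine gmap_shadow_pgt_lookup() and gmap_shadow_pgt(). The guest_pgt
 * argument is an assumption - a real caller reads it from the guest's
 * segment table entry for @saddr - and sg->mm->mmap_sem is assumed to be
 * held for reading, as both functions require.
 */
static int __maybe_unused example_get_shadow_pgt(struct gmap *sg,
						 unsigned long saddr,
						 unsigned long guest_pgt)
{
	unsigned long pgt;
	int dat_protection;
	int rc;

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection);
	if (!rc)
		return 0;	/* a shadow page table is already in place */
	/* rc == -EAGAIN: no shadow page table yet, instantiate one */
	return gmap_shadow_pgt(sg, saddr, guest_pgt);
}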
1741
1742/**
1743 * gmap_shadow_page - create a shadow page mapping
1744 * @sg: pointer to the shadow guest address space structure
1745 * @saddr: faulting address in the shadow gmap
1746 * @paddr: parent gmap address to get mapped at @saddr
1747 * @write: =1 map r/w, =0 map r/o
1748 *
1749 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1750 * shadow table structure is incomplete, -ENOMEM if out of memory and
1751 * -EFAULT if an address in the parent gmap could not be resolved.
1752 *
1753 * Called with sg->mm->mmap_sem in read.
1754 */
1755int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
1756 unsigned long paddr, int write)
1757{
1758 struct gmap *parent;
1759 struct gmap_rmap *rmap;
1760 unsigned long vmaddr;
1761 spinlock_t *ptl;
1762 pte_t *sptep, *tptep;
1763 int rc;
1764
1765 BUG_ON(!gmap_is_shadow(sg));
1766 parent = sg->parent;
1767
1768 rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
1769 if (!rmap)
1770 return -ENOMEM;
1771 rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
1772
1773 while (1) {
1774 vmaddr = __gmap_translate(parent, paddr);
1775 if (IS_ERR_VALUE(vmaddr)) {
1776 rc = vmaddr;
1777 break;
1778 }
1779 rc = radix_tree_preload(GFP_KERNEL);
1780 if (rc)
1781 break;
1782 rc = -EAGAIN;
1783 sptep = gmap_pte_op_walk(parent, paddr, &ptl);
1784 if (sptep) {
1785 spin_lock(&sg->guest_table_lock);
1786 /* Get page table pointer */
1787 tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
1788 if (!tptep) {
1789 spin_unlock(&sg->guest_table_lock);
1790 gmap_pte_op_end(ptl);
1791 radix_tree_preload_end();
1792 break;
1793 }
1794 rc = ptep_shadow_pte(sg->mm, saddr,
1795 sptep, tptep, write);
1796 if (rc > 0) {
1797 /* Success and a new mapping */
1798 gmap_insert_rmap(sg, vmaddr, rmap);
1799 rmap = NULL;
1800 rc = 0;
1801 }
1802 gmap_pte_op_end(ptl);
1803 spin_unlock(&sg->guest_table_lock);
1804 }
1805 radix_tree_preload_end();
1806 if (!rc)
1807 break;
1808 rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
1809 if (rc)
1810 break;
1811 }
1812 kfree(rmap);
1813 return rc;
1814}
1815EXPORT_SYMBOL_GPL(gmap_shadow_page);
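
/*
 * Illustrative sketch, not part of the kernel build: mapping the guest
 * page is the final step of a shadow fault. @paddr is assumed to be the
 * parent gmap address taken from the guest's pte for @saddr, and @write
 * reflects whether the faulting access was a store.
 */
static int __maybe_unused example_map_shadow_page(struct gmap *sg,
						  unsigned long saddr,
						  unsigned long paddr,
						  int write)
{
	int rc;

	rc = gmap_shadow_page(sg, saddr, paddr, write);
	if (rc == -EAGAIN) {
		/*
		 * The shadow table structure is incomplete; a real caller
		 * would rebuild the missing tables with the gmap_shadow_*()
		 * functions above and retry the whole fault.
		 */
	}
	return rc;
}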
1816
1817/**
1818 * gmap_shadow_notify - handle notifications for a shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the host pte
 * @offset: offset of the changed page within its 1 MB segment
 * @pte: pointer to the host page table entry that triggered the notification
1819 *
1820 * Called with sg->parent->shadow_lock held.
1821 */
1822static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
1823 unsigned long offset, pte_t *pte)
1824{
1825 struct gmap_rmap *rmap, *rnext, *head;
1826 unsigned long gaddr, start, end, bits, raddr;
1827 unsigned long *table;
1828
1829 BUG_ON(!gmap_is_shadow(sg));
1830 spin_lock(&sg->parent->guest_table_lock);
1831 table = radix_tree_lookup(&sg->parent->host_to_guest,
1832 vmaddr >> PMD_SHIFT);
1833 gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
1834 spin_unlock(&sg->parent->guest_table_lock);
1835 if (!table)
1836 return;
1837
1838 spin_lock(&sg->guest_table_lock);
1839 if (sg->removed) {
1840 spin_unlock(&sg->guest_table_lock);
1841 return;
1842 }
1843 /* Check for top level table */
1844 start = sg->orig_asce & _ASCE_ORIGIN;
1845 end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
1846 if (gaddr >= start && gaddr < end) {
1847 /* The complete shadow table has to go */
1848 gmap_unshadow(sg);
1849 spin_unlock(&sg->guest_table_lock);
1850 list_del(&sg->list);
1851 gmap_put(sg);
1852 return;
1853 }
1854 /* Remove the page table tree from one specific entry */
1855 head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12);
1856 gmap_for_each_rmap_safe(rmap, rnext, head) {
1857 bits = rmap->raddr & _SHADOW_RMAP_MASK;
1858 raddr = rmap->raddr ^ bits;
1859 switch (bits) {
1860 case _SHADOW_RMAP_REGION1:
1861 gmap_unshadow_r2t(sg, raddr);
1862 break;
1863 case _SHADOW_RMAP_REGION2:
1864 gmap_unshadow_r3t(sg, raddr);
1865 break;
1866 case _SHADOW_RMAP_REGION3:
1867 gmap_unshadow_sgt(sg, raddr);
1868 break;
1869 case _SHADOW_RMAP_SEGMENT:
1870 gmap_unshadow_pgt(sg, raddr);
1871 break;
1872 case _SHADOW_RMAP_PGTABLE:
1873 gmap_unshadow_page(sg, raddr);
1874 break;
1875 }
1876 kfree(rmap);
1877 }
1878 spin_unlock(&sg->guest_table_lock);
1879}
1880
1881/**
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01001882 * ptep_notify - call all invalidation callbacks for a specific pte.
1883 * @mm: pointer to the process mm_struct
1884 * @addr: virtual address in the process address space
1885 * @pte: pointer to the page table entry
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001886 * @bits: bits from the pgste that caused the notify call
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01001887 *
1888 * This function is assumed to be called with the page table lock held
1889 * for the pte to notify.
1890 */
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001891void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
1892 pte_t *pte, unsigned long bits)
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01001893{
1894 unsigned long offset, gaddr;
1895 unsigned long *table;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001896 struct gmap *gmap, *sg, *next;
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01001897
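	/*
	 * Derive the offset of the affected page within its 1 MB segment:
	 * the pte's byte offset inside the 2 KB page table (pte index * 8)
	 * is scaled by 4096 / sizeof(pte_t) = 512, i.e. pte index * 4096.
	 */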
1898 offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
1899 offset = offset * (4096 / sizeof(pte_t));
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01001900 rcu_read_lock();
1901 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001902 if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
1903 spin_lock(&gmap->shadow_lock);
1904 list_for_each_entry_safe(sg, next,
1905 &gmap->children, list)
1906 gmap_shadow_notify(sg, vmaddr, offset, pte);
1907 spin_unlock(&gmap->shadow_lock);
1908 }
1909 if (!(bits & PGSTE_IN_BIT))
1910 continue;
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01001911 spin_lock(&gmap->guest_table_lock);
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01001912 table = radix_tree_lookup(&gmap->host_to_guest,
1913 vmaddr >> PMD_SHIFT);
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01001914 if (table)
1915 gaddr = __gmap_segment_gaddr(table) + offset;
1916 spin_unlock(&gmap->guest_table_lock);
1917 if (table)
1918 gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01001919 }
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01001920 rcu_read_unlock();
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01001921}
1922EXPORT_SYMBOL_GPL(ptep_notify);
1923
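/*
 * KVM guests need page tables with attached pgstes; transparent huge
 * page mappings cannot provide those. Split all existing THP mappings
 * of the mm and disable THP for all future mappings before switching
 * the mm over to pgste mode.
 */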
1924static inline void thp_split_mm(struct mm_struct *mm)
1925{
1926#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1927 struct vm_area_struct *vma;
1928 unsigned long addr;
1929
1930 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
1931 for (addr = vma->vm_start;
1932 addr < vma->vm_end;
1933 addr += PAGE_SIZE)
1934 follow_page(vma, addr, FOLL_SPLIT);
1935 vma->vm_flags &= ~VM_HUGEPAGE;
1936 vma->vm_flags |= VM_NOHUGEPAGE;
1937 }
1938 mm->def_flags |= VM_NOHUGEPAGE;
1939#endif
1940}
1941
1942/*
1943 * switch on pgstes for the current userspace process (for KVM)
1944 */
1945int s390_enable_sie(void)
1946{
1947 struct mm_struct *mm = current->mm;
1948
1949 /* Do we have pgstes? if yes, we are done */
1950 if (mm_has_pgste(mm))
1951 return 0;
1952 /* Fail if the page tables are 2K */
1953 if (!mm_alloc_pgste(mm))
1954 return -EINVAL;
1955 down_write(&mm->mmap_sem);
1956 mm->context.has_pgste = 1;
1957 /* split thp mappings and disable thp for future mappings */
1958 thp_split_mm(mm);
1959 up_write(&mm->mmap_sem);
1960 return 0;
1961}
1962EXPORT_SYMBOL_GPL(s390_enable_sie);
1963
1964/*
1965 * Enable storage key handling from now on and initialize the storage
1966 * keys with the default key.
1967 */
1968static int __s390_enable_skey(pte_t *pte, unsigned long addr,
1969 unsigned long next, struct mm_walk *walk)
1970{
1971 /*
1972 * Remove all zero page mappings; after the policy to forbid
1973 * zero page mappings has been established, subsequent faults
1974 * for these pages will get fresh anonymous pages.
1975 */
1976 if (is_zero_pfn(pte_pfn(*pte)))
1977 ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
1978 /* Clear storage key */
1979 ptep_zap_key(walk->mm, addr, pte);
1980 return 0;
1981}
1982
1983int s390_enable_skey(void)
1984{
1985 struct mm_walk walk = { .pte_entry = __s390_enable_skey };
1986 struct mm_struct *mm = current->mm;
1987 struct vm_area_struct *vma;
1988 int rc = 0;
1989
1990 down_write(&mm->mmap_sem);
1991 if (mm_use_skey(mm))
1992 goto out_up;
1993
1994 mm->context.use_skey = 1;
1995 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1996 if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
1997 MADV_UNMERGEABLE, &vma->vm_flags)) {
1998 mm->context.use_skey = 0;
1999 rc = -ENOMEM;
2000 goto out_up;
2001 }
2002 }
2003 mm->def_flags &= ~VM_MERGEABLE;
2004
2005 walk.mm = mm;
2006 walk_page_range(0, TASK_SIZE, &walk);
2007
2008out_up:
2009 up_write(&mm->mmap_sem);
2010 return rc;
2011}
2012EXPORT_SYMBOL_GPL(s390_enable_skey);
2013
2014/*
2015 * Reset CMMA state, make all pages stable again.
2016 */
2017static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
2018 unsigned long next, struct mm_walk *walk)
2019{
2020 ptep_zap_unused(walk->mm, addr, pte, 1);
2021 return 0;
2022}
2023
2024void s390_reset_cmma(struct mm_struct *mm)
2025{
2026 struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
2027
2028 down_write(&mm->mmap_sem);
2029 walk.mm = mm;
2030 walk_page_range(0, TASK_SIZE, &walk);
2031 up_write(&mm->mmap_sem);
2032}
2033EXPORT_SYMBOL_GPL(s390_reset_cmma);
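
/*
 * Illustrative sketch, not part of the kernel build: how a hypervisor
 * might use the mm related guest helpers above. The call sites and their
 * ordering are assumptions for illustration; @mm is assumed to be
 * current->mm of the process that backs the guest.
 */
static void __maybe_unused example_guest_mm_setup(struct mm_struct *mm)
{
	/* once, when the virtual machine is created */
	if (s390_enable_sie())
		return;		/* mm was set up with 2K page tables */
	/* lazily, the first time the guest uses storage keys */
	if (s390_enable_skey())
		return;		/* un-merging of KSM pages failed */
	/* on guest reset: drop CMMA state and make all pages stable */
	s390_reset_cmma(mm);
}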