/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE	1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
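
/*
 * Example (illustrative sketch, not taken from a caller; the 2 TB limit
 * is a made-up value): the typical lifecycle of a gmap in a hypervisor
 * such as KVM.
 *
 *	struct gmap *g = gmap_create(current->mm, (1UL << 41) - 1);
 *
 *	if (!g)
 *		return -ENOMEM;
 *	gmap_enable(g);		// make it the current guest ASCE
 *	...			// run the guest, resolve faults via gmap_fault()
 *	gmap_disable(g);
 *	gmap_remove(g);		// unlink and drop the initial reference
 */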

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, 2);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, 2);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
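
/*
 * Worked example for __gmap_segment_gaddr (illustrative, assuming the
 * usual s390 geometry of 2048 segment entries of 8 bytes covering 1 MB
 * each): page->index of the 16 KB crst page holds the guest address of
 * the table's first entry, so an @entry at byte offset 0x18 into the
 * table is entry number 3 and the function returns
 * page->index + 3 * PMD_SIZE.
 */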

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
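
/*
 * Example (illustrative sketch; vm_start is a hypothetical host address,
 * e.g. obtained from mmap()): backing the first 16 MB of guest real
 * memory. All three values must be 1 MB (PMD_SIZE) aligned.
 *
 *	unsigned long from = vm_start;	// host user space address
 *	unsigned long to = 0;		// guest real address 0
 *	unsigned long len = 16UL << 20;
 *
 *	if (gmap_map_segment(gmap, from, to, len))
 *		goto err;		// -EINVAL or -ENOMEM
 */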

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In case fixup_user_fault unlocked the mmap_sem during the
	 * fault-in, redo __gmap_translate to avoid racing with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
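
/*
 * Example (illustrative sketch; guest_addr is hypothetical): how a guest
 * page fault intercept might be resolved.
 *
 *	rc = gmap_fault(gmap, guest_addr, FAULT_FLAG_WRITE);
 *	if (rc == -EFAULT)
 *		...	// no mapping: reflect an addressing exception
 *	else if (rc)
 *		...	// -ENOMEM: retry later or fail the VM
 */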

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep)) {
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
			/* only unlock if get_locked_pte took the lock */
			pte_unmap_unlock(ptep, ptl);
		}
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2) * 11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr >> 53) & 0x7ff;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr >> 42) & 0x7ff;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr >> 31) & 0x7ff;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr >> 20) & 0x7ff;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr >> 12) & 0xff;
	}
	return table;
}
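
/*
 * Worked example for gmap_table_walk (illustrative): with a region-1
 * (four-level) ASCE the guest address is decoded as 11 bits per crst
 * level plus an 8-bit page table index:
 *
 *	bits 63..53 -> region-1 index	((gaddr >> 53) & 0x7ff)
 *	bits 52..42 -> region-2 index	((gaddr >> 42) & 0x7ff)
 *	bits 41..31 -> region-3 index	((gaddr >> 31) & 0x7ff)
 *	bits 30..20 -> segment index	((gaddr >> 20) & 0x7ff)
 *	bits 19..12 -> page table index	((gaddr >> 12) & 0xff)
 *
 * The switch falls through from the ASCE's top level down to the
 * requested @level, so shorter ASCEs simply enter the decode lower down.
 */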

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr)
{
	struct mm_struct *mm = gmap->mm;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	if (fixup_user_fault(current, mm, vmaddr, FAULT_FLAG_WRITE, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the page table spinlock
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
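
/*
 * Example (illustrative sketch; my_invalidate_cb is a hypothetical
 * callback): arming a notification on one guest page. The callback runs
 * whenever a pte in the protected range is invalidated.
 *
 *	static void my_invalidate_cb(struct gmap *gmap, unsigned long start,
 *				     unsigned long end) { ... }
 *
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_invalidate_cb,
 *	};
 *
 *	gmap_register_pte_notifier(&my_nb);
 *	rc = gmap_mprotect_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE,
 *				  PROT_READ);
 */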

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
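
/*
 * Example (illustrative sketch; origin and index are hypothetical): the
 * pattern a shadow DAT walk can use to fetch a guest region or segment
 * table entry without marking the backing page referenced.
 *
 *	unsigned long entry;
 *
 *	rc = gmap_read_table(parent, origin + index * 8, &entry);
 *	if (rc)
 *		return rc;	// -EFAULT or -ENOMEM
 */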

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
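
/*
 * Note (illustrative): rmap addresses are at least page aligned, so the
 * low three bits of a raddr are free to encode the shadow table level the
 * rmap protects. gmap_shadow_r2t() below, for example, stores
 *
 *	raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
 *
 * i.e. the region-1 aligned shadow address tagged with the level that
 * must be unshadowed when the parent mapping changes.
 */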

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
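
/*
 * Note (assumption, for the reader): the hand-coded .insn above is the
 * IDTE instruction (INVALIDATE DAT TABLE ENTRY, opcode 0xb98e), written
 * this way presumably so the file assembles even with toolchains that do
 * not know the mnemonic.
 */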

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < 256; i++, raddr += 1UL << 12)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
	sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
	r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
	r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
	r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}

/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}

/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}

/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *		       given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred to by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
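
/*
 * Example (illustrative sketch; saved_asce is hypothetical): how the VSIE
 * code might obtain a shadow gmap for a nested guest before populating
 * the shadow tables.
 *
 *	sg = gmap_shadow(vcpu->arch.gmap, saved_asce, edat_level);
 *	if (IS_ERR(sg))
 *		return PTR_ERR(sg);	// -ENOMEM, -EAGAIN or -EFAULT
 *	...				// fill in tables, e.g. gmap_shadow_r2t()
 *	gmap_put(sg);			// drop the reference when done
 */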
1517
1518/**
1519 * gmap_shadow_r2t - create an empty shadow region 2 table
1520 * @sg: pointer to the shadow guest address space structure
1521 * @saddr: faulting address in the shadow gmap
1522 * @r2t: parent gmap address of the region 2 table to get shadowed
David Hildenbrand3218f702016-04-18 16:22:24 +02001523 * @fake: r2t references contiguous guest memory block, not a r2t
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001524 *
1525 * The r2t parameter specifies the address of the source table. The
1526 * four pages of the source table are made read-only in the parent gmap
1527 * address space. A write to the source table area @r2t will automatically
1528 * remove the shadow r2 table and all of its descendants.
1529 *
1530 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1531 * shadow table structure is incomplete, -ENOMEM if out of memory and
1532 * -EFAULT if an address in the parent gmap could not be resolved.
1533 *
1534 * Called with sg->mm->mmap_sem in read.
1535 */
David Hildenbrand3218f702016-04-18 16:22:24 +02001536int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
1537 int fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001538{
1539 unsigned long raddr, origin, offset, len;
1540 unsigned long *s_r2t, *table;
1541 struct page *page;
1542 int rc;
1543
1544 BUG_ON(!gmap_is_shadow(sg));
1545 /* Allocate a shadow region second table */
1546 page = alloc_pages(GFP_KERNEL, 2);
1547 if (!page)
1548 return -ENOMEM;
1549 page->index = r2t & _REGION_ENTRY_ORIGIN;
David Hildenbrand3218f702016-04-18 16:22:24 +02001550 if (fake)
1551 page->index |= GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001552 s_r2t = (unsigned long *) page_to_phys(page);
1553 /* Install shadow region second table */
1554 spin_lock(&sg->guest_table_lock);
1555 table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
1556 if (!table) {
1557 rc = -EAGAIN; /* Race with unshadow */
1558 goto out_free;
1559 }
1560 if (!(*table & _REGION_ENTRY_INVALID)) {
1561 rc = 0; /* Already established */
1562 goto out_free;
David Hildenbrand998f6372016-03-08 12:23:38 +01001563 } else if (*table & _REGION_ENTRY_ORIGIN) {
1564 rc = -EAGAIN; /* Race with shadow */
1565 goto out_free;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001566 }
1567 crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
David Hildenbrand998f6372016-03-08 12:23:38 +01001568 /* mark as invalid as long as the parent table is not protected */
1569 *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
1570 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001571 if (sg->edat_level >= 1)
1572 *table |= (r2t & _REGION_ENTRY_PROTECT);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001573 list_add(&page->lru, &sg->crst_list);
David Hildenbrand3218f702016-04-18 16:22:24 +02001574 if (fake) {
1575 /* nothing to protect for fake tables */
1576 *table &= ~_REGION_ENTRY_INVALID;
1577 spin_unlock(&sg->guest_table_lock);
1578 return 0;
1579 }
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001580 spin_unlock(&sg->guest_table_lock);
1581 /* Make r2t read-only in parent gmap page table */
1582 raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
1583 origin = r2t & _REGION_ENTRY_ORIGIN;
1584 offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
1585 len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
1586 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
David Hildenbrand998f6372016-03-08 12:23:38 +01001587 spin_lock(&sg->guest_table_lock);
1588 if (!rc) {
1589 table = gmap_table_walk(sg, saddr, 4);
1590 if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1591 (unsigned long) s_r2t)
1592 rc = -EAGAIN; /* Race with unshadow */
1593 else
1594 *table &= ~_REGION_ENTRY_INVALID;
1595 } else {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001596 gmap_unshadow_r2t(sg, raddr);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001597 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001598 spin_unlock(&sg->guest_table_lock);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001599 return rc;
1600out_free:
1601 spin_unlock(&sg->guest_table_lock);
1602 __free_pages(page, 2);
1603 return rc;
1604}
1605EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
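
/*
 * Editorial note (worked example, not original code): the shift by 6
 * above means the table-offset (TF) field sits in bits 6-7 of the
 * entry and the table-length (TL) field in bits 0-1; both count 4K
 * quarters of the 16K region table. For TF = 1, TL = 2:
 *
 *	offset = 1 * 4096 = 4096
 *	len    = (2 + 1) * 4096 - 4096 = 8192
 *
 * so only the second and third 4K pages of the source table are made
 * read-only in the parent gmap. gmap_shadow_r3t() and gmap_shadow_sgt()
 * below use the identical computation.
 */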
1606
1607/**
1608 * gmap_shadow_r3t - create a shadow region 3 table
1609 * @sg: pointer to the shadow guest address space structure
1610 * @saddr: faulting address in the shadow gmap
1611 * @r3t: parent gmap address of the region 3 table to get shadowed
David Hildenbrand3218f702016-04-18 16:22:24 +02001612 * @fake: r3t references contiguous guest memory block, not a r3t
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001613 *
1614 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1615 * shadow table structure is incomplete, -ENOMEM if out of memory and
1616 * -EFAULT if an address in the parent gmap could not be resolved.
1617 *
1618 * Called with sg->mm->mmap_sem in read.
1619 */
David Hildenbrand3218f702016-04-18 16:22:24 +02001620int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
1621 int fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001622{
1623 unsigned long raddr, origin, offset, len;
1624 unsigned long *s_r3t, *table;
1625 struct page *page;
1626 int rc;
1627
1628 BUG_ON(!gmap_is_shadow(sg));
1629 /* Allocate a shadow region third table */
1630 page = alloc_pages(GFP_KERNEL, 2);
1631 if (!page)
1632 return -ENOMEM;
1633 page->index = r3t & _REGION_ENTRY_ORIGIN;
David Hildenbrand3218f702016-04-18 16:22:24 +02001634 if (fake)
1635 page->index |= GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001636 s_r3t = (unsigned long *) page_to_phys(page);
1637 /* Install shadow region third table */
1638 spin_lock(&sg->guest_table_lock);
1639 table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
1640 if (!table) {
1641 rc = -EAGAIN; /* Race with unshadow */
1642 goto out_free;
1643 }
1644 if (!(*table & _REGION_ENTRY_INVALID)) {
1645 rc = 0; /* Already established */
1646 goto out_free;
David Hildenbrand998f6372016-03-08 12:23:38 +01001647 } else if (*table & _REGION_ENTRY_ORIGIN) {
1648 rc = -EAGAIN; /* Race with shadow */
 goto out_free;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001649 }
1650 crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
David Hildenbrand998f6372016-03-08 12:23:38 +01001651 /* mark as invalid as long as the parent table is not protected */
1652 *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
1653 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001654 if (sg->edat_level >= 1)
1655 *table |= (r3t & _REGION_ENTRY_PROTECT);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001656 list_add(&page->lru, &sg->crst_list);
David Hildenbrand3218f702016-04-18 16:22:24 +02001657 if (fake) {
1658 /* nothing to protect for fake tables */
1659 *table &= ~_REGION_ENTRY_INVALID;
1660 spin_unlock(&sg->guest_table_lock);
1661 return 0;
1662 }
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001663 spin_unlock(&sg->guest_table_lock);
1664 /* Make r3t read-only in parent gmap page table */
1665 raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
1666 origin = r3t & _REGION_ENTRY_ORIGIN;
1667 offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
1668 len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
1669 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
David Hildenbrand998f6372016-03-08 12:23:38 +01001670 spin_lock(&sg->guest_table_lock);
1671 if (!rc) {
1672 table = gmap_table_walk(sg, saddr, 3);
1673 if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1674 (unsigned long) s_r3t)
1675 rc = -EAGAIN; /* Race with unshadow */
1676 else
1677 *table &= ~_REGION_ENTRY_INVALID;
1678 } else {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001679 gmap_unshadow_r3t(sg, raddr);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001680 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001681 spin_unlock(&sg->guest_table_lock);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001682 return rc;
1683out_free:
1684 spin_unlock(&sg->guest_table_lock);
1685 __free_pages(page, 2);
1686 return rc;
1687}
1688EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
1689
1690/**
1691 * gmap_shadow_sgt - create a shadow segment table
1692 * @sg: pointer to the shadow guest address space structure
1693 * @saddr: faulting address in the shadow gmap
1694 * @sgt: parent gmap address of the segment table to get shadowed
David Hildenbrand18b898092016-04-18 13:42:05 +02001695 * @fake: sgt references contiguous guest memory block, not a sgt
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001696 *
1697 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
1698 * shadow table structure is incomplete, -ENOMEM if out of memory and
1699 * -EFAULT if an address in the parent gmap could not be resolved.
1700 *
1701 * Called with sg->mm->mmap_sem in read.
1702 */
David Hildenbrand18b898092016-04-18 13:42:05 +02001703int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
1704 int fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001705{
1706 unsigned long raddr, origin, offset, len;
1707 unsigned long *s_sgt, *table;
1708 struct page *page;
1709 int rc;
1710
David Hildenbrand18b898092016-04-18 13:42:05 +02001711 BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001712 /* Allocate a shadow segment table */
1713 page = alloc_pages(GFP_KERNEL, 2);
1714 if (!page)
1715 return -ENOMEM;
1716 page->index = sgt & _REGION_ENTRY_ORIGIN;
David Hildenbrand18b898092016-04-18 13:42:05 +02001717 if (fake)
1718 page->index |= GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001719 s_sgt = (unsigned long *) page_to_phys(page);
1720 /* Install shadow segment table */
1721 spin_lock(&sg->guest_table_lock);
1722 table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
1723 if (!table) {
1724 rc = -EAGAIN; /* Race with unshadow */
1725 goto out_free;
1726 }
1727 if (!(*table & _REGION_ENTRY_INVALID)) {
1728 rc = 0; /* Already established */
1729 goto out_free;
David Hildenbrand998f6372016-03-08 12:23:38 +01001730 } else if (*table & _REGION_ENTRY_ORIGIN) {
1731 rc = -EAGAIN; /* Race with shadow */
1732 goto out_free;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001733 }
1734 crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
David Hildenbrand998f6372016-03-08 12:23:38 +01001735 /* mark as invalid as long as the parent table is not protected */
1736 *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
1737 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001738 if (sg->edat_level >= 1)
1739 *table |= sgt & _REGION_ENTRY_PROTECT;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001740 list_add(&page->lru, &sg->crst_list);
David Hildenbrand18b898092016-04-18 13:42:05 +02001741 if (fake) {
1742 /* nothing to protect for fake tables */
1743 *table &= ~_REGION_ENTRY_INVALID;
1744 spin_unlock(&sg->guest_table_lock);
1745 return 0;
1746 }
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001747 spin_unlock(&sg->guest_table_lock);
1748 /* Make sgt read-only in parent gmap page table */
1749 raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
1750 origin = sgt & _REGION_ENTRY_ORIGIN;
1751 offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
1752 len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
1753 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
David Hildenbrand998f6372016-03-08 12:23:38 +01001754 spin_lock(&sg->guest_table_lock);
1755 if (!rc) {
1756 table = gmap_table_walk(sg, saddr, 2);
1757 if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1758 (unsigned long) s_sgt)
1759 rc = -EAGAIN; /* Race with unshadow */
1760 else
1761 *table &= ~_REGION_ENTRY_INVALID;
1762 } else {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001763 gmap_unshadow_sgt(sg, raddr);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001764 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001765 spin_unlock(&sg->guest_table_lock);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001766 return rc;
1767out_free:
1768 spin_unlock(&sg->guest_table_lock);
1769 __free_pages(page, 2);
1770 return rc;
1771}
1772EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
1773
1774/**
1775 * gmap_shadow_pgt_lookup - find a shadow page table
1776 * @sg: pointer to the shadow guest address space structure
1777 * @saddr: the address in the shadow guest address space
1778 * @pgt: where to store the parent gmap address of the page table
1779 * @dat_protection: if the pgtable is marked as protected by dat
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001780 * @fake: pgt references contiguous guest memory block, not a pgtable
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001781 *
1782 * Returns 0 if the shadow page table was found and -EAGAIN if the page
1783 * table was not found.
1784 *
1785 * Called with sg->mm->mmap_sem in read.
1786 */
1787int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001788 unsigned long *pgt, int *dat_protection,
1789 int *fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001790{
1791 unsigned long *table;
1792 struct page *page;
1793 int rc;
1794
1795 BUG_ON(!gmap_is_shadow(sg));
1796 spin_lock(&sg->guest_table_lock);
1797 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1798 if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
1799 /* Shadow page tables are full pages (pte+pgste) */
1800 page = pfn_to_page(*table >> PAGE_SHIFT);
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001801 *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001802 *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001803 *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001804 rc = 0;
1805 } else {
1806 rc = -EAGAIN;
1807 }
1808 spin_unlock(&sg->guest_table_lock);
1809 return rc;
1811}
1812EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
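
/*
 * Editorial sketch (hypothetical helper, not from this file): a fault
 * handler can use gmap_shadow_pgt_lookup() to decide whether the pte
 * level can be shadowed directly or the page table must be built first.
 */
static bool __maybe_unused example_shadow_pgt_present(struct gmap *sg,
						      unsigned long saddr)
{
	unsigned long pgt;
	int dat_protection, fake;

	/* 0 means the segment entry is valid and points to a shadow pgt */
	return !gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection,
				       &fake);
}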
1813
1814/**
1815 * gmap_shadow_pgt - instantiate a shadow page table
1816 * @sg: pointer to the shadow guest address space structure
1817 * @saddr: faulting address in the shadow gmap
1818 * @pgt: parent gmap address of the page table to get shadowed
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001819 * @fake: pgt references contiguous guest memory block, not a pgtable
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001820 *
1821 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1822 * shadow table structure is incomplete, -ENOMEM if out of memory and
1823 * -EFAULT if an address in the parent gmap could not be resolved.
1824 *
1825 * Called with sg->mm->mmap_sem in read.
1826 */
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001827int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
1828 int fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001829{
1830 unsigned long raddr, origin;
1831 unsigned long *s_pgt, *table;
1832 struct page *page;
1833 int rc;
1834
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001835 BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001836 /* Allocate a shadow page table */
1837 page = page_table_alloc_pgste(sg->mm);
1838 if (!page)
1839 return -ENOMEM;
1840 page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001841 if (fake)
1842 page->index |= GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001843 s_pgt = (unsigned long *) page_to_phys(page);
1844 /* Install shadow page table */
1845 spin_lock(&sg->guest_table_lock);
1846 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1847 if (!table) {
1848 rc = -EAGAIN; /* Race with unshadow */
1849 goto out_free;
1850 }
1851 if (!(*table & _SEGMENT_ENTRY_INVALID)) {
1852 rc = 0; /* Already established */
1853 goto out_free;
David Hildenbrand998f6372016-03-08 12:23:38 +01001854 } else if (*table & _SEGMENT_ENTRY_ORIGIN) {
1855 rc = -EAGAIN; /* Race with shadow */
1856 goto out_free;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001857 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001858 /* mark as invalid as long as the parent table is not protected */
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001859 *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
David Hildenbrand998f6372016-03-08 12:23:38 +01001860 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001861 list_add(&page->lru, &sg->pt_list);
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001862 if (fake) {
1863 /* nothing to protect for fake tables */
1864 *table &= ~_SEGMENT_ENTRY_INVALID;
1865 spin_unlock(&sg->guest_table_lock);
1866 return 0;
1867 }
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001868 spin_unlock(&sg->guest_table_lock);
1869 /* Make pgt read-only in parent gmap page table (not the pgste) */
1870 raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
1871 origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
1872 rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
David Hildenbrand998f6372016-03-08 12:23:38 +01001873 spin_lock(&sg->guest_table_lock);
1874 if (!rc) {
1875 table = gmap_table_walk(sg, saddr, 1);
1876 if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
1877 (unsigned long) s_pgt)
1878 rc = -EAGAIN; /* Race with unshadow */
1879 else
1880 *table &= ~_SEGMENT_ENTRY_INVALID;
1881 } else {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001882 gmap_unshadow_pgt(sg, raddr);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001883 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001884 spin_unlock(&sg->guest_table_lock);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001885 return rc;
1886out_free:
1887 spin_unlock(&sg->guest_table_lock);
1888 page_table_free_pgste(page);
1889 return rc;
1891}
1892EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
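
/*
 * Editorial note (hedged): when the guest maps a large (1 MB) page via
 * EDAT-1 there is no guest page table to protect, so a caller passes
 * the block origin with @fake set, e.g. (with a hypothetical guest
 * segment-table entry "ste" and the usual large-origin mask):
 *
 *	rc = gmap_shadow_pgt(sg, saddr,
 *			     ste & _SEGMENT_ENTRY_ORIGIN_LARGE, 1);
 *
 * The fake page table is then filled by gmap_shadow_page() with ptes
 * pointing into the contiguous block.
 */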
1893
1894/**
1895 * gmap_shadow_page - create a shadow page mapping
1896 * @sg: pointer to the shadow guest address space structure
1897 * @saddr: faulting address in the shadow gmap
David Hildenbranda9d23e72016-03-08 12:21:41 +01001898 * @pte: pte in parent gmap address space to get shadowed
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001899 *
1900 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1901 * shadow table structure is incomplete, -ENOMEM if out of memory and
1902 * -EFAULT if an address in the parent gmap could not be resolved.
1903 *
1904 * Called with sg->mm->mmap_sem in read.
1905 */
David Hildenbranda9d23e72016-03-08 12:21:41 +01001906int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001907{
1908 struct gmap *parent;
1909 struct gmap_rmap *rmap;
David Hildenbranda9d23e72016-03-08 12:21:41 +01001910 unsigned long vmaddr, paddr;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001911 spinlock_t *ptl;
1912 pte_t *sptep, *tptep;
1913 int rc;
1914
1915 BUG_ON(!gmap_is_shadow(sg));
1916 parent = sg->parent;
1917
1918 rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
1919 if (!rmap)
1920 return -ENOMEM;
1921 rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
1922
1923 while (1) {
David Hildenbranda9d23e72016-03-08 12:21:41 +01001924 paddr = pte_val(pte) & PAGE_MASK;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001925 vmaddr = __gmap_translate(parent, paddr);
1926 if (IS_ERR_VALUE(vmaddr)) {
1927 rc = vmaddr;
1928 break;
1929 }
1930 rc = radix_tree_preload(GFP_KERNEL);
1931 if (rc)
1932 break;
1933 rc = -EAGAIN;
1934 sptep = gmap_pte_op_walk(parent, paddr, &ptl);
1935 if (sptep) {
1936 spin_lock(&sg->guest_table_lock);
1937 /* Get page table pointer */
1938 tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
1939 if (!tptep) {
1940 spin_unlock(&sg->guest_table_lock);
1941 gmap_pte_op_end(ptl);
1942 radix_tree_preload_end();
1943 break;
1944 }
David Hildenbranda9d23e72016-03-08 12:21:41 +01001945 rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001946 if (rc > 0) {
1947 /* Success and a new mapping */
1948 gmap_insert_rmap(sg, vmaddr, rmap);
1949 rmap = NULL;
1950 rc = 0;
1951 }
1952 gmap_pte_op_end(ptl);
1953 spin_unlock(&sg->guest_table_lock);
1954 }
1955 radix_tree_preload_end();
1956 if (!rc)
1957 break;
1958 rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
1959 if (rc)
1960 break;
1961 }
1962 kfree(rmap);
1963 return rc;
1964}
1965EXPORT_SYMBOL_GPL(gmap_shadow_page);
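
/*
 * Editorial overview (sketch, not original code): resolving a fault at
 * @saddr in a shadow gmap composes the functions above top-down; levels
 * above the guest ASCE type are skipped, and the walk is retried while
 * a racing invalidation yields -EAGAIN. The guest_* inputs come from
 * walking the guest's own DAT tables in the parent gmap (done by the
 * KVM code, not by this file):
 *
 *	do {
 *		rc = gmap_shadow_r2t(sg, saddr, guest_r2t, fake);
 *		rc = rc ?: gmap_shadow_r3t(sg, saddr, guest_r3t, fake);
 *		rc = rc ?: gmap_shadow_sgt(sg, saddr, guest_sgt, fake);
 *		rc = rc ?: gmap_shadow_pgt(sg, saddr, guest_pgt, fake);
 *		rc = rc ?: gmap_shadow_page(sg, saddr, guest_pte);
 *	} while (rc == -EAGAIN);
 */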
1966
1967/**
1968 * gmap_shadow_notify - handle notifications for shadow gmap
1969 *
1970 * Called with sg->parent->shadow_lock.
1971 */
1972static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
1973 unsigned long offset, pte_t *pte)
1974{
1975 struct gmap_rmap *rmap, *rnext, *head;
1976 unsigned long gaddr, start, end, bits, raddr;
1977 unsigned long *table;
1978
1979 BUG_ON(!gmap_is_shadow(sg));
1980 spin_lock(&sg->parent->guest_table_lock);
1981 table = radix_tree_lookup(&sg->parent->host_to_guest,
1982 vmaddr >> PMD_SHIFT);
1983 gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
1984 spin_unlock(&sg->parent->guest_table_lock);
1985 if (!table)
1986 return;
1987
1988 spin_lock(&sg->guest_table_lock);
1989 if (sg->removed) {
1990 spin_unlock(&sg->guest_table_lock);
1991 return;
1992 }
1993 /* Check for top level table */
1994 start = sg->orig_asce & _ASCE_ORIGIN;
1995 end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
David Hildenbrand3218f702016-04-18 16:22:24 +02001996 if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
1997 gaddr < end) {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001998 /* The complete shadow table has to go */
1999 gmap_unshadow(sg);
2000 spin_unlock(&sg->guest_table_lock);
2001 list_del(&sg->list);
2002 gmap_put(sg);
2003 return;
2004 }
2005 /* Remove the page table tree starting at one specific entry */
2006 head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
2007 gmap_for_each_rmap_safe(rmap, rnext, head) {
2008 bits = rmap->raddr & _SHADOW_RMAP_MASK;
2009 raddr = rmap->raddr ^ bits;
2010 switch (bits) {
2011 case _SHADOW_RMAP_REGION1:
2012 gmap_unshadow_r2t(sg, raddr);
2013 break;
2014 case _SHADOW_RMAP_REGION2:
2015 gmap_unshadow_r3t(sg, raddr);
2016 break;
2017 case _SHADOW_RMAP_REGION3:
2018 gmap_unshadow_sgt(sg, raddr);
2019 break;
2020 case _SHADOW_RMAP_SEGMENT:
2021 gmap_unshadow_pgt(sg, raddr);
2022 break;
2023 case _SHADOW_RMAP_PGTABLE:
2024 gmap_unshadow_page(sg, raddr);
2025 break;
2026 }
2027 kfree(rmap);
2028 }
2029 spin_unlock(&sg->guest_table_lock);
2030}
2031
2032/**
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002033 * ptep_notify - call all invalidation callbacks for a specific pte.
2034 * @mm: pointer to the process mm_struct
2035 * @vmaddr: virtual address in the process address space
2036 * @pte: pointer to the page table entry
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002037 * @bits: bits from the pgste that caused the notify call
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002038 *
2039 * This function is assumed to be called with the page table lock held
2040 * for the pte to notify.
2041 */
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002042void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
2043 pte_t *pte, unsigned long bits)
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002044{
2045 unsigned long offset, gaddr;
2046 unsigned long *table;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002047 struct gmap *gmap, *sg, *next;
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002048
2049 offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
2050 offset = offset * (4096 / sizeof(pte_t));
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01002051 rcu_read_lock();
2052 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002053 if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
2054 spin_lock(&gmap->shadow_lock);
2055 list_for_each_entry_safe(sg, next,
2056 &gmap->children, list)
2057 gmap_shadow_notify(sg, vmaddr, offset, pte);
2058 spin_unlock(&gmap->shadow_lock);
2059 }
2060 if (!(bits & PGSTE_IN_BIT))
2061 continue;
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01002062 spin_lock(&gmap->guest_table_lock);
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002063 table = radix_tree_lookup(&gmap->host_to_guest,
2064 vmaddr >> PMD_SHIFT);
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01002065 if (table)
2066 gaddr = __gmap_segment_gaddr(table) + offset;
2067 spin_unlock(&gmap->guest_table_lock);
2068 if (table)
2069 gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002070 }
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01002071 rcu_read_unlock();
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002072}
2073EXPORT_SYMBOL_GPL(ptep_notify);
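
/*
 * Editorial note (worked example): s390 page tables are 2K, i.e. 256
 * ptes of 8 bytes. For a pte at byte offset B inside its table, the
 * two offset lines above compute the guest-address offset within the
 * segment as
 *
 *	offset = (B & 0x7f8) * (4096 / 8) = (B / 8) * 4096
 *
 * i.e. entry index times the 4K mapped per entry; e.g. the third pte
 * (B = 16) yields 2 * 4096 = 8192.
 */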
2074
2075static inline void thp_split_mm(struct mm_struct *mm)
2076{
2077#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2078 struct vm_area_struct *vma;
2079 unsigned long addr;
2080
2081 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
2082 for (addr = vma->vm_start;
2083 addr < vma->vm_end;
2084 addr += PAGE_SIZE)
2085 follow_page(vma, addr, FOLL_SPLIT);
2086 vma->vm_flags &= ~VM_HUGEPAGE;
2087 vma->vm_flags |= VM_NOHUGEPAGE;
2088 }
2089 mm->def_flags |= VM_NOHUGEPAGE;
2090#endif
2091}
2092
2093/*
2094 * switch on pgstes for its userspace process (for kvm)
2095 */
2096int s390_enable_sie(void)
2097{
2098 struct mm_struct *mm = current->mm;
2099
2100 /* Do we have pgstes? if yes, we are done */
2101 if (mm_has_pgste(mm))
2102 return 0;
2103 /* Fail if the page tables are 2K */
2104 if (!mm_alloc_pgste(mm))
2105 return -EINVAL;
2106 down_write(&mm->mmap_sem);
2107 mm->context.has_pgste = 1;
2108 /* split thp mappings and disable thp for future mappings */
2109 thp_split_mm(mm);
2110 up_write(&mm->mmap_sem);
2111 return 0;
2112}
2113EXPORT_SYMBOL_GPL(s390_enable_sie);
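
/*
 * Editorial usage note (hedged): KVM calls this once while creating a
 * VM, before the first gmap is set up; a failure means the process is
 * stuck with 2K page tables and cannot run guests. A sketch of such a
 * call site ("limit" is a placeholder):
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;
 *	gmap = gmap_create(current->mm, limit);
 */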
2114
2115/*
2116 * Enable storage key handling from now on and initialize the storage
2117 * keys with the default key.
2118 */
2119static int __s390_enable_skey(pte_t *pte, unsigned long addr,
2120 unsigned long next, struct mm_walk *walk)
2121{
2122 /*
2123 * Remove all zero page mappings; once a policy to forbid
2124 * zero page mappings has been established, subsequent faults
2125 * for these pages will get fresh anonymous pages.
2126 */
2127 if (is_zero_pfn(pte_pfn(*pte)))
2128 ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
2129 /* Clear storage key */
2130 ptep_zap_key(walk->mm, addr, pte);
2131 return 0;
2132}
2133
2134int s390_enable_skey(void)
2135{
2136 struct mm_walk walk = { .pte_entry = __s390_enable_skey };
2137 struct mm_struct *mm = current->mm;
2138 struct vm_area_struct *vma;
2139 int rc = 0;
2140
2141 down_write(&mm->mmap_sem);
2142 if (mm_use_skey(mm))
2143 goto out_up;
2144
2145 mm->context.use_skey = 1;
2146 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2147 if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
2148 MADV_UNMERGEABLE, &vma->vm_flags)) {
2149 mm->context.use_skey = 0;
2150 rc = -ENOMEM;
2151 goto out_up;
2152 }
2153 }
2154 mm->def_flags &= ~VM_MERGEABLE;
2155
2156 walk.mm = mm;
2157 walk_page_range(0, TASK_SIZE, &walk);
2158
2159out_up:
2160 up_write(&mm->mmap_sem);
2161 return rc;
2162}
2163EXPORT_SYMBOL_GPL(s390_enable_skey);
2164
2165/*
2166 * Reset CMMA state, make all pages stable again.
2167 */
2168static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
2169 unsigned long next, struct mm_walk *walk)
2170{
2171 ptep_zap_unused(walk->mm, addr, pte, 1);
2172 return 0;
2173}
2174
2175void s390_reset_cmma(struct mm_struct *mm)
2176{
2177 struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
2178
2179 down_write(&mm->mmap_sem);
2180 walk.mm = mm;
2181 walk_page_range(0, TASK_SIZE, &walk);
2182 up_write(&mm->mmap_sem);
2183}
2184EXPORT_SYMBOL_GPL(s390_reset_cmma);
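
/*
 * Editorial sketch (not part of the original file): s390_enable_skey()
 * and s390_reset_cmma() share the same walk_page_range() pattern; a
 * further whole-address-space pte pass would be structured like this:
 */
static int __maybe_unused __example_pte_op(pte_t *pte, unsigned long addr,
					   unsigned long next,
					   struct mm_walk *walk)
{
	/* inspect or modify one pte; a nonzero return aborts the walk */
	return 0;
}

static void __maybe_unused example_walk_all_ptes(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __example_pte_op };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}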