/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
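
/*
 * Illustrative usage sketch (not compiled; error handling trimmed, and
 * the 4 TB limit below is an arbitrary example value). A typical caller
 * such as the KVM arch backend creates the gmap against its own mm,
 * installs it around guest execution and tears it down again:
 *
 *	struct gmap *g = gmap_create(current->mm, (1UL << 42) - 1);
 *
 *	if (!g)
 *		return -ENOMEM;
 *	gmap_enable(g);
 *	... run the guest, resolve faults with gmap_fault() ...
 *	gmap_disable(g);
 *	gmap_remove(g);		... drops the initial reference ...
 */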

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
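
/*
 * Worked example for __gmap_segment_gaddr (illustrative numbers,
 * assuming the usual s390 values PTRS_PER_PMD == 2048 and
 * PMD_SIZE == 1 MB): the segment table page records the guest address
 * of its first segment in page->index, so an @entry at byte offset
 * 0x48 into the table is entry number 0x48 / 8 = 9, and the function
 * returns page->index + 9 * PMD_SIZE.
 */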

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
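
/*
 * Sketch of the map/unmap pair (illustrative values; @from, @to and
 * @len must all be 1 MB segment aligned). This backs 8 MB of guest
 * memory at guest address 16 MB with host memory at vm_start:
 *
 *	rc = gmap_map_segment(g, vm_start, 16UL << 20, 8UL << 20);
 *	if (rc)
 *		return rc;
 *	...
 *	rc = gmap_unmap_segment(g, 16UL << 20, 8UL << 20);
 */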

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
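
/*
 * The error case is encoded in-band (illustrative sketch): a failed
 * lookup comes back as -EFAULT stored in the unsigned long, so callers
 * test the result with IS_ERR_VALUE() before using it as an address:
 *
 *	unsigned long vmaddr = gmap_translate(g, gaddr);
 *
 *	if (IS_ERR_VALUE(vmaddr))
 *		return (int) vmaddr;
 *	... vmaddr is the host address backing gaddr ...
 */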

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * If fixup_user_fault() dropped the mmap_sem during fault-in,
	 * redo __gmap_translate to avoid racing with a map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
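
/*
 * Illustrative fault-handling sketch (hypothetical caller; this is
 * roughly how a guest access exception would be resolved before
 * re-entering the guest):
 *
 *	rc = gmap_fault(g, gaddr, is_write ? FAULT_FLAG_WRITE : 0);
 *	if (rc == -EFAULT)
 *		... no backing, inject an exception into the guest ...
 *	else if (rc)
 *		return rc;	... e.g. -ENOMEM ...
 *	... mapping established, retry the guest access ...
 */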

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
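
/*
 * Minimal notifier sketch (hypothetical callback; the in-tree consumer
 * is the KVM code). The callback runs for guest ranges whose ptes were
 * armed via gmap_mprotect_notify() or the shadow table machinery and
 * have since been invalidated or changed:
 *
 *	static void my_gmap_notifier(struct gmap *gmap, unsigned long start,
 *				     unsigned long end)
 *	{
 *		... kick vcpus that cache state for [start, end] ...
 *	}
 *
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_gmap_notifier,
 *	};
 *
 *	gmap_register_pte_notifier(&my_nb);
 *	...
 *	gmap_unregister_pte_notifier(&my_nb);
 */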

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}
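
/*
 * Example of the @level convention (illustrative): for a gmap with a
 * region-1 (4-level) asce and a fully populated mapping, these calls
 * return entry pointers into different tables for the same address:
 *
 *	entry = gmap_table_walk(gmap, gaddr, 1);   ... segment table entry
 *	entry = gmap_table_walk(gmap, gaddr, 4);   ... region-1 table entry
 *
 * Asking for a level above the asce's top table type, e.g. level 4 on
 * a segment-table-only gmap, fails the first check and returns NULL.
 */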

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the page table spinlock
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
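
/*
 * Usage sketch (illustrative; gaddr and len must be PAGE_SIZE aligned).
 * This is how a caller would arrange to be notified of guest writes to
 * two pages, e.g. the prefix area in the KVM case:
 *
 *	rc = gmap_mprotect_notify(g, prefix, 2 * PAGE_SIZE, PROT_READ);
 *
 * When one of the armed ptes is changed later, for instance to resolve
 * a guest write fault, the registered notifiers are invoked with the
 * affected guest address range.
 */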

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
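
/*
 * Illustrative sketch (the caller must hold mmap_sem in read; "ga" is
 * a made-up guest address of an 8-byte value to peek at):
 *
 *	unsigned long entry;
 *
 *	down_read(&gmap->mm->mmap_sem);
 *	rc = gmap_read_table(gmap, ga, &entry);
 *	up_read(&gmap->mm->mmap_sem);
 *	if (!rc)
 *		... entry now holds the 8 bytes read from the guest ...
 */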

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
1381
1382/**
1383 * gmap_unshadow - remove a shadow page table completely
1384 * @sg: pointer to the shadow guest address space structure
1385 *
1386 * Called with sg->guest_table_lock
1387 */
1388static void gmap_unshadow(struct gmap *sg)
1389{
1390 unsigned long *table;
1391
1392 BUG_ON(!gmap_is_shadow(sg));
1393 if (sg->removed)
1394 return;
1395 sg->removed = 1;
1396 gmap_call_notifier(sg, 0, -1UL);
David Hildenbrandeea36782016-04-15 12:45:45 +02001397 gmap_flush_tlb(sg);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001398 table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
1399 switch (sg->asce & _ASCE_TYPE_MASK) {
1400 case _ASCE_TYPE_REGION1:
1401 __gmap_unshadow_r1t(sg, 0, table);
1402 break;
1403 case _ASCE_TYPE_REGION2:
1404 __gmap_unshadow_r2t(sg, 0, table);
1405 break;
1406 case _ASCE_TYPE_REGION3:
1407 __gmap_unshadow_r3t(sg, 0, table);
1408 break;
1409 case _ASCE_TYPE_SEGMENT:
1410 __gmap_unshadow_sgt(sg, 0, table);
1411 break;
1412 }
1413}
1414
1415/**
1416 * gmap_find_shadow - find a specific asce in the list of shadow tables
1417 * @parent: pointer to the parent gmap
1418 * @asce: ASCE for which the shadow table is created
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001419 * @edat_level: edat level to be used for the shadow translation
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001420 *
1421 * Returns the pointer to a gmap if a shadow table with the given asce is
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001422 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
1423 * otherwise NULL
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001424 */
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001425static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
1426 int edat_level)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001427{
1428 struct gmap *sg;
1429
1430 list_for_each_entry(sg, &parent->children, list) {
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001431 if (sg->orig_asce != asce || sg->edat_level != edat_level ||
1432 sg->removed)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001433 continue;
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001434 if (!sg->initialized)
1435 return ERR_PTR(-EAGAIN);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001436 atomic_inc(&sg->ref_count);
1437 return sg;
1438 }
1439 return NULL;
1440}
1441
1442/**
David Hildenbrand5b6c9632016-05-27 18:57:33 +02001443 * gmap_shadow_valid - check if a shadow guest address space matches the
1444 * given properties and is still valid
1445 * @sg: pointer to the shadow guest address space structure
1446 * @asce: ASCE for which the shadow table is requested
1447 * @edat_level: edat level to be used for the shadow translation
1448 *
1449 * Returns 1 if the gmap shadow is still valid and matches the given
1450 * properties, the caller can continue using it. Returns 0 otherwise, the
1451 * caller has to request a new shadow gmap in this case.
1453 */
1454int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
1455{
1456 if (sg->removed)
1457 return 0;
1458 return sg->orig_asce == asce && sg->edat_level == edat_level;
1459}
1460EXPORT_SYMBOL_GPL(gmap_shadow_valid);
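/*
 * Editorial sketch, not part of the original source: a caller that caches
 * a shadow gmap (e.g. a hypothetical vsie handler keeping "sg" around)
 * would revalidate it before reuse and request a new one otherwise:
 *
 *	if (!sg || !gmap_shadow_valid(sg, asce, edat_level)) {
 *		if (sg)
 *			gmap_put(sg);
 *		sg = gmap_shadow(parent, asce, edat_level);
 *	}
 */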
1461
1462/**
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001463 * gmap_shadow - create/find a shadow guest address space
1464 * @parent: pointer to the parent gmap
1465 * @asce: ASCE for which the shadow table is created
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001466 * @edat_level: edat level to be used for the shadow translation
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001467 *
 1468 * The pages of the top level page table referred to by the asce parameter
1469 * will be set to read-only and marked in the PGSTEs of the kvm process.
1470 * The shadow table will be removed automatically on any change to the
1471 * PTE mapping for the source table.
1472 *
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001473 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
1474 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
1475 * parent gmap table could not be protected.
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001476 */
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001477struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
1478 int edat_level)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001479{
1480 struct gmap *sg, *new;
1481 unsigned long limit;
1482 int rc;
1483
1484 BUG_ON(gmap_is_shadow(parent));
1485 spin_lock(&parent->shadow_lock);
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001486 sg = gmap_find_shadow(parent, asce, edat_level);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001487 spin_unlock(&parent->shadow_lock);
1488 if (sg)
1489 return sg;
1490 /* Create a new shadow gmap */
1491 limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
David Hildenbrand3218f702016-04-18 16:22:24 +02001492 if (asce & _ASCE_REAL_SPACE)
1493 limit = -1UL;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001494 new = gmap_alloc(limit);
1495 if (!new)
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001496 return ERR_PTR(-ENOMEM);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001497 new->mm = parent->mm;
1498 new->parent = gmap_get(parent);
1499 new->orig_asce = asce;
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001500 new->edat_level = edat_level;
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001501 new->initialized = false;
1502 spin_lock(&parent->shadow_lock);
1503 /* Recheck if another CPU created the same shadow */
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001504 sg = gmap_find_shadow(parent, asce, edat_level);
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001505 if (sg) {
1506 spin_unlock(&parent->shadow_lock);
1507 gmap_free(new);
1508 return sg;
1509 }
David Hildenbrand717c0552016-05-02 12:10:17 +02001510 if (asce & _ASCE_REAL_SPACE) {
1511 /* only allow one real-space gmap shadow */
1512 list_for_each_entry(sg, &parent->children, list) {
1513 if (sg->orig_asce & _ASCE_REAL_SPACE) {
1514 spin_lock(&sg->guest_table_lock);
1515 gmap_unshadow(sg);
1516 spin_unlock(&sg->guest_table_lock);
1517 list_del(&sg->list);
1518 gmap_put(sg);
1519 break;
1520 }
1521 }
1522 }
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001523 atomic_set(&new->ref_count, 2);
1524 list_add(&new->list, &parent->children);
David Hildenbrand3218f702016-04-18 16:22:24 +02001525 if (asce & _ASCE_REAL_SPACE) {
1526 /* nothing to protect, return right away */
1527 new->initialized = true;
1528 spin_unlock(&parent->shadow_lock);
1529 return new;
1530 }
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001531 spin_unlock(&parent->shadow_lock);
1532 /* protect after insertion, so it will get properly invalidated */
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001533 down_read(&parent->mm->mmap_sem);
1534 rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
Heiko Carstensf1c11742017-07-05 07:37:27 +02001535 ((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001536 PROT_READ, PGSTE_VSIE_BIT);
1537 up_read(&parent->mm->mmap_sem);
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001538 spin_lock(&parent->shadow_lock);
1539 new->initialized = true;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001540 if (rc) {
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001541 list_del(&new->list);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001542 gmap_free(new);
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001543 new = ERR_PTR(rc);
1544 }
1545 spin_unlock(&parent->shadow_lock);
1546 return new;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001547}
1548EXPORT_SYMBOL_GPL(gmap_shadow);
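/*
 * Editorial sketch: gmap_shadow() hands back a reference that the caller
 * owns, so a hypothetical user checks the ERR_PTR() cases and drops the
 * reference with gmap_put() when the shadow is no longer needed:
 *
 *	sg = gmap_shadow(parent, asce, edat_level);
 *	if (IS_ERR(sg))
 *		return PTR_ERR(sg);	/* -ENOMEM, -EAGAIN or -EFAULT */
 *	/* ... run the nested guest on the shadow ... */
 *	gmap_put(sg);
 */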
1549
1550/**
1551 * gmap_shadow_r2t - create an empty shadow region 2 table
1552 * @sg: pointer to the shadow guest address space structure
1553 * @saddr: faulting address in the shadow gmap
1554 * @r2t: parent gmap address of the region 2 table to get shadowed
David Hildenbrand3218f702016-04-18 16:22:24 +02001555 * @fake: r2t references contiguous guest memory block, not an r2t
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001556 *
1557 * The r2t parameter specifies the address of the source table. The
1558 * four pages of the source table are made read-only in the parent gmap
1559 * address space. A write to the source table area @r2t will automatically
 1560 * remove the shadow r2 table and all of its descendants.
1561 *
1562 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1563 * shadow table structure is incomplete, -ENOMEM if out of memory and
1564 * -EFAULT if an address in the parent gmap could not be resolved.
1565 *
1566 * Called with sg->mm->mmap_sem in read.
1567 */
David Hildenbrand3218f702016-04-18 16:22:24 +02001568int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
1569 int fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001570{
1571 unsigned long raddr, origin, offset, len;
1572 unsigned long *s_r2t, *table;
1573 struct page *page;
1574 int rc;
1575
1576 BUG_ON(!gmap_is_shadow(sg));
1577 /* Allocate a shadow region second table */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001578 page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001579 if (!page)
1580 return -ENOMEM;
1581 page->index = r2t & _REGION_ENTRY_ORIGIN;
David Hildenbrand3218f702016-04-18 16:22:24 +02001582 if (fake)
1583 page->index |= GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001584 s_r2t = (unsigned long *) page_to_phys(page);
1585 /* Install shadow region second table */
1586 spin_lock(&sg->guest_table_lock);
1587 table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
1588 if (!table) {
1589 rc = -EAGAIN; /* Race with unshadow */
1590 goto out_free;
1591 }
1592 if (!(*table & _REGION_ENTRY_INVALID)) {
1593 rc = 0; /* Already established */
1594 goto out_free;
David Hildenbrand998f6372016-03-08 12:23:38 +01001595 } else if (*table & _REGION_ENTRY_ORIGIN) {
1596 rc = -EAGAIN; /* Race with shadow */
1597 goto out_free;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001598 }
1599 crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
David Hildenbrand998f6372016-03-08 12:23:38 +01001600 /* mark as invalid as long as the parent table is not protected */
1601 *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
1602 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001603 if (sg->edat_level >= 1)
1604 *table |= (r2t & _REGION_ENTRY_PROTECT);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001605 list_add(&page->lru, &sg->crst_list);
David Hildenbrand3218f702016-04-18 16:22:24 +02001606 if (fake) {
1607 /* nothing to protect for fake tables */
1608 *table &= ~_REGION_ENTRY_INVALID;
1609 spin_unlock(&sg->guest_table_lock);
1610 return 0;
1611 }
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001612 spin_unlock(&sg->guest_table_lock);
1613 /* Make r2t read-only in parent gmap page table */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001614 raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001615 origin = r2t & _REGION_ENTRY_ORIGIN;
Heiko Carstensf1c11742017-07-05 07:37:27 +02001616 offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1617 len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001618 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
David Hildenbrand998f6372016-03-08 12:23:38 +01001619 spin_lock(&sg->guest_table_lock);
1620 if (!rc) {
1621 table = gmap_table_walk(sg, saddr, 4);
1622 if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1623 (unsigned long) s_r2t)
1624 rc = -EAGAIN; /* Race with unshadow */
1625 else
1626 *table &= ~_REGION_ENTRY_INVALID;
1627 } else {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001628 gmap_unshadow_r2t(sg, raddr);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001629 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001630 spin_unlock(&sg->guest_table_lock);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001631 return rc;
1632out_free:
1633 spin_unlock(&sg->guest_table_lock);
Heiko Carstensf1c11742017-07-05 07:37:27 +02001634 __free_pages(page, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001635 return rc;
1636}
1637EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
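/*
 * Editorial sketch: gmap_shadow_r3t(), gmap_shadow_sgt() and
 * gmap_shadow_pgt() below share this calling convention, so a
 * hypothetical shadow fault handler walks the guest tables top-down,
 * treating -EAGAIN as "return to the guest and let it fault again"
 * ("r2t_entry"/"r3t_entry" stand for values read from the guest tables):
 *
 *	rc = gmap_shadow_r2t(sg, saddr, r2t_entry, fake);
 *	if (rc)
 *		return rc;	/* -EAGAIN, -ENOMEM or -EFAULT */
 *	rc = gmap_shadow_r3t(sg, saddr, r3t_entry, fake);
 *	...
 */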
1638
1639/**
1640 * gmap_shadow_r3t - create a shadow region 3 table
1641 * @sg: pointer to the shadow guest address space structure
1642 * @saddr: faulting address in the shadow gmap
1643 * @r3t: parent gmap address of the region 3 table to get shadowed
David Hildenbrand3218f702016-04-18 16:22:24 +02001644 * @fake: r3t references contiguous guest memory block, not an r3t
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001645 *
1646 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1647 * shadow table structure is incomplete, -ENOMEM if out of memory and
1648 * -EFAULT if an address in the parent gmap could not be resolved.
1649 *
1650 * Called with sg->mm->mmap_sem in read.
1651 */
David Hildenbrand3218f702016-04-18 16:22:24 +02001652int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
1653 int fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001654{
1655 unsigned long raddr, origin, offset, len;
1656 unsigned long *s_r3t, *table;
1657 struct page *page;
1658 int rc;
1659
1660 BUG_ON(!gmap_is_shadow(sg));
 1661 /* Allocate a shadow region third table */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001662 page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001663 if (!page)
1664 return -ENOMEM;
1665 page->index = r3t & _REGION_ENTRY_ORIGIN;
David Hildenbrand3218f702016-04-18 16:22:24 +02001666 if (fake)
1667 page->index |= GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001668 s_r3t = (unsigned long *) page_to_phys(page);
 1669 /* Install shadow region third table */
1670 spin_lock(&sg->guest_table_lock);
1671 table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
1672 if (!table) {
1673 rc = -EAGAIN; /* Race with unshadow */
1674 goto out_free;
1675 }
1676 if (!(*table & _REGION_ENTRY_INVALID)) {
1677 rc = 0; /* Already established */
1678 goto out_free;
David Hildenbrand998f6372016-03-08 12:23:38 +01001679 } else if (*table & _REGION_ENTRY_ORIGIN) {
 1680 rc = -EAGAIN; /* Race with shadow */
 goto out_free;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001681 }
1682 crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
David Hildenbrand998f6372016-03-08 12:23:38 +01001683 /* mark as invalid as long as the parent table is not protected */
1684 *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
1685 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001686 if (sg->edat_level >= 1)
1687 *table |= (r3t & _REGION_ENTRY_PROTECT);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001688 list_add(&page->lru, &sg->crst_list);
David Hildenbrand3218f702016-04-18 16:22:24 +02001689 if (fake) {
1690 /* nothing to protect for fake tables */
1691 *table &= ~_REGION_ENTRY_INVALID;
1692 spin_unlock(&sg->guest_table_lock);
1693 return 0;
1694 }
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001695 spin_unlock(&sg->guest_table_lock);
1696 /* Make r3t read-only in parent gmap page table */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001697 raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001698 origin = r3t & _REGION_ENTRY_ORIGIN;
Heiko Carstensf1c11742017-07-05 07:37:27 +02001699 offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1700 len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001701 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
David Hildenbrand998f6372016-03-08 12:23:38 +01001702 spin_lock(&sg->guest_table_lock);
1703 if (!rc) {
1704 table = gmap_table_walk(sg, saddr, 3);
1705 if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1706 (unsigned long) s_r3t)
1707 rc = -EAGAIN; /* Race with unshadow */
1708 else
1709 *table &= ~_REGION_ENTRY_INVALID;
1710 } else {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001711 gmap_unshadow_r3t(sg, raddr);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001712 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001713 spin_unlock(&sg->guest_table_lock);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001714 return rc;
1715out_free:
1716 spin_unlock(&sg->guest_table_lock);
Heiko Carstensf1c11742017-07-05 07:37:27 +02001717 __free_pages(page, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001718 return rc;
1719}
1720EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
1721
1722/**
1723 * gmap_shadow_sgt - create a shadow segment table
1724 * @sg: pointer to the shadow guest address space structure
1725 * @saddr: faulting address in the shadow gmap
1726 * @sgt: parent gmap address of the segment table to get shadowed
David Hildenbrand18b898092016-04-18 13:42:05 +02001727 * @fake: sgt references contiguous guest memory block, not an sgt
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001728 *
1729 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
1730 * shadow table structure is incomplete, -ENOMEM if out of memory and
1731 * -EFAULT if an address in the parent gmap could not be resolved.
1732 *
1733 * Called with sg->mm->mmap_sem in read.
1734 */
David Hildenbrand18b898092016-04-18 13:42:05 +02001735int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
1736 int fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001737{
1738 unsigned long raddr, origin, offset, len;
1739 unsigned long *s_sgt, *table;
1740 struct page *page;
1741 int rc;
1742
David Hildenbrand18b898092016-04-18 13:42:05 +02001743 BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001744 /* Allocate a shadow segment table */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001745 page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001746 if (!page)
1747 return -ENOMEM;
1748 page->index = sgt & _REGION_ENTRY_ORIGIN;
David Hildenbrand18b898092016-04-18 13:42:05 +02001749 if (fake)
1750 page->index |= GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001751 s_sgt = (unsigned long *) page_to_phys(page);
 1752 /* Install shadow segment table */
1753 spin_lock(&sg->guest_table_lock);
1754 table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
1755 if (!table) {
1756 rc = -EAGAIN; /* Race with unshadow */
1757 goto out_free;
1758 }
1759 if (!(*table & _REGION_ENTRY_INVALID)) {
1760 rc = 0; /* Already established */
1761 goto out_free;
David Hildenbrand998f6372016-03-08 12:23:38 +01001762 } else if (*table & _REGION_ENTRY_ORIGIN) {
1763 rc = -EAGAIN; /* Race with shadow */
1764 goto out_free;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001765 }
1766 crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
David Hildenbrand998f6372016-03-08 12:23:38 +01001767 /* mark as invalid as long as the parent table is not protected */
1768 *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
1769 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001770 if (sg->edat_level >= 1)
1771 *table |= sgt & _REGION_ENTRY_PROTECT;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001772 list_add(&page->lru, &sg->crst_list);
David Hildenbrand18b898092016-04-18 13:42:05 +02001773 if (fake) {
1774 /* nothing to protect for fake tables */
1775 *table &= ~_REGION_ENTRY_INVALID;
1776 spin_unlock(&sg->guest_table_lock);
1777 return 0;
1778 }
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001779 spin_unlock(&sg->guest_table_lock);
1780 /* Make sgt read-only in parent gmap page table */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001781 raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001782 origin = sgt & _REGION_ENTRY_ORIGIN;
Heiko Carstensf1c11742017-07-05 07:37:27 +02001783 offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1784 len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001785 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
David Hildenbrand998f6372016-03-08 12:23:38 +01001786 spin_lock(&sg->guest_table_lock);
1787 if (!rc) {
1788 table = gmap_table_walk(sg, saddr, 2);
1789 if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1790 (unsigned long) s_sgt)
1791 rc = -EAGAIN; /* Race with unshadow */
1792 else
1793 *table &= ~_REGION_ENTRY_INVALID;
1794 } else {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001795 gmap_unshadow_sgt(sg, raddr);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001796 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001797 spin_unlock(&sg->guest_table_lock);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001798 return rc;
1799out_free:
1800 spin_unlock(&sg->guest_table_lock);
Heiko Carstensf1c11742017-07-05 07:37:27 +02001801 __free_pages(page, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001802 return rc;
1803}
1804EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
1805
1806/**
 1807 * gmap_shadow_pgt_lookup - find a shadow page table
1808 * @sg: pointer to the shadow guest address space structure
 1809 * @saddr: the address in the shadow guest address space
1810 * @pgt: parent gmap address of the page table to get shadowed
1811 * @dat_protection: if the pgtable is marked as protected by dat
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001812 * @fake: pgt references contiguous guest memory block, not a pgtable
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001813 *
1814 * Returns 0 if the shadow page table was found and -EAGAIN if the page
1815 * table was not found.
1816 *
1817 * Called with sg->mm->mmap_sem in read.
1818 */
1819int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001820 unsigned long *pgt, int *dat_protection,
1821 int *fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001822{
1823 unsigned long *table;
1824 struct page *page;
1825 int rc;
1826
1827 BUG_ON(!gmap_is_shadow(sg));
1828 spin_lock(&sg->guest_table_lock);
1829 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1830 if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
1831 /* Shadow page tables are full pages (pte+pgste) */
1832 page = pfn_to_page(*table >> PAGE_SHIFT);
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001833 *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001834 *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001835 *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001836 rc = 0;
1837 } else {
1838 rc = -EAGAIN;
1839 }
1840 spin_unlock(&sg->guest_table_lock);
1841 return rc;
1843}
1844EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
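/*
 * Editorial sketch: a fault handler would try this lookup first and only
 * walk the guest's own tables when it fails (hypothetical caller,
 * "guest_pgt" is the segment table entry fetched from guest memory):
 *
 *	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
 *	if (rc == -EAGAIN)
 *		rc = gmap_shadow_pgt(sg, saddr, guest_pgt, fake);
 */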
1845
1846/**
1847 * gmap_shadow_pgt - instantiate a shadow page table
1848 * @sg: pointer to the shadow guest address space structure
1849 * @saddr: faulting address in the shadow gmap
1850 * @pgt: parent gmap address of the page table to get shadowed
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001851 * @fake: pgt references contiguous guest memory block, not a pgtable
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001852 *
1853 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 1854 * shadow table structure is incomplete, -ENOMEM if out of memory and
 1855 * -EFAULT if an address in the parent gmap could not be resolved.
 1856 *
 1857 * Called with sg->mm->mmap_sem in read.
1858 */
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001859int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
1860 int fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001861{
1862 unsigned long raddr, origin;
1863 unsigned long *s_pgt, *table;
1864 struct page *page;
1865 int rc;
1866
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001867 BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001868 /* Allocate a shadow page table */
1869 page = page_table_alloc_pgste(sg->mm);
1870 if (!page)
1871 return -ENOMEM;
1872 page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001873 if (fake)
1874 page->index |= GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001875 s_pgt = (unsigned long *) page_to_phys(page);
1876 /* Install shadow page table */
1877 spin_lock(&sg->guest_table_lock);
1878 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1879 if (!table) {
1880 rc = -EAGAIN; /* Race with unshadow */
1881 goto out_free;
1882 }
1883 if (!(*table & _SEGMENT_ENTRY_INVALID)) {
1884 rc = 0; /* Already established */
1885 goto out_free;
David Hildenbrand998f6372016-03-08 12:23:38 +01001886 } else if (*table & _SEGMENT_ENTRY_ORIGIN) {
1887 rc = -EAGAIN; /* Race with shadow */
1888 goto out_free;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001889 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001890 /* mark as invalid as long as the parent table is not protected */
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001891 *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
David Hildenbrand998f6372016-03-08 12:23:38 +01001892 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001893 list_add(&page->lru, &sg->pt_list);
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001894 if (fake) {
1895 /* nothing to protect for fake tables */
1896 *table &= ~_SEGMENT_ENTRY_INVALID;
1897 spin_unlock(&sg->guest_table_lock);
1898 return 0;
1899 }
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001900 spin_unlock(&sg->guest_table_lock);
1901 /* Make pgt read-only in parent gmap page table (not the pgste) */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001902 raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001903 origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
1904 rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
David Hildenbrand998f6372016-03-08 12:23:38 +01001905 spin_lock(&sg->guest_table_lock);
1906 if (!rc) {
1907 table = gmap_table_walk(sg, saddr, 1);
1908 if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
1909 (unsigned long) s_pgt)
1910 rc = -EAGAIN; /* Race with unshadow */
1911 else
1912 *table &= ~_SEGMENT_ENTRY_INVALID;
1913 } else {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001914 gmap_unshadow_pgt(sg, raddr);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001915 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001916 spin_unlock(&sg->guest_table_lock);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001917 return rc;
1918out_free:
1919 spin_unlock(&sg->guest_table_lock);
1920 page_table_free_pgste(page);
1921 return rc;
1923}
1924EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
1925
1926/**
1927 * gmap_shadow_page - create a shadow page mapping
1928 * @sg: pointer to the shadow guest address space structure
1929 * @saddr: faulting address in the shadow gmap
David Hildenbranda9d23e72016-03-08 12:21:41 +01001930 * @pte: pte in parent gmap address space to get shadowed
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001931 *
1932 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1933 * shadow table structure is incomplete, -ENOMEM if out of memory and
1934 * -EFAULT if an address in the parent gmap could not be resolved.
1935 *
1936 * Called with sg->mm->mmap_sem in read.
1937 */
David Hildenbranda9d23e72016-03-08 12:21:41 +01001938int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001939{
1940 struct gmap *parent;
1941 struct gmap_rmap *rmap;
David Hildenbranda9d23e72016-03-08 12:21:41 +01001942 unsigned long vmaddr, paddr;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001943 spinlock_t *ptl;
1944 pte_t *sptep, *tptep;
David Hildenbrand01f71912016-06-13 10:49:04 +02001945 int prot;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001946 int rc;
1947
1948 BUG_ON(!gmap_is_shadow(sg));
1949 parent = sg->parent;
David Hildenbrand01f71912016-06-13 10:49:04 +02001950 prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001951
1952 rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
1953 if (!rmap)
1954 return -ENOMEM;
1955 rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
1956
1957 while (1) {
David Hildenbranda9d23e72016-03-08 12:21:41 +01001958 paddr = pte_val(pte) & PAGE_MASK;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001959 vmaddr = __gmap_translate(parent, paddr);
1960 if (IS_ERR_VALUE(vmaddr)) {
1961 rc = vmaddr;
1962 break;
1963 }
1964 rc = radix_tree_preload(GFP_KERNEL);
1965 if (rc)
1966 break;
1967 rc = -EAGAIN;
1968 sptep = gmap_pte_op_walk(parent, paddr, &ptl);
1969 if (sptep) {
1970 spin_lock(&sg->guest_table_lock);
1971 /* Get page table pointer */
1972 tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
1973 if (!tptep) {
1974 spin_unlock(&sg->guest_table_lock);
1975 gmap_pte_op_end(ptl);
1976 radix_tree_preload_end();
1977 break;
1978 }
David Hildenbranda9d23e72016-03-08 12:21:41 +01001979 rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001980 if (rc > 0) {
1981 /* Success and a new mapping */
1982 gmap_insert_rmap(sg, vmaddr, rmap);
1983 rmap = NULL;
1984 rc = 0;
1985 }
1986 gmap_pte_op_end(ptl);
1987 spin_unlock(&sg->guest_table_lock);
1988 }
1989 radix_tree_preload_end();
1990 if (!rc)
1991 break;
David Hildenbrand01f71912016-06-13 10:49:04 +02001992 rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001993 if (rc)
1994 break;
1995 }
1996 kfree(rmap);
1997 return rc;
1998}
1999EXPORT_SYMBOL_GPL(gmap_shadow_page);
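/*
 * Editorial sketch: the last step of a shadow fault combines the guest
 * frame address with the protection accumulated during the table walk
 * and shadows the final mapping ("gpa" and "dat_protection" are assumed
 * to come from that walk):
 *
 *	pte = __pte((gpa & PAGE_MASK) | (dat_protection ? _PAGE_PROTECT : 0));
 *	rc = gmap_shadow_page(sg, saddr, pte);
 */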
2000
2001/**
 2002 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: affected host virtual address in the parent mm
 * @gaddr: affected guest address
 * @pte: pointer to the invalidated pte in the parent mm
 2003 *
2004 * Called with sg->parent->shadow_lock.
2005 */
2006static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
Janosch Frank2fa5ed72017-02-08 08:59:56 +01002007 unsigned long gaddr, pte_t *pte)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002008{
2009 struct gmap_rmap *rmap, *rnext, *head;
Janosch Frank2fa5ed72017-02-08 08:59:56 +01002010 unsigned long start, end, bits, raddr;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002011
2012 BUG_ON(!gmap_is_shadow(sg));
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002013
2014 spin_lock(&sg->guest_table_lock);
2015 if (sg->removed) {
2016 spin_unlock(&sg->guest_table_lock);
2017 return;
2018 }
2019 /* Check for top level table */
2020 start = sg->orig_asce & _ASCE_ORIGIN;
Heiko Carstensf1c11742017-07-05 07:37:27 +02002021 end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
David Hildenbrand3218f702016-04-18 16:22:24 +02002022 if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
2023 gaddr < end) {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002024 /* The complete shadow table has to go */
2025 gmap_unshadow(sg);
2026 spin_unlock(&sg->guest_table_lock);
2027 list_del(&sg->list);
2028 gmap_put(sg);
2029 return;
2030 }
 2031 /* Remove the page table tree for one specific entry */
Heiko Carstensf1c11742017-07-05 07:37:27 +02002032 head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002033 gmap_for_each_rmap_safe(rmap, rnext, head) {
2034 bits = rmap->raddr & _SHADOW_RMAP_MASK;
2035 raddr = rmap->raddr ^ bits;
2036 switch (bits) {
2037 case _SHADOW_RMAP_REGION1:
2038 gmap_unshadow_r2t(sg, raddr);
2039 break;
2040 case _SHADOW_RMAP_REGION2:
2041 gmap_unshadow_r3t(sg, raddr);
2042 break;
2043 case _SHADOW_RMAP_REGION3:
2044 gmap_unshadow_sgt(sg, raddr);
2045 break;
2046 case _SHADOW_RMAP_SEGMENT:
2047 gmap_unshadow_pgt(sg, raddr);
2048 break;
2049 case _SHADOW_RMAP_PGTABLE:
2050 gmap_unshadow_page(sg, raddr);
2051 break;
2052 }
2053 kfree(rmap);
2054 }
2055 spin_unlock(&sg->guest_table_lock);
2056}
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002057
2058/**
2059 * ptep_notify - call all invalidation callbacks for a specific pte.
2060 * @mm: pointer to the process mm_struct
 2061 * @vmaddr: virtual address in the process address space
2062 * @pte: pointer to the page table entry
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002063 * @bits: bits from the pgste that caused the notify call
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002064 *
2065 * This function is assumed to be called with the page table lock held
2066 * for the pte to notify.
2067 */
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002068void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
2069 pte_t *pte, unsigned long bits)
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002070{
Janosch Frank2fa5ed72017-02-08 08:59:56 +01002071 unsigned long offset, gaddr = 0;
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002072 unsigned long *table;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002073 struct gmap *gmap, *sg, *next;
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002074
2075 offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
Heiko Carstensf1c11742017-07-05 07:37:27 +02002076 offset = offset * (PAGE_SIZE / sizeof(pte_t));
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01002077 rcu_read_lock();
2078 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2079 spin_lock(&gmap->guest_table_lock);
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002080 table = radix_tree_lookup(&gmap->host_to_guest,
2081 vmaddr >> PMD_SHIFT);
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01002082 if (table)
2083 gaddr = __gmap_segment_gaddr(table) + offset;
2084 spin_unlock(&gmap->guest_table_lock);
Janosch Frank2fa5ed72017-02-08 08:59:56 +01002085 if (!table)
2086 continue;
2087
2088 if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
2089 spin_lock(&gmap->shadow_lock);
2090 list_for_each_entry_safe(sg, next,
2091 &gmap->children, list)
2092 gmap_shadow_notify(sg, vmaddr, gaddr, pte);
2093 spin_unlock(&gmap->shadow_lock);
2094 }
2095 if (bits & PGSTE_IN_BIT)
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01002096 gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002097 }
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01002098 rcu_read_unlock();
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002099}
2100EXPORT_SYMBOL_GPL(ptep_notify);
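/*
 * Editorial sketch: ptep_notify() is not called by gmap users directly;
 * the pte primitives invoke it when a pte whose pgste carries notify
 * bits is changed, roughly along the lines of:
 *
 *	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
 *	if (bits)
 *		ptep_notify(mm, addr, ptep, bits);
 */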
2101
2102static inline void thp_split_mm(struct mm_struct *mm)
2103{
2104#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2105 struct vm_area_struct *vma;
2106 unsigned long addr;
2107
2108 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
2109 for (addr = vma->vm_start;
2110 addr < vma->vm_end;
2111 addr += PAGE_SIZE)
2112 follow_page(vma, addr, FOLL_SPLIT);
2113 vma->vm_flags &= ~VM_HUGEPAGE;
2114 vma->vm_flags |= VM_NOHUGEPAGE;
2115 }
2116 mm->def_flags |= VM_NOHUGEPAGE;
2117#endif
2118}
2119
2120/*
Christian Borntraegerfa41ba02017-08-24 12:55:08 +02002121 * Remove all empty zero pages from the mapping for lazy refaulting
2122 * - This must be called after mm->context.has_pgste is set, to avoid
2123 * future creation of zero pages
 2124 * - This must be called after THP was disabled (see thp_split_mm())
2125 */
2126static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
2127 unsigned long end, struct mm_walk *walk)
2128{
2129 unsigned long addr;
2130
2131 for (addr = start; addr != end; addr += PAGE_SIZE) {
2132 pte_t *ptep;
2133 spinlock_t *ptl;
2134
2135 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
2136 if (is_zero_pfn(pte_pfn(*ptep)))
2137 ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
2138 pte_unmap_unlock(ptep, ptl);
2139 }
2140 return 0;
2141}
2142
2143static inline void zap_zero_pages(struct mm_struct *mm)
2144{
2145 struct mm_walk walk = { .pmd_entry = __zap_zero_pages };
2146
2147 walk.mm = mm;
2148 walk_page_range(0, TASK_SIZE, &walk);
2149}
2150
2151/*
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002152 * switch on pgstes for the current userspace process (for kvm)
2153 */
2154int s390_enable_sie(void)
2155{
2156 struct mm_struct *mm = current->mm;
2157
2158 /* Do we have pgstes? if yes, we are done */
2159 if (mm_has_pgste(mm))
2160 return 0;
2161 /* Fail if the page tables are 2K */
2162 if (!mm_alloc_pgste(mm))
2163 return -EINVAL;
2164 down_write(&mm->mmap_sem);
2165 mm->context.has_pgste = 1;
2166 /* split thp mappings and disable thp for future mappings */
2167 thp_split_mm(mm);
Christian Borntraegerfa41ba02017-08-24 12:55:08 +02002168 zap_zero_pages(mm);
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002169 up_write(&mm->mmap_sem);
2170 return 0;
2171}
2172EXPORT_SYMBOL_GPL(s390_enable_sie);
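/*
 * Editorial sketch: a hypervisor enables pgstes once per VM, before the
 * first guest mapping is created (hypothetical call site):
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;	/* mm was created with 2K page tables */
 */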
2173
2174/*
2175 * Enable storage key handling from now on and initialize the storage
2176 * keys with the default key.
2177 */
2178static int __s390_enable_skey(pte_t *pte, unsigned long addr,
2179 unsigned long next, struct mm_walk *walk)
2180{
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002181 /* Clear storage key */
2182 ptep_zap_key(walk->mm, addr, pte);
2183 return 0;
2184}
2185
2186int s390_enable_skey(void)
2187{
2188 struct mm_walk walk = { .pte_entry = __s390_enable_skey };
2189 struct mm_struct *mm = current->mm;
2190 struct vm_area_struct *vma;
2191 int rc = 0;
2192
2193 down_write(&mm->mmap_sem);
2194 if (mm_use_skey(mm))
2195 goto out_up;
2196
2197 mm->context.use_skey = 1;
2198 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2199 if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
2200 MADV_UNMERGEABLE, &vma->vm_flags)) {
2201 mm->context.use_skey = 0;
2202 rc = -ENOMEM;
2203 goto out_up;
2204 }
2205 }
2206 mm->def_flags &= ~VM_MERGEABLE;
2207
2208 walk.mm = mm;
2209 walk_page_range(0, TASK_SIZE, &walk);
2210
2211out_up:
2212 up_write(&mm->mmap_sem);
2213 return rc;
2214}
2215EXPORT_SYMBOL_GPL(s390_enable_skey);
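/*
 * Editorial sketch: storage key handling is typically enabled lazily,
 * the first time the guest touches storage keys (hypothetical caller;
 * the mm_use_skey() test is just a fast path, the function rechecks it
 * under mmap_sem):
 *
 *	if (!mm_use_skey(current->mm))
 *		rc = s390_enable_skey();
 */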
2216
2217/*
2218 * Reset CMMA state, make all pages stable again.
2219 */
2220static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
2221 unsigned long next, struct mm_walk *walk)
2222{
2223 ptep_zap_unused(walk->mm, addr, pte, 1);
2224 return 0;
2225}
2226
2227void s390_reset_cmma(struct mm_struct *mm)
2228{
2229 struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
2230
2231 down_write(&mm->mmap_sem);
2232 walk.mm = mm;
2233 walk_page_range(0, TASK_SIZE, &walk);
2234 up_write(&mm->mmap_sem);
2235}
2236EXPORT_SYMBOL_GPL(s390_reset_cmma);
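/*
 * Editorial sketch: the caller passes the mm that backs the guest, e.g.
 * on a CMMA reset request from userspace (hypothetical call site):
 *
 *	s390_reset_cmma(kvm->arch.gmap->mm);
 */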