Martin Schwidefsky3610cce2007-10-22 12:52:47 +02001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * Copyright IBM Corp. 2007, 2011
Martin Schwidefsky3610cce2007-10-22 12:52:47 +02003 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
4 */
5
6#include <linux/sched.h>
7#include <linux/kernel.h>
8#include <linux/errno.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +09009#include <linux/gfp.h>
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020010#include <linux/mm.h>
11#include <linux/swap.h>
12#include <linux/smp.h>
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020013#include <linux/spinlock.h>
Martin Schwidefsky80217142010-10-25 16:10:11 +020014#include <linux/rcupdate.h>
Martin Schwidefskye5992f22011-07-24 10:48:20 +020015#include <linux/slab.h>
Konstantin Weitzb31288f2013-04-17 17:36:29 +020016#include <linux/swapops.h>
Martin Schwidefsky0b46e0a2015-04-15 13:23:26 +020017#include <linux/sysctl.h>
Dominik Dingel3ac8e382014-10-23 12:09:17 +020018#include <linux/ksm.h>
19#include <linux/mman.h>
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020020
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020021#include <asm/pgtable.h>
22#include <asm/pgalloc.h>
23#include <asm/tlb.h>
24#include <asm/tlbflush.h>
Martin Schwidefsky6252d702008-02-09 18:24:37 +010025#include <asm/mmu_context.h>
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020026
Martin Schwidefsky043d0702011-05-23 10:24:23 +020027unsigned long *crst_table_alloc(struct mm_struct *mm)
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020028{
Martin Schwidefsky78fb9072015-08-14 14:58:50 +020029 struct page *page = alloc_pages(GFP_KERNEL, 2);
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020030
31 if (!page)
32 return NULL;
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020033 return (unsigned long *) page_to_phys(page);
34}
35
Martin Schwidefsky146e4b32008-02-09 18:24:35 +010036void crst_table_free(struct mm_struct *mm, unsigned long *table)
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020037{
Martin Schwidefsky78fb9072015-08-14 14:58:50 +020038 free_pages((unsigned long) table, 2);
Martin Schwidefsky80217142010-10-25 16:10:11 +020039}
40
Martin Schwidefsky10607862013-10-28 14:48:30 +010041static void __crst_table_upgrade(void *arg)
42{
43 struct mm_struct *mm = arg;
44
Martin Schwidefskybeef5602014-04-14 15:11:26 +020045 if (current->active_mm == mm) {
46 clear_user_asce();
47 set_user_asce(mm);
48 }
Martin Schwidefsky10607862013-10-28 14:48:30 +010049 __tlb_flush_local();
50}
51
Martin Schwidefsky6252d702008-02-09 18:24:37 +010052int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
53{
54 unsigned long *table, *pgd;
55 unsigned long entry;
Martin Schwidefsky10607862013-10-28 14:48:30 +010056 int flush;
Martin Schwidefsky6252d702008-02-09 18:24:37 +010057
Dominik Dingela9d7ab92016-01-11 11:47:12 +010058 BUG_ON(limit > TASK_MAX_SIZE);
Martin Schwidefsky10607862013-10-28 14:48:30 +010059 flush = 0;
Martin Schwidefsky6252d702008-02-09 18:24:37 +010060repeat:
Martin Schwidefsky043d0702011-05-23 10:24:23 +020061 table = crst_table_alloc(mm);
Martin Schwidefsky6252d702008-02-09 18:24:37 +010062 if (!table)
63 return -ENOMEM;
Martin Schwidefsky80217142010-10-25 16:10:11 +020064 spin_lock_bh(&mm->page_table_lock);
Martin Schwidefsky6252d702008-02-09 18:24:37 +010065 if (mm->context.asce_limit < limit) {
66 pgd = (unsigned long *) mm->pgd;
67 if (mm->context.asce_limit <= (1UL << 31)) {
68 entry = _REGION3_ENTRY_EMPTY;
69 mm->context.asce_limit = 1UL << 42;
70 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
71 _ASCE_USER_BITS |
72 _ASCE_TYPE_REGION3;
73 } else {
74 entry = _REGION2_ENTRY_EMPTY;
75 mm->context.asce_limit = 1UL << 53;
76 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
77 _ASCE_USER_BITS |
78 _ASCE_TYPE_REGION2;
79 }
80 crst_table_init(table, entry);
81 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
82 mm->pgd = (pgd_t *) table;
Martin Schwidefskyf481bfa2009-03-18 13:27:36 +010083 mm->task_size = mm->context.asce_limit;
Martin Schwidefsky6252d702008-02-09 18:24:37 +010084 table = NULL;
Martin Schwidefsky10607862013-10-28 14:48:30 +010085 flush = 1;
Martin Schwidefsky6252d702008-02-09 18:24:37 +010086 }
Martin Schwidefsky80217142010-10-25 16:10:11 +020087 spin_unlock_bh(&mm->page_table_lock);
Martin Schwidefsky6252d702008-02-09 18:24:37 +010088 if (table)
89 crst_table_free(mm, table);
90 if (mm->context.asce_limit < limit)
91 goto repeat;
Martin Schwidefsky10607862013-10-28 14:48:30 +010092 if (flush)
93 on_each_cpu(__crst_table_upgrade, mm, 0);
Martin Schwidefsky6252d702008-02-09 18:24:37 +010094 return 0;
95}
96
97void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
98{
99 pgd_t *pgd;
100
Martin Schwidefsky02a8f3a2014-04-03 13:54:59 +0200101 if (current->active_mm == mm) {
Martin Schwidefskybeef5602014-04-14 15:11:26 +0200102 clear_user_asce();
Martin Schwidefsky10607862013-10-28 14:48:30 +0100103 __tlb_flush_mm(mm);
Martin Schwidefsky02a8f3a2014-04-03 13:54:59 +0200104 }
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100105 while (mm->context.asce_limit > limit) {
106 pgd = mm->pgd;
107 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
108 case _REGION_ENTRY_TYPE_R2:
109 mm->context.asce_limit = 1UL << 42;
110 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
111 _ASCE_USER_BITS |
112 _ASCE_TYPE_REGION3;
113 break;
114 case _REGION_ENTRY_TYPE_R3:
115 mm->context.asce_limit = 1UL << 31;
116 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
117 _ASCE_USER_BITS |
118 _ASCE_TYPE_SEGMENT;
119 break;
120 default:
121 BUG();
122 }
123 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
Martin Schwidefskyf481bfa2009-03-18 13:27:36 +0100124 mm->task_size = mm->context.asce_limit;
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100125 crst_table_free(mm, (unsigned long *) pgd);
126 }
Martin Schwidefsky10607862013-10-28 14:48:30 +0100127 if (current->active_mm == mm)
Martin Schwidefskybeef5602014-04-14 15:11:26 +0200128 set_user_asce(mm);
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100129}
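/*
 * Note (illustrative, not part of the original file): the mmap code upgrades
 * the page table layout on demand when a mapping above the current
 * asce_limit is requested, roughly along the lines of
 *
 *	if (addr + len > mm->context.asce_limit)
 *		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
 *
 * while crst_table_downgrade() shrinks the layout again, for instance for
 * 31-bit compat tasks.
 */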
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100130
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200131#ifdef CONFIG_PGSTE
132
133/**
134 * gmap_alloc - allocate a guest address space
135 * @mm: pointer to the parent mm_struct
Dominik Dingela3a92c32014-12-01 17:24:42 +0100136 * @limit: maximum address of the gmap address space
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200137 *
138 * Returns a guest address space structure.
139 */
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200140struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200141{
142 struct gmap *gmap;
143 struct page *page;
144 unsigned long *table;
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200145 unsigned long etype, atype;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200146
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200147 if (limit < (1UL << 31)) {
148 limit = (1UL << 31) - 1;
149 atype = _ASCE_TYPE_SEGMENT;
150 etype = _SEGMENT_ENTRY_EMPTY;
151 } else if (limit < (1UL << 42)) {
152 limit = (1UL << 42) - 1;
153 atype = _ASCE_TYPE_REGION3;
154 etype = _REGION3_ENTRY_EMPTY;
155 } else if (limit < (1UL << 53)) {
156 limit = (1UL << 53) - 1;
157 atype = _ASCE_TYPE_REGION2;
158 etype = _REGION2_ENTRY_EMPTY;
159 } else {
160 limit = -1UL;
161 atype = _ASCE_TYPE_REGION1;
162 etype = _REGION1_ENTRY_EMPTY;
163 }
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200164 gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
165 if (!gmap)
166 goto out;
167 INIT_LIST_HEAD(&gmap->crst_list);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200168 INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
169 INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
170 spin_lock_init(&gmap->guest_table_lock);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200171 gmap->mm = mm;
Martin Schwidefsky78fb9072015-08-14 14:58:50 +0200172 page = alloc_pages(GFP_KERNEL, 2);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200173 if (!page)
174 goto out_free;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200175 page->index = 0;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200176 list_add(&page->lru, &gmap->crst_list);
177 table = (unsigned long *) page_to_phys(page);
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200178 crst_table_init(table, etype);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200179 gmap->table = table;
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200180 gmap->asce = atype | _ASCE_TABLE_LENGTH |
181 _ASCE_USER_BITS | __pa(table);
182 gmap->asce_end = limit;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200183 down_write(&mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200184 list_add(&gmap->list, &mm->context.gmap_list);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200185 up_write(&mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200186 return gmap;
187
188out_free:
189 kfree(gmap);
190out:
191 return NULL;
192}
193EXPORT_SYMBOL_GPL(gmap_alloc);
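/*
 * Illustrative note (not part of the original file): the requested limit is
 * rounded up to the next table boundary. Assuming a caller asks for a 1 TB
 * guest address space:
 *
 *	gmap = gmap_alloc(mm, 1UL << 40);
 *
 * 1 TB lies between 2 GB and 4 TB, so the gmap gets a region-third table,
 * asce_end becomes (1UL << 42) - 1 and gmap->asce uses _ASCE_TYPE_REGION3.
 */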
194
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200195static void gmap_flush_tlb(struct gmap *gmap)
196{
197 if (MACHINE_HAS_IDTE)
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200198 __tlb_flush_asce(gmap->mm, gmap->asce);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200199 else
200 __tlb_flush_global();
201}
202
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200203static void gmap_radix_tree_free(struct radix_tree_root *root)
204{
205 struct radix_tree_iter iter;
206 unsigned long indices[16];
207 unsigned long index;
208 void **slot;
209 int i, nr;
210
211 /* A radix tree is freed by deleting all of its entries */
212 index = 0;
213 do {
214 nr = 0;
215 radix_tree_for_each_slot(slot, root, &iter, index) {
216 indices[nr] = iter.index;
217 if (++nr == 16)
218 break;
219 }
220 for (i = 0; i < nr; i++) {
221 index = indices[i];
222 radix_tree_delete(root, index);
223 }
224 } while (nr > 0);
225}
226
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200227/**
228 * gmap_free - free a guest address space
229 * @gmap: pointer to the guest address space structure
230 */
231void gmap_free(struct gmap *gmap)
232{
233 struct page *page, *next;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200234
235 /* Flush tlb. */
236 if (MACHINE_HAS_IDTE)
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200237 __tlb_flush_asce(gmap->mm, gmap->asce);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200238 else
239 __tlb_flush_global();
240
241 /* Free all segment & region tables. */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200242 list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
Martin Schwidefsky78fb9072015-08-14 14:58:50 +0200243 __free_pages(page, 2);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200244 gmap_radix_tree_free(&gmap->guest_to_host);
245 gmap_radix_tree_free(&gmap->host_to_guest);
246 down_write(&gmap->mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200247 list_del(&gmap->list);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200248 up_write(&gmap->mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200249 kfree(gmap);
250}
251EXPORT_SYMBOL_GPL(gmap_free);
252
253/**
254 * gmap_enable - switch primary space to the guest address space
255 * @gmap: pointer to the guest address space structure
256 */
257void gmap_enable(struct gmap *gmap)
258{
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200259 S390_lowcore.gmap = (unsigned long) gmap;
260}
261EXPORT_SYMBOL_GPL(gmap_enable);
262
263/**
264 * gmap_disable - switch back to the standard primary address space
265 * @gmap: pointer to the guest address space structure
266 */
267void gmap_disable(struct gmap *gmap)
268{
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200269 S390_lowcore.gmap = 0UL;
270}
271EXPORT_SYMBOL_GPL(gmap_disable);
272
Carsten Ottea9162f22011-10-30 15:17:00 +0100273/*
274 * gmap_alloc_table is assumed to be called with mmap_sem held
275 */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200276static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
277 unsigned long init, unsigned long gaddr)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200278{
279 struct page *page;
280 unsigned long *new;
281
Christian Borntraegerc86cce22011-12-27 11:25:47 +0100282	/* since we don't free the gmap table until gmap_free we can unlock */
Martin Schwidefsky78fb9072015-08-14 14:58:50 +0200283 page = alloc_pages(GFP_KERNEL, 2);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200284 if (!page)
285 return -ENOMEM;
286 new = (unsigned long *) page_to_phys(page);
287 crst_table_init(new, init);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200288 spin_lock(&gmap->mm->page_table_lock);
Martin Schwidefskye5098612013-07-23 20:57:57 +0200289 if (*table & _REGION_ENTRY_INVALID) {
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200290 list_add(&page->lru, &gmap->crst_list);
291 *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
292 (*table & _REGION_ENTRY_TYPE_MASK);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200293 page->index = gaddr;
294 page = NULL;
295 }
296 spin_unlock(&gmap->mm->page_table_lock);
297 if (page)
Martin Schwidefsky78fb9072015-08-14 14:58:50 +0200298 __free_pages(page, 2);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200299 return 0;
300}
301
302/**
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200303 * __gmap_segment_gaddr - find virtual address from segment pointer
304 * @entry: pointer to a segment table entry in the guest address space
305 *
306 * Returns the virtual address in the guest address space for the segment
307 */
308static unsigned long __gmap_segment_gaddr(unsigned long *entry)
309{
310 struct page *page;
Martin Schwidefskyfbc89c92015-01-07 11:00:02 +0100311 unsigned long offset, mask;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200312
313 offset = (unsigned long) entry / sizeof(unsigned long);
314 offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
Martin Schwidefskyfbc89c92015-01-07 11:00:02 +0100315 mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
316 page = virt_to_page((void *)((unsigned long) entry & mask));
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200317 return page->index + offset;
318}
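/*
 * Worked example (illustrative, not part of the original file): segment
 * tables are allocated as order-2 (16 KB) pages, so masking the entry
 * address with ~(PTRS_PER_PMD * sizeof(pmd_t) - 1) yields the 16 KB aligned
 * start of the table, whose struct page carries the guest base address in
 * page->index. For the pmd slot with index 257 within its table, offset
 * becomes 257 * PMD_SIZE (257 MB) and the returned guest address is
 * page->index + 257 * PMD_SIZE.
 */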
319
320/**
321 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
322 * @gmap: pointer to the guest address space structure
323 * @vmaddr: address in the host process address space
324 *
325 * Returns 1 if a TLB flush is required
326 */
327static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
328{
329 unsigned long *entry;
330 int flush = 0;
331
332 spin_lock(&gmap->guest_table_lock);
333 entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
334 if (entry) {
335 flush = (*entry != _SEGMENT_ENTRY_INVALID);
336 *entry = _SEGMENT_ENTRY_INVALID;
337 }
338 spin_unlock(&gmap->guest_table_lock);
339 return flush;
340}
341
342/**
343 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
344 * @gmap: pointer to the guest address space structure
345 * @gaddr: address in the guest address space
346 *
347 * Returns 1 if a TLB flush is required
348 */
349static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
350{
351 unsigned long vmaddr;
352
353 vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
354 gaddr >> PMD_SHIFT);
355 return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
356}
357
358/**
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200359 * gmap_unmap_segment - unmap segment from the guest address space
360 * @gmap: pointer to the guest address space structure
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200361 * @to: address in the guest address space
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200362 * @len: length of the memory area to unmap
363 *
Hendrik Bruecknerb4a96012013-12-13 12:53:42 +0100364 * Returns 0 if the unmap succeeded, -EINVAL if not.
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200365 */
366int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
367{
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200368 unsigned long off;
369 int flush;
370
371 if ((to | len) & (PMD_SIZE - 1))
372 return -EINVAL;
373 if (len == 0 || to + len < to)
374 return -EINVAL;
375
376 flush = 0;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200377 down_write(&gmap->mm->mmap_sem);
378 for (off = 0; off < len; off += PMD_SIZE)
379 flush |= __gmap_unmap_by_gaddr(gmap, to + off);
380 up_write(&gmap->mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200381 if (flush)
382 gmap_flush_tlb(gmap);
383 return 0;
384}
385EXPORT_SYMBOL_GPL(gmap_unmap_segment);
386
387/**
388 * gmap_mmap_segment - map a segment to the guest address space
389 * @gmap: pointer to the guest address space structure
390 * @from: source address in the parent address space
391 * @to: target address in the guest address space
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200392 * @len: length of the memory area to map
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200393 *
Hendrik Bruecknerb4a96012013-12-13 12:53:42 +0100394 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200395 */
396int gmap_map_segment(struct gmap *gmap, unsigned long from,
397 unsigned long to, unsigned long len)
398{
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200399 unsigned long off;
400 int flush;
401
402 if ((from | to | len) & (PMD_SIZE - 1))
403 return -EINVAL;
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200404 if (len == 0 || from + len < from || to + len < to ||
Dominik Dingela3a92c32014-12-01 17:24:42 +0100405 from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200406 return -EINVAL;
407
408 flush = 0;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200409 down_write(&gmap->mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200410 for (off = 0; off < len; off += PMD_SIZE) {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200411 /* Remove old translation */
412 flush |= __gmap_unmap_by_gaddr(gmap, to + off);
413 /* Store new translation */
414 if (radix_tree_insert(&gmap->guest_to_host,
415 (to + off) >> PMD_SHIFT,
416 (void *) from + off))
417 break;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200418 }
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200419 up_write(&gmap->mm->mmap_sem);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200420 if (flush)
421 gmap_flush_tlb(gmap);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200422 if (off >= len)
423 return 0;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200424 gmap_unmap_segment(gmap, to, len);
425 return -ENOMEM;
426}
427EXPORT_SYMBOL_GPL(gmap_map_segment);
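/*
 * Minimal usage sketch (illustrative only, not part of the original file;
 * "uaddr" is an assumed, PMD_SIZE aligned host address backing the guest):
 *
 *	struct gmap *gmap;
 *
 *	gmap = gmap_alloc(current->mm, 1UL << 32);
 *	if (!gmap)
 *		return -ENOMEM;
 *	if (gmap_map_segment(gmap, uaddr, 0x0, 256UL << 20))
 *		goto out_free;
 *	gmap_enable(gmap);
 *	...
 *	gmap_disable(gmap);
 * out_free:
 *	gmap_free(gmap);
 *
 * gmap_map_segment() requires "from", "to" and "len" to be PMD_SIZE (1 MB)
 * aligned; the guest range 0..256 MB above is backed by uaddr..uaddr+256 MB.
 */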
428
Heiko Carstensc5034942012-09-10 16:14:33 +0200429/**
430 * __gmap_translate - translate a guest address to a user space address
Heiko Carstensc5034942012-09-10 16:14:33 +0200431 * @gmap: pointer to guest mapping meta data structure
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200432 * @gaddr: guest address
Heiko Carstensc5034942012-09-10 16:14:33 +0200433 *
434 * Returns user space address which corresponds to the guest address or
435 * -EFAULT if no such mapping exists.
436 * This function does not establish potentially missing page table entries.
437 * The mmap_sem of the mm that belongs to the address space must be held
438 * when this function gets called.
439 */
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200440unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
Heiko Carstensc5034942012-09-10 16:14:33 +0200441{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200442 unsigned long vmaddr;
Heiko Carstensc5034942012-09-10 16:14:33 +0200443
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200444 vmaddr = (unsigned long)
445 radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
446 return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
Heiko Carstensc5034942012-09-10 16:14:33 +0200447}
448EXPORT_SYMBOL_GPL(__gmap_translate);
449
450/**
451 * gmap_translate - translate a guest address to a user space address
Heiko Carstensc5034942012-09-10 16:14:33 +0200452 * @gmap: pointer to guest mapping meta data structure
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200453 * @gaddr: guest address
Heiko Carstensc5034942012-09-10 16:14:33 +0200454 *
455 * Returns user space address which corresponds to the guest address or
456 * -EFAULT if no such mapping exists.
457 * This function does not establish potentially missing page table entries.
458 */
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200459unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
Heiko Carstensc5034942012-09-10 16:14:33 +0200460{
461 unsigned long rc;
462
463 down_read(&gmap->mm->mmap_sem);
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200464 rc = __gmap_translate(gmap, gaddr);
Heiko Carstensc5034942012-09-10 16:14:33 +0200465 up_read(&gmap->mm->mmap_sem);
466 return rc;
467}
468EXPORT_SYMBOL_GPL(gmap_translate);
469
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200470/**
471 * gmap_unlink - disconnect a page table from the gmap shadow tables
472 * @mm: pointer to the parent mm_struct
473 * @table: pointer to the host page table
474 * @vmaddr: vm address associated with the host page table
475 */
476static void gmap_unlink(struct mm_struct *mm, unsigned long *table,
477 unsigned long vmaddr)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200478{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200479 struct gmap *gmap;
480 int flush;
481
482 list_for_each_entry(gmap, &mm->context.gmap_list, list) {
483 flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
484 if (flush)
485 gmap_flush_tlb(gmap);
486 }
487}
488
489/**
490 * __gmap_link - set up shadow page tables to connect a host to a guest address
491 * @gmap: pointer to guest mapping meta data structure
492 * @gaddr: guest address
493 * @vmaddr: vm address
494 *
495 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
496 * if the vm address is already mapped to a different guest segment.
497 * The mmap_sem of the mm that belongs to the address space must be held
498 * when this function gets called.
499 */
500int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
501{
Heiko Carstensc5034942012-09-10 16:14:33 +0200502 struct mm_struct *mm;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200503 unsigned long *table;
504 spinlock_t *ptl;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200505 pgd_t *pgd;
506 pud_t *pud;
507 pmd_t *pmd;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200508 int rc;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200509
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200510 /* Create higher level tables in the gmap page table */
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200511 table = gmap->table;
512 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
513 table += (gaddr >> 53) & 0x7ff;
514 if ((*table & _REGION_ENTRY_INVALID) &&
515 gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
Heiko Carstens925dfc02014-12-12 13:04:21 +0100516 gaddr & 0xffe0000000000000UL))
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200517 return -ENOMEM;
518 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
519 }
520 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
521 table += (gaddr >> 42) & 0x7ff;
522 if ((*table & _REGION_ENTRY_INVALID) &&
523 gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
Heiko Carstens925dfc02014-12-12 13:04:21 +0100524 gaddr & 0xfffffc0000000000UL))
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200525 return -ENOMEM;
526 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
527 }
528 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
529 table += (gaddr >> 31) & 0x7ff;
530 if ((*table & _REGION_ENTRY_INVALID) &&
531 gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
Heiko Carstens925dfc02014-12-12 13:04:21 +0100532 gaddr & 0xffffffff80000000UL))
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200533 return -ENOMEM;
534 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
535 }
536 table += (gaddr >> 20) & 0x7ff;
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200537 /* Walk the parent mm page table */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200538 mm = gmap->mm;
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200539 pgd = pgd_offset(mm, vmaddr);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200540 VM_BUG_ON(pgd_none(*pgd));
541 pud = pud_offset(pgd, vmaddr);
542 VM_BUG_ON(pud_none(*pud));
543 pmd = pmd_offset(pud, vmaddr);
544 VM_BUG_ON(pmd_none(*pmd));
Alex Thorlton1e1836e2014-04-07 15:37:09 -0700545 /* large pmds cannot yet be handled */
546 if (pmd_large(*pmd))
547 return -EFAULT;
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200548 /* Link gmap segment table entry location to page table. */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200549 rc = radix_tree_preload(GFP_KERNEL);
550 if (rc)
551 return rc;
552 ptl = pmd_lock(mm, pmd);
553 spin_lock(&gmap->guest_table_lock);
554 if (*table == _SEGMENT_ENTRY_INVALID) {
555 rc = radix_tree_insert(&gmap->host_to_guest,
556 vmaddr >> PMD_SHIFT, table);
557 if (!rc)
558 *table = pmd_val(*pmd);
559 } else
560 rc = 0;
561 spin_unlock(&gmap->guest_table_lock);
562 spin_unlock(ptl);
563 radix_tree_preload_end();
564 return rc;
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200565}
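/*
 * Worked example (illustrative, not part of the original file): for a gmap
 * created with a limit below 4 TB (region-third table) only the last two
 * steps of the walk above are taken. With gaddr = 0x6123400000 the
 * region-third index is (gaddr >> 31) & 0x7ff = 0xc2 and the segment index
 * is (gaddr >> 20) & 0x7ff = 0x234; that segment table slot is the one that
 * finally receives the host pmd value at the end of __gmap_link().
 */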
566
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200567/**
568 * gmap_fault - resolve a fault on a guest address
569 * @gmap: pointer to guest mapping meta data structure
570 * @gaddr: guest address
571 * @fault_flags: flags to pass down to handle_mm_fault()
572 *
573 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
574 * if the vm address is already mapped to a different guest segment.
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200575 */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200576int gmap_fault(struct gmap *gmap, unsigned long gaddr,
577 unsigned int fault_flags)
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200578{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200579 unsigned long vmaddr;
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200580 int rc;
Dominik Dingelfef89532016-01-15 16:57:07 -0800581 bool unlocked;
Martin Schwidefskyab8e5232013-04-16 13:37:46 +0200582
Carsten Otte499069e2011-10-30 15:17:02 +0100583 down_read(&gmap->mm->mmap_sem);
Dominik Dingelfef89532016-01-15 16:57:07 -0800584
585retry:
586 unlocked = false;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200587 vmaddr = __gmap_translate(gmap, gaddr);
588 if (IS_ERR_VALUE(vmaddr)) {
589 rc = vmaddr;
590 goto out_up;
591 }
Dominik Dingelfef89532016-01-15 16:57:07 -0800592 if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
593 &unlocked)) {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200594 rc = -EFAULT;
595 goto out_up;
596 }
Dominik Dingelfef89532016-01-15 16:57:07 -0800597 /*
598	 * If fixup_user_fault() dropped the mmap_sem while faulting in the page,
599	 * redo __gmap_translate() so we do not race with a map/unmap_segment.
600 */
601 if (unlocked)
602 goto retry;
603
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200604 rc = __gmap_link(gmap, gaddr, vmaddr);
605out_up:
Carsten Otte499069e2011-10-30 15:17:02 +0100606 up_read(&gmap->mm->mmap_sem);
Carsten Otte499069e2011-10-30 15:17:02 +0100607 return rc;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200608}
609EXPORT_SYMBOL_GPL(gmap_fault);
610
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200611static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
612{
613 if (!non_swap_entry(entry))
614 dec_mm_counter(mm, MM_SWAPENTS);
615 else if (is_migration_entry(entry)) {
616 struct page *page = migration_entry_to_page(entry);
617
Jerome Marchandeca56ff2016-01-14 15:19:26 -0800618 dec_mm_counter(mm, mm_counter(page));
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200619 }
620 free_swap_and_cache(entry);
621}
622
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200623/*
624 * this function is assumed to be called with mmap_sem held
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200625 */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200626void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200627{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200628 unsigned long vmaddr, ptev, pgstev;
629 pte_t *ptep, pte;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200630 spinlock_t *ptl;
631 pgste_t pgste;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200632
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200633 /* Find the vm address for the guest address */
634 vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
635 gaddr >> PMD_SHIFT);
636 if (!vmaddr)
637 return;
638 vmaddr |= gaddr & ~PMD_MASK;
639 /* Get pointer to the page table entry */
640 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200641 if (unlikely(!ptep))
642 return;
643 pte = *ptep;
644 if (!pte_swap(pte))
645 goto out_pte;
646 /* Zap unused and logically-zero pages */
647 pgste = pgste_get_lock(ptep);
648 pgstev = pgste_val(pgste);
649 ptev = pte_val(pte);
650 if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
651 ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200652 gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm);
653 pte_clear(gmap->mm, vmaddr, ptep);
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200654 }
655 pgste_set_unlock(ptep, pgste);
656out_pte:
Dominik Dingel66e9bbd2014-10-06 16:34:44 +0200657 pte_unmap_unlock(ptep, ptl);
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200658}
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200659EXPORT_SYMBOL_GPL(__gmap_zap);
660
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200661void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
Christian Borntraeger388186b2011-10-30 15:17:03 +0100662{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200663 unsigned long gaddr, vmaddr, size;
Christian Borntraeger388186b2011-10-30 15:17:03 +0100664 struct vm_area_struct *vma;
Christian Borntraeger388186b2011-10-30 15:17:03 +0100665
666 down_read(&gmap->mm->mmap_sem);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200667 for (gaddr = from; gaddr < to;
668 gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
669 /* Find the vm address for the guest address */
670 vmaddr = (unsigned long)
671 radix_tree_lookup(&gmap->guest_to_host,
672 gaddr >> PMD_SHIFT);
673 if (!vmaddr)
Christian Borntraeger388186b2011-10-30 15:17:03 +0100674 continue;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200675 vmaddr |= gaddr & ~PMD_MASK;
676 /* Find vma in the parent mm */
677 vma = find_vma(gmap->mm, vmaddr);
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200678 size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200679 zap_page_range(vma, vmaddr, size, NULL);
Christian Borntraeger388186b2011-10-30 15:17:03 +0100680 }
681 up_read(&gmap->mm->mmap_sem);
682}
683EXPORT_SYMBOL_GPL(gmap_discard);
684
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200685static LIST_HEAD(gmap_notifier_list);
686static DEFINE_SPINLOCK(gmap_notifier_lock);
687
688/**
689 * gmap_register_ipte_notifier - register a pte invalidation callback
690 * @nb: pointer to the gmap notifier block
691 */
692void gmap_register_ipte_notifier(struct gmap_notifier *nb)
693{
694 spin_lock(&gmap_notifier_lock);
695 list_add(&nb->list, &gmap_notifier_list);
696 spin_unlock(&gmap_notifier_lock);
697}
698EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
699
700/**
701 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
702 * @nb: pointer to the gmap notifier block
703 */
704void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
705{
706 spin_lock(&gmap_notifier_lock);
707 list_del_init(&nb->list);
708 spin_unlock(&gmap_notifier_lock);
709}
710EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
711
712/**
713 * gmap_ipte_notify - mark a range of ptes for invalidation notification
714 * @gmap: pointer to guest mapping meta data structure
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200715 * @gaddr: virtual address in the guest address space
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200716 * @len: size of area
717 *
718 * Returns 0 if for each page in the given range a gmap mapping exists and
719 * the invalidation notification could be set. If the gmap mapping is missing
720 * for one or more pages -EFAULT is returned. If no memory could be allocated
721 * -ENOMEM is returned. This function establishes missing page table entries.
722 */
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200723int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200724{
725 unsigned long addr;
726 spinlock_t *ptl;
727 pte_t *ptep, entry;
728 pgste_t pgste;
Dominik Dingelfef89532016-01-15 16:57:07 -0800729 bool unlocked;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200730 int rc = 0;
731
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200732 if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200733 return -EINVAL;
734 down_read(&gmap->mm->mmap_sem);
735 while (len) {
Dominik Dingelfef89532016-01-15 16:57:07 -0800736 unlocked = false;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200737 /* Convert gmap address and connect the page tables */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200738 addr = __gmap_translate(gmap, gaddr);
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200739 if (IS_ERR_VALUE(addr)) {
740 rc = addr;
741 break;
742 }
743 /* Get the page mapped */
Dominik Dingel4a9e1cd2016-01-15 16:57:04 -0800744 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE,
Dominik Dingelfef89532016-01-15 16:57:07 -0800745 &unlocked)) {
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200746 rc = -EFAULT;
747 break;
748 }
Dominik Dingelfef89532016-01-15 16:57:07 -0800749		/* The mmap_sem got unlocked while faulting in the page, retry */
750 if (unlocked)
751 continue;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200752 rc = __gmap_link(gmap, gaddr, addr);
753 if (rc)
754 break;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200755 /* Walk the process page table, lock and get pte pointer */
756 ptep = get_locked_pte(gmap->mm, addr, &ptl);
Dominik Dingel6972cae2014-10-15 15:29:01 +0200757 VM_BUG_ON(!ptep);
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200758 /* Set notification bit in the pgste of the pte */
759 entry = *ptep;
Martin Schwidefskye5098612013-07-23 20:57:57 +0200760 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200761 pgste = pgste_get_lock(ptep);
Martin Schwidefsky0d0dafc2013-05-17 14:41:33 +0200762 pgste_val(pgste) |= PGSTE_IN_BIT;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200763 pgste_set_unlock(ptep, pgste);
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +0200764 gaddr += PAGE_SIZE;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200765 len -= PAGE_SIZE;
766 }
Martin Schwidefskya697e052014-10-30 10:55:37 +0100767 pte_unmap_unlock(ptep, ptl);
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200768 }
769 up_read(&gmap->mm->mmap_sem);
770 return rc;
771}
772EXPORT_SYMBOL_GPL(gmap_ipte_notify);
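/*
 * Minimal sketch of how a consumer such as KVM could use the notification
 * machinery (illustrative only; the callback name is made up, the member
 * names are taken from the calls in this file):
 *
 *	static void my_pte_notifier(struct gmap *gmap, unsigned long gaddr)
 *	{
 *		... react to the invalidation of the page at gaddr ...
 *	}
 *
 *	static struct gmap_notifier nb = { .notifier_call = my_pte_notifier };
 *
 *	gmap_register_ipte_notifier(&nb);
 *	rc = gmap_ipte_notify(gmap, gaddr, PAGE_SIZE);
 *
 * Once PGSTE_IN_BIT is set for a page, the next invalidation of that pte is
 * expected to end up in gmap_do_ipte_notify() below, which calls every
 * registered notifier with the guest address.
 */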
773
774/**
775 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
776 * @mm: pointer to the process mm_struct
Martin Schwidefsky9da4e382014-04-30 14:46:26 +0200777 * @vmaddr: virtual address in the process address space
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200778 * @pte: pointer to the page table entry
779 *
780 * This function is assumed to be called with the page table lock held
781 * for the pte to notify.
782 */
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200783void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200784{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200785 unsigned long offset, gaddr;
786 unsigned long *table;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200787 struct gmap_notifier *nb;
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200788 struct gmap *gmap;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200789
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200790 offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
791 offset = offset * (4096 / sizeof(pte_t));
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200792 spin_lock(&gmap_notifier_lock);
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200793 list_for_each_entry(gmap, &mm->context.gmap_list, list) {
794 table = radix_tree_lookup(&gmap->host_to_guest,
795 vmaddr >> PMD_SHIFT);
796 if (!table)
797 continue;
798 gaddr = __gmap_segment_gaddr(table) + offset;
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200799 list_for_each_entry(nb, &gmap_notifier_list, list)
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200800 nb->notifier_call(gmap, gaddr);
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200801 }
802 spin_unlock(&gmap_notifier_lock);
803}
Martin Schwidefsky0a61b222013-10-18 12:03:41 +0200804EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200805
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200806int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
807 unsigned long key, bool nq)
808{
809 spinlock_t *ptl;
810 pgste_t old, new;
811 pte_t *ptep;
Dominik Dingelfef89532016-01-15 16:57:07 -0800812 bool unlocked;
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200813
814 down_read(&mm->mmap_sem);
Christian Borntraegerab3f2852014-08-19 16:19:35 +0200815retry:
Dominik Dingelfef89532016-01-15 16:57:07 -0800816 unlocked = false;
Jason J. Herneedeb69e2014-10-07 13:31:37 -0400817 ptep = get_locked_pte(mm, addr, &ptl);
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200818 if (unlikely(!ptep)) {
819 up_read(&mm->mmap_sem);
820 return -EFAULT;
821 }
Christian Borntraegerab3f2852014-08-19 16:19:35 +0200822 if (!(pte_val(*ptep) & _PAGE_INVALID) &&
823 (pte_val(*ptep) & _PAGE_PROTECT)) {
Dominik Dingel66e9bbd2014-10-06 16:34:44 +0200824 pte_unmap_unlock(ptep, ptl);
Dominik Dingelfef89532016-01-15 16:57:07 -0800825 /*
826		 * We do not really care whether the mmap_sem got unlocked; we retry
827		 * either way, but passing &unlocked lets fixup_user_fault() handle userfaultfd.
828 */
Dominik Dingel4a9e1cd2016-01-15 16:57:04 -0800829 if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE,
Dominik Dingelfef89532016-01-15 16:57:07 -0800830 &unlocked)) {
Christian Borntraegerdc77d342014-08-27 12:20:02 +0200831 up_read(&mm->mmap_sem);
832 return -EFAULT;
Christian Borntraegerab3f2852014-08-19 16:19:35 +0200833 }
Christian Borntraegerdc77d342014-08-27 12:20:02 +0200834 goto retry;
835 }
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200836
837 new = old = pgste_get_lock(ptep);
838 pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
839 PGSTE_ACC_BITS | PGSTE_FP_BIT);
840 pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
841 pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
842 if (!(pte_val(*ptep) & _PAGE_INVALID)) {
Martin Schwidefsky0944fe32013-07-23 22:11:42 +0200843 unsigned long address, bits, skey;
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200844
845 address = pte_val(*ptep) & PAGE_MASK;
Martin Schwidefsky0944fe32013-07-23 22:11:42 +0200846 skey = (unsigned long) page_get_storage_key(address);
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200847 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
Martin Schwidefsky0944fe32013-07-23 22:11:42 +0200848 skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200849 /* Set storage key ACC and FP */
Martin Schwidefsky0944fe32013-07-23 22:11:42 +0200850 page_set_storage_key(address, skey, !nq);
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200851 /* Merge host changed & referenced into pgste */
852 pgste_val(new) |= bits << 52;
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200853 }
854 /* changing the guest storage key is considered a change of the page */
855 if ((pgste_val(new) ^ pgste_val(old)) &
856 (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
Martin Schwidefsky0a61b222013-10-18 12:03:41 +0200857 pgste_val(new) |= PGSTE_UC_BIT;
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200858
859 pgste_set_unlock(ptep, new);
Dominik Dingel66e9bbd2014-10-06 16:34:44 +0200860 pte_unmap_unlock(ptep, ptl);
Christian Borntraeger24d5dd02013-05-27 10:42:04 +0200861 up_read(&mm->mmap_sem);
862 return 0;
863}
864EXPORT_SYMBOL(set_guest_storage_key);
865
Jason J. Herne9fcf93b2014-09-23 09:18:57 -0400866unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
867{
868 spinlock_t *ptl;
869 pgste_t pgste;
870 pte_t *ptep;
871 uint64_t physaddr;
872 unsigned long key = 0;
873
874 down_read(&mm->mmap_sem);
875 ptep = get_locked_pte(mm, addr, &ptl);
876 if (unlikely(!ptep)) {
877 up_read(&mm->mmap_sem);
878 return -EFAULT;
879 }
880 pgste = pgste_get_lock(ptep);
881
882 if (pte_val(*ptep) & _PAGE_INVALID) {
883 key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
884 key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
885 key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
886 key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
887 } else {
888 physaddr = pte_val(*ptep) & PAGE_MASK;
889 key = page_get_storage_key(physaddr);
890
891 /* Reflect guest's logical view, not physical */
892 if (pgste_val(pgste) & PGSTE_GR_BIT)
893 key |= _PAGE_REFERENCED;
894 if (pgste_val(pgste) & PGSTE_GC_BIT)
895 key |= _PAGE_CHANGED;
896 }
897
898 pgste_set_unlock(ptep, pgste);
899 pte_unmap_unlock(ptep, ptl);
900 up_read(&mm->mmap_sem);
901 return key;
902}
903EXPORT_SYMBOL(get_guest_storage_key);
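/*
 * Note (illustrative, not part of the original file): the "key" handled by
 * the two functions above is the guest view of the storage key (access
 * control ACC, fetch protection F, reference R and change C). While a pte is
 * invalid the R and C state only exists in the PGSTE (PGSTE_GR_BIT and
 * PGSTE_GC_BIT), which is why get_guest_storage_key() merges those bits into
 * the key read from the real storage key of the mapped page.
 */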
904
Martin Schwidefsky0b46e0a2015-04-15 13:23:26 +0200905static int page_table_allocate_pgste_min = 0;
906static int page_table_allocate_pgste_max = 1;
907int page_table_allocate_pgste = 0;
908EXPORT_SYMBOL(page_table_allocate_pgste);
909
910static struct ctl_table page_table_sysctl[] = {
911 {
912 .procname = "allocate_pgste",
913 .data = &page_table_allocate_pgste,
914 .maxlen = sizeof(int),
915 .mode = S_IRUGO | S_IWUSR,
916 .proc_handler = proc_dointvec,
917 .extra1 = &page_table_allocate_pgste_min,
918 .extra2 = &page_table_allocate_pgste_max,
919 },
920 { }
921};
922
923static struct ctl_table page_table_sysctl_dir[] = {
924 {
925 .procname = "vm",
926 .maxlen = 0,
927 .mode = 0555,
928 .child = page_table_sysctl,
929 },
930 { }
931};
932
933static int __init page_table_register_sysctl(void)
934{
935 return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
936}
937__initcall(page_table_register_sysctl);
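/*
 * Note (illustrative, not part of the original file): the table above shows
 * up as /proc/sys/vm/allocate_pgste. A hypervisor setup would typically run
 *
 *	sysctl vm.allocate_pgste=1
 *
 * (or use the corresponding sysctl.conf entry) before starting the process
 * that wants to run guests, so that its 4K page tables are allocated with
 * PGSTEs from the beginning.
 */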
938
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200939#else /* CONFIG_PGSTE */
940
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200941static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table,
942 unsigned long vmaddr)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200943{
944}
945
946#endif /* CONFIG_PGSTE */
947
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200948static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
949{
950 unsigned int old, new;
951
952 do {
953 old = atomic_read(v);
954 new = old ^ bits;
955 } while (atomic_cmpxchg(v, old, new) != old);
956 return new;
957}
958
Martin Schwidefsky3610cce2007-10-22 12:52:47 +0200959/*
960 * page table entry allocation/free routines.
961 */
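/*
 * Added note (not in the original file): 2K page table fragments are tracked
 * in page->_mapcount of the backing 4K page. Bits 0-1 say which 2K half is
 * in use (a 4K table with PGSTEs sets both bits right away), bits 4-5 mark
 * halves that page_table_free_rcu() has handed to the RCU batch. The low
 * bits of the pointer passed to tlb_remove_table() carry the same
 * information for __tlb_remove_table(): 0 for an order-2 crst table, 1 or 2
 * for a 2K half, 3 for a 4K table with PGSTEs.
 */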
Martin Schwidefsky527e30b2014-04-30 16:04:25 +0200962unsigned long *page_table_alloc(struct mm_struct *mm)
Martin Schwidefsky3610cce2007-10-22 12:52:47 +0200963{
Martin Schwidefsky78fb9072015-08-14 14:58:50 +0200964 unsigned long *table;
965 struct page *page;
Martin Schwidefsky36409f62011-06-06 14:14:41 +0200966 unsigned int mask, bit;
Martin Schwidefsky3610cce2007-10-22 12:52:47 +0200967
Martin Schwidefsky78fb9072015-08-14 14:58:50 +0200968 /* Try to get a fragment of a 4K page as a 2K page table */
969 if (!mm_alloc_pgste(mm)) {
970 table = NULL;
971 spin_lock_bh(&mm->context.list_lock);
972 if (!list_empty(&mm->context.pgtable_list)) {
973 page = list_first_entry(&mm->context.pgtable_list,
974 struct page, lru);
975 mask = atomic_read(&page->_mapcount);
976 mask = (mask | (mask >> 4)) & 3;
977 if (mask != 3) {
978 table = (unsigned long *) page_to_phys(page);
979 bit = mask & 1; /* =1 -> second 2K */
980 if (bit)
981 table += PTRS_PER_PTE;
982 atomic_xor_bits(&page->_mapcount, 1U << bit);
983 list_del(&page->lru);
984 }
Kirill A. Shutemove89cfa52013-11-14 14:31:39 -0800985 }
Martin Schwidefsky78fb9072015-08-14 14:58:50 +0200986 spin_unlock_bh(&mm->context.list_lock);
987 if (table)
988 return table;
989 }
990 /* Allocate a fresh page */
991 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
992 if (!page)
993 return NULL;
994 if (!pgtable_page_ctor(page)) {
995 __free_page(page);
996 return NULL;
997 }
998 /* Initialize page table */
999 table = (unsigned long *) page_to_phys(page);
1000 if (mm_alloc_pgste(mm)) {
1001 /* Return 4K page table with PGSTEs */
1002 atomic_set(&page->_mapcount, 3);
1003 clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
1004 clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
1005 } else {
1006 /* Return the first 2K fragment of the page */
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001007 atomic_set(&page->_mapcount, 1);
Martin Schwidefskye5098612013-07-23 20:57:57 +02001008 clear_table(table, _PAGE_INVALID, PAGE_SIZE);
Martin Schwidefsky80217142010-10-25 16:10:11 +02001009 spin_lock_bh(&mm->context.list_lock);
Martin Schwidefsky146e4b32008-02-09 18:24:35 +01001010 list_add(&page->lru, &mm->context.pgtable_list);
Martin Schwidefsky78fb9072015-08-14 14:58:50 +02001011 spin_unlock_bh(&mm->context.list_lock);
Martin Schwidefsky146e4b32008-02-09 18:24:35 +01001012 }
Martin Schwidefsky3610cce2007-10-22 12:52:47 +02001013 return table;
1014}
1015
Martin Schwidefsky146e4b32008-02-09 18:24:35 +01001016void page_table_free(struct mm_struct *mm, unsigned long *table)
Martin Schwidefsky3610cce2007-10-22 12:52:47 +02001017{
Martin Schwidefsky146e4b32008-02-09 18:24:35 +01001018 struct page *page;
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001019 unsigned int bit, mask;
Martin Schwidefsky3610cce2007-10-22 12:52:47 +02001020
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001021 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
Martin Schwidefsky78fb9072015-08-14 14:58:50 +02001022 if (!mm_alloc_pgste(mm)) {
1023 /* Free 2K page table fragment of a 4K page */
1024 bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
1025 spin_lock_bh(&mm->context.list_lock);
1026 mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
1027 if (mask & 3)
1028 list_add(&page->lru, &mm->context.pgtable_list);
1029 else
1030 list_del(&page->lru);
1031 spin_unlock_bh(&mm->context.list_lock);
1032 if (mask != 0)
1033 return;
Martin Schwidefsky146e4b32008-02-09 18:24:35 +01001034 }
Martin Schwidefsky3610cce2007-10-22 12:52:47 +02001035
Martin Schwidefsky78fb9072015-08-14 14:58:50 +02001036 pgtable_page_dtor(page);
1037 atomic_set(&page->_mapcount, -1);
1038 __free_page(page);
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001039}
1040
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001041void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
1042 unsigned long vmaddr)
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001043{
1044 struct mm_struct *mm;
1045 struct page *page;
1046 unsigned int bit, mask;
1047
1048 mm = tlb->mm;
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001049 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
Martin Schwidefsky78fb9072015-08-14 14:58:50 +02001050 if (mm_alloc_pgste(mm)) {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001051 gmap_unlink(mm, table, vmaddr);
Martin Schwidefsky78fb9072015-08-14 14:58:50 +02001052 table = (unsigned long *) (__pa(table) | 3);
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001053 tlb_remove_table(tlb, table);
1054 return;
Martin Schwidefsky80217142010-10-25 16:10:11 +02001055 }
Martin Schwidefsky78fb9072015-08-14 14:58:50 +02001056 bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
Martin Schwidefsky80217142010-10-25 16:10:11 +02001057 spin_lock_bh(&mm->context.list_lock);
Martin Schwidefsky78fb9072015-08-14 14:58:50 +02001058 mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
1059 if (mask & 3)
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001060 list_add_tail(&page->lru, &mm->context.pgtable_list);
Martin Schwidefsky78fb9072015-08-14 14:58:50 +02001061 else
1062 list_del(&page->lru);
Martin Schwidefsky80217142010-10-25 16:10:11 +02001063 spin_unlock_bh(&mm->context.list_lock);
Martin Schwidefsky78fb9072015-08-14 14:58:50 +02001064 table = (unsigned long *) (__pa(table) | (1U << bit));
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001065 tlb_remove_table(tlb, table);
Martin Schwidefsky80217142010-10-25 16:10:11 +02001066}
1067
Heiko Carstens63df41d62013-09-06 19:10:48 +02001068static void __tlb_remove_table(void *_table)
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001069{
Martin Schwidefsky78fb9072015-08-14 14:58:50 +02001070 unsigned int mask = (unsigned long) _table & 3;
1071 void *table = (void *)((unsigned long) _table ^ mask);
1072 struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001073
Martin Schwidefsky78fb9072015-08-14 14:58:50 +02001074 switch (mask) {
1075 case 0: /* pmd or pud */
1076 free_pages((unsigned long) table, 2);
1077 break;
1078 case 1: /* lower 2K of a 4K page table */
1079 case 2: /* higher 2K of a 4K page table */
1080 if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
1081 break;
1082 /* fallthrough */
1083 case 3: /* 4K page table with pgstes */
1084 pgtable_page_dtor(page);
1085 atomic_set(&page->_mapcount, -1);
1086 __free_page(page);
1087 break;
1088 }
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001089}
1090
Martin Schwidefskycd941542012-04-11 14:28:07 +02001091static void tlb_remove_table_smp_sync(void *arg)
1092{
1093 /* Simply deliver the interrupt */
1094}
1095
1096static void tlb_remove_table_one(void *table)
1097{
1098 /*
1099 * This isn't an RCU grace period and hence the page-tables cannot be
1100 * assumed to be actually RCU-freed.
1101 *
1102 * It is however sufficient for software page-table walkers that rely
1103 * on IRQ disabling. See the comment near struct mmu_table_batch.
1104 */
1105 smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
1106 __tlb_remove_table(table);
1107}
1108
1109static void tlb_remove_table_rcu(struct rcu_head *head)
1110{
1111 struct mmu_table_batch *batch;
1112 int i;
1113
1114 batch = container_of(head, struct mmu_table_batch, rcu);
1115
1116 for (i = 0; i < batch->nr; i++)
1117 __tlb_remove_table(batch->tables[i]);
1118
1119 free_page((unsigned long)batch);
1120}
1121
1122void tlb_table_flush(struct mmu_gather *tlb)
1123{
1124 struct mmu_table_batch **batch = &tlb->batch;
1125
1126 if (*batch) {
Martin Schwidefskycd941542012-04-11 14:28:07 +02001127 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
1128 *batch = NULL;
1129 }
1130}
1131
1132void tlb_remove_table(struct mmu_gather *tlb, void *table)
1133{
1134 struct mmu_table_batch **batch = &tlb->batch;
1135
Martin Schwidefsky5c474a12013-08-16 13:31:40 +02001136 tlb->mm->context.flush_mm = 1;
Martin Schwidefskycd941542012-04-11 14:28:07 +02001137 if (*batch == NULL) {
1138 *batch = (struct mmu_table_batch *)
1139 __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
1140 if (*batch == NULL) {
Martin Schwidefsky5c474a12013-08-16 13:31:40 +02001141 __tlb_flush_mm_lazy(tlb->mm);
Martin Schwidefskycd941542012-04-11 14:28:07 +02001142 tlb_remove_table_one(table);
1143 return;
1144 }
1145 (*batch)->nr = 0;
1146 }
1147 (*batch)->tables[(*batch)->nr++] = table;
1148 if ((*batch)->nr == MAX_TABLE_BATCH)
Martin Schwidefsky5c474a12013-08-16 13:31:40 +02001149 tlb_flush_mmu(tlb);
Martin Schwidefskycd941542012-04-11 14:28:07 +02001150}
Martin Schwidefsky36409f62011-06-06 14:14:41 +02001151
Gerald Schaefer274023d2012-10-08 16:30:21 -07001152#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001153static inline void thp_split_vma(struct vm_area_struct *vma)
Gerald Schaefer274023d2012-10-08 16:30:21 -07001154{
1155 unsigned long addr;
Gerald Schaefer274023d2012-10-08 16:30:21 -07001156
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001157 for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
1158 follow_page(vma, addr, FOLL_SPLIT);
Gerald Schaefer274023d2012-10-08 16:30:21 -07001159}
1160
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001161static inline void thp_split_mm(struct mm_struct *mm)
Gerald Schaefer274023d2012-10-08 16:30:21 -07001162{
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001163 struct vm_area_struct *vma;
Gerald Schaefer274023d2012-10-08 16:30:21 -07001164
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001165 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
Gerald Schaefer274023d2012-10-08 16:30:21 -07001166 thp_split_vma(vma);
1167 vma->vm_flags &= ~VM_HUGEPAGE;
1168 vma->vm_flags |= VM_NOHUGEPAGE;
Gerald Schaefer274023d2012-10-08 16:30:21 -07001169 }
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001170 mm->def_flags |= VM_NOHUGEPAGE;
1171}
1172#else
1173static inline void thp_split_mm(struct mm_struct *mm)
1174{
Gerald Schaefer274023d2012-10-08 16:30:21 -07001175}
1176#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1177
Carsten Otte402b0862008-03-25 18:47:10 +01001178/*
1179 * switch on pgstes for the current userspace process (for kvm)
1180 */
1181int s390_enable_sie(void)
1182{
Martin Schwidefsky0b46e0a2015-04-15 13:23:26 +02001183 struct mm_struct *mm = current->mm;
Carsten Otte402b0862008-03-25 18:47:10 +01001184
Christian Borntraeger74b6b522008-05-21 13:37:29 +02001185 /* Do we have pgstes? if yes, we are done */
Martin Schwidefsky0b46e0a2015-04-15 13:23:26 +02001186 if (mm_has_pgste(mm))
Christian Borntraeger74b6b522008-05-21 13:37:29 +02001187 return 0;
Martin Schwidefsky0b46e0a2015-04-15 13:23:26 +02001188 /* Fail if the page tables are 2K */
1189 if (!mm_alloc_pgste(mm))
1190 return -EINVAL;
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001191 down_write(&mm->mmap_sem);
Martin Schwidefsky0b46e0a2015-04-15 13:23:26 +02001192 mm->context.has_pgste = 1;
Gerald Schaefer274023d2012-10-08 16:30:21 -07001193 /* split thp mappings and disable thp for future mappings */
1194 thp_split_mm(mm);
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001195 up_write(&mm->mmap_sem);
Martin Schwidefsky0b46e0a2015-04-15 13:23:26 +02001196 return 0;
Carsten Otte402b0862008-03-25 18:47:10 +01001197}
1198EXPORT_SYMBOL_GPL(s390_enable_sie);
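/*
 * Note (illustrative, not part of the original file): s390_enable_sie() is
 * typically invoked by KVM when a VM is created. With the scheme above it
 * can only succeed if the mm was already set up to allocate full 4K page
 * tables with PGSTEs, i.e. if vm.allocate_pgste was enabled before the
 * process started (see page_table_alloc() and the sysctl above).
 */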
Hans-Joachim Picht7db11a32009-06-16 10:30:26 +02001199
Dominik Dingel934bc132014-01-14 18:10:17 +01001200/*
1201 * Enable storage key handling from now on and initialize the storage
1202 * keys with the default key.
1203 */
Dominik Dingela13cff32014-10-23 12:07:14 +02001204static int __s390_enable_skey(pte_t *pte, unsigned long addr,
1205 unsigned long next, struct mm_walk *walk)
1206{
1207 unsigned long ptev;
1208 pgste_t pgste;
1209
1210 pgste = pgste_get_lock(pte);
Dominik Dingel2faee8f2014-10-23 12:08:38 +02001211 /*
1212	 * Remove all zero page mappings; once the policy that forbids zero
1213	 * page mappings is established, subsequent faults on such pages will
1214	 * get fresh anonymous pages.
1215 */
1216 if (is_zero_pfn(pte_pfn(*pte))) {
1217 ptep_flush_direct(walk->mm, addr, pte);
1218 pte_val(*pte) = _PAGE_INVALID;
1219 }
Dominik Dingela13cff32014-10-23 12:07:14 +02001220 /* Clear storage key */
1221 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
1222 PGSTE_GR_BIT | PGSTE_GC_BIT);
1223 ptev = pte_val(*pte);
1224 if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
1225 page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
1226 pgste_set_unlock(pte, pgste);
1227 return 0;
1228}
1229
Dominik Dingel3ac8e382014-10-23 12:09:17 +02001230int s390_enable_skey(void)
Dominik Dingel934bc132014-01-14 18:10:17 +01001231{
Dominik Dingela13cff32014-10-23 12:07:14 +02001232 struct mm_walk walk = { .pte_entry = __s390_enable_skey };
1233 struct mm_struct *mm = current->mm;
Dominik Dingel3ac8e382014-10-23 12:09:17 +02001234 struct vm_area_struct *vma;
1235 int rc = 0;
Dominik Dingela13cff32014-10-23 12:07:14 +02001236
1237 down_write(&mm->mmap_sem);
1238 if (mm_use_skey(mm))
1239 goto out_up;
Dominik Dingel2faee8f2014-10-23 12:08:38 +02001240
1241 mm->context.use_skey = 1;
Dominik Dingel3ac8e382014-10-23 12:09:17 +02001242 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1243 if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
1244 MADV_UNMERGEABLE, &vma->vm_flags)) {
1245 mm->context.use_skey = 0;
1246 rc = -ENOMEM;
1247 goto out_up;
1248 }
1249 }
1250 mm->def_flags &= ~VM_MERGEABLE;
Dominik Dingel2faee8f2014-10-23 12:08:38 +02001251
Dominik Dingela13cff32014-10-23 12:07:14 +02001252 walk.mm = mm;
1253 walk_page_range(0, TASK_SIZE, &walk);
Dominik Dingela13cff32014-10-23 12:07:14 +02001254
1255out_up:
1256 up_write(&mm->mmap_sem);
Dominik Dingel3ac8e382014-10-23 12:09:17 +02001257 return rc;
Dominik Dingel934bc132014-01-14 18:10:17 +01001258}
1259EXPORT_SYMBOL_GPL(s390_enable_skey);
1260
Dominik Dingela0bf4f12014-03-24 14:27:58 +01001261/*
Dominik Dingela13cff32014-10-23 12:07:14 +02001262 * Reset CMMA state, make all pages stable again.
1263 */
1264static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
1265 unsigned long next, struct mm_walk *walk)
1266{
1267 pgste_t pgste;
1268
1269 pgste = pgste_get_lock(pte);
1270 pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
1271 pgste_set_unlock(pte, pgste);
1272 return 0;
1273}
1274
1275void s390_reset_cmma(struct mm_struct *mm)
1276{
1277 struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
1278
1279 down_write(&mm->mmap_sem);
1280 walk.mm = mm;
1281 walk_page_range(0, TASK_SIZE, &walk);
1282 up_write(&mm->mmap_sem);
1283}
1284EXPORT_SYMBOL_GPL(s390_reset_cmma);
1285
1286/*
Dominik Dingela0bf4f12014-03-24 14:27:58 +01001287 * Test and clear whether a guest page is dirty
1288 */
1289bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
1290{
1291 pte_t *pte;
1292 spinlock_t *ptl;
1293 bool dirty = false;
1294
1295 pte = get_locked_pte(gmap->mm, address, &ptl);
1296 if (unlikely(!pte))
1297 return false;
1298
1299 if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
1300 dirty = true;
1301
1302 spin_unlock(ptl);
1303 return dirty;
1304}
1305EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);
1306
Gerald Schaefer75077af2012-10-08 16:30:15 -07001307#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001308int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
1309 pmd_t *pmdp)
1310{
1311 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1312	/* No need to flush the TLB: on s390 the reference bits are kept
1313	 * in the storage key and never in the TLB */
1314 return pmdp_test_and_clear_young(vma, address, pmdp);
1315}
1316
1317int pmdp_set_access_flags(struct vm_area_struct *vma,
1318 unsigned long address, pmd_t *pmdp,
1319 pmd_t entry, int dirty)
1320{
1321 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1322
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001323 entry = pmd_mkyoung(entry);
1324 if (dirty)
1325 entry = pmd_mkdirty(entry);
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001326 if (pmd_same(*pmdp, entry))
1327 return 0;
1328 pmdp_invalidate(vma, address, pmdp);
1329 set_pmd_at(vma->vm_mm, address, pmdp, entry);
1330 return 1;
1331}
1332
Aneesh Kumar K.V6b0b50b2013-06-05 17:14:02 -07001333void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1334 pgtable_t pgtable)
Gerald Schaefer9501d092012-10-08 16:30:18 -07001335{
1336 struct list_head *lh = (struct list_head *) pgtable;
1337
Martin Schwidefskyec66ad62014-02-12 14:16:18 +01001338 assert_spin_locked(pmd_lockptr(mm, pmdp));
Gerald Schaefer9501d092012-10-08 16:30:18 -07001339
1340 /* FIFO */
Kirill A. Shutemovc389a252013-11-14 14:30:59 -08001341 if (!pmd_huge_pte(mm, pmdp))
Gerald Schaefer9501d092012-10-08 16:30:18 -07001342 INIT_LIST_HEAD(lh);
1343 else
Kirill A. Shutemovc389a252013-11-14 14:30:59 -08001344 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1345 pmd_huge_pte(mm, pmdp) = pgtable;
Gerald Schaefer9501d092012-10-08 16:30:18 -07001346}
1347
Aneesh Kumar K.V6b0b50b2013-06-05 17:14:02 -07001348pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
Gerald Schaefer9501d092012-10-08 16:30:18 -07001349{
1350 struct list_head *lh;
1351 pgtable_t pgtable;
1352 pte_t *ptep;
1353
Martin Schwidefskyec66ad62014-02-12 14:16:18 +01001354 assert_spin_locked(pmd_lockptr(mm, pmdp));
Gerald Schaefer9501d092012-10-08 16:30:18 -07001355
1356 /* FIFO */
Kirill A. Shutemovc389a252013-11-14 14:30:59 -08001357 pgtable = pmd_huge_pte(mm, pmdp);
Gerald Schaefer9501d092012-10-08 16:30:18 -07001358 lh = (struct list_head *) pgtable;
1359 if (list_empty(lh))
Kirill A. Shutemovc389a252013-11-14 14:30:59 -08001360 pmd_huge_pte(mm, pmdp) = NULL;
Gerald Schaefer9501d092012-10-08 16:30:18 -07001361 else {
Kirill A. Shutemovc389a252013-11-14 14:30:59 -08001362 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
Gerald Schaefer9501d092012-10-08 16:30:18 -07001363 list_del(lh);
1364 }
1365 ptep = (pte_t *) pgtable;
Martin Schwidefskye5098612013-07-23 20:57:57 +02001366 pte_val(*ptep) = _PAGE_INVALID;
Gerald Schaefer9501d092012-10-08 16:30:18 -07001367 ptep++;
Martin Schwidefskye5098612013-07-23 20:57:57 +02001368 pte_val(*ptep) = _PAGE_INVALID;
Gerald Schaefer9501d092012-10-08 16:30:18 -07001369 return pgtable;
1370}
Gerald Schaefer75077af2012-10-08 16:30:15 -07001371#endif /* CONFIG_TRANSPARENT_HUGEPAGE */