/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

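/*
 * Editor's note, not part of the original source: CRST (region and
 * segment) tables span 2^ALLOC_ORDER pages, i.e. 16KB on 64-bit and 8KB
 * on 31-bit, matching 2048 entries of 8 resp. 4 bytes. Page tables are
 * smaller than a page, so a 4KB page is carved into fragments: two 2KB
 * tables on 64-bit (FRAG_MASK 0x03) or four 1KB tables on 31-bit
 * (FRAG_MASK 0x0f), with one allocation bit per fragment kept in
 * page->_mapcount.
 */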

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	return 0;
}
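
/*
 * Editor's note, not part of the original source: the upgrade grows the
 * address space one level at a time, from a 2GB segment-table ASCE
 * (1UL << 31) to a region-3 table (4TB, 1UL << 42) and finally a
 * region-2 table (8PB, 1UL << 53). The allocate-unlocked / recheck /
 * retry loop above tolerates concurrent upgraders: whoever loses the
 * race frees its table and checks the limit again.
 */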

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INV)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INV) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
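
/*
 * Editor's note, not part of the original source: a sketch of the
 * intended calling sequence, with made-up variable names. A typical
 * user (e.g. a KVM-style hypervisor) maps a piece of its own address
 * space into the guest space and resolves guest faults through it:
 *
 *	struct gmap *g = gmap_alloc(current->mm);
 *	if (!g)
 *		return -ENOMEM;
 *	rc = gmap_map_segment(g, uaddr, gaddr, size);	(1MB aligned)
 *	vmaddr = gmap_fault(gaddr, g);	(parent address or error value)
 *	...
 *	gmap_unmap_segment(g, gaddr, size);
 *	gmap_free(g);
 */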

static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
	unsigned long *table;

	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);
	return table;
}
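
/*
 * Editor's note, not part of the original source: the walk above mirrors
 * the four-level s390 table layout. Every table has 2048 entries (hence
 * the 0x7ff index mask); the shifts pick the region-1 (>> 53), region-2
 * (>> 42), region-3 (>> 31) and segment (>> 20) index of the guest
 * address, and the result points at the 1MB segment table entry.
 */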

/**
 * __gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, vmaddr, segment;
	struct gmap_pgtable *mp;
	struct page *page;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return PTR_ERR(segment_ptr);
	/* Convert the gmap address to an mm address. */
	segment = *segment_ptr;
	if (!(segment & _SEGMENT_ENTRY_INV)) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_RO) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
				unsigned long *segment_ptr, struct gmap *gmap)
{
	unsigned long vmaddr;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct mm_struct *mm;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	mm = gmap->mm;
	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
	vma = find_vma(mm, vmaddr);
	if (!vma || vma->vm_start > vmaddr)
		return -EFAULT;
	/* Walk the parent mm page table */
	pgd = pgd_offset(mm, vmaddr);
	pud = pud_alloc(mm, pgd, vmaddr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, vmaddr);
	if (!pmd)
		return -ENOMEM;
	if (!pmd_present(*pmd) &&
	    __pte_alloc(mm, vma, pmd, vmaddr))
		return -ENOMEM;
	/* pmd now points to a valid segment table entry. */
	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
	if (!rmap)
		return -ENOMEM;
	/* Link gmap segment table entry location to page table. */
	page = pmd_page(*pmd);
	mp = (struct gmap_pgtable *) page->index;
	rmap->gmap = gmap;
	rmap->entry = segment_ptr;
	rmap->vmaddr = address;
	spin_lock(&mm->page_table_lock);
	if (*segment_ptr == segment) {
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
		rmap = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	kfree(rmap);
	return 0;
}

static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry =
			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, segment;
	struct gmap_pgtable *mp;
	struct page *page;
	int rc;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return -EFAULT;
	/* Convert the gmap address to an mm address. */
	while (1) {
		segment = *segment_ptr;
		if (!(segment & _SEGMENT_ENTRY_INV)) {
			/* Page table is present */
			page = pfn_to_page(segment >> PAGE_SHIFT);
			mp = (struct gmap_pgtable *) page->index;
			return mp->vmaddr | (address & ~PMD_MASK);
		}
		if (!(segment & _SEGMENT_ENTRY_RO))
			/* Nothing mapped in the gmap address space. */
			break;
		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
		if (rc)
			return rc;
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);

	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @start: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_fault(start, gmap);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (get_user_pages(current, gmap->mm, addr, 1, 1, 0,
				   NULL, NULL) != 1) {
			rc = -EFAULT;
			break;
		}
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		if (unlikely(!ptep))
			continue;
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= RCP_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			start += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		spin_unlock(ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);
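
/*
 * Editor's note, not part of the original source: a sketch of how the
 * notifier pieces fit together, with made-up names. A consumer
 * registers a callback once and then arms individual guest ranges:
 *
 *	static void my_notifier(struct gmap *gmap, unsigned long gaddr)
 *	{
 *		... react to the invalidation of a marked page ...
 *	}
 *	static struct gmap_notifier nb = { .notifier_call = my_notifier };
 *
 *	gmap_register_ipte_notifier(&nb);
 *	rc = gmap_ipte_notify(gmap, gaddr, PAGE_SIZE);
 *
 * The callback fires from gmap_do_ipte_notify() below when a marked
 * pte is invalidated.
 */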

/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
{
	unsigned long segment_offset;
	struct gmap_notifier *nb;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	segment_offset = segment_offset * (4096 / sizeof(pte_t));
	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(rmap, &mp->mapper, list) {
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(rmap->gmap,
					  rmap->vmaddr + segment_offset);
	}
	spin_unlock(&gmap_notifier_lock);
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
					   unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}
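
/*
 * Editor's note, not part of the original source: atomic_xor_bits() is
 * a lock-free toggle built on a compare-and-swap loop; for example
 *
 *	mask = atomic_xor_bits(&page->_mapcount, 0x01);
 *
 * atomically flips the state of the first page table fragment and
 * returns the resulting bit set, so the caller can tell whether the
 * page just became completely full or completely empty.
 */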

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}
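
/*
 * Editor's note, not part of the original source: in the else-branch
 * above, the loop scans the fragment bits from bit 0 upward; the first
 * clear bit is a free 1K/2K slot and the XOR claims it. A page whose
 * low bits are all set (mask == FRAG_MASK) is full and is taken off the
 * per-mm pgtable_list until one of its fragments is freed again.
 */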

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_disconnect_pgtable(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_disconnect_pgtable(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}
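
/*
 * Editor's note, not part of the original source: page tables are at
 * least 1KB aligned, so the low bits of the table pointer are free to
 * carry a type tag through the RCU batch. page_table_free_rcu() encodes
 * the fragment bit shifted up by four (or FRAG_MASK for a pgste page
 * table); __tlb_remove_table() decodes it, treating a zero tag as a
 * full CRST table of 2^ALLOC_ORDER pages.
 */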

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		__tlb_flush_mm(tlb->mm);
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}
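
/*
 * Editor's note, not part of the original source: the batching above
 * defers the actual freeing past an RCU-sched grace period so that
 * lockless walkers such as gup-fast, which merely disable interrupts,
 * can never observe a freed table. When the GFP_NOWAIT batch page is
 * unavailable, tlb_remove_table_one() gets the same guarantee
 * synchronously: the IPI broadcast cannot complete while any CPU is
 * still walking with interrupts disabled.
 */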

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;
	struct page *page;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		page = follow_page(vma, addr, FOLL_SPLIT);
	}
}

void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
		vma = vma->vm_next;
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have switched amode? If not, we cannot do sie */
	if (s390_user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* Let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	/* make sure that both mms have a correct rss state */
	sync_mm_rss(tsk->mm);
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	mm->def_flags |= VM_NOHUGEPAGE;
#endif

	/* Now let's check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	ptep++;
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	return pgtable;
}
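
/*
 * Editor's note, not part of the original source: the deposit/withdraw
 * pair parks the spare page table of a huge pmd on a per-mm list that
 * is threaded through the unused pte page itself; withdraw therefore
 * re-initializes the first two ptes, which served as the list_head
 * while the table was parked.
 */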
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */