blob: 8d999249d3574fb880da55af3fb5ff40d7f32fe8 [file] [log] [blame]
Martin Schwidefsky3610cce2007-10-22 12:52:47 +02001/*
Heiko Carstens239a64252009-06-12 10:26:33 +02002 * Copyright IBM Corp. 2007,2009
Martin Schwidefsky3610cce2007-10-22 12:52:47 +02003 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
4 */
5
6#include <linux/sched.h>
7#include <linux/kernel.h>
8#include <linux/errno.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +09009#include <linux/gfp.h>
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020010#include <linux/mm.h>
11#include <linux/swap.h>
12#include <linux/smp.h>
13#include <linux/highmem.h>
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020014#include <linux/pagemap.h>
15#include <linux/spinlock.h>
16#include <linux/module.h>
17#include <linux/quicklist.h>
18
19#include <asm/system.h>
20#include <asm/pgtable.h>
21#include <asm/pgalloc.h>
22#include <asm/tlb.h>
23#include <asm/tlbflush.h>
Martin Schwidefsky6252d702008-02-09 18:24:37 +010024#include <asm/mmu_context.h>
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020025
#ifndef CONFIG_64BIT
/*
 * 31-bit: a 4K page holds four 1K page tables (2K pte + 2K pgste halves
 * are not used here; four 256-entry fragments instead).
 */
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL

/*
 * Initialize a page destined for pgste usage: alternate quarters of the
 * page are pte fragments (set to empty entries) and pgste storage
 * (zeroed). table points to the start of the 4K page.
 */
void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 256, 0, PAGE_SIZE/4);
	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 768, 0, PAGE_SIZE/4);
}

#else
/*
 * 64-bit: crst tables span 4 pages (order 2); a 4K page holds two
 * 2K page-table fragments.
 */
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL

/*
 * 64-bit pgste layout: first half of the page is the pte table (empty
 * entries), second half holds the per-pte guest storage extensions
 * (zeroed).
 */
void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	memset(table + 256, 0, PAGE_SIZE/2);
}

#endif
53
/*
 * Start of the vmalloc area; defaults to VMALLOC_SIZE bytes below
 * VMALLOC_END and may be moved down by the "vmalloc=" command line
 * parameter below.
 */
unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

/*
 * Early parameter handler for "vmalloc=<size>": enlarge/shrink the
 * vmalloc area by placing its start <size> bytes below VMALLOC_END,
 * rounded down to a page boundary.
 * Returns 0 on success, -EINVAL if no argument was given.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
65
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020066unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
67{
68 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
69
70 if (!page)
71 return NULL;
72 page->index = 0;
73 if (noexec) {
74 struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
75 if (!shadow) {
76 __free_pages(page, ALLOC_ORDER);
77 return NULL;
78 }
79 page->index = page_to_phys(shadow);
80 }
Martin Schwidefsky50aa98b2009-09-11 10:28:57 +020081 spin_lock(&mm->context.list_lock);
Martin Schwidefsky146e4b32008-02-09 18:24:35 +010082 list_add(&page->lru, &mm->context.crst_list);
Martin Schwidefsky50aa98b2009-09-11 10:28:57 +020083 spin_unlock(&mm->context.list_lock);
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020084 return (unsigned long *) page_to_phys(page);
85}
86
Martin Schwidefsky146e4b32008-02-09 18:24:35 +010087void crst_table_free(struct mm_struct *mm, unsigned long *table)
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020088{
89 unsigned long *shadow = get_shadow_table(table);
Martin Schwidefsky146e4b32008-02-09 18:24:35 +010090 struct page *page = virt_to_page(table);
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020091
Martin Schwidefsky50aa98b2009-09-11 10:28:57 +020092 spin_lock(&mm->context.list_lock);
Martin Schwidefsky146e4b32008-02-09 18:24:35 +010093 list_del(&page->lru);
Martin Schwidefsky50aa98b2009-09-11 10:28:57 +020094 spin_unlock(&mm->context.list_lock);
Martin Schwidefsky3610cce2007-10-22 12:52:47 +020095 if (shadow)
96 free_pages((unsigned long) shadow, ALLOC_ORDER);
97 free_pages((unsigned long) table, ALLOC_ORDER);
98}
99
#ifdef CONFIG_64BIT
/*
 * Grow the address space of mm up to at least "limit" by pushing a new
 * top-level region table above the current one (2 GiB -> 4 TiB ->
 * 8 PiB). The allocation is done without holding page_table_lock, so
 * after locking we re-check whether another thread already performed
 * the upgrade; the loop repeats until asce_limit >= limit.
 * Returns 0 on success, -ENOMEM if a table allocation fails.
 */
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	/* 1UL << 53 is the architectural maximum handled here. */
	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm, mm->context.noexec);
	if (!table)
		return -ENOMEM;
	spin_lock(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			/* 2 GiB -> 4 TiB: insert a region-third table. */
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			/* 4 TiB -> 8 PiB: insert a region-second table. */
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		/* Hook the old top-level table below the new one. */
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;	/* consumed; don't free below */
	}
	spin_unlock(&mm->page_table_lock);
	if (table)
		/* Raced with a concurrent upgrade; drop our spare table. */
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;	/* may need more than one level */
	/* Reload the address-space control for the current task. */
	update_mm(mm, current);
	return 0;
}
141
/*
 * Shrink the address space of mm back down to "limit" by removing
 * top-level region tables one at a time (the inverse of
 * crst_table_upgrade). Does nothing if the current limit is already
 * small enough. The TLB is flushed once up front since translation
 * paths change.
 */
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		/* Derive the new limit from the type of the current top table. */
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			/* Dropping a region-second table: back to 4 TiB. */
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			/* Dropping a region-third table: back to 2 GiB. */
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		/* The new top level is the table the old one pointed to. */
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	/* Reload the address-space control for the current task. */
	update_mm(mm, current);
}
#endif
174
/*
 * page table entry allocation/free routines.
 */
/*
 * Allocate a page-table fragment for mm. A 4K page is carved into
 * TABLES_PER_PAGE fragments of 256 entries each; the low bits of
 * page->flags track which fragments are in use (one bit per fragment,
 * two bits when noexec/pgste doubles the fragment size). Partially
 * used pages are kept at the head of mm->context.pgtable_list, full
 * pages at the tail.
 * Returns the physical address of the fragment, or NULL on OOM.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	/* Fragments are twice as big with noexec shadows or pgstes. */
	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
	spin_lock(&mm->context.list_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		/* Try to reuse a partially filled page first. */
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;	/* head page is full */
	}
	if (!page) {
		/* Drop the lock for the (possibly sleeping) allocation. */
		spin_unlock(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;	/* all fragments free */
		table = (unsigned long *) page_to_phys(page);
		if (mm->context.has_pgste)
			clear_table_pgstes(table);
		else
			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	/* Find the first free fragment within the page. */
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;	/* mark the fragment allocated */
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		/* Page is now full: move it out of the way. */
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock(&mm->context.list_lock);
	return table;
}
219
/*
 * Free a page-table fragment obtained from page_table_alloc(): clear
 * its usage bit(s) in page->flags and either re-list the page as
 * partially used or, when all fragments are free, unlink and release
 * the whole page.
 */
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	/* Same fragment width rule as in page_table_alloc(). */
	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
	/* Shift the mask to this fragment's position within the page. */
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock(&mm->context.list_lock);
	page->flags ^= bits;	/* clear this fragment's in-use bits */
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;	/* keep the page, nothing to free */
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock(&mm->context.list_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
Martin Schwidefsky3610cce2007-10-22 12:52:47 +0200243
/*
 * Turn off the noexec emulation for mm: release every shadow crst
 * table and mark the second halves of all page-table pages as free,
 * then clear the noexec flag and reload the address-space control for
 * tsk.
 */
void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
	struct page *page;

	spin_lock(&mm->context.list_lock);
	/* Free shadow region and segment tables. */
	list_for_each_entry(page, &mm->context.crst_list, lru)
		if (page->index) {
			/* page->index holds the shadow table's address. */
			free_pages((unsigned long) page->index, ALLOC_ORDER);
			page->index = 0;
		}
	/* "Free" second halves of page tables. */
	list_for_each_entry(page, &mm->context.pgtable_list, lru)
		page->flags &= ~SECOND_HALVES;
	spin_unlock(&mm->context.list_lock);
	mm->context.noexec = 0;
	update_mm(mm, tsk);
}
Carsten Otte402b0862008-03-25 18:47:10 +0100262
/*
 * switch on pgstes for its userspace process (for kvm)
 */
/*
 * Replace the current task's mm with a copy whose page tables carry
 * pgstes (guest storage extensions), a prerequisite for running SIE.
 * Only possible for a single-threaded task with no pending AIO and a
 * switched addressing mode. The "check, dup_mm, re-check" dance exists
 * because dup_mm can sleep: the preconditions are verified again after
 * the copy before the mm is swapped in.
 * Returns 0 on success (or if pgstes are already enabled), -EINVAL if
 * the preconditions fail, -ENOMEM if dup_mm fails.
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have switched amode? If no, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? if yes, we are done */
	if (tsk->mm->context.has_pgste)
		return 0;

	/* lets check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* we copy the mm and let dup_mm create the page tables with_pgstes */
	tsk->mm->context.alloc_pgste = 1;	/* window: new tables get pgstes */
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now lets check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		/* Lost the race; discard the copy. */
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	/* Activate the new mm on this CPU before anyone can preempt us. */
	update_mm(mm, tsk);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
Hans-Joachim Picht7db11a32009-06-16 10:30:26 +0200322
#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
/*
 * Test whether a kernel page is currently mapped, for the hibernation
 * code when DEBUG_PAGEALLOC may have unmapped it. Uses the LRA (load
 * real address) instruction on the page's physical address and checks
 * the resulting condition code; cc == 0 indicates a successful
 * translation, i.e. the page is present.
 */
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"	/* attempt address translation */
		"	ipm	%0\n"		/* insert program mask (cc in bits 28-29) */
		"	srl	%0,28"		/* shift cc down to the low bits */
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */