/*
 *    Copyright IBM Corp. 2007,2009
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

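/*
 * Per-cpu batch used to free page tables and crst tables after an
 * RCU grace period. Page table pointers are filled in from index 0
 * upwards, crst table pointers from RCU_FREELIST_SIZE downwards;
 * the batch is full when the two indices meet.
 */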
struct rcu_table_freelist {
	struct rcu_head rcu;
	struct mm_struct *mm;
	unsigned int pgt_index;
	unsigned int crst_index;
	unsigned long *table[0];
};

#define RCU_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
	  / sizeof(unsigned long))

static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);

static void __page_table_free(struct mm_struct *mm, unsigned long *table);

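/*
 * Return the per-cpu freelist batch, allocating a new page for it if
 * necessary. May return NULL if the GFP_ATOMIC allocation fails;
 * callers fall back to synchronous freeing in that case.
 */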
static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
{
	struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
	struct rcu_table_freelist *batch = *batchp;

	if (batch)
		return batch;
	batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
	if (batch) {
		batch->mm = mm;
		batch->pgt_index = 0;
		batch->crst_index = RCU_FREELIST_SIZE;
		*batchp = batch;
	}
	return batch;
}

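/*
 * RCU callback: release all page table and crst table fragments
 * queued in the batch, then free the batch page itself.
 */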
static void rcu_table_freelist_callback(struct rcu_head *head)
{
	struct rcu_table_freelist *batch =
		container_of(head, struct rcu_table_freelist, rcu);

	while (batch->pgt_index > 0)
		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
	while (batch->crst_index < RCU_FREELIST_SIZE)
		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
	free_page((unsigned long) batch);
}

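/*
 * Hand the current cpu's batch over to RCU and clear the per-cpu
 * pointer, so that a fresh batch is allocated on the next free.
 */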
void rcu_table_freelist_finish(void)
{
	struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);

	if (!batch)
		return;
	call_rcu(&batch->rcu, rcu_table_freelist_callback);
	__get_cpu_var(rcu_table_freelist) = NULL;
}

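/*
 * Empty function, used with smp_call_function() purely to
 * synchronize with all other cpus when no RCU batch is available.
 */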
static void smp_sync(void *arg)
{
}

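/*
 * Table geometry: a 31-bit kernel uses 8K crst tables (allocation
 * order 1) and packs four 1K page tables into each 4K page; a 64-bit
 * kernel uses 16K crst tables (allocation order 2) and packs two 2K
 * page tables per page. FRAG_MASK covers the per-fragment allocation
 * bits kept in page->flags.
 */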
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 256, 0, PAGE_SIZE/4);
	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 768, 0, PAGE_SIZE/4);
}

#else
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	memset(table + 256, 0, PAGE_SIZE/2);
}

#endif

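/*
 * VMALLOC_START is computed from VMALLOC_END and can be moved down
 * with the "vmalloc=<size>" early parameter to enlarge the vmalloc
 * area.
 */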
unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

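/*
 * Allocate a full crst (region or segment) table. The mm argument is
 * unused here, presumably kept for symmetry with crst_table_free().
 */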
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

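/*
 * Free a crst table after an RCU grace period. If the mm is single
 * threaded and only attached to this cpu the table is freed directly;
 * otherwise it is queued in the per-cpu batch, falling back to an
 * explicit synchronization with all cpus when no batch is available.
 */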
void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;

	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		crst_table_free(mm, table);
		return;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		crst_table_free(mm, table);
		return;
	}
	batch->table[--batch->crst_index] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
}

#ifdef CONFIG_64BIT
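/*
 * Grow the address space of the mm by installing additional region
 * tables above the current top-level table until asce_limit reaches
 * at least the requested limit (2^42 with a region-third table,
 * 2^53 with a region-second table).
 */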
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

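/*
 * Shrink the address space again: flush the TLB and strip top-level
 * region tables until asce_limit is at or below the requested limit.
 */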
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

/*
 * Page table entry allocation/free routines.
 */
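/*
 * Allocate a page table fragment (2K on 64-bit, 1K on 31-bit) from a
 * partially used page on mm->context.pgtable_list, or from a freshly
 * allocated page if none has a free fragment. With pgstes enabled
 * (for KVM) a page table and its pgste extension occupy two adjacent
 * fragments. Allocation state is tracked in the low bits of
 * page->flags.
 */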
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	spin_lock_bh(&mm->context.list_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		if (mm->context.has_pgste)
			clear_table_pgstes(table);
		else
			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

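/*
 * Free a page table fragment without touching the pgtable_list; the
 * fragment bits are encoded in the low bits of the table pointer by
 * page_table_free_rcu(). Used only from the RCU callback.
 */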
static void __page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = ((unsigned long) table) & 15;
	table = (unsigned long *)(((unsigned long) table) ^ bits);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	page->flags ^= bits;
	if (!(page->flags & FRAG_MASK)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

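/*
 * Immediately free a page table fragment: clear its allocation bits,
 * put the page back on (or take it off) mm->context.pgtable_list and
 * release the page once all of its fragments are free.
 */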
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		if (!list_empty(&page->lru))
			list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

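/*
 * Free a page table fragment after an RCU grace period. The page is
 * taken off the pgtable_list first so that none of its fragments can
 * be reused before the grace period has passed. Single threaded mms
 * attached only to this cpu free the table directly.
 */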
void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;
	struct page *page;
	unsigned long bits;

	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		page_table_free(mm, table);
		return;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		page_table_free(mm, table);
		return;
	}
	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	/* Delayed freeing with rcu prevents reuse of pgtable fragments */
	list_del_init(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *)(((unsigned long) table) | bits);
	batch->table[batch->pgt_index++] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
}

/*
 * Switch on pgstes for the current userspace process (needed for KVM).
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have a switched amode? If not, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (tsk->mm->context.has_pgste)
		return 0;

	/* Let's check whether we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again whether something happened meanwhile */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* OK, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
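/*
 * Check whether a kernel page is mapped, by doing a load-real-address
 * (lra) on its physical address and testing the condition code.
 */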
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */