/*
 *  Copyright IBM Corp. 2007,2009
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

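/*
 * A batch of page tables waiting for an RCU grace period before they
 * may be freed. The table[] array is filled from both ends: page
 * tables from index 0 upwards (pgt_index), crst tables from
 * RCU_FREELIST_SIZE downwards (crst_index). The batch is full once
 * the two indices meet.
 */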
struct rcu_table_freelist {
	struct rcu_head rcu;
	struct mm_struct *mm;
	unsigned int pgt_index;
	unsigned int crst_index;
	unsigned long *table[0];
};

#define RCU_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
	  / sizeof(unsigned long))

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);

static void __page_table_free(struct mm_struct *mm, unsigned long *table);
static void __crst_table_free(struct mm_struct *mm, unsigned long *table);

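/*
 * Return the current per-cpu freelist batch, allocating a fresh page
 * for it if none exists. Returns NULL if the atomic allocation fails.
 */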
static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
{
	struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
	struct rcu_table_freelist *batch = *batchp;

	if (batch)
		return batch;
	batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
	if (batch) {
		batch->mm = mm;
		batch->pgt_index = 0;
		batch->crst_index = RCU_FREELIST_SIZE;
		*batchp = batch;
	}
	return batch;
}

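/*
 * RCU callback: a grace period has elapsed, so no CPU can still be
 * walking the batched tables. Free all queued page tables and crst
 * tables, then the batch page itself.
 */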
static void rcu_table_freelist_callback(struct rcu_head *head)
{
	struct rcu_table_freelist *batch =
		container_of(head, struct rcu_table_freelist, rcu);

	while (batch->pgt_index > 0)
		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
	while (batch->crst_index < RCU_FREELIST_SIZE)
		__crst_table_free(batch->mm, batch->table[batch->crst_index++]);
	free_page((unsigned long) batch);
}

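/*
 * Hand the current per-cpu batch over to RCU and clear the per-cpu
 * pointer, so the next caller starts a new batch.
 */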
void rcu_table_freelist_finish(void)
{
	struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);

	if (!batch)
		return;
	call_rcu(&batch->rcu, rcu_table_freelist_callback);
	__get_cpu_var(rcu_table_freelist) = NULL;
}

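/*
 * Empty IPI handler: smp_call_function(smp_sync, NULL, 1) is used as
 * a synchronization point when no RCU batch could be allocated - once
 * it returns, every other CPU has taken the interrupt, so a table can
 * be freed directly.
 */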
static void smp_sync(void *arg)
{
}

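/*
 * Table geometry, inferred from the constants below: on 31 bit a crst
 * table spans two pages (order 1) and four 1K page tables fit into one
 * 4K page, so FRAG_MASK needs four allocation bits. On 64 bit a crst
 * table spans four pages (order 2) and two 2K page tables fit into one
 * page. SECOND_HALVES marks the upper half of each fragment pair,
 * which holds the shadow table or the pgstes.
 */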
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 256, 0, PAGE_SIZE/4);
	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 768, 0, PAGE_SIZE/4);
}

#else
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	memset(table + 256, 0, PAGE_SIZE/2);
}

#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

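/*
 * Parse the "vmalloc=<size>" kernel parameter: move VMALLOC_START
 * down to make room for a vmalloc area of the requested size below
 * VMALLOC_END.
 */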
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

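/*
 * Allocate a crst (region or segment) table and link it into the
 * mm's crst_list. With noexec a second, shadow table is allocated
 * as well; its physical address is kept in page->index.
 */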
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
		if (!shadow) {
			__free_pages(page, ALLOC_ORDER);
			return NULL;
		}
		page->index = page_to_phys(shadow);
	}
	spin_lock_bh(&mm->context.list_lock);
	list_add(&page->lru, &mm->context.crst_list);
	spin_unlock_bh(&mm->context.list_lock);
	return (unsigned long *) page_to_phys(page);
}

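/* Free a crst table and its shadow table, if any. */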
static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	unsigned long *shadow = get_shadow_table(table);

	if (shadow)
		free_pages((unsigned long) shadow, ALLOC_ORDER);
	free_pages((unsigned long) table, ALLOC_ORDER);
}

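/* Unlink a crst table from the mm and free it immediately. */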
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page = virt_to_page(table);

	spin_lock_bh(&mm->context.list_lock);
	list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	__crst_table_free(mm, table);
}

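/*
 * Free a crst table with delayed freeing against concurrent users of
 * the mm. If the mm has a single user running on this CPU only, the
 * table can be freed directly. Otherwise it is queued on the per-cpu
 * RCU batch; if no batch can be allocated, an empty IPI to all CPUs
 * serves as the fallback synchronization before the direct free.
 */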
void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;
	struct page *page = virt_to_page(table);

	spin_lock_bh(&mm->context.list_lock);
	list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		__crst_table_free(mm, table);
		return;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		__crst_table_free(mm, table);
		return;
	}
	batch->table[--batch->crst_index] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
}

#ifdef CONFIG_64BIT
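/*
 * Grow the address space of an mm by adding higher level region
 * tables until asce_limit reaches at least the requested limit
 * (first 2^42, then 2^53 bytes). The new top level table is
 * allocated outside of page_table_lock; if another thread upgraded
 * the mm in the meantime, the spare table is freed again.
 */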
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm, mm->context.noexec);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

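/*
 * Shrink the address space again: pop top level region tables off
 * the mm until asce_limit is no larger than the requested limit,
 * flushing the TLB first so no stale higher level entries remain.
 */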
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

/*
 * page table entry allocation/free routines.
 */
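/*
 * Allocate a page table fragment. Partially used pages are kept on
 * mm->context.pgtable_list; a fragment is carved out of the first
 * page with free slots, and a fully used page is moved to the tail
 * of the list. The low bits of page->flags track which fragments are
 * in use; with noexec or pgstes each table takes two adjacent
 * fragments at once (bits = 3UL instead of 1UL).
 */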
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
	spin_lock_bh(&mm->context.list_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		if (mm->context.has_pgste)
			clear_table_pgstes(table);
		else
			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

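/*
 * Release one fragment of a page table page; the low bits of the
 * (deliberately misaligned) table pointer encode which fragment
 * bits to clear. The page is freed once its last fragment is gone.
 */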
static void __page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = ((unsigned long) table) & 15;
	table = (unsigned long *)(((unsigned long) table) ^ bits);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	page->flags ^= bits;
	if (!(page->flags & FRAG_MASK)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

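/*
 * Return a page table fragment to its page. The page moves back to
 * the front of pgtable_list while other fragments remain in use and
 * is freed once the last fragment has been released.
 */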
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		if (!list_empty(&page->lru))
			list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

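/*
 * RCU variant of page_table_free(). As in crst_table_free_rcu() the
 * table is freed directly if the mm has a single user running on
 * this CPU only; otherwise the fragment bits are encoded into the
 * table pointer and the table is queued on the per-cpu batch.
 */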
void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;
	struct page *page;
	unsigned long bits;

	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		page_table_free(mm, table);
		return;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		page_table_free(mm, table);
		return;
	}
	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	/* Delayed freeing with rcu prevents reuse of pgtable fragments */
	list_del_init(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *)(((unsigned long) table) | bits);
	batch->table[batch->pgt_index++] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
}

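/*
 * Turn off the no-exec feature for an mm: free all shadow crst
 * tables, mark the second halves of the page table pages as
 * available again, and reload the address space parameters.
 */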
void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
	struct page *page;

	spin_lock_bh(&mm->context.list_lock);
	/* Free shadow region and segment tables. */
	list_for_each_entry(page, &mm->context.crst_list, lru)
		if (page->index) {
			free_pages((unsigned long) page->index, ALLOC_ORDER);
			page->index = 0;
		}
	/* "Free" second halves of page tables. */
	list_for_each_entry(page, &mm->context.pgtable_list, lru)
		page->flags &= ~SECOND_HALVES;
	spin_unlock_bh(&mm->context.list_lock);
	mm->context.noexec = 0;
	update_mm(mm, tsk);
}

/*
 * Switch on pgstes for the userspace process of the current task
 * (needed for KVM).
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Has the address mode been switched? If not, we cannot do SIE. */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done. */
	if (tsk->mm->context.has_pgste)
		return 0;

	/* Let's check whether we are allowed to replace the mm. */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes. */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again whether something happened in the meantime. */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* OK, we are alone. No ptrace, no threads, etc. Switch over. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
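/*
 * Test whether a page is currently mapped in the kernel address
 * space: LRA (load real address) sets a nonzero condition code when
 * the translation fails, without taking an exception.
 */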
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_DEBUG_PAGEALLOC && CONFIG_HIBERNATION */