/*
 *    Copyright IBM Corp. 2007,2009
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

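/*
 * Batch of tables to be freed after the RCU grace period. Page table
 * pointers are filled in from index 0 upwards (pgt_index), crst table
 * pointers from RCU_FREELIST_SIZE downwards (crst_index); the batch is
 * full once the two indices meet.
 */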
struct rcu_table_freelist {
	struct rcu_head rcu;
	struct mm_struct *mm;
	unsigned int pgt_index;
	unsigned int crst_index;
	unsigned long *table[0];
};

#define RCU_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
	  / sizeof(unsigned long))

static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);

static void __page_table_free(struct mm_struct *mm, unsigned long *table);

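/*
 * Return the cpu-local freelist batch, allocating a fresh page with
 * GFP_ATOMIC on first use. May return NULL if the allocation fails.
 */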
static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
{
	struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
	struct rcu_table_freelist *batch = *batchp;

	if (batch)
		return batch;
	batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
	if (batch) {
		batch->mm = mm;
		batch->pgt_index = 0;
		batch->crst_index = RCU_FREELIST_SIZE;
		*batchp = batch;
	}
	return batch;
}

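/*
 * RCU callback: free all tables queued in the batch, then the batch
 * page itself.
 */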
static void rcu_table_freelist_callback(struct rcu_head *head)
{
	struct rcu_table_freelist *batch =
		container_of(head, struct rcu_table_freelist, rcu);

	while (batch->pgt_index > 0)
		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
	while (batch->crst_index < RCU_FREELIST_SIZE)
		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
	free_page((unsigned long) batch);
}

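/* Hand the current cpu's batch, if any, over to RCU for delayed freeing. */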
void rcu_table_freelist_finish(void)
{
	struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
	struct rcu_table_freelist *batch = *batchp;

	if (!batch)
		goto out;
	call_rcu(&batch->rcu, rcu_table_freelist_callback);
	*batchp = NULL;
out:
	put_cpu_var(rcu_table_freelist);
}

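/*
 * Deliberately empty IPI handler: smp_call_function(smp_sync, NULL, 1)
 * merely waits until all other cpus have taken the interrupt, which is
 * used as a synchronization point when no RCU batch could be allocated.
 */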
static void smp_sync(void *arg)
{
}

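/*
 * Table geometry: on 31 bit a page table has 256 4-byte entries (1KB),
 * so four fragments fit into one 4KB page, and the 2048-entry crst
 * tables need an order-1 allocation. On 64 bit the entries are 8 bytes,
 * so only two 2KB page tables fit per page and crst tables grow to
 * order 2. A page table with pgstes takes two fragments, hence the
 * 3UL bit pattern used in page_table_alloc().
 */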
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 256, 0, PAGE_SIZE/4);
	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 768, 0, PAGE_SIZE/4);
}

#else
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	memset(table + 256, 0, PAGE_SIZE/2);
}

#endif

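/*
 * VMALLOC_START is derived from the fixed VMALLOC_END; the default size
 * of the vmalloc area can be overridden with the "vmalloc=<size>" early
 * parameter.
 */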
unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

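/*
 * Allocate a crst (region or segment) table of ALLOC_ORDER pages. The
 * kernel runs with a 1:1 mapping, so the physical address returned by
 * page_to_phys() is directly usable as a pointer.
 */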
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

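/*
 * Free a crst table after an RCU grace period. If the mm is not shared
 * and only attached to the current cpu, no other cpu can walk the table
 * and it is freed immediately; if no batch can be allocated, the other
 * cpus are synchronized with an empty IPI before the direct free.
 */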
void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;

	preempt_disable();
	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		crst_table_free(mm, table);
		goto out;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		crst_table_free(mm, table);
		goto out;
	}
	batch->table[--batch->crst_index] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
out:
	preempt_enable();
}

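/*
 * Extend the address space of the mm to at least "limit" by stacking
 * additional region tables on top of the current pgd. A racing upgrade
 * by another thread is handled by rechecking asce_limit under the lock
 * and retrying.
 */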
#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

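/*
 * Shrink the address space of the mm by removing region table levels
 * from the top until asce_limit is at or below "limit".
 */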
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

/*
 * page table entry allocation/free routines.
 *
 * A 4K page holds up to TABLES_PER_PAGE page table fragments; the low
 * bits of page->flags (FRAG_MASK) track which fragments are in use, and
 * pages with a free fragment are kept on mm->context.pgtable_list.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	spin_lock_bh(&mm->context.list_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		if (mm->context.has_pgste)
			clear_table_pgstes(table);
		else
			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

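/*
 * Free fragments of a page table page from the RCU callback. The low
 * bits of the table pointer encode which fragments to release; the page
 * itself goes back to the allocator once all fragments are free.
 */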
static void __page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = ((unsigned long) table) & 15;
	table = (unsigned long *)(((unsigned long) table) ^ bits);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	page->flags ^= bits;
	if (!(page->flags & FRAG_MASK)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

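/*
 * Return a page table fragment to its mm. The page stays on
 * pgtable_list while other fragments remain in use and is freed once
 * the last fragment is returned.
 */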
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		if (!list_empty(&page->lru))
			list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

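/*
 * RCU variant of page_table_free(). The page is taken off pgtable_list
 * so no fragment can be reused before the grace period has passed, and
 * the fragment bits are carried in the low bits of the table pointer
 * for __page_table_free().
 */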
void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;
	struct page *page;
	unsigned long bits;

	preempt_disable();
	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		page_table_free(mm, table);
		goto out;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		page_table_free(mm, table);
		goto out;
	}
	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	/* Delayed freeing with rcu prevents reuse of pgtable fragments */
	list_del_init(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *)(((unsigned long) table) | bits);
	batch->table[batch->pgt_index++] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
out:
	preempt_enable();
}

/*
 * Switch on pgstes for the userspace process (for kvm). The existing
 * page tables carry no pgstes, so the mm is duplicated with alloc_pgste
 * set and the copy replaces the original.
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have a switched amode? If not, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (tsk->mm->context.has_pgste)
		return 0;

	/* Let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* OK, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
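/*
 * Probe with "lra" (load real address) whether the page is currently
 * mapped; condition code 0 means the address translation succeeded.
 */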
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */