/*
 * arch/s390/mm/page-states.c
 *
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>

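/*
 * Operation codes for the ESSA instruction (Extract and Set Storage
 * Attributes, .insn rrf,0xb9ab0000 below), which sets the guest page
 * state used for CMMA page hinting: "stable" marks a page as in use,
 * "unused" tells the host that its content need not be preserved.
 */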
#define ESSA_SET_STABLE		1
#define ESSA_SET_UNUSED		2

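/*
 * Non-zero when "cmma=" enables page hinting on the command line;
 * cleared again by cmma_init() if the machine does not support ESSA.
 */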
static int cmma_flag;

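/*
 * Parser for the early "cmma=" kernel parameter, e.g. "cmma=on" or
 * "cmma=off". Anything other than yes/on/no/off is rejected.
 */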
static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}

__setup("cmma=", cmma);

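/*
 * Probe for ESSA at boot: rc starts out as -EOPNOTSUPP. If the
 * instruction is not available, the resulting program check is fixed
 * up via the EX_TABLE entry and rc stays unchanged; otherwise the
 * "la %0,0" clears rc. CMMA stays enabled only if the probe succeeds.
 */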
void __init cmma_init(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	if (!cmma_flag)
		return;
	asm volatile(
		"	.insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	if (rc)
		cmma_flag = 0;
}

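/*
 * Called when a block of pages is returned to the page allocator:
 * mark every page of the 2^order block as "unused" via ESSA, hinting
 * to the host that its content need not be preserved.
 */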
void arch_free_page(struct page *page, int order)
{
	int i, rc;

	if (!cmma_flag)
		return;
	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
			       "i" (ESSA_SET_UNUSED));
}

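/*
 * Called when a block of pages is handed out by the page allocator:
 * switch every page of the 2^order block back to the "stable" state
 * before it is used again.
 */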
void arch_alloc_page(struct page *page, int order)
{
	int i, rc;

	if (!cmma_flag)
		return;
	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
			       "i" (ESSA_SET_STABLE));
}