/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>

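/*
 * The ESSA instruction ("extract and set storage attributes") sets the
 * usage state of a page frame. Only the two states used below are
 * defined here: stable (contents must be preserved by the host) and
 * unused (contents may be discarded by the host).
 */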
#define ESSA_SET_STABLE		1
#define ESSA_SET_UNUSED		2

static int cmma_flag = 1;

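/*
 * Parser for the "cmma=" kernel parameter: "yes" or "on" enables guest
 * page hinting, "no" or "off" disables it, anything else is an error.
 */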
static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

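/*
 * Probe for the ESSA instruction by executing it once. If the machine
 * does not implement it, the execution faults, the EX_TABLE fixup
 * branches past the "la %0,0", rc keeps its -EOPNOTSUPP preset and
 * page hinting is switched off.
 */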
void __init cmma_init(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	if (!cmma_flag)
		return;
	asm volatile(
		"	.insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	if (rc)
		cmma_flag = 0;
}

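/*
 * Mark each page of a 2^order block as unused. The host may then
 * reclaim the frame without preserving its contents.
 */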
static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

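/*
 * Page allocator hook, called whenever a block of pages is freed:
 * tag the pages as unused so the host may take them away.
 */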
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unstable(page, order);
}

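/*
 * Mark each page of a 2^order block as stable: from now on the host
 * has to preserve the contents of the frame.
 */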
static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

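/*
 * Page allocator hook, called whenever a block of pages is handed out:
 * make the pages stable again before the kernel writes to them.
 */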
void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable(page, order);
}

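/*
 * Set all free pages to the stable or unstable state, e.g. to give
 * the hibernation code a defined view of memory. For make_stable the
 * local per-cpu lists are drained first so those pages end up on the
 * free lists as well; the free lists of every populated zone are then
 * walked under the zone lock.
 */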
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable(page, order);
				else
					set_page_unstable(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}