/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/setup.h>
#include <asm/ipl.h>

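/*
 * Operation codes for the ESSA instruction ("extract and set storage
 * attributes", opcode 0xb9ab): "stable" tells the host the page content
 * is in use and must be preserved, "unused" tells it the backing frame
 * may be discarded.
 */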
#define ESSA_SET_STABLE		1
#define ESSA_SET_UNUSED		2

static int cmma_flag = 1;

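/* Parse the "cmma=" kernel command line parameter. */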
static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

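/*
 * Check whether the ESSA instruction is available. If it is not, the
 * probe below takes a program check: the exception PSW then points
 * past the faulting instruction, i.e. at label 0, and the EX_TABLE
 * fixup resumes at label 1, skipping the "la" that would clear rc.
 */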
void __init cmma_init(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	if (!cmma_flag)
		return;
	/*
	 * Disable CMM for dump, otherwise the tprot based memory
	 * detection can fail because of unstable pages.
	 */
	if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
		cmma_flag = 0;
		return;
	}
	asm volatile(
		"	.insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	if (rc)
		cmma_flag = 0;
}

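/* Mark each page of a 2^order block as unused; the host may discard it. */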
static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

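/* Page allocator hook: pages returned to the allocator become unused. */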
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unstable(page, order);
}

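/* Mark each page of a 2^order block as stable; its content must be kept. */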
static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

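/* Page allocator hook: pages handed out must be stable before use. */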
void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable(page, order);
}

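/*
 * Walk the free lists of all populated zones and set every free page
 * to the requested state, e.g. so that a hibernation snapshot sees
 * only stable pages. When going stable, drain_local_pages() first
 * moves this CPU's per-cpu pages back onto the zone free lists so the
 * walk does not miss them.
 */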
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable(page, order);
				else
					set_page_unstable(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}