/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>

#define ADDR2G (1ULL << 31)

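/*
 * Memory is probed with the tprot instruction in steps of one storage
 * increment (rzm, reported by SCLP): tprot tells whether a real address
 * is read-write, read-only or not assigned, and adjacent increments
 * with the same result are merged into a single mem_chunk.
 */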
static void find_memory_chunks(struct mem_chunk chunk[], unsigned long maxsize)
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;

	/* Storage increment size and maximum number of increments (SCLP). */
	rzm = sclp_get_rzm();
	rnmax = sclp_get_rnmax();
	memsize = rzm * rnmax;
	if (!rzm)
		rzm = 1ULL << 17;	/* fall back to 128 KB increments */
	if (sizeof(long) == 4) {
		/* 31-bit: nothing above 2 GB is addressable. */
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
	if (maxsize)
		memsize = memsize ? min((unsigned long)memsize, maxsize) : maxsize;
	do {
		size = 0;
		type = tprot(addr);
		/* Merge adjacent increments of equal type into one chunk. */
		do {
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			if (memsize && (addr + size > memsize))
				size = memsize - addr;
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}

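/*
 * Worked example (hypothetical values): with rzm = 1 MB and
 * memsize = 256 MB of read-write storage, the inner loop above merges
 * all 256 increments and a single chunk
 * { .addr = 0, .size = 256 MB, .type = CHUNK_READ_WRITE } is recorded.
 */
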
/**
 * detect_memory_layout - fill mem_chunk array with memory layout data
 * @chunk: mem_chunk array to be filled
 * @maxsize: maximum address where memory detection should stop
 *
 * Fills the passed in memory chunk array with the memory layout of the
 * machine. The array must have a size of at least MEMORY_CHUNKS and will
 * be fully initialized afterwards.
 * If the maxsize parameter has a value > 0, memory detection will stop at
 * that address. It is guaranteed that all chunks have an ending address
 * that does not exceed maxsize.
 * If maxsize is 0, all memory will be detected.
 */
void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize)
{
	unsigned long flags, flags_dat, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/*
	 * Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	local_irq_save(flags);
	flags_dat = __arch_local_irq_stnsm(0xfb);	/* clear the DAT bit */
	/*
	 * In case DAT was enabled, make sure chunk doesn't reside in vmalloc
	 * space. We have disabled DAT and any access to the vmalloc area will
	 * cause an exception.
	 * If DAT was disabled we are called from early ipl code.
	 */
	if (test_bit(5, &flags_dat)) {
		if (WARN_ON_ONCE(is_vmalloc_or_module_addr(chunk)))
			goto out;
	}
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);		/* switch off low address protection */
	find_memory_chunks(chunk, maxsize);
	__ctl_load(cr0, 0, 0);		/* restore control register 0 */
out:
	__arch_local_irq_ssm(flags_dat);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);
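
/*
 * Usage sketch (illustrative, not part of the original file): the
 * caller provides an array of at least MEMORY_CHUNKS entries and
 * either passes 0 to detect all memory or an address where detection
 * should stop, e.g.:
 *
 *	struct mem_chunk chunks[MEMORY_CHUNKS];
 *
 *	detect_memory_layout(chunks, 0);		// all memory
 *	detect_memory_layout(chunks, 1UL << 31);	// stop at 2 GB
 */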

/*
 * Create a memory hole with the given address and size: chunks that lie
 * completely inside the hole are removed, chunks that overlap it are
 * trimmed, and a chunk that fully contains the hole is split in two.
 */
void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
		     unsigned long size)
{
	int i;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		struct mem_chunk *chunk = &mem_chunk[i];

		if (chunk->size == 0)
			continue;
		if (addr > chunk->addr + chunk->size)
			continue;
		if (addr + size <= chunk->addr)
			continue;
		/* Split */
		if ((addr > chunk->addr) &&
		    (addr + size < chunk->addr + chunk->size)) {
			struct mem_chunk *new = chunk + 1;

			memmove(new, chunk, (MEMORY_CHUNKS-i-1) * sizeof(*new));
			new->addr = addr + size;
			new->size = chunk->addr + chunk->size - new->addr;
			chunk->size = addr - chunk->addr;
			continue;
		} else if ((addr <= chunk->addr) &&
			   (addr + size >= chunk->addr + chunk->size)) {
			/* Remove: the hole covers the whole chunk */
			memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
			memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
		} else if (addr + size < chunk->addr + chunk->size) {
			/* Trim the front of the chunk */
			chunk->size = chunk->addr + chunk->size - addr - size;
			chunk->addr = addr + size;
		} else if (addr > chunk->addr) {
			/* Trim the end of the chunk */
			chunk->size = addr - chunk->addr;
		}
	}
}
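
/*
 * Worked example (hypothetical values): punching a hole with
 * addr = 256 MB and size = 256 MB into a single chunk {0, 1 GB} takes
 * the split branch above and leaves {0, 256 MB} and {512 MB, 512 MB},
 * with the second part placed in the following array slot by memmove().
 */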