/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
6
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <asm/ipl.h>
10#include <asm/sclp.h>
11#include <asm/setup.h>
12
#define ADDR2G (1ULL << 31)

/*
 * Probe real storage and fill the mem_chunk array with the detected layout.
 *
 * Walks memory in increments of rzm (the storage increment size reported by
 * SCLP) using the tprot instruction and records each contiguous run of
 * identical access type as one chunk.
 *
 * @chunk:   array of at least MEMORY_CHUNKS entries to fill
 * @maxsize: if non-zero, stop detection at this address
 *
 * NOTE(review): caller is expected to run with DAT and low address
 * protection disabled (see detect_memory_layout) so tprot reports the
 * real storage state.
 */
static void find_memory_chunks(struct mem_chunk chunk[], unsigned long maxsize)
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;

	rzm = sclp_get_rzm();
	rnmax = sclp_get_rnmax();
	/* memsize acts as upper probe bound; 0 disables the bound checks */
	memsize = rzm * rnmax;
	if (!rzm)
		rzm = 1ULL << 17;	/* SCLP gave no increment size: use 128KB */
	if (sizeof(long) == 4) {
		/* 31-bit kernel: cap increment size and limit at 2GB */
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
	if (maxsize)
		memsize = memsize ? min((unsigned long)memsize, maxsize) : maxsize;
	do {
		size = 0;
		type = tprot(addr);
		/* grow chunk while successive increments report the same type */
		do {
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			/* clamp the last chunk so it never extends past memsize */
			if (memsize && (addr + size > memsize))
				size = memsize - addr;
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}
51
/**
 * detect_memory_layout - fill mem_chunk array with memory layout data
 * @chunk: mem_chunk array to be filled
 * @maxsize: maximum address where memory detection should stop
 *
 * Fills the passed in memory chunk array with the memory layout of the
 * machine. The array must have a size of at least MEMORY_CHUNKS and will
 * be fully initialized afterwards.
 * If the maxsize parameter has a value > 0 memory detection will stop at
 * that address. It is guaranteed that all chunks have an ending address
 * that is smaller than maxsize.
 * If maxsize is 0 all memory will be detected.
 */
void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize)
{
	unsigned long flags, flags_dat, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/*
	 * Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	local_irq_save(flags);
	/* clear the DAT bit in the PSW; old system mask is returned */
	flags_dat = __arch_local_irq_stnsm(0xfb);
	/*
	 * In case DAT was enabled, make sure chunk doesn't reside in vmalloc
	 * space. We have disabled DAT and any access to vmalloc area will
	 * cause an exception.
	 * If DAT was disabled we are called from early ipl code.
	 */
	if (test_bit(5, &flags_dat)) {	/* was DAT on before the stnsm above? */
		if (WARN_ON_ONCE(is_vmalloc_or_module_addr(chunk)))
			goto out;
	}
	__ctl_store(cr0, 0, 0);		/* save control register 0 */
	__ctl_clear_bit(0, 28);		/* disable low address protection */
	find_memory_chunks(chunk, maxsize);
	__ctl_load(cr0, 0, 0);		/* restore control register 0 */
out:
	__arch_local_irq_ssm(flags_dat);	/* restore previous DAT state */
	local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);
Michael Holzheu60a0c682011-10-30 15:16:40 +010096
97/*
Heiko Carstens996b4a72013-04-30 19:19:58 +020098 * Create memory hole with given address and size.
Michael Holzheu44e5ddc2011-12-27 11:27:05 +010099 */
Heiko Carstens996b4a72013-04-30 19:19:58 +0200100void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
101 unsigned long size)
Michael Holzheu44e5ddc2011-12-27 11:27:05 +0100102{
Heiko Carstens996b4a72013-04-30 19:19:58 +0200103 int i;
Michael Holzheu60a0c682011-10-30 15:16:40 +0100104
105 for (i = 0; i < MEMORY_CHUNKS; i++) {
Heiko Carstens996b4a72013-04-30 19:19:58 +0200106 struct mem_chunk *chunk = &mem_chunk[i];
107
108 if (chunk->size == 0)
Michael Holzheu60a0c682011-10-30 15:16:40 +0100109 continue;
Heiko Carstens996b4a72013-04-30 19:19:58 +0200110 if (addr > chunk->addr + chunk->size)
111 continue;
112 if (addr + size <= chunk->addr)
113 continue;
114 /* Split */
115 if ((addr > chunk->addr) &&
116 (addr + size < chunk->addr + chunk->size)) {
117 struct mem_chunk *new = chunk + 1;
Michael Holzheu44e5ddc2011-12-27 11:27:05 +0100118
Heiko Carstens996b4a72013-04-30 19:19:58 +0200119 memmove(new, chunk, (MEMORY_CHUNKS-i-1) * sizeof(*new));
120 new->addr = addr + size;
121 new->size = chunk->addr + chunk->size - new->addr;
122 chunk->size = addr - chunk->addr;
123 continue;
124 } else if ((addr <= chunk->addr) &&
125 (addr + size >= chunk->addr + chunk->size)) {
Heiko Carstens35b03ae2013-06-19 09:16:54 +0200126 memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
127 memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
Heiko Carstens996b4a72013-04-30 19:19:58 +0200128 } else if (addr + size < chunk->addr + chunk->size) {
129 chunk->size = chunk->addr + chunk->size - addr - size;
130 chunk->addr = addr + size;
131 } else if (addr > chunk->addr) {
132 chunk->size = addr - chunk->addr;
Michael Holzheu60a0c682011-10-30 15:16:40 +0100133 }
134 }
135}