/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

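/*
 * Allocate pages for a page table. Before the slab allocator is up the
 * memory has to come from the bootmem allocator; __ref keeps the section
 * mismatch checker quiet about that init-only reference.
 */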
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

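/*
 * The region-third (pud) and segment (pmd) tables allocated below span
 * four pages, i.e. 2048 entries, which is why order-2 allocations are used
 * and the whole 4 * PAGE_SIZE area is preset to empty entries. On 31-bit
 * kernels these levels are folded into the pgd, so nothing is allocated.
 */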
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

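/*
 * Allocate a page table for the identity mapping. Once the slab allocator
 * is available the regular kernel page table allocator is used; during
 * early boot the table comes from bootmem. All entries start out empty.
 */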
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

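		/*
		 * Map naturally aligned 1 MB chunks with a single large
		 * segment (pmd) entry when the machine supports it; the
		 * very first megabyte always keeps 4 KB pages, and
		 * DEBUG_PAGEALLOC builds stick to 4 KB mappings so that
		 * individual pages can be invalidated.
		 */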
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

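		/* A large pmd covers a whole 1 MB segment, drop it in one go. */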
		if (pmd_huge(*pm_dir)) {
			pmd_clear(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

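		/*
		 * Finally make sure this page of the virtual mem_map array
		 * is backed by real memory and mapped with normal kernel
		 * protection.
		 */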
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

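	/* Reject segments past the end of the identity mapping or that wrap. */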
	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

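/*
 * Remove a previously added memory segment. The caller must pass the exact
 * start/size pair that was used with vmem_add_mapping(); the segment is
 * then taken off the list and unmapped from the 1:1 mapping.
 */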
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

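/*
 * Register a new memory segment and add it to the 1:1 mapping.
 * Fails with -ENOSPC if it overlaps an already present segment and
 * with -ERANGE if it lies outside the supported physical address range.
 */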
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

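	/*
	 * The range from _stext up to _eshared covers the kernel text and
	 * read-only data and is mapped read-only; memory chunks overlapping
	 * that range are split so only those pages lose write access.
	 */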
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
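	/*
	 * Mirror every usable memory chunk as a segment; chunks reserved
	 * for kdump (crash kernel area and old-kernel memory) are skipped.
	 */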
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);