/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

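/*
 * A memory_segment describes one contiguous physical memory range that is
 * (or will be) part of the kernel 1:1 mapping.
 */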
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

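/*
 * Allocate 2^order pages: from the page allocator once slab is up,
 * from bootmem before that.
 */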
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

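/*
 * Allocate a four-page pud (region-third) table and mark all entries
 * invalid. Only needed on 64 bit; on 31 bit the pud level is folded.
 */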
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

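/*
 * Allocate a four-page pmd (segment) table and mark all entries invalid.
 */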
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

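/*
 * Allocate a page table and initialize all entries to _PAGE_INVALID.
 * Uses the page table allocator of init_mm once slab is up, bootmem before.
 */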
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
					  PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
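		/*
		 * With EDAT2, map a whole, suitably aligned region-third
		 * (PUD_SIZE) frame at once if it fits into the range.
		 */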
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_PROTECT : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
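		/*
		 * With EDAT1, map a whole, suitably aligned segment
		 * (PMD_SIZE) frame at once if it fits into the range.
		 */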
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				_SEGMENT_ENTRY_YOUNG |
				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) |
			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_INVALID;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
#ifdef CONFIG_64BIT
			/* Use 1MB frames for the vmemmap if available. We
			 * always use large frames, even if they are only
			 * partially used; otherwise we would also end up
			 * with page tables, since vmemmap_populate gets
			 * called for each section separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
#endif
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

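		/* No large frame used: back the vmemmap with single pages. */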
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

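/* Unmapping of the vmemmap is not implemented, so this is a no-op. */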
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

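/* Unlink a segment and invalidate its pages in the 1:1 mapping. */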
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

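/*
 * Remove a previously added memory segment from the 1:1 mapping.
 * The (start, size) pair must match an existing segment exactly.
 */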
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

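/*
 * Add a new memory segment (e.g. for memory hotplug) and create the
 * corresponding 1:1 mapping for it.
 */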
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	struct memblock_region *reg;
	phys_addr_t start, end;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
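	/*
	 * Map each memblock region, mapping the part that overlaps the
	 * kernel text and read-only data (_stext.._eshared) read-only.
	 */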
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = reg->base + reg->size - 1;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);