/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

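/*
 * Memory ranges that are part of the 1:1 mapping are tracked as
 * memory_segment entries on the mem_segs list: boot memory is registered
 * by vmem_convert_memory_chunk(), hotplugged ranges by vmem_add_mapping().
 */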
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

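/*
 * A region-third (pud) table on s390 has 2048 entries of 8 bytes each,
 * i.e. four pages; initialize all of them to empty entries.
 */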
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

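/*
 * A segment (pmd) table is likewise 2048 entries / four pages, initialized
 * to empty segment entries.
 */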
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}

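/*
 * Allocate a page table and mark all of its entries invalid. Use the page
 * table allocator of init_mm once slab is up, bootmem otherwise.
 */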
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
					  PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
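		/*
		 * With EDAT2 a suitably aligned and sized chunk can be
		 * mapped with a single 2GB large pud entry. Skipped with
		 * DEBUG_PAGEALLOC, which requires 4KB mappings.
		 */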
#ifndef CONFIG_DEBUG_PAGEALLOC
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_PROTECT : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
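		/*
		 * With EDAT1, 1MB aligned chunks can be mapped with a
		 * single large segment (pmd) entry.
		 */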
#ifndef CONFIG_DEBUG_PAGEALLOC
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				_SEGMENT_ENTRY_YOUNG |
				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) |
			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/*
			 * Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used: vmemmap_populate gets called for each section
			 * separately, so we would otherwise end up with page
			 * tables as well.
			 */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

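/*
 * No-op: blocks allocated for the virtual mem_map are simply left in place
 * when memory is removed.
 */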
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

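/*
 * Remove a previously added memory segment: look it up by exact start and
 * size, unlink it from the list and invalidate its page table entries.
 */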
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	struct memblock_region *reg;
	phys_addr_t start, end;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
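	/*
	 * Map each memblock region, making the part that overlaps the
	 * kernel text/rodata (between _stext and _eshared) read-only and
	 * everything else read-write.
	 */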
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = reg->base + reg->size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);