/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_alloc(size, size);
}

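/*
 * Allocate and initialize a region third (pud) table: four pages with
 * all entries cleared to the empty state.
 */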
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

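/*
 * Allocate and initialize a segment (pmd) table: again four pages with
 * all entries cleared to the empty state.
 */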
pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}

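/*
 * Allocate a page table. Once the slab allocator is available the page
 * table allocator of init_mm is used, otherwise the memory comes from
 * memblock. All entries are marked invalid.
 */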
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID, size);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
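		/* Use a 2GB frame if EDAT2 is available, the address is
		 * suitably aligned, the frame fits completely into the
		 * range and debug_pagealloc is not enabled. */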
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
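		/* Use a 1MB frame if EDAT1 is available and the same
		 * conditions hold at segment granularity. */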
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used. Otherwise we would also need page tables,
			 * since vmemmap_populate gets called for each
			 * section separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

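/*
 * Intentionally left empty: the vmemmap backing store is not freed.
 */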
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

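/*
 * Remove a memory segment from the segment list and unmap its range
 * from the 1:1 mapping.
 */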
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

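/*
 * Remove a previously added mapping. Only a segment that matches start
 * and size exactly is removed.
 */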
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

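/*
 * Add a new memory segment and map it into the 1:1 mapping. On failure
 * the segment is removed again and any partially created mapping is
 * torn down.
 */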
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long size = _eshared - _stext;
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
	set_memory_ro((unsigned long)_stext, size >> PAGE_SHIFT);
	pr_info("Write protected kernel read-only data: %luk\n", size >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);