/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

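/*
 * Allocate pages for a page table: use the normal page allocator once the
 * slab allocator is up, otherwise fall back to the bootmem allocator.
 */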
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_align(size, size);
}

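/*
 * Region third (pud) and segment (pmd) tables are four pages (16KB) on s390,
 * hence the order-2 allocations and the PAGE_SIZE * 4 clears below.
 */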
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}

pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
					  PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
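		/* Map 2GB at once with a large region-third entry if the
		 * machine provides EDAT2 and the range is properly aligned
		 * and sized (and page_alloc debugging is off). */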
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
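		/* Likewise, map 1MB at once with a large segment entry if
		 * EDAT1 is available and the range is suitably aligned. */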
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
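	/* Walk the 1:1 mapping: skip holes, clear large mappings whole,
	 * otherwise clear individual page table entries. */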
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used. Otherwise we would also need page tables,
			 * since vmemmap_populate gets called for each
			 * section separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
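		/* Allocate and map a backing page for this piece of the
		 * virtual mem_map if it is not populated yet. */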
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
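	/* Look for a segment that matches the given range exactly. */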
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
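
/*
 * Example (illustrative sketch only, not taken from any real caller): code
 * that wants a chunk of memory identity-mapped and later torn down again
 * would do something like
 *
 *	if (vmem_add_mapping(0x100000000UL, 256UL << 20))
 *		return -ENOMEM;
 *	...
 *	vmem_remove_mapping(0x100000000UL, 256UL << 20);
 *
 * The address and size above are made up; real callers pass the ranges of
 * the memory segments they manage.
 */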

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long size = _eshared - _stext;
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
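	/* Write protect everything from _stext up to _eshared, i.e. the
	 * kernel text and read-only data. */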
	set_memory_ro((unsigned long)_stext, size >> PAGE_SHIFT);
	pr_info("Write protected kernel read-only data: %luk\n", size >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);