/*
 * arch/s390/mm/vmem.c
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

static pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

static pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}
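/*
 * The three allocators above rely on clear_table() (from asm/pgtable.h)
 * to fill a freshly allocated table with the architecture's "empty"
 * entry value.  A minimal sketch of its effect follows; the real
 * implementation uses mvc-based block moves, and the helper name here
 * is purely illustrative (not compiled).
 */
#if 0
static inline void clear_table_sketch(unsigned long *table,
				      unsigned long entry, int size)
{
	int nr_entries = size / sizeof(unsigned long);

	while (nr_entries--)
		*table++ = entry;	/* mark every slot invalid/empty */
}
#endif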
/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

#ifdef __s390x__
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}
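/*
 * The large-page fast path in vmem_add_mem() fires only when every one
 * of its conditions holds.  A hypothetical predicate spelling the
 * checks out (illustrative only, not compiled; the helper name is
 * assumed):
 */
#if 0
static inline int vmem_can_map_hpage(unsigned long address,
				     unsigned long start, unsigned long size)
{
	return MACHINE_HAS_HPAGE &&			/* hardware supports it */
	       !(address & ~HPAGE_MASK) &&		/* HPAGE_SIZE aligned */
	       address + HPAGE_SIZE <= start + size &&	/* fits in the range */
	       address >= HPAGE_SIZE;			/* skip the first segment */
}
#endif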
/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

		if (pmd_huge(*pm_dir)) {
			pmd_clear_kernel(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmemmap_alloc_block(PAGE_SIZE, 0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}
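/*
 * Note that "start + nr" in vmemmap_populate() is pointer arithmetic on
 * struct page, so the populated range spans nr * sizeof(struct page)
 * bytes, not nr bytes.  An equivalent byte-based computation
 * (illustrative only, not compiled):
 */
#if 0
	end_addr = (unsigned long) start + nr * sizeof(struct page);
#endif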
/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size >= VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}
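/*
 * The loop in insert_memory_segment() is the usual half-open interval
 * test: [a, a + n) and [b, b + m) collide iff a < b + m and b < a + n.
 * A stand-alone predicate with assumed names (illustrative only, not
 * compiled):
 */
#if 0
static int segments_overlap(unsigned long a, unsigned long n,
			    unsigned long b, unsigned long m)
{
	return a < b + m && b < a + n;
}
#endif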
/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
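/*
 * Sketch of a caller sequence for the two interfaces above (in the
 * kernel the extmem/DCSS code is the real user; the names and values
 * here are assumed, illustrative only, not compiled):
 */
#if 0
	unsigned long seg_start = 0x20000000UL;	/* hypothetical segment */
	unsigned long seg_size = 0x10000000UL;

	if (vmem_add_mapping(seg_start, seg_size) == 0) {
		/* the range is now part of the kernel 1:1 mapping */
		use_the_segment(seg_start, seg_size);	/* hypothetical */
		vmem_remove_mapping(seg_start, seg_size);
	}
#endif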
/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	INIT_LIST_HEAD(&init_mm.context.crst_list);
	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
	init_mm.context.noexec = 0;
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}
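/*
 * Worked example for the read-only split in vmem_map_init(), with
 * assumed numbers: given ro_start = 0x100000 and ro_end = 0x500000, a
 * memory chunk covering [0x0, 0x800000) takes the final else branch
 * and is mapped in three pieces:
 *
 *	vmem_add_mem(0x000000, 0x100000, 0);	read-write
 *	vmem_add_mem(0x100000, 0x400000, 1);	read-only (kernel text)
 *	vmem_add_mem(0x500000, 0x300000, 0);	read-write
 */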
/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);