/*
 *  arch/s390/mm/vmem.c
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

static DEFINE_MUTEX(vmem_mutex);

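/*
 * One contiguous memory segment that has been added to the 1:1 mapping.
 * Segments are kept on the mem_segs list, which is protected by vmem_mutex.
 */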
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

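/*
 * Initialize the memory map. Only struct pages that are backed by a real
 * memory chunk are initialized; holes between chunks are skipped.
 */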
void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn)
{
	struct page *start, *end;
	struct page *map_start, *map_end;
	int i;

	start = pfn_to_page(start_pfn);
	end = start + size;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long cstart, cend;

		cstart = PFN_DOWN(memory_chunk[i].addr);
		cend = cstart + PFN_DOWN(memory_chunk[i].size);

		map_start = mem_map + cstart;
		map_end = mem_map + cend;

		if (map_start < start)
			map_start = start;
		if (map_end > end)
			map_end = end;

		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
			/ sizeof(struct page);
		map_end += ((PFN_ALIGN((unsigned long) map_end)
			     - (unsigned long) map_end)
			    / sizeof(struct page));

		if (map_start < map_end)
			memmap_init_zone((unsigned long)(map_end - map_start),
					 nid, zone, page_to_pfn(map_start),
					 MEMMAP_EARLY);
	}
}

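/*
 * Allocate pages from bootmem as long as the slab allocator is not yet
 * available, from the buddy allocator afterwards.
 */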
static void __init_refok *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

#define vmem_pud_alloc()	({ BUG(); ((pud_t *) NULL); })

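/*
 * A pmd (segment) table occupies four pages and is only allocated on
 * 64 bit; on 31 bit this function simply returns NULL.
 */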
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
#endif
	return pmd;
}

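/* Allocate one page for a page table and mark all entries as empty. */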
static inline pte_t *vmem_pte_alloc(void)
{
	pte_t *pte = vmem_alloc_pages(0);

	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
static int vmem_add_mem_map(unsigned long start, unsigned long size)
{
	unsigned long address, start_addr, end_addr;
	struct page *map_start, *map_end;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	map_start = VMEM_MAP + PFN_DOWN(start);
	map_end = VMEM_MAP + PFN_DOWN(start + size);

	start_addr = (unsigned long) map_start & PAGE_MASK;
	end_addr = PFN_ALIGN((unsigned long) map_end);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

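/*
 * Add a memory range to both the 1:1 mapping and the virtual mem_map.
 */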
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	int ret;

	ret = vmem_add_range(start, size);
	if (ret)
		return ret;
	return vmem_add_mem_map(start, size);
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size >= VMALLOC_START ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

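/*
 * Remove a memory segment that was previously added with
 * add_shared_memory(). Returns -ENOENT if no segment with matching
 * start and size is found.
 */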
int remove_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

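/*
 * Add a memory segment to the segment list, the 1:1 mapping and the
 * virtual mem_map, and initialize the struct pages backing it. The
 * segment must not overlap any segment that is already present.
 */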
int add_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	struct page *page;
	unsigned long pfn, num_pfn, end_pfn;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;

	pfn = PFN_DOWN(start);
	num_pfn = PFN_DOWN(size);
	end_pfn = pfn + num_pfn;

	page = pfn_to_page(pfn);
	memset(page, 0, num_pfn * sizeof(struct page));

	for (; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
	}
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * Enough space is reserved in the vmalloc area for the virtual mem_map
 * so that additional memory segments can be hotplugged later.
 */
void __init vmem_map_init(void)
{
	int i;

	BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
	NODE_DATA(0)->node_mem_map = VMEM_MAP;
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);