/*
 * arch/s390/mm/vmem.c
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);
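
/*
 * s390 provides its own memmap_init() instead of the generic one: the
 * virtual mem_map is only backed with real pages where memory_chunk[]
 * reports memory, so just the intersection of each chunk with the given
 * pfn range is initialized. map_start is rounded down and map_end
 * rounded up to full pages of the memmap itself, matching the page
 * granularity at which the memmap is backed.
 */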
void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn)
{
	struct page *start, *end;
	struct page *map_start, *map_end;
	int i;

	start = pfn_to_page(start_pfn);
	end = start + size;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long cstart, cend;

		cstart = PFN_DOWN(memory_chunk[i].addr);
		cend = cstart + PFN_DOWN(memory_chunk[i].size);

		map_start = mem_map + cstart;
		map_end = mem_map + cend;

		if (map_start < start)
			map_start = start;
		if (map_end > end)
			map_end = end;

		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
			/ sizeof(struct page);
		map_end += ((PFN_ALIGN((unsigned long) map_end)
			     - (unsigned long) map_end)
			    / sizeof(struct page));

		if (map_start < map_end)
			memmap_init_zone((unsigned long)(map_end - map_start),
					 nid, zone, page_to_pfn(map_start),
					 MEMMAP_EARLY);
	}
}
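
/*
 * Allocate 2^order pages, from the slab allocator once it is up and
 * from the bootmem allocator before that.
 */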
static void __init_refok *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
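
/*
 * The pud level is folded into the pgd here, so pgd_none() is never
 * true for the kernel mapping and a pud is never actually allocated;
 * BUG() if this is ever reached.
 */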
#define vmem_pud_alloc()	({ BUG(); ((pud_t *) NULL); })
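
/*
 * Allocate and initialize a pmd (segment) table. Only needed on 64 bit,
 * where a segment table occupies four pages; on 31 bit the pmd level is
 * folded and this function is never reached, so returning NULL is safe.
 */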
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
#endif
	return pmd;
}
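
/*
 * Allocate and initialize a page table, marking all entries as empty.
 */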
static inline pte_t *vmem_pte_alloc(void)
{
	pte_t *pte = vmem_alloc_pages(0);

	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Back a range of the virtual mem_map array with real pages and set up
 * page table entries for them.
 */
static int vmem_add_mem_map(unsigned long start, unsigned long size)
{
	unsigned long address, start_addr, end_addr;
	struct page *map_start, *map_end;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	map_start = VMEM_MAP + PFN_DOWN(start);
	map_end = VMEM_MAP + PFN_DOWN(start + size);

	start_addr = (unsigned long) map_start & PAGE_MASK;
	end_addr = PFN_ALIGN((unsigned long) map_end);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			/* Allocate a backing page for this piece of the memmap. */
			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}
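
/*
 * Make a physical memory range usable: create the 1:1 mapping and back
 * the corresponding part of the virtual mem_map.
 */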
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	int ret;

	ret = vmem_add_range(start, size);
	if (ret)
		return ret;
	return vmem_add_mem_map(start, size);
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	/* Reject segments that wrap around or reach into the vmalloc area. */
	if (seg->start + seg->size >= VMALLOC_START ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}
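
/*
 * Unlink a memory segment and tear down its 1:1 mapping. The caller
 * must hold vmem_mutex.
 */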
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}
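
/*
 * Remove a previously added shared memory segment. Fails with -ENOENT
 * if no segment with exactly this start and size is on the list.
 */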
int remove_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
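
/*
 * Add a shared memory segment: register it, map it 1:1, back its memmap
 * and mark the new struct pages reserved. The DCSS segment driver
 * (arch/s390/mm/extmem.c) is the primary user of this interface.
 */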
int add_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	struct page *page;
	unsigned long pfn, num_pfn, end_pfn;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;

	pfn = PFN_DOWN(start);
	num_pfn = PFN_DOWN(size);
	end_pfn = pfn + num_pfn;

	page = pfn_to_page(pfn);
	memset(page, 0, num_pfn * sizeof(struct page));

	for (; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
	}
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * Enough space is reserved in the vmalloc area for the virtual memmap
 * so that additional memory segments can be hotplugged later.
 */
void __init vmem_map_init(void)
{
	int i;

	BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
	NODE_DATA(0)->node_mem_map = VMEM_MAP;
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);