/*
 *  arch/s390/mm/vmem.c
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

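/*
 * vmem_alloc_pages() is marked __ref because it may legitimately call
 * into __init code: before the slab allocator is up it falls back to
 * the bootmem allocator, afterwards it uses __get_free_pages().
 */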
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

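/*
 * On 64 bit the region-third (pud) and segment (pmd) tables have
 * 2048 entries of 8 bytes each, i.e. 16KB or four pages, hence the
 * order 2 allocation and the PAGE_SIZE * 4 clear_table(). On 31 bit
 * these levels are folded into the pgd and nothing is allocated.
 */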
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

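/*
 * Page tables come from the per-mm page table allocator once the slab
 * allocator is available, from bootmem before that. New tables are
 * filled with empty (invalid) entries.
 */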
static pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

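		/*
		 * Use a large segment mapping when the hardware supports
		 * it (MACHINE_HAS_HPAGE), the address is aligned to a
		 * huge page boundary and a whole huge page still fits
		 * into the range. The first huge page is deliberately
		 * left to 4KB mappings, presumably so the low kernel
		 * area keeps page-granular protection.
		 */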
#ifdef __s390x__
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

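		/*
		 * Huge mappings have no pte level: invalidate the
		 * segment entry itself and skip to the next huge page.
		 */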
		if (pmd_huge(*pm_dir)) {
			pmd_clear_kernel(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
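		/*
		 * Allocate a backing page only for parts of the virtual
		 * mem_map that are not populated yet; already mapped
		 * pages are reused as is.
		 */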
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

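	/*
	 * If the loop ran to completion, seg points at the container of
	 * the list head rather than at a real segment; the recheck below
	 * then fails and -ENOENT is returned.
	 */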
	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

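/*
 * Add a memory segment at run time: register the segment, create the
 * 1:1 mapping for it and undo everything if any step fails. This is
 * the external interface used e.g. by memory hotplug and the DCSS
 * (extmem) code.
 */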
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged later on.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	spin_lock_init(&init_mm.context.list_lock);
	INIT_LIST_HEAD(&init_mm.context.crst_list);
	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
	init_mm.context.noexec = 0;
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
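	/*
	 * Map each memory chunk; the part overlapping the read-only
	 * region [ro_start, ro_end) - the kernel image from _stext up
	 * to _eshared - is mapped read-only, everything else
	 * read-write. The five cases below cover no overlap, full
	 * containment and the partial overlaps.
	 */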
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

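/*
 * Runs once during boot, after the slab allocator is up, so the
 * segment list starts out with the memory chunks detected at IPL.
 */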
core_initcall(vmem_convert_memory_chunk);