/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			/*
			 * This loop can take a while with 256 GB and
			 * 4k pages, so update the NMI watchdog.
			 */
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;

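/*
 * Allocate a zeroed page for an intermediate page table. The page comes
 * from the bootmem allocator early in boot and from the page allocator
 * once bootmem has been torn down; either way it must be page aligned.
 */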
static __init void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

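/*
 * Map the single page at physical address @phys to the kernel virtual
 * address @vaddr with protection @prot, allocating intermediate
 * page-table levels as needed. The pgd entry must already exist; it is
 * set up in head.S.
 */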
static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be set up in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

unsigned long __meminitdata table_start, table_end;

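/*
 * Allocate a zeroed page for early page tables, returning its virtual
 * address and storing its physical address in @phys. During early boot
 * the page is taken from the area reserved by find_early_table_space()
 * and mapped temporarily via early_ioremap().
 */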
static __meminit void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}

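/*
 * Temporarily map @size bytes at physical address @addr by claiming free
 * 2MB pmd slots in the kernel-text mapping (level2_kernel_pgt); the
 * mapping is undone with early_iounmap(). Must run before
 * zap_low_mappings.
 */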
__meminit void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd, *last_pmd;
	int i, pmds;

	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	vaddr = __START_KERNEL_map;
	pmd = level2_kernel_pgt;
	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
		for (i = 0; i < pmds; i++) {
			if (pmd_present(pmd[i]))
				goto next;
		}
		vaddr += addr & ~PMD_MASK;
		addr &= PMD_MASK;
		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
			set_pmd(pmd + i, __pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
		__flush_tlb();
		return (void *)vaddr;
	next:
		;
	}
	printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
	return NULL;
}

/* To avoid virtual aliases later */
__meminit void early_iounmap(void *addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd;
	int i, pmds;

	vaddr = (unsigned long)addr;
	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	pmd = level2_kernel_pgt + pmd_index(vaddr);
	for (i = 0; i < pmds; i++)
		pmd_clear(pmd + i);
	__flush_tlb();
}

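/*
 * Create 2MB page-table entries for the range [address, end) in one pmd
 * page, skipping entries that are already populated. When called at
 * boot time, entries beyond @end are explicitly cleared.
 */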
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}

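/*
 * Build the direct mapping for [addr, end) one pud entry (1GB) at a
 * time: ranges without RAM in the e820 map are cleared, entries that
 * already exist are updated in place, and new pmd pages are allocated
 * for the rest.
 */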
static void __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(addr, addr + PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(pmd);
	}
	__flush_tlb();
}

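/*
 * Compute a worst-case estimate of the pud and pmd pages needed to map
 * memory up to @end and reserve a physically contiguous region for them
 * from the e820 map. The search starts at physical address 0x8000.
 */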
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/*
	 * RED-PEN: putting page tables only on node 0 could cause a
	 * hotspot and fill up ZONE_DMA. The page tables need roughly
	 * 0.5KB per GB.
	 */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		     end, table_start << PAGE_SHIFT,
		     (table_start << PAGE_SHIFT) + tables);
}

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables on the local node of the
	 * memory being mapped; unfortunately this currently happens before
	 * the nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(pud);
	}

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
	__flush_tlb_all();
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	memory_present(0, 0, end_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Unmap a kernel mapping if it exists. This is useful to avoid
 * prefetches from the CPU leading to inconsistent cache lines.
 * address and size must be aligned to 2MB boundaries.
 * Does nothing when the mapping doesn't exist.
 */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (!(pmd_val(*pmd) & _PAGE_PSE)) {
			/*
			 * Could handle this, but it should not happen
			 * currently.
			 */
			printk(KERN_ERR "clear_kernel_mapping: "
				"mapping has been split, will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */
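/*
 * Make one hot-added page available to the page allocator and update
 * the global page accounting.
 */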
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, start + size - 1);

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
 * Memory hotadd without sparsemem. The mem_maps have been allocated
 * in advance, just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

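/*
 * Final memory setup: hand all bootmem pages over to the buddy
 * allocator, register the /proc/kcore segments and report the memory
 * statistics.
 */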
void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages -
			absent_pages_in_range(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, "
		"%ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);
}

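/*
 * Free the pages between @begin and @end, poisoning their contents
 * first so that stale uses of init memory are caught. Pages within the
 * kernel image mapping additionally have their page attributes cleared.
 */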
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		if (addr >= __START_KERNEL_map)
			change_page_attr_addr(addr, 1, __pgprot(0));
		free_page(addr);
		totalram_pages++;
	}
	if (addr > __START_KERNEL_map)
		global_flush_tlb();
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

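/*
 * Write-protect the kernel's read-only data, including the text when
 * SMP alternatives and kprobes do not need to patch it.
 */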
void mark_rodata_ro(void)
{
	unsigned long start = (unsigned long)_stext, end;

#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() > 1)
		start = (unsigned long)_etext;
#endif

#ifdef CONFIG_KPROBES
	start = (unsigned long)__start_rodata;
#endif

	end = (unsigned long)__end_rodata;
	start = (start + PAGE_SIZE - 1) & PAGE_MASK;
	end &= PAGE_MASK;
	if (end <= start)
		return;

	change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk at least gets out to give a better debug hint
	 * of who the culprit is.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

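/*
 * Reserve a physical range with the bootmem allocator, on the right
 * node under NUMA, and, when the range lies entirely below the 16MB
 * ISA DMA limit, account it against the ZONE_DMA reservation.
 */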
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;
	if (pfn >= end_pfn) {
		/*
		 * This can happen with kdump kernels when accessing
		 * firmware tables.
		 */
		if (pfn < end_pfn_map)
			return;
		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
		       phys, len);
		return;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys + len <= MAX_DMA_PFN * PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}

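/*
 * Check whether @addr is a canonical and currently mapped kernel
 * virtual address by walking the page tables down to the pte (or
 * large pmd) level.
 */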
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore.
 */
static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
	.vm_page_prot = PAGE_READONLY_EXEC,
	.vm_flags = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	if (vma == &gate_vma)
		return "[vsyscall]";
	return NULL;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			pte_t entry;
			void *p = vmemmap_alloc_block(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
			mk_pte_huge(entry);
			set_pmd(pmd, __pmd(pte_val(entry)));

			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
			       addr, addr + PMD_SIZE - 1, p, node);
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	}

	return 0;
}
#endif