blob: a8119cb4fa32cb1637f8f8b5b757e4ac36c3e398 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/mm/init.c
3 *
4 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#include <linux/config.h>
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/string.h>
12#include <linux/init.h>
13#include <linux/bootmem.h>
14#include <linux/mm.h>
15#include <linux/hugetlb.h>
16#include <linux/slab.h>
17#include <linux/initrd.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/fs.h>
21#include <linux/seq_file.h>
Prasanna S Panchamukhi05e14cb2005-09-06 15:19:30 -070022#include <linux/kprobes.h>
David S. Miller1ac4f5e2005-09-21 21:49:32 -070023#include <linux/cache.h>
David S. Miller13edad72005-09-29 17:58:26 -070024#include <linux/sort.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025
26#include <asm/head.h>
27#include <asm/system.h>
28#include <asm/page.h>
29#include <asm/pgalloc.h>
30#include <asm/pgtable.h>
31#include <asm/oplib.h>
32#include <asm/iommu.h>
33#include <asm/io.h>
34#include <asm/uaccess.h>
35#include <asm/mmu_context.h>
36#include <asm/tlbflush.h>
37#include <asm/dma.h>
38#include <asm/starfire.h>
39#include <asm/tlb.h>
40#include <asm/spitfire.h>
41#include <asm/sections.h>
42
43extern void device_scan(void);
44
David S. Miller13edad72005-09-29 17:58:26 -070045#define MAX_BANKS 32
David S. Miller10147572005-09-28 21:46:43 -070046
David S. Miller13edad72005-09-29 17:58:26 -070047static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
48static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
49static int pavail_ents __initdata;
50static int pavail_rescan_ents __initdata;
David S. Miller10147572005-09-28 21:46:43 -070051
David S. Miller13edad72005-09-29 17:58:26 -070052static int cmp_p64(const void *a, const void *b)
53{
54 const struct linux_prom64_registers *x = a, *y = b;
55
56 if (x->phys_addr > y->phys_addr)
57 return 1;
58 if (x->phys_addr < y->phys_addr)
59 return -1;
60 return 0;
61}
62
63static void __init read_obp_memory(const char *property,
64 struct linux_prom64_registers *regs,
65 int *num_ents)
66{
67 int node = prom_finddevice("/memory");
68 int prop_size = prom_getproplen(node, property);
69 int ents, ret, i;
70
71 ents = prop_size / sizeof(struct linux_prom64_registers);
72 if (ents > MAX_BANKS) {
73 prom_printf("The machine has more %s property entries than "
74 "this kernel can support (%d).\n",
75 property, MAX_BANKS);
76 prom_halt();
77 }
78
79 ret = prom_getproperty(node, property, (char *) regs, prop_size);
80 if (ret == -1) {
81 prom_printf("Couldn't get %s property from /memory.\n");
82 prom_halt();
83 }
84
85 *num_ents = ents;
86
87 /* Sanitize what we got from the firmware, by page aligning
88 * everything.
89 */
90 for (i = 0; i < ents; i++) {
91 unsigned long base, size;
92
93 base = regs[i].phys_addr;
94 size = regs[i].reg_size;
95
96 size &= PAGE_MASK;
97 if (base & ~PAGE_MASK) {
98 unsigned long new_base = PAGE_ALIGN(base);
99
100 size -= new_base - base;
101 if ((long) size < 0L)
102 size = 0UL;
103 base = new_base;
104 }
105 regs[i].phys_addr = base;
106 regs[i].reg_size = size;
107 }
David S. Millerc9c10832005-10-12 12:22:46 -0700108 sort(regs, ents, sizeof(struct linux_prom64_registers),
David S. Miller13edad72005-09-29 17:58:26 -0700109 cmp_p64, NULL);
110}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111
David S. Miller2bdb3cb2005-09-22 01:08:57 -0700112unsigned long *sparc64_valid_addr_bitmap __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113
114/* Ugly, but necessary... -DaveM */
David S. Miller1ac4f5e2005-09-21 21:49:32 -0700115unsigned long phys_base __read_mostly;
116unsigned long kern_base __read_mostly;
117unsigned long kern_size __read_mostly;
118unsigned long pfn_base __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120/* get_new_mmu_context() uses "cache + 1". */
121DEFINE_SPINLOCK(ctx_alloc_lock);
122unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
123#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
124unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
125
126/* References to special section boundaries */
127extern char _start[], _end[];
128
129/* Initial ramdisk setup */
130extern unsigned long sparc_ramdisk_image64;
131extern unsigned int sparc_ramdisk_image;
132extern unsigned int sparc_ramdisk_size;
133
David S. Miller1ac4f5e2005-09-21 21:49:32 -0700134struct page *mem_map_zero __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135
David S. Miller0835ae02005-10-04 15:23:20 -0700136unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
137
138unsigned long sparc64_kern_pri_context __read_mostly;
139unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
140unsigned long sparc64_kern_sec_context __read_mostly;
141
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142int bigkernel = 0;
143
David S. Miller3c936462006-01-31 18:30:27 -0800144kmem_cache_t *pgtable_cache __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145
/* Slab constructor for pgtable_cache: zero each freshly allocated
 * one-page object so new page tables start with no stale entries.
 * The cache and flags arguments are unused.
 */
static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
	clear_page(addr);
}
150
151void pgtable_cache_init(void)
152{
153 pgtable_cache = kmem_cache_create("pgtable_cache",
154 PAGE_SIZE, PAGE_SIZE,
155 SLAB_HWCACHE_ALIGN |
156 SLAB_MUST_HWCACHE_ALIGN,
157 zero_ctor,
158 NULL);
159 if (!pgtable_cache) {
160 prom_printf("pgtable_cache_init(): Could not create!\n");
161 prom_halt();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163}
164
165#ifdef CONFIG_DEBUG_DCFLUSH
166atomic_t dcpage_flushes = ATOMIC_INIT(0);
167#ifdef CONFIG_SMP
168atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
169#endif
170#endif
171
/* Flush one page out of the local CPU's D-cache.  On Spitfire, a page
 * that has a mapping also gets its I-cache lines flushed (its I-cache
 * is not coherent with stores).  Cross-CPU flushing is the caller's
 * responsibility (see smp_flush_dcache_page_impl callers).
 */
__inline__ void flush_dcache_page_impl(struct page *page)
{
#ifdef CONFIG_DEBUG_DCFLUSH
	/* Bookkeeping only: count flushes for /proc mmu_info(). */
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	/* Second argument requests an I-cache flush as well, needed
	 * only on Spitfire and only for pages with a mapping.
	 */
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	/* No D-cache aliasing: only the Spitfire I-cache case remains. */
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
188
189#define PG_dcache_dirty PG_arch_1
David S. Miller48b0e542005-07-27 16:08:44 -0700190#define PG_dcache_cpu_shift 24
191#define PG_dcache_cpu_mask (256 - 1)
192
193#if NR_CPUS > 256
194#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
195#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196
197#define dcache_dirty_cpu(page) \
David S. Miller48b0e542005-07-27 16:08:44 -0700198 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
/* Atomically mark 'page' D-cache dirty and record 'this_cpu' as the
 * owner in the PG_dcache_cpu bit-field of page->flags.  Uses a casx
 * compare-and-swap retry loop so concurrent flag updates are not lost.
 */
static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	/* Everything outside the cpu-number field is preserved. */
	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	/* New cpu number plus the dirty bit, merged in one casx. */
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "and %%g7, %1, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}
221
/* Clear the PG_dcache_dirty bit in page->flags, but only if the dirty
 * owner recorded in the PG_dcache_cpu field is still 'cpu'; otherwise
 * leave the flags untouched (another CPU has taken ownership).
 * Implemented as a casx retry loop, mirroring set_dcache_dirty().
 */
static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "srlx %%g7, %4, %%g1\n\t"
			     "and %%g1, %3, %%g1\n\t"
			     "cmp %%g1, %0\n\t"
			     "bne,pn %%icc, 2f\n\t"
			     " andn %%g7, %1, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
246
/* Called after a PTE has been installed for 'address'.  Performs any
 * deferred D-cache flush recorded on the page (PG_dcache_dirty), then
 * grows the mm's TSB if its resident set has crossed the resize limit.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned long pfn;
	unsigned long pg_flags;
	unsigned long mm_rss;

	pfn = pte_pfn(pte);
	if (pfn_valid(pfn) &&
	    (page = pfn_to_page(pfn), page_mapping(page)) &&
	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
		/* Recover which CPU tagged the page dirty in its D-cache. */
		int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
			   PG_dcache_cpu_mask);
		int this_cpu = get_cpu();

		/* This is just to optimize away some function calls
		 * in the SMP case.
		 */
		if (cpu == this_cpu)
			flush_dcache_page_impl(page);
		else
			smp_flush_dcache_page_impl(page, cpu);

		clear_dcache_dirty_cpu(page, cpu);

		put_cpu();
	}

	mm = vma->vm_mm;
	mm_rss = get_mm_rss(mm);
	/* NOTE(review): GFP_ATOMIC suggests this path must not sleep --
	 * confirm against tsb_grow() and the fault-path callers.
	 */
	if (mm_rss >= mm->context.tsb_rss_limit)
		tsb_grow(mm, mm_rss, GFP_ATOMIC);
}
281
/* Generic-MM hook: make a page's D-cache state consistent after the
 * kernel has written to it.  If the page's mapping is not mapped into
 * any address space, the flush is deferred by tagging the page via
 * set_dcache_dirty() (picked up later by update_mmu_cache()); otherwise
 * it is flushed immediately.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	/* get_cpu() also disables preemption until the matching put_cpu(). */
	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			/* Already pending on this CPU; nothing to do. */
			if (dirty_cpu == this_cpu)
				goto out;
			/* Flush the stale copy on the other CPU before
			 * taking over the dirty tag ourselves.
			 */
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
319
Prasanna S Panchamukhi05e14cb2005-09-06 15:19:30 -0700320void __kprobes flush_icache_range(unsigned long start, unsigned long end)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321{
322 /* Cheetah has coherent I-cache. */
323 if (tlb_type == spitfire) {
324 unsigned long kaddr;
325
326 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
327 __flush_icache_page(__get_phys(kaddr));
328 }
329}
330
331unsigned long page_to_pfn(struct page *page)
332{
333 return (unsigned long) ((page - mem_map) + pfn_base);
334}
335
336struct page *pfn_to_page(unsigned long pfn)
337{
338 return (mem_map + (pfn - pfn_base));
339}
340
/* Console dump of memory statistics (free areas, swap, page counts);
 * invoked from OOM/debug paths such as sysrq.
 */
void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
}
350
351void mmu_info(struct seq_file *m)
352{
353 if (tlb_type == cheetah)
354 seq_printf(m, "MMU Type\t: Cheetah\n");
355 else if (tlb_type == cheetah_plus)
356 seq_printf(m, "MMU Type\t: Cheetah+\n");
357 else if (tlb_type == spitfire)
358 seq_printf(m, "MMU Type\t: Spitfire\n");
359 else
360 seq_printf(m, "MMU Type\t: ???\n");
361
362#ifdef CONFIG_DEBUG_DCFLUSH
363 seq_printf(m, "DCPageFlushes\t: %d\n",
364 atomic_read(&dcpage_flushes));
365#ifdef CONFIG_SMP
366 seq_printf(m, "DCPageFlushesXC\t: %d\n",
367 atomic_read(&dcpage_flushes_xcall));
368#endif /* CONFIG_SMP */
369#endif /* CONFIG_DEBUG_DCFLUSH */
370}
371
/* One entry of OBP's "translations" property: the virtual range
 * [virt, virt + size) is mapped with TTE data 'data'.
 */
struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};
David S. Millerc9c10832005-10-12 12:22:46 -0700377
378/* Exported for kernel TLB miss handling in ktlb.S */
379struct linux_prom_translation prom_trans[512] __read_mostly;
380unsigned int prom_trans_ents __read_mostly;
381unsigned int swapper_pgd_zero __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700382
383extern unsigned long prom_boot_page;
384extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
385extern int prom_get_mmu_ihandle(void);
386extern void register_prom_callbacks(void);
387
388/* Exported for SMP bootup purposes. */
389unsigned long kern_locked_tte_data;
390
Linus Torvalds1da177e2005-04-16 15:20:36 -0700391/*
392 * Translate PROM's mapping we capture at boot time into physical address.
393 * The second parameter is only set from prom_callback() invocations.
394 */
395unsigned long prom_virt_to_phys(unsigned long promva, int *error)
396{
David S. Millerc9c10832005-10-12 12:22:46 -0700397 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700398
David S. Millerc9c10832005-10-12 12:22:46 -0700399 for (i = 0; i < prom_trans_ents; i++) {
400 struct linux_prom_translation *p = &prom_trans[i];
David S. Miller9ad98c52005-10-05 15:12:00 -0700401
David S. Millerc9c10832005-10-12 12:22:46 -0700402 if (promva >= p->virt &&
403 promva < (p->virt + p->size)) {
404 unsigned long base = p->data & _PAGE_PADDR;
405
406 if (error)
407 *error = 0;
408 return base + (promva & (8192 - 1));
409 }
410 }
411 if (error)
412 *error = 1;
413 return 0UL;
David S. Miller405599b2005-09-22 00:12:35 -0700414}
415
416/* The obp translations are saved based on 8k pagesize, since obp can
417 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
David S. Miller74bf4312006-01-31 18:29:18 -0800418 * HI_OBP_ADDRESS range are handled in ktlb.S.
David S. Miller405599b2005-09-22 00:12:35 -0700419 */
David S. Miller5085b4a2005-09-22 00:45:41 -0700420static inline int in_obp_range(unsigned long vaddr)
421{
422 return (vaddr >= LOW_OBP_ADDRESS &&
423 vaddr < HI_OBP_ADDRESS);
424}
425
David S. Millerc9c10832005-10-12 12:22:46 -0700426static int cmp_ptrans(const void *a, const void *b)
David S. Miller405599b2005-09-22 00:12:35 -0700427{
David S. Millerc9c10832005-10-12 12:22:46 -0700428 const struct linux_prom_translation *x = a, *y = b;
David S. Miller405599b2005-09-22 00:12:35 -0700429
David S. Millerc9c10832005-10-12 12:22:46 -0700430 if (x->virt > y->virt)
431 return 1;
432 if (x->virt < y->virt)
433 return -1;
434 return 0;
David S. Miller405599b2005-09-22 00:12:35 -0700435}
436
David S. Millerc9c10832005-10-12 12:22:46 -0700437/* Read OBP translations property into 'prom_trans[]'. */
/* Read OBP's "translations" property into prom_trans[], sort it by
 * virtual address, compact it so that only the entries inside the OBP
 * window (LOW_OBP_ADDRESS -> HI_OBP_ADDRESS) remain at the front, and
 * record the count in prom_trans_ents.  Halts on any firmware error.
 */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	/* NOTE(review): 'n' is an int but %Zd is the size_t specifier;
	 * works on sparc64's varargs ABI, yet %d would be the matching
	 * specifier -- confirm prom_printf's format support.
	 */
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	/* Convert byte count into entry count. */
	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries. */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	/* Slide the in-range block [first, last) down to index 0. */
	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	/* Zero the now-unused tail entries. */
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700498
/* Lock the kernel's 4MB text/data mapping(s) into the highest TLB
 * slot(s) via OBP, record the locked TTE for SMP bootup, note the
 * highest slot left unlocked, and derive the cheetah+ kernel context
 * register values.  Runs once during early boot.
 */
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	/* 4MB-align the physical address the kernel was loaded at. */
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
				 _PAGE_CP | _PAGE_CV | _PAGE_P |
				 _PAGE_L | _PAGE_W));

	/* Exported for SMP bootup (see kern_locked_tte_data users). */
	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via OBP. */
	prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
	prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
	if (bigkernel) {
		/* Kernels larger than 4MB take a second locked entry. */
		tlb_ent -= 1;
		prom_dtlb_load(tlb_ent,
			       tte_data + 0x400000,
			       tte_vaddr + 0x400000);
		prom_itlb_load(tlb_ent,
			       tte_data + 0x400000,
			       tte_vaddr + 0x400000);
	}
	sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532
David S. Miller405599b2005-09-22 00:12:35 -0700533
/* Capture OBP's translations, re-lock the kernel's TLB entries into
 * their final configuration, and register the PROM callback handlers.
 */
static void __init inherit_prom_mappings(void)
{
	read_obp_translations();

	/* Now fixup OBP's idea about where we really are mapped. */
	prom_printf("Remapping the kernel... ");
	remap_kernel();
	prom_printf("done.\n");

	prom_printf("Registering callbacks... ");
	register_prom_callbacks();
	prom_printf("done.\n");
}
547
/* Non-zero once inherit_locked_prom_mappings(save_p=1) has populated
 * the saved-entry arrays below; prom_world() is a no-op until then.
 */
static int prom_ditlb_set;
/* One saved firmware locked-TLB entry; tlb_ent == -1 marks an unused
 * slot.  Written by inherit_locked_prom_mappings(), replayed by
 * prom_world() and prom_reload_locked().
 */
struct prom_tlb_entry {
	int		tlb_ent;
	unsigned long	tlb_tag;
	unsigned long	tlb_data;
};
struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];
555
/* Switch the locked TLB entries between the kernel's world and the
 * firmware's: on enter, re-install the saved PROM I/D-TLB entries; on
 * exit, clear them again (and restore the kernel address limit).
 * Interrupts are disabled around the TLB writes via PSTATE_IE.
 */
void prom_world(int enter)
{
	unsigned long pstate;
	int i;

	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	if (!prom_ditlb_set)
		return;

	/* Make sure the following runs atomically. */
	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	if (enter) {
		/* Install PROM world. */
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				/* Select the D-TLB slot, then write data. */
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
					: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
					"i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
						     : : "r" (prom_itlb[i].tlb_tag),
						     "r" (TLB_TAG_ACCESS),
						     "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
			}
		}
	} else {
		/* Leaving PROM world: zero out the saved slots again. */
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS),
						     "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
			}
		}
	}
	/* Restore the saved PSTATE (re-enabling interrupts). */
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}
629
/* Scan the hardware TLB slots below the kernel's own locked entries for
 * locked (_PAGE_L) mappings the firmware installed; optionally save
 * them in prom_dtlb[]/prom_itlb[] (when save_p) for later replay by
 * prom_world(), then clear each one out of the hardware TLB.
 */
void inherit_locked_prom_mappings(int save_p)
{
	int i;
	int dtlb_seen = 0;
	int itlb_seen = 0;

	/* Fucking losing PROM has more mappings in the TLB, but
	 * it (conveniently) fails to mention any of these in the
	 * translations property.  The only ones that matter are
	 * the locked PROM tlb entries, so we impose the following
	 * irrecovable rule on the PROM, it is allowed 8 locked
	 * entries in the ITLB and 8 in the DTLB.
	 *
	 * Supposedly the upper 16GB of the address space is
	 * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
	 * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
	 * used between the client program and the firmware on sun5
	 * systems to coordinate mmu mappings is also COMPLETELY
	 * UNDOCUMENTED!!!!!!  Thanks S(t)un!
	 */
	if (save_p) {
		/* Mark all save slots unused before the scan. */
		for (i = 0; i < 16; i++) {
			prom_itlb[i].tlb_ent = -1;
			prom_dtlb[i].tlb_ent = -1;
		}
	}
	if (tlb_type == spitfire) {
		int high = sparc64_highest_unlocked_tlb_ent;
		for (i = 0; i <= high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no cheetah+
			 * page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			data = spitfire_get_dtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 * cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "flush %%g6"
						     : /* No outputs */
						     : "r" (0),
						     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

				tag = spitfire_get_dtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				/* Clear the slot out of the hardware. */
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		/* NOTE(review): this I-TLB scan uses 'i < high' while the
		 * D-TLB scan above uses 'i <= high' -- confirm whether the
		 * asymmetry is intentional.
		 */
		for (i = 0; i < high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 * cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			data = spitfire_get_itlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 * cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "flush %%g6"
						     : /* No outputs */
						     : "r" (0),
						     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

				tag = spitfire_get_itlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		int high = sparc64_highest_unlocked_tlb_ent;

		for (i = 0; i <= high; i++) {
			unsigned long data;

			data = cheetah_get_ldtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_ldtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_ldtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			data = cheetah_get_litlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_litlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				cheetah_put_litlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}
	if (save_p)
		prom_ditlb_set = 1;
}
802
803/* Give PROM back his world, done during reboots... */
/* Re-install every saved firmware locked TLB entry (tag plus data)
 * into the hardware D- and I-TLBs, handing the mappings back to OBP.
 */
void prom_reload_locked(void)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (prom_dtlb[i].tlb_ent != -1) {
			/* Select the slot via TLB_TAG_ACCESS, then write data. */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
					     : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
					     "i" (ASI_DMMU));
			if (tlb_type == spitfire)
				spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
				cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
		}

		if (prom_itlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
					     : : "r" (prom_itlb[i].tlb_tag),
					     "r" (TLB_TAG_ACCESS),
					     "i" (ASI_IMMU));
			if (tlb_type == spitfire)
				spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
			else
				cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
		}
	}
}
837
838#ifdef DCACHE_ALIASING_POSSIBLE
/* Flush the D-cache over the kernel virtual range [start, end).
 * Spitfire: walk the virtually indexed tags (mask 0x3fe0, capped at
 * 512 iterations of 32 bytes, i.e. one full pass over the cache).
 * Cheetah: invalidate line by line through ASI_DCACHE_INVALIDATE
 * using physical addresses.
 */
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					     "i" (ASI_DCACHE_INVALIDATE));
	}
}
862#endif /* DCACHE_ALIASING_POSSIBLE */
863
864/* If not locked, zap it. */
/* Zap every non-locked entry from the local CPU's TLBs.  Interrupts
 * are disabled (PSTATE_IE) around the walk.  Spitfire: scan all 64
 * slots and clear those without _PAGE_L.  Cheetah: use the bulk
 * flush-all primitives instead.
 */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 * cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 * cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	/* Restore the original PSTATE. */
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}
920
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;


	spin_lock(&ctx_alloc_lock);
	/* The page-size bits of the context value are preserved across
	 * reallocation; only the number/version fields change.
	 */
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	/* Search for a free context number starting just past the most
	 * recently allocated one.
	 */
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		/* Nothing free above 'ctx': wrap around and search from
		 * bit 1 (bit 0 is the nucleus context, never handed out).
		 */
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			/* Bitmap exhausted: advance to the next context
			 * version and restart numbering.  If the version
			 * field wrapped to zero (new_ctx == 1 here), force
			 * it back to the first valid version so a zero
			 * version is never used.
			 */
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			/* Bits 0 and 1 start out set: ctx 0 is the nucleus
			 * and ctx 1 is the one being handed out now.
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			goto out;
		}
	}
	/* Mark the number allocated and stamp it with the current version. */
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);
}
971
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972void sparc_ultra_dump_itlb(void)
973{
974 int slot;
975
976 if (tlb_type == spitfire) {
977 printk ("Contents of itlb: ");
978 for (slot = 0; slot < 14; slot++) printk (" ");
979 printk ("%2x:%016lx,%016lx\n",
980 0,
981 spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
982 for (slot = 1; slot < 64; slot+=3) {
983 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
984 slot,
985 spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
986 slot+1,
987 spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
988 slot+2,
989 spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
990 }
991 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
992 printk ("Contents of itlb0:\n");
993 for (slot = 0; slot < 16; slot+=2) {
994 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
995 slot,
996 cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
997 slot+1,
998 cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
999 }
1000 printk ("Contents of itlb2:\n");
1001 for (slot = 0; slot < 128; slot+=2) {
1002 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1003 slot,
1004 cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
1005 slot+1,
1006 cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
1007 }
1008 }
1009}
1010
1011void sparc_ultra_dump_dtlb(void)
1012{
1013 int slot;
1014
1015 if (tlb_type == spitfire) {
1016 printk ("Contents of dtlb: ");
1017 for (slot = 0; slot < 14; slot++) printk (" ");
1018 printk ("%2x:%016lx,%016lx\n", 0,
1019 spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
1020 for (slot = 1; slot < 64; slot+=3) {
1021 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1022 slot,
1023 spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
1024 slot+1,
1025 spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
1026 slot+2,
1027 spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
1028 }
1029 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1030 printk ("Contents of dtlb0:\n");
1031 for (slot = 0; slot < 16; slot+=2) {
1032 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1033 slot,
1034 cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
1035 slot+1,
1036 cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
1037 }
1038 printk ("Contents of dtlb2:\n");
1039 for (slot = 0; slot < 512; slot+=2) {
1040 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1041 slot,
1042 cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
1043 slot+1,
1044 cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
1045 }
1046 if (tlb_type == cheetah_plus) {
1047 printk ("Contents of dtlb3:\n");
1048 for (slot = 0; slot < 512; slot+=2) {
1049 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1050 slot,
1051 cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
1052 slot+1,
1053 cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
1054 }
1055 }
1056 }
1057}
1058
1059extern unsigned long cmdline_memory_size;
1060
1061unsigned long __init bootmem_init(unsigned long *pages_avail)
1062{
1063 unsigned long bootmap_size, start_pfn, end_pfn;
1064 unsigned long end_of_phys_memory = 0UL;
1065 unsigned long bootmap_pfn, bytes_avail, size;
1066 int i;
1067
1068#ifdef CONFIG_DEBUG_BOOTMEM
David S. Miller13edad72005-09-29 17:58:26 -07001069 prom_printf("bootmem_init: Scan pavail, ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001070#endif
1071
1072 bytes_avail = 0UL;
David S. Miller13edad72005-09-29 17:58:26 -07001073 for (i = 0; i < pavail_ents; i++) {
1074 end_of_phys_memory = pavail[i].phys_addr +
1075 pavail[i].reg_size;
1076 bytes_avail += pavail[i].reg_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077 if (cmdline_memory_size) {
1078 if (bytes_avail > cmdline_memory_size) {
1079 unsigned long slack = bytes_avail - cmdline_memory_size;
1080
1081 bytes_avail -= slack;
1082 end_of_phys_memory -= slack;
1083
David S. Miller13edad72005-09-29 17:58:26 -07001084 pavail[i].reg_size -= slack;
1085 if ((long)pavail[i].reg_size <= 0L) {
1086 pavail[i].phys_addr = 0xdeadbeefUL;
1087 pavail[i].reg_size = 0UL;
1088 pavail_ents = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 } else {
David S. Miller13edad72005-09-29 17:58:26 -07001090 pavail[i+1].reg_size = 0Ul;
1091 pavail[i+1].phys_addr = 0xdeadbeefUL;
1092 pavail_ents = i + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 }
1094 break;
1095 }
1096 }
1097 }
1098
1099 *pages_avail = bytes_avail >> PAGE_SHIFT;
1100
1101 /* Start with page aligned address of last symbol in kernel
1102 * image. The kernel is hard mapped below PAGE_OFFSET in a
1103 * 4MB locked TLB translation.
1104 */
1105 start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;
1106
1107 bootmap_pfn = start_pfn;
1108
1109 end_pfn = end_of_phys_memory >> PAGE_SHIFT;
1110
1111#ifdef CONFIG_BLK_DEV_INITRD
1112 /* Now have to check initial ramdisk, so that bootmap does not overwrite it */
1113 if (sparc_ramdisk_image || sparc_ramdisk_image64) {
1114 unsigned long ramdisk_image = sparc_ramdisk_image ?
1115 sparc_ramdisk_image : sparc_ramdisk_image64;
1116 if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
1117 ramdisk_image -= KERNBASE;
1118 initrd_start = ramdisk_image + phys_base;
1119 initrd_end = initrd_start + sparc_ramdisk_size;
1120 if (initrd_end > end_of_phys_memory) {
1121 printk(KERN_CRIT "initrd extends beyond end of memory "
1122 "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
1123 initrd_end, end_of_phys_memory);
1124 initrd_start = 0;
1125 }
1126 if (initrd_start) {
1127 if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
1128 initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
1129 bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
1130 }
1131 }
1132#endif
1133 /* Initialize the boot-time allocator. */
1134 max_pfn = max_low_pfn = end_pfn;
1135 min_low_pfn = pfn_base;
1136
1137#ifdef CONFIG_DEBUG_BOOTMEM
1138 prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
1139 min_low_pfn, bootmap_pfn, max_low_pfn);
1140#endif
1141 bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
1142
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 /* Now register the available physical memory with the
1144 * allocator.
1145 */
David S. Miller13edad72005-09-29 17:58:26 -07001146 for (i = 0; i < pavail_ents; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147#ifdef CONFIG_DEBUG_BOOTMEM
David S. Miller13edad72005-09-29 17:58:26 -07001148 prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
1149 i, pavail[i].phys_addr, pavail[i].reg_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150#endif
David S. Miller13edad72005-09-29 17:58:26 -07001151 free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152 }
1153
1154#ifdef CONFIG_BLK_DEV_INITRD
1155 if (initrd_start) {
1156 size = initrd_end - initrd_start;
1157
1158 /* Resert the initrd image area. */
1159#ifdef CONFIG_DEBUG_BOOTMEM
1160 prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
1161 initrd_start, initrd_end);
1162#endif
1163 reserve_bootmem(initrd_start, size);
1164 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
1165
1166 initrd_start += PAGE_OFFSET;
1167 initrd_end += PAGE_OFFSET;
1168 }
1169#endif
1170 /* Reserve the kernel text/data/bss. */
1171#ifdef CONFIG_DEBUG_BOOTMEM
1172 prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
1173#endif
1174 reserve_bootmem(kern_base, kern_size);
1175 *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
1176
1177 /* Reserve the bootmem map. We do not account for it
1178 * in pages_avail because we will release that memory
1179 * in free_all_bootmem.
1180 */
1181 size = bootmap_size;
1182#ifdef CONFIG_DEBUG_BOOTMEM
1183 prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
1184 (bootmap_pfn << PAGE_SHIFT), size);
1185#endif
1186 reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
1187 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
1188
1189 return end_pfn;
1190}
1191
David S. Miller56425302005-09-25 16:46:57 -07001192#ifdef CONFIG_DEBUG_PAGEALLOC
/* Map (or unmap, if prot is zero) the physical range [pstart, pend)
 * into the kernel linear mapping at PAGE_OFFSET + paddr, populating
 * intermediate page-table levels from bootmem as needed.
 *
 * Returns the number of bytes allocated for page tables.
 * Both endpoints must be page aligned; otherwise the boot is halted.
 */
static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		/* Allocate a PMD page if this PUD slot is empty. */
		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		/* Allocate a PTE page if this PMD slot is empty. */
		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		/* Fill PTEs up to the end of this PMD (or of the whole
		 * range, whichever comes first).
		 */
		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			/* Written directly; pgprot_val(prot) == 0 yields an
			 * invalid PTE, i.e. an unmapping.
			 */
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}
1246
David S. Miller13edad72005-09-29 17:58:26 -07001247static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1248static int pall_ents __initdata;
1249
David S. Miller56425302005-09-25 16:46:57 -07001250extern unsigned int kvmap_linear_patch[1];
1251
1252static void __init kernel_physical_mapping_init(void)
1253{
David S. Miller13edad72005-09-29 17:58:26 -07001254 unsigned long i, mem_alloced = 0UL;
David S. Miller56425302005-09-25 16:46:57 -07001255
David S. Miller13edad72005-09-29 17:58:26 -07001256 read_obp_memory("reg", &pall[0], &pall_ents);
1257
1258 for (i = 0; i < pall_ents; i++) {
David S. Miller56425302005-09-25 16:46:57 -07001259 unsigned long phys_start, phys_end;
1260
David S. Miller13edad72005-09-29 17:58:26 -07001261 phys_start = pall[i].phys_addr;
1262 phys_end = phys_start + pall[i].reg_size;
David S. Miller56425302005-09-25 16:46:57 -07001263 mem_alloced += kernel_map_range(phys_start, phys_end,
1264 PAGE_KERNEL);
David S. Miller56425302005-09-25 16:46:57 -07001265 }
1266
1267 printk("Allocated %ld bytes for kernel page tables.\n",
1268 mem_alloced);
1269
1270 kvmap_linear_patch[0] = 0x01000000; /* nop */
1271 flushi(&kvmap_linear_patch[0]);
1272
1273 __flush_tlb_all();
1274}
1275
1276void kernel_map_pages(struct page *page, int numpages, int enable)
1277{
1278 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1279 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1280
1281 kernel_map_range(phys_start, phys_end,
1282 (enable ? PAGE_KERNEL : __pgprot(0)));
1283
David S. Miller74bf4312006-01-31 18:29:18 -08001284 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1285 PAGE_OFFSET + phys_end);
1286
David S. Miller56425302005-09-25 16:46:57 -07001287 /* we should perform an IPI and flush all tlbs,
1288 * but that can deadlock->flush only current cpu.
1289 */
1290 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1291 PAGE_OFFSET + phys_end);
1292}
1293#endif
1294
David S. Miller10147572005-09-28 21:46:43 -07001295unsigned long __init find_ecache_flush_span(unsigned long size)
1296{
David S. Miller13edad72005-09-29 17:58:26 -07001297 int i;
David S. Miller10147572005-09-28 21:46:43 -07001298
David S. Miller13edad72005-09-29 17:58:26 -07001299 for (i = 0; i < pavail_ents; i++) {
1300 if (pavail[i].reg_size >= size)
1301 return pavail[i].phys_addr;
David S. Miller10147572005-09-28 21:46:43 -07001302 }
1303
1304 return ~0UL;
1305}
1306
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307/* paging_init() sets up the page tables */
1308
1309extern void cheetah_ecache_flush_init(void);
1310
1311static unsigned long last_valid_pfn;
David S. Miller56425302005-09-25 16:46:57 -07001312pgd_t swapper_pg_dir[2048];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
void __init paging_init(void)
{
	unsigned long end_pfn, pages_avail, shift;
	unsigned long real_end, i;

	/* Find available physical memory... */
	read_obp_memory("available", &pavail[0], &pavail_ents);

	/* phys_base becomes the lowest physical address of any bank. */
	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++)
		phys_base = min(phys_base, pavail[i].phys_addr);

	pfn_base = phys_base >> PAGE_SHIFT;

	/* The kernel image is loaded in a 4MB-aligned locked mapping;
	 * round the PROM-reported load address down to 4MB.
	 */
	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Context 0 is the nucleus context; never hand it out. */
	set_bit(0, mmu_context_bmap);

	/* Offset between the KERNBASE alias and the PAGE_OFFSET
	 * linear-mapping alias of the kernel image.
	 */
	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
		bigkernel = 1;
	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
		prom_printf("paging_init: Kernel > 8MB, too large.\n");
		prom_halt();
	}

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);

	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.
	 * We need to do a quick peek here to see if we are on StarFire
	 * or not, so setup_tba can setup the IRQ globals correctly (it
	 * needs to get the hard smp processor id correctly).
	 */
	{
		extern void setup_tba(int);
		setup_tba(this_is_starfire);
	}

	inherit_locked_prom_mappings(1);

	__flush_tlb_all();

	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kernel_physical_mapping_init();
#endif

	/* Everything goes into ZONE_DMA; holes are pages not reported
	 * as available by the firmware.
	 */
	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = end_pfn - pfn_base;
		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		free_area_init_node(0, &contig_page_data, zones_size,
				    phys_base >> PAGE_SHIFT, zholes_size);
	}

	device_scan();
}
1399
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400static void __init taint_real_pages(void)
1401{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 int i;
1403
David S. Miller13edad72005-09-29 17:58:26 -07001404 read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
David S. Miller13edad72005-09-29 17:58:26 -07001406 /* Find changes discovered in the physmem available rescan and
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 * reserve the lost portions in the bootmem maps.
1408 */
David S. Miller13edad72005-09-29 17:58:26 -07001409 for (i = 0; i < pavail_ents; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 unsigned long old_start, old_end;
1411
David S. Miller13edad72005-09-29 17:58:26 -07001412 old_start = pavail[i].phys_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 old_end = old_start +
David S. Miller13edad72005-09-29 17:58:26 -07001414 pavail[i].reg_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 while (old_start < old_end) {
1416 int n;
1417
David S. Miller13edad72005-09-29 17:58:26 -07001418 for (n = 0; pavail_rescan_ents; n++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 unsigned long new_start, new_end;
1420
David S. Miller13edad72005-09-29 17:58:26 -07001421 new_start = pavail_rescan[n].phys_addr;
1422 new_end = new_start +
1423 pavail_rescan[n].reg_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424
1425 if (new_start <= old_start &&
1426 new_end >= (old_start + PAGE_SIZE)) {
David S. Miller13edad72005-09-29 17:58:26 -07001427 set_bit(old_start >> 22,
1428 sparc64_valid_addr_bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 goto do_next_page;
1430 }
1431 }
1432 reserve_bootmem(old_start, PAGE_SIZE);
1433
1434 do_next_page:
1435 old_start += PAGE_SIZE;
1436 }
1437 }
1438}
1439
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	/* Size the valid-address bitmap: one bit per 4MB chunk
	 * (hence >> 22, i.e. PAGE_SHIFT + (22 - PAGE_SHIFT)), 64 bits
	 * per long (hence the extra >> 6); i << 3 converts longs to
	 * bytes for the allocation below.
	 */
	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	/* The kernel image itself is always valid memory. */
	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	/* Mark still-available memory valid and re-reserve anything the
	 * firmware has consumed since the first scan.
	 */
	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
	/* Hand all remaining bootmem pages to the page allocator.
	 * NOTE(review): the "- 1" presumably discounts one reserved
	 * page; confirm against the bootmem release accounting.
	 */
	totalram_pages = num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	/* Compute section sizes (in pages) for the boot banner. */
	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
1500
David S. Miller898cf0e2005-09-23 11:59:44 -07001501void free_initmem(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502{
1503 unsigned long addr, initend;
1504
1505 /*
1506 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
1507 */
1508 addr = PAGE_ALIGN((unsigned long)(__init_begin));
1509 initend = (unsigned long)(__init_end) & PAGE_MASK;
1510 for (; addr < initend; addr += PAGE_SIZE) {
1511 unsigned long page;
1512 struct page *p;
1513
1514 page = (addr +
1515 ((unsigned long) __va(kern_base)) -
1516 ((unsigned long) KERNBASE));
1517 memset((void *)addr, 0xcc, PAGE_SIZE);
1518 p = virt_to_page(page);
1519
1520 ClearPageReserved(p);
1521 set_page_count(p, 1);
1522 __free_page(p);
1523 num_physpages++;
1524 totalram_pages++;
1525 }
1526}
1527
1528#ifdef CONFIG_BLK_DEV_INITRD
1529void free_initrd_mem(unsigned long start, unsigned long end)
1530{
1531 if (start < end)
1532 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
1533 for (; start < end; start += PAGE_SIZE) {
1534 struct page *p = virt_to_page(start);
1535
1536 ClearPageReserved(p);
1537 set_page_count(p, 1);
1538 __free_page(p);
1539 num_physpages++;
1540 totalram_pages++;
1541 }
1542}
1543#endif