/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>

extern void device_scan(void);

#define MAX_PHYS_ADDRESS	(1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES	\
	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
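
/* Illustration: the bit for a physical address 'paddr' is paddr >> 28,
 * i.e. one bit per 256MB chunk, so a lookup amounts to roughly
 * test_bit(paddr >> 28, kpte_linear_bitmap).  The real consumer is the
 * kernel linear-mapping TLB miss path in assembler, not C code here.
 */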

/* A special kernel TSB for 4MB and 256MB linear mappings.  */
struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_ents __initdata;
static int pavail_rescan_ents __initdata;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	*num_ents = ents;

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}
	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base __read_mostly;
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;
unsigned long pfn_base __read_mostly;

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

/* References to special section boundaries */
extern char _start[], _end[];

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int bigkernel = 0;

kmem_cache_t *pgtable_cache __read_mostly;

static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
	clear_page(addr);
}

void pgtable_cache_init(void)
{
	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  SLAB_HWCACHE_ALIGN |
					  SLAB_MUST_HWCACHE_ALIGN,
					  zero_ctor,
					  NULL);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}
}

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	24
#define PG_dcache_cpu_mask	(256 - 1)

#if NR_CPUS > 256
#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
#endif

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "and %%g7, %1, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "srlx %%g7, %4, %%g1\n\t"
			     "and %%g1, %3, %%g1\n\t"
			     "cmp %%g1, %0\n\t"
			     "bne,pn %%icc, 2f\n\t"
			     " andn %%g7, %1, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

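/* Called after a PTE for 'address' has been installed or updated: on
 * non-hypervisor cpus, resolve any deferred D-cache flush recorded for
 * the backing page, then preload the new translation into this mm's TSB
 * so the immediately following access does not take a TSB miss.
 */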
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;
	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
			       (mm->context.tsb_nentries - 1UL)];
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certain to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
			__flush_icache_page(__get_phys(kaddr));
	}
}

unsigned long page_to_pfn(struct page *page)
{
	return (unsigned long) ((page - mem_map) + pfn_base);
}

struct page *pfn_to_page(unsigned long pfn)
{
	return (mem_map + (pfn - pfn_base));
}

void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
}

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}

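/* Ask the sun4v hypervisor, via the HV_FAST_MMU_MAP_PERM_ADDR fast trap,
 * to install a permanent locked mapping of 'vaddr' with TTE 'pte' into
 * the MMU(s) selected by 'mmu'.  Any error this early in boot is fatal,
 * so we simply report it and halt at the PROM.
 */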
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	register unsigned long func asm("%o5");
	register unsigned long arg0 asm("%o0");
	register unsigned long arg1 asm("%o1");
	register unsigned long arg2 asm("%o2");
	register unsigned long arg3 asm("%o3");

	func = HV_FAST_MMU_MAP_PERM_ADDR;
	arg0 = vaddr;
	arg1 = 0;
	arg2 = pte;
	arg3 = mmu;
	__asm__ __volatile__("ta 0x80"
			     : "=&r" (func), "=&r" (arg0),
			       "=&r" (arg1), "=&r" (arg2),
			       "=&r" (arg3)
			     : "0" (func), "1" (arg0), "2" (arg1),
			       "3" (arg2), "4" (arg3));
	if (arg0 != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, arg0);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
		if (bigkernel) {
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
		}
	} else {
		prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
		prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
		if (bigkernel) {
			tlb_ent -= 1;
			prom_dtlb_load(tlb_ent,
				       tte_data + 0x400000,
				       tte_vaddr + 0x400000);
			prom_itlb_load(tlb_ent,
				       tte_data + 0x400000,
				       tte_vaddr + 0x400000);
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}

static void __init inherit_prom_mappings(void)
{
	read_obp_translations();

	/* Now fixup OBP's idea about where we really are mapped. */
	prom_printf("Remapping the kernel... ");
	remap_kernel();
	prom_printf("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}

#ifdef DCACHE_ALIASING_POSSIBLE
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
#endif /* DCACHE_ALIASING_POSSIBLE */

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	int new_version;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

void sparc_ultra_dump_itlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of itlb: ");
		for (slot = 0; slot < 14; slot++) printk (" ");
		printk ("%2x:%016lx,%016lx\n",
			0,
			spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
				slot+1,
				spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
				slot+2,
				spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of itlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
				slot+1,
				cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
		}
		printk ("Contents of itlb2:\n");
		for (slot = 0; slot < 128; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
				slot+1,
				cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
		}
	}
}

void sparc_ultra_dump_dtlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of dtlb: ");
		for (slot = 0; slot < 14; slot++) printk (" ");
		printk ("%2x:%016lx,%016lx\n", 0,
			spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
				slot+1,
				spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
				slot+2,
				spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of dtlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
				slot+1,
				cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
		}
		printk ("Contents of dtlb2:\n");
		for (slot = 0; slot < 512; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
				slot+1,
				cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
		}
		if (tlb_type == cheetah_plus) {
			printk ("Contents of dtlb3:\n");
			for (slot = 0; slot < 512; slot+=2) {
				printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
					slot,
					cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
					slot+1,
					cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
			}
		}
	}
}

extern unsigned long cmdline_memory_size;

unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("bootmem_init: Scan pavail, ");
#endif

	bytes_avail = 0UL;
	for (i = 0; i < pavail_ents; i++) {
		end_of_phys_memory = pavail[i].phys_addr +
			pavail[i].reg_size;
		bytes_avail += pavail[i].reg_size;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				pavail[i].reg_size -= slack;
				if ((long)pavail[i].reg_size <= 0L) {
					pavail[i].phys_addr = 0xdeadbeefUL;
					pavail[i].reg_size = 0UL;
					pavail_ents = i;
				} else {
					pavail[i+1].reg_size = 0UL;
					pavail[i+1].phys_addr = 0xdeadbeefUL;
					pavail_ents = i + 1;
				}
				break;
			}
		}
	}

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	/* Start with page aligned address of last symbol in kernel
	 * image.  The kernel is hard mapped below PAGE_OFFSET in a
	 * 4MB locked TLB translation.
	 */
	start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;

	bootmap_pfn = start_pfn;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image = sparc_ramdisk_image ?
			sparc_ramdisk_image : sparc_ramdisk_image64;
		if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
			ramdisk_image -= KERNBASE;
		initrd_start = ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
			       "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		if (initrd_start) {
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif
	/* Initialize the boot-time allocator. */
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = pfn_base;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
		    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; i < pavail_ents; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
			    i, pavail[i].phys_addr, pavail[i].reg_size);
#endif
		free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
			    initrd_start, initrd_end);
#endif
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
	reserve_bootmem(kern_base, kern_size);
	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.  We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
		    (bootmap_pfn << PAGE_SHIFT), size);
#endif
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

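/* Set, in kpte_linear_bitmap, the bit for every 256MB-aligned chunk that
 * is entirely covered by the physical range [start, end).  Misaligned or
 * partial pieces are skipped, so they stay on the 4MB mapping.
 */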
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}

static void __init kernel_physical_mapping_init(void)
{
	unsigned long i;
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long mem_alloced = 0UL;
#endif

	read_obp_memory("reg", &pall[0], &pall_ents);

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);

#ifdef CONFIG_DEBUG_PAGEALLOC
		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
#endif
	}

#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

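/* On cpus that access the kernel TSB by physical address (cheetah_plus
 * and sun4v, see tsb_insert() above), rewrite the instruction slots
 * recorded in the __tsb_ldquad_phys_patch and __tsb_phys_patch tables
 * so the TSB loads/stores use physical addressing.
 */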
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor.  */
static struct hv_tsb_descr ktsb_descr[2];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

	/* Second KTSB for 4MB/256MB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
}

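/* Hand both kernel TSB descriptors to the hypervisor for context zero,
 * using the HV_FAST_MMU_TSB_CTX0 fast trap: %o0 carries the descriptor
 * count and %o1 the real address of the descriptor array.
 */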
void __cpuinit sun4v_ktsb_register(void)
{
	register unsigned long func asm("%o5");
	register unsigned long arg0 asm("%o0");
	register unsigned long arg1 asm("%o1");
	unsigned long pa;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	func = HV_FAST_MMU_TSB_CTX0;
	arg0 = 2;
	arg1 = pa;
	__asm__ __volatile__("ta %6"
			     : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
			     : "0" (func), "1" (arg0), "2" (arg1),
			       "i" (HV_FAST_TRAP));
}

/* paging_init() sets up the page tables */

extern void cheetah_ecache_flush_init(void);
extern void sun4v_patch_tlb_handlers(void);

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

void __init paging_init(void)
{
	unsigned long end_pfn, pages_avail, shift;
	unsigned long real_end, i;

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor)
		tsb_phys_patch();

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	/* Find available physical memory... */
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++)
		phys_base = min(phys_base, pavail[i].phys_addr);

	pfn_base = phys_base >> PAGE_SHIFT;

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
		bigkernel = 1;
	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
		prom_printf("paging_init: Kernel > 8MB, too large.\n");
		prom_halt();
	}

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

	kernel_physical_mapping_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = end_pfn - pfn_base;
		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		free_area_init_node(0, &contig_page_data, zones_size,
				    phys_base >> PAGE_SHIFT, zholes_size);
	}

	device_scan();
}

static void __init taint_real_pages(void)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	/* Find changes discovered in the physmem available rescan and
	 * reserve the lost portions in the bootmem maps.
	 */
	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start +
			pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22,
						sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			reserve_bootmem(old_start, PAGE_SIZE);

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
	totalram_pages = num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem(void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, 0xcc, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif

#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	(_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	(_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);

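/* Fill in protection_map[] from the four base protections.  The table
 * index is the VM_{READ,WRITE,EXEC,SHARED} flag combination; entries
 * for combinations without VM_EXEC get the execute bit cleared.
 */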
1446static void prot_init_common(unsigned long page_none,
1447 unsigned long page_shared,
1448 unsigned long page_copy,
1449 unsigned long page_readonly,
1450 unsigned long page_exec_bit)
1451{
1452 PAGE_COPY = __pgprot(page_copy);
David S. Miller0f159522006-02-18 12:43:16 -08001453 PAGE_SHARED = __pgprot(page_shared);
David S. Millerc4bce902006-02-11 21:57:54 -08001454
1455 protection_map[0x0] = __pgprot(page_none);
1456 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
1457 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
1458 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
1459 protection_map[0x4] = __pgprot(page_readonly);
1460 protection_map[0x5] = __pgprot(page_readonly);
1461 protection_map[0x6] = __pgprot(page_copy);
1462 protection_map[0x7] = __pgprot(page_copy);
1463 protection_map[0x8] = __pgprot(page_none);
1464 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
1465 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
1466 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
1467 protection_map[0xc] = __pgprot(page_readonly);
1468 protection_map[0xd] = __pgprot(page_readonly);
1469 protection_map[0xe] = __pgprot(page_shared);
1470 protection_map[0xf] = __pgprot(page_shared);
1471}
1472
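/* Set up page protections and the kernel linear-mapping TTE bits using
 * the sun4u (spitfire/cheetah) PTE layout.  Both linear-mapping TTEs use
 * 4MB pages for now; see the XXX note below about 256MB pages on Panther.
 */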
1473static void __init sun4u_pgprot_init(void)
1474{
1475 unsigned long page_none, page_shared, page_copy, page_readonly;
1476 unsigned long page_exec_bit;
1477
1478 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
1479 _PAGE_CACHE_4U | _PAGE_P_4U |
1480 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
1481 _PAGE_EXEC_4U);
1482 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
1483 _PAGE_CACHE_4U | _PAGE_P_4U |
1484 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
1485 _PAGE_EXEC_4U | _PAGE_L_4U);
1486 PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);
1487
1488 _PAGE_IE = _PAGE_IE_4U;
1489 _PAGE_E = _PAGE_E_4U;
1490 _PAGE_CACHE = _PAGE_CACHE_4U;
1491
1492 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
1493 __ACCESS_BITS_4U | _PAGE_E_4U);
1494
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001495 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
David S. Millerc4bce902006-02-11 21:57:54 -08001496 0xfffff80000000000;
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001497 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
1498 _PAGE_P_4U | _PAGE_W_4U);
1499
1500 /* XXX Should use 256MB on Panther. XXX */
1501 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
David S. Millerc4bce902006-02-11 21:57:54 -08001502
1503 _PAGE_SZBITS = _PAGE_SZBITS_4U;
1504 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
1505 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
1506 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
1507
1508
1509 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
1510 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
1511 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
1512 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
1513 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
1514 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
1515 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
1516
1517 page_exec_bit = _PAGE_EXEC_4U;
1518
1519 prot_init_common(page_none, page_shared, page_copy, page_readonly,
1520 page_exec_bit);
1521}
1522
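/* As above, but using the sun4v (hypervisor) PTE layout.  Here the
 * second linear-mapping TTE really does use 256MB pages.
 */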
1523static void __init sun4v_pgprot_init(void)
1524{
1525 unsigned long page_none, page_shared, page_copy, page_readonly;
1526 unsigned long page_exec_bit;
1527
1528 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
1529 _PAGE_CACHE_4V | _PAGE_P_4V |
1530 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
1531 _PAGE_EXEC_4V);
1532 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
1533 PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);
1534
1535 _PAGE_IE = _PAGE_IE_4V;
1536 _PAGE_E = _PAGE_E_4V;
1537 _PAGE_CACHE = _PAGE_CACHE_4V;
1538
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001539 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
David S. Millerc4bce902006-02-11 21:57:54 -08001540 0xfffff80000000000;
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001541 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1542 _PAGE_P_4V | _PAGE_W_4V);
1543
1544 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
1545 0xfffff80000000000;
1546 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1547 _PAGE_P_4V | _PAGE_W_4V);
David S. Millerc4bce902006-02-11 21:57:54 -08001548
1549 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
1550 __ACCESS_BITS_4V | _PAGE_E_4V);
1551
1552 _PAGE_SZBITS = _PAGE_SZBITS_4V;
1553 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
1554 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
1555 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
1556 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
1557
1558 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
1559 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
1560 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
1561 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
1562 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
1563 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
1564 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
1565
1566 page_exec_bit = _PAGE_EXEC_4V;
1567
1568 prot_init_common(page_none, page_shared, page_copy, page_readonly,
1569 page_exec_bit);
1570}
1571
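/* Return the PTE page-size field for the given size in bytes, using the
 * sun4v encoding when running under the hypervisor and the sun4u
 * encoding otherwise.  Unrecognized sizes fall back to 8K.
 */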
1572unsigned long pte_sz_bits(unsigned long sz)
1573{
1574 if (tlb_type == hypervisor) {
1575 switch (sz) {
1576 case 8 * 1024:
1577 default:
1578 return _PAGE_SZ8K_4V;
1579 case 64 * 1024:
1580 return _PAGE_SZ64K_4V;
1581 case 512 * 1024:
1582 return _PAGE_SZ512K_4V;
1583 case 4 * 1024 * 1024:
1584 return _PAGE_SZ4MB_4V;
1585		}
1586 } else {
1587 switch (sz) {
1588 case 8 * 1024:
1589 default:
1590 return _PAGE_SZ8K_4U;
1591 case 64 * 1024:
1592 return _PAGE_SZ64K_4U;
1593 case 512 * 1024:
1594 return _PAGE_SZ512K_4U;
1595 case 4 * 1024 * 1024:
1596 return _PAGE_SZ4MB_4U;
1597		}
1598 }
1599}
1600
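/* Construct a PTE for an I/O mapping: force the protection non-cached,
 * merge in the physical page address, place the bus 'space' identifier
 * in the bits above 32, and add the page-size bits.
 */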
1601pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
1602{
1603 pte_t pte;
David S. Millercf627152006-02-12 21:10:07 -08001604
1605 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
David S. Millerc4bce902006-02-11 21:57:54 -08001606 pte_val(pte) |= (((unsigned long)space) << 32);
1607 pte_val(pte) |= pte_sz_bits(page_size);
David S. Millercf627152006-02-12 21:10:07 -08001608
David S. Millerc4bce902006-02-11 21:57:54 -08001609 return pte;
1610}
1611
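/* TTE used for the 4MB kernel linear mappings: valid, cacheable,
 * privileged, writable and executable, and locked in the TLB on sun4u
 * (the sun4v variant carries no lock bit).
 */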
David S. Millerc4bce902006-02-11 21:57:54 -08001612static unsigned long kern_large_tte(unsigned long paddr)
1613{
1614 unsigned long val;
1615
1616 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
1617 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
1618 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
1619 if (tlb_type == hypervisor)
1620 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
1621 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
1622 _PAGE_EXEC_4V | _PAGE_W_4V);
1623
1624 return val | paddr;
1625}
1626
1627/*
1628 * Translate a PROM mapping captured at boot time into a physical address.
1629 * The second parameter is only set from prom_callback() invocations.
1630 */
1631unsigned long prom_virt_to_phys(unsigned long promva, int *error)
1632{
1633 unsigned long mask;
1634 int i;
1635
1636 mask = _PAGE_PADDR_4U;
1637 if (tlb_type == hypervisor)
1638 mask = _PAGE_PADDR_4V;
1639
1640 for (i = 0; i < prom_trans_ents; i++) {
1641 struct linux_prom_translation *p = &prom_trans[i];
1642
1643 if (promva >= p->virt &&
1644 promva < (p->virt + p->size)) {
1645 unsigned long base = p->data & mask;
1646
1647 if (error)
1648 *error = 0;
1649 return base + (promva & (8192 - 1));
1650 }
1651 }
1652 if (error)
1653 *error = 1;
1654 return 0UL;
1655}
1656
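/* Resolve a kernel virtual address to a physical address: linear-mapped
 * addresses are masked directly, OBP addresses go through
 * prom_virt_to_phys(), and anything else is looked up in the kernel
 * page tables.
 */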
1657/* XXX We should kill off this ugly thing at some point. XXX */
1658unsigned long sun4u_get_pte(unsigned long addr)
1659{
1660 pgd_t *pgdp;
1661 pud_t *pudp;
1662 pmd_t *pmdp;
1663 pte_t *ptep;
1664 unsigned long mask = _PAGE_PADDR_4U;
1665
1666 if (tlb_type == hypervisor)
1667 mask = _PAGE_PADDR_4V;
1668
1669 if (addr >= PAGE_OFFSET)
1670 return addr & mask;
1671
1672 if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
1673 return prom_virt_to_phys(addr, NULL);
1674
1675 pgdp = pgd_offset_k(addr);
1676 pudp = pud_offset(pgdp, addr);
1677 pmdp = pmd_offset(pudp, addr);
1678 ptep = pte_offset_kernel(pmdp, addr);
1679
1680 return pte_val(*ptep) & mask;
1681}
1682
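/* Flush all non-locked TLB entries.  PSTATE.IE is toggled off around the
 * flush and restored afterwards.  Spitfire walks all 64 D-TLB and I-TLB
 * entries by hand (with the errata #32 workaround before each probe),
 * while cheetah and cheetah+ use the bulk flush routines.
 */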
1683/* If not locked, zap it. */
1684void __flush_tlb_all(void)
1685{
1686 unsigned long pstate;
1687 int i;
1688
1689 __asm__ __volatile__("flushw\n\t"
1690 "rdpr %%pstate, %0\n\t"
1691 "wrpr %0, %1, %%pstate"
1692 : "=r" (pstate)
1693 : "i" (PSTATE_IE));
1694 if (tlb_type == spitfire) {
1695 for (i = 0; i < 64; i++) {
1696 /* Spitfire Errata #32 workaround */
1697 /* NOTE: Always runs on spitfire, so no
1698 * cheetah+ page size encodings.
1699 */
1700 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
1701 "flush %%g6"
1702 : /* No outputs */
1703 : "r" (0),
1704 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
1705
1706 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
1707 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
1708 "membar #Sync"
1709 : /* no outputs */
1710 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
1711 spitfire_put_dtlb_data(i, 0x0UL);
1712 }
1713
1714 /* Spitfire Errata #32 workaround */
1715 /* NOTE: Always runs on spitfire, so no
1716 * cheetah+ page size encodings.
1717 */
1718 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
1719 "flush %%g6"
1720 : /* No outputs */
1721 : "r" (0),
1722 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
1723
1724 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
1725 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
1726 "membar #Sync"
1727 : /* no outputs */
1728 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
1729 spitfire_put_itlb_data(i, 0x0UL);
1730 }
1731 }
1732 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1733 cheetah_flush_dtlb_all();
1734 cheetah_flush_itlb_all();
1735 }
1736 __asm__ __volatile__("wrpr %0, 0, %%pstate"
1737 : : "r" (pstate));
1738}