/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[4] __read_mostly;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *	0	==>	4MB
 *	1	==>	256MB
 *	2	==>	2GB
 *	3	==>	16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
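
/* A conceptual sketch of the reader side used by the TLB miss handlers
 * (illustrative only; it assumes 64-bit longs, i.e. 32 two-bit fields
 * per bitmap word):
 *
 *	idx  = paddr >> 28;			(one slot per 256MB)
 *	word = kpte_linear_bitmap[idx / 32];
 *	sel  = (word >> ((idx % 32) * 2)) & 0x3;
 *	tte  = vaddr ^ kern_linear_pte_xor[sel];
 *
 * The writer side is kpte_set_val(), further down in this file.
 */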
77
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __devinitdata;
static int pavail_ents __devinitdata;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
					sizeof(unsigned long)];
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);

/* Kernel physical address base and size in bytes. */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
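
/* Illustrative layout of page->flags as used by the macros above,
 * assuming NR_CPUS rounds up to 64 (a 6-bit cpu field):
 *
 *	bit PG_dcache_dirty (PG_arch_1)		D-cache dirty flag
 *	bits 32..37				cpu that dirtied the page
 *
 * dcache_dirty_cpu() simply extracts the cpu field.
 */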
Linus Torvalds1da177e2005-04-16 15:20:36 -0700222
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
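
/* Conceptually, both casx loops above are compare-and-swap loops on
 * page->flags; an illustrative C sketch of set_dcache_dirty() is:
 *
 *	do {
 *		old = page->flags;
 *		new = (old & non_cpu_bits) | mask;
 *	} while (cmpxchg(&page->flags, old, new) != old);
 *
 * clear_dcache_dirty_cpu() additionally bails out before the swap when
 * the cpu field no longer matches 'cpu'.
 */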
267
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

/* mm->context.lock must be held */
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
				    unsigned long tsb_hash_shift, unsigned long address,
				    unsigned long tte)
{
	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
	unsigned long tag;

	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, tte);
}
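
/* Illustration: with the base page size TSB (tsb_hash_shift == PAGE_SHIFT)
 * the entry chosen above is
 *
 *	tsb[(address >> PAGE_SHIFT) & (nentries - 1)]
 *
 * and the stored tag is address >> 22, which is what the TSB miss
 * handlers compare against on lookup.
 */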
322
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long tsb_index, tsb_hash_shift, flags;
	struct mm_struct *mm;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	__update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,
				address, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
360
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	static const char *pgsz_strings[] = {
		"8K", "64K", "512K", "4MB", "32MB",
		"256MB", "2GB", "16GB",
	};
	int i, printed;

	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

	seq_printf(m, "MMU PGSZs\t: ");
	printed = 0;
	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
		if (cpu_pgsz_mask & (1UL << i)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", pgsz_strings[i]);
			printed++;
		}
	}
	seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'. */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries. */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on. */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}
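
/* Illustration: each loop iteration in remap_kernel() locks down one
 * 4MB (0x400000 byte) TTE, so the kernel image ends up covered by
 * num_kernel_image_mappings consecutive 4MB mappings starting at
 * KERNBASE.
 */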
Linus Torvalds1da177e2005-04-16 15:20:36 -0700614
David S. Miller405599b2005-09-22 00:12:35 -0700615
static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);
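
/* Illustration: the spitfire branch above writes D-cache tags directly,
 * one 32-byte line at a time, and stops after 512 lines, i.e. one full
 * pass over a 16KB D-cache; flushing more than the cache holds would be
 * pointless.
 */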
Linus Torvalds1da177e2005-04-16 15:20:36 -0700656
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
663
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) and never use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by the version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}
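
/* Illustrative layout of a context value handed out above (field
 * boundaries per CTX_NR_BITS and CTX_VERSION_MASK in mmu_context.h):
 *
 *	+--------------+------------------+
 *	|   version    |  context number  |
 *	+--------------+------------------+
 *	63        CTX_NR_BITS             0
 *
 * When the number space wraps, the version is bumped, the bitmap is
 * reset, and smp_new_mmu_context_version() makes the other CPUs drop
 * any stale contexts they are still running with.
 */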
722
static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}

static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	struct pglist_data *p;
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->node_id = nid;
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
}
897
#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void __init add_node_ranges(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start, nid);
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif
1302
static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
	allocate_node_data(0);
	node_set_online(0);
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* Dump memblock with node info. */
	memblock_dump_all();

	/* XXX cpu notifier XXX */

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

static void __init kpte_set_val(unsigned long index, unsigned long val)
{
	unsigned long *ptr = kpte_linear_bitmap;

	val <<= ((index % (BITS_PER_LONG / 2)) * 2);
	ptr += (index / (BITS_PER_LONG / 2));

	*ptr |= val;
}
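
/* Worked example (illustrative, assuming 64-bit longs so each word
 * packs 32 two-bit fields): index 5 with val 2 shifts val up to bits
 * 10-11 and OR's it into kpte_linear_bitmap[0].  Slots are OR'd rather
 * than stored, so this relies on the bitmap starting out zeroed (it
 * lives in .bss).
 */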
1414
static const unsigned long kpte_shift_min = 28; /* 256MB */
static const unsigned long kpte_shift_max = 34; /* 16GB */
static const unsigned long kpte_shift_incr = 3;

static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
					   unsigned long shift)
{
	unsigned long size = (1UL << shift);
	unsigned long mask = (size - 1UL);
	unsigned long remains = end - start;
	unsigned long val;

	if (remains < size || (start & mask))
		return start;

	/* VAL maps:
	 *
	 *	shift 28 --> kern_linear_pte_xor index 1
	 *	shift 31 --> kern_linear_pte_xor index 2
	 *	shift 34 --> kern_linear_pte_xor index 3
	 */
	val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;

	remains &= ~mask;
	if (shift != kpte_shift_max)
		remains = size;

	while (remains) {
		unsigned long index = start >> kpte_shift_min;

		kpte_set_val(index, val);

		start += 1UL << kpte_shift_min;
		remains -= 1UL << kpte_shift_min;
	}

	return start;
}

static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	unsigned long smallest_size, smallest_mask;
	unsigned long s;

	smallest_size = (1UL << kpte_shift_min);
	smallest_mask = (smallest_size - 1UL);

	while (start < end) {
		unsigned long orig_start = start;

		for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
			start = kpte_mark_using_shift(start, end, s);

			if (start != orig_start)
				break;
		}

		if (start == orig_start)
			start = (start + smallest_size) & ~smallest_mask;
	}
}
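
/* Worked example (illustrative): marking [1GB, 18GB) proceeds greedily,
 * largest shift first.  [1GB, 2GB) is misaligned and too small for 2GB
 * or 16GB chunks, so it becomes four 256MB slots with val 1; from 2GB
 * on, eight 2GB chunks get val 2 (a 16GB chunk never fits, since only
 * 2GB remains once a 16GB boundary is reached).  A range that fits no
 * shift at all is skipped one 256MB granule at a time and keeps the
 * default val 0, i.e. 4MB pages.
 */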
David S. Miller56425302005-09-25 16:46:57 -07001476
David S. Miller8f3614532007-12-13 06:13:38 -08001477static void __init init_kpte_bitmap(void)
David S. Miller56425302005-09-25 16:46:57 -07001478{
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001479 unsigned long i;
David S. Miller13edad72005-09-29 17:58:26 -07001480
1481 for (i = 0; i < pall_ents; i++) {
David S. Miller56425302005-09-25 16:46:57 -07001482 unsigned long phys_start, phys_end;
1483
David S. Miller13edad72005-09-29 17:58:26 -07001484 phys_start = pall[i].phys_addr;
1485 phys_end = phys_start + pall[i].reg_size;
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001486
1487 mark_kpte_bitmap(phys_start, phys_end);
David S. Miller8f3614532007-12-13 06:13:38 -08001488 }
1489}
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001490
David S. Miller8f3614532007-12-13 06:13:38 -08001491static void __init kernel_physical_mapping_init(void)
1492{
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001493#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller8f3614532007-12-13 06:13:38 -08001494 unsigned long i, mem_alloced = 0UL;
1495
1496 for (i = 0; i < pall_ents; i++) {
1497 unsigned long phys_start, phys_end;
1498
1499 phys_start = pall[i].phys_addr;
1500 phys_end = phys_start + pall[i].reg_size;
1501
David S. Miller56425302005-09-25 16:46:57 -07001502 mem_alloced += kernel_map_range(phys_start, phys_end,
1503 PAGE_KERNEL);
David S. Miller56425302005-09-25 16:46:57 -07001504 }
1505
1506	printk("Allocated %ld bytes for kernel page tables.\n", mem_alloced);
1508
1509 kvmap_linear_patch[0] = 0x01000000; /* nop */
1510 flushi(&kvmap_linear_patch[0]);
1511
1512 __flush_tlb_all();
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001513#endif
David S. Miller56425302005-09-25 16:46:57 -07001514}
1515
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001516#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller56425302005-09-25 16:46:57 -07001517void kernel_map_pages(struct page *page, int numpages, int enable)
1518{
1519 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1520 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1521
1522 kernel_map_range(phys_start, phys_end,
1523 (enable ? PAGE_KERNEL : __pgprot(0)));
1524
David S. Miller74bf4312006-01-31 18:29:18 -08001525 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1526 PAGE_OFFSET + phys_end);
1527
David S. Miller56425302005-09-25 16:46:57 -07001528	/* Ideally we would perform an IPI and flush all TLBs,
1529	 * but that can deadlock, so we flush only the current cpu's TLB.
1530	 */
1531 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1532 PAGE_OFFSET + phys_end);
1533}
1534#endif
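/* This only works because, under CONFIG_DEBUG_PAGEALLOC, the linear
 * area was mapped with individual ptes rather than huge locked TLB
 * entries: passing __pgprot(0) above writes ptes without _PAGE_VALID,
 * so a stale access to a freed page takes an immediate MMU trap
 * instead of silently corrupting memory.
 */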
1535
David S. Miller10147572005-09-28 21:46:43 -07001536unsigned long __init find_ecache_flush_span(unsigned long size)
1537{
David S. Miller13edad72005-09-29 17:58:26 -07001538 int i;
David S. Miller10147572005-09-28 21:46:43 -07001539
David S. Miller13edad72005-09-29 17:58:26 -07001540 for (i = 0; i < pavail_ents; i++) {
1541 if (pavail[i].reg_size >= size)
1542 return pavail[i].phys_addr;
David S. Miller10147572005-09-28 21:46:43 -07001543 }
1544
1545 return ~0UL;
1546}
1547
David S. Miller517af332006-02-01 15:55:21 -08001548static void __init tsb_phys_patch(void)
1549{
David S. Millerd257d5d2006-02-06 23:44:37 -08001550 struct tsb_ldquad_phys_patch_entry *pquad;
David S. Miller517af332006-02-01 15:55:21 -08001551 struct tsb_phys_patch_entry *p;
1552
David S. Millerd257d5d2006-02-06 23:44:37 -08001553 pquad = &__tsb_ldquad_phys_patch;
1554 while (pquad < &__tsb_ldquad_phys_patch_end) {
1555 unsigned long addr = pquad->addr;
1556
1557 if (tlb_type == hypervisor)
1558 *(unsigned int *) addr = pquad->sun4v_insn;
1559 else
1560 *(unsigned int *) addr = pquad->sun4u_insn;
1561 wmb();
1562 __asm__ __volatile__("flush %0"
1563 : /* no outputs */
1564 : "r" (addr));
1565
1566 pquad++;
1567 }
1568
David S. Miller517af332006-02-01 15:55:21 -08001569 p = &__tsb_phys_patch;
1570 while (p < &__tsb_phys_patch_end) {
1571 unsigned long addr = p->addr;
1572
1573 *(unsigned int *) addr = p->insn;
1574 wmb();
1575 __asm__ __volatile__("flush %0"
1576 : /* no outputs */
1577 : "r" (addr));
1578
1579 p++;
1580 }
1581}
1582
David S. Miller490384e2006-02-11 14:41:18 -08001583/* Don't mark as init, we give this to the Hypervisor. */
David S. Millerd1acb422007-03-16 17:20:28 -07001584#ifndef CONFIG_DEBUG_PAGEALLOC
1585#define NUM_KTSB_DESCR 2
1586#else
1587#define NUM_KTSB_DESCR 1
1588#endif
1589static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
David S. Miller490384e2006-02-11 14:41:18 -08001590extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
1591
David S. Miller9076d0e2011-08-05 00:53:57 -07001592static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
1593{
1594 pa >>= KTSB_PHYS_SHIFT;
1595
1596 while (start < end) {
1597 unsigned int *ia = (unsigned int *)(unsigned long)*start;
1598
1599 ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
1600 __asm__ __volatile__("flush %0" : : "r" (ia));
1601
1602 ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
1603 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
1604
1605 start++;
1606 }
1607}
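/* Each patch site handed to the routine above appears to be a
 * sethi/or pair materializing the KTSB_PHYS_SHIFT-scaled TSB address
 * as a 32-bit constant: bits 31:10 of the value go into the 22-bit
 * immediate field rewritten at ia[0], and bits 9:0 into the 10-bit
 * immediate field at ia[1].  The flush after each store makes the
 * rewritten instruction visible to the instruction fetch path.
 */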
1608
1609static void ktsb_phys_patch(void)
1610{
1611 extern unsigned int __swapper_tsb_phys_patch;
1612 extern unsigned int __swapper_tsb_phys_patch_end;
David S. Miller9076d0e2011-08-05 00:53:57 -07001613 unsigned long ktsb_pa;
1614
1615 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1616 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
1617 &__swapper_tsb_phys_patch_end, ktsb_pa);
1618#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Miller0785a8e2011-08-06 05:26:35 -07001619 {
1620 extern unsigned int __swapper_4m_tsb_phys_patch;
1621 extern unsigned int __swapper_4m_tsb_phys_patch_end;
David S. Miller9076d0e2011-08-05 00:53:57 -07001622 ktsb_pa = (kern_base +
1623 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1624 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
1625 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
David S. Miller0785a8e2011-08-06 05:26:35 -07001626 }
David S. Miller9076d0e2011-08-05 00:53:57 -07001627#endif
1628}
1629
David S. Miller490384e2006-02-11 14:41:18 -08001630static void __init sun4v_ktsb_init(void)
1631{
1632 unsigned long ktsb_pa;
1633
David S. Millerd7744a02006-02-21 22:31:11 -08001634 /* First KTSB for PAGE_SIZE mappings. */
David S. Miller490384e2006-02-11 14:41:18 -08001635 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1636
1637 switch (PAGE_SIZE) {
1638 case 8 * 1024:
1639 default:
1640 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1641 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1642 break;
1643
1644 case 64 * 1024:
1645 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1646 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1647 break;
1648
1649 case 512 * 1024:
1650 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1651 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
1652 break;
1653
1654 case 4 * 1024 * 1024:
1655 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1656 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1657 break;
Joe Perches6cb79b32011-06-03 14:45:23 +00001658 }
David S. Miller490384e2006-02-11 14:41:18 -08001659
David S. Miller3f19a842006-02-17 12:03:20 -08001660 ktsb_descr[0].assoc = 1;
David S. Miller490384e2006-02-11 14:41:18 -08001661 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
1662 ktsb_descr[0].ctx_idx = 0;
1663 ktsb_descr[0].tsb_base = ktsb_pa;
1664 ktsb_descr[0].resv = 0;
1665
David S. Millerd1acb422007-03-16 17:20:28 -07001666#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Miller4f93d212012-09-06 18:13:58 -07001667 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
David S. Millerd7744a02006-02-21 22:31:11 -08001668 ktsb_pa = (kern_base +
1669 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1670
1671 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
David S. Millerc69ad0a2012-09-06 20:35:36 -07001672 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
1673 HV_PGSZ_MASK_256MB |
1674 HV_PGSZ_MASK_2GB |
1675 HV_PGSZ_MASK_16GB) &
1676 cpu_pgsz_mask);
David S. Millerd7744a02006-02-21 22:31:11 -08001677 ktsb_descr[1].assoc = 1;
1678 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
1679 ktsb_descr[1].ctx_idx = 0;
1680 ktsb_descr[1].tsb_base = ktsb_pa;
1681 ktsb_descr[1].resv = 0;
David S. Millerd1acb422007-03-16 17:20:28 -07001682#endif
David S. Miller490384e2006-02-11 14:41:18 -08001683}
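/* Note that ktsb_descr[1] above advertises the full set of huge
 * linear page sizes ANDed with cpu_pgsz_mask; the hypervisor uses
 * pgsz_mask to decide which translations to look up in a TSB, so
 * sizes the chip cannot do are never searched for in the 4M TSB.
 */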
1684
1685void __cpuinit sun4v_ktsb_register(void)
1686{
David S. Miller7db35f32007-05-29 02:22:14 -07001687 unsigned long pa, ret;
David S. Miller490384e2006-02-11 14:41:18 -08001688
1689 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1690
David S. Miller7db35f32007-05-29 02:22:14 -07001691 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
1692 if (ret != 0) {
1693		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
1694			    "returned error %lx\n", pa, ret);
1695 prom_halt();
1696 }
David S. Miller490384e2006-02-11 14:41:18 -08001697}
1698
David S. Millerc69ad0a2012-09-06 20:35:36 -07001699static void __init sun4u_linear_pte_xor_finalize(void)
1700{
1701#ifndef CONFIG_DEBUG_PAGEALLOC
1702 /* This is where we would add Panther support for
1703 * 32MB and 256MB pages.
1704 */
1705#endif
1706}
1707
1708static void __init sun4v_linear_pte_xor_finalize(void)
1709{
1710#ifndef CONFIG_DEBUG_PAGEALLOC
1711 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
1712 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
1713 0xfffff80000000000UL;
1714 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1715 _PAGE_P_4V | _PAGE_W_4V);
1716 } else {
1717 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
1718 }
1719
1720 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
1721 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
1722 0xfffff80000000000UL;
1723 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1724 _PAGE_P_4V | _PAGE_W_4V);
1725 } else {
1726 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
1727 }
1728
1729 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
1730 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
1731 0xfffff80000000000UL;
1732 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1733 _PAGE_P_4V | _PAGE_W_4V);
1734 } else {
1735 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
1736 }
1737#endif
1738}
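/* The XOR trick made concrete (illustrative): a linear kernel
 * address is PAGE_OFFSET | paddr with PAGE_OFFSET == 0xfffff80000000000,
 * and each kern_linear_pte_xor[] entry is (tte_bits ^ PAGE_OFFSET).
 * The TLB miss handler can therefore build a complete TTE with a
 * single xor:
 *
 *	vaddr ^ kern_linear_pte_xor[i]
 *		== (PAGE_OFFSET | paddr) ^ (tte_bits ^ PAGE_OFFSET)
 *		== paddr | tte_bits
 *
 * The fall-through assignments above also ensure that a bitmap slot
 * encoding a size this cpu lacks degrades to the next smaller
 * supported size rather than yielding a bogus TTE.
 */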
1739
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740/* paging_init() sets up the page tables */
1741
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742static unsigned long last_valid_pfn;
David S. Miller56425302005-09-25 16:46:57 -07001743pgd_t swapper_pg_dir[2048];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744
David S. Millerc4bce902006-02-11 21:57:54 -08001745static void sun4u_pgprot_init(void);
1746static void sun4v_pgprot_init(void);
1747
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748void __init paging_init(void)
1749{
David S. Miller919ee672008-04-23 05:40:25 -07001750 unsigned long end_pfn, shift, phys_base;
David S. Miller0836a0e2005-09-28 21:38:08 -07001751 unsigned long real_end, i;
Paul Gortmakeraa6f0792012-05-09 20:44:29 -04001752 int node;
David S. Miller0836a0e2005-09-28 21:38:08 -07001753
David S. Miller22adb352007-05-26 01:14:43 -07001754	/* These build-time checks make sure that the dcache_dirty_cpu()
1755 * page->flags usage will work.
1756 *
1757 * When a page gets marked as dcache-dirty, we store the
1758 * cpu number starting at bit 32 in the page->flags. Also,
1759 * functions like clear_dcache_dirty_cpu use the cpu mask
1760 * in 13-bit signed-immediate instruction fields.
1761 */
Christoph Lameter9223b412008-04-28 02:12:48 -07001762
1763 /*
1764 * Page flags must not reach into upper 32 bits that are used
1765 * for the cpu number
1766 */
1767 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
1768
1769 /*
1770 * The bit fields placed in the high range must not reach below
1771 * the 32 bit boundary. Otherwise we cannot place the cpu field
1772 * at the 32 bit boundary.
1773 */
David S. Miller22adb352007-05-26 01:14:43 -07001774 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
Christoph Lameter9223b412008-04-28 02:12:48 -07001775 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
1776
David S. Miller22adb352007-05-26 01:14:43 -07001777 BUILD_BUG_ON(NR_CPUS > 4096);
1778
David S. Miller481295f2006-02-07 21:51:08 -08001779 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
1780 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1781
David S. Millerd7744a02006-02-21 22:31:11 -08001782 /* Invalidate both kernel TSBs. */
David S. Miller8b234272006-02-17 18:01:02 -08001783 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
David S. Millerd1acb422007-03-16 17:20:28 -07001784#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Millerd7744a02006-02-21 22:31:11 -08001785 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
David S. Millerd1acb422007-03-16 17:20:28 -07001786#endif
David S. Miller8b234272006-02-17 18:01:02 -08001787
David S. Millerc4bce902006-02-11 21:57:54 -08001788 if (tlb_type == hypervisor)
1789 sun4v_pgprot_init();
1790 else
1791 sun4u_pgprot_init();
1792
David S. Millerd257d5d2006-02-06 23:44:37 -08001793 if (tlb_type == cheetah_plus ||
David S. Miller9076d0e2011-08-05 00:53:57 -07001794 tlb_type == hypervisor) {
David S. Miller517af332006-02-01 15:55:21 -08001795 tsb_phys_patch();
David S. Miller9076d0e2011-08-05 00:53:57 -07001796 ktsb_phys_patch();
1797 }
David S. Miller517af332006-02-01 15:55:21 -08001798
David S. Millerc69ad0a2012-09-06 20:35:36 -07001799 if (tlb_type == hypervisor)
David S. Millerd257d5d2006-02-06 23:44:37 -08001800 sun4v_patch_tlb_handlers();
1801
David S. Millera94a1722008-05-11 21:04:48 -07001802 /* Find available physical memory...
1803 *
1804 * Read it twice in order to work around a bug in openfirmware.
1805 * The call to grab this table itself can cause openfirmware to
1806 * allocate memory, which in turn can take away some space from
1807 * the list of available memory. Reading it twice makes sure
1808 * we really do get the final value.
1809 */
1810 read_obp_translations();
1811 read_obp_memory("reg", &pall[0], &pall_ents);
1812 read_obp_memory("available", &pavail[0], &pavail_ents);
David S. Miller13edad72005-09-29 17:58:26 -07001813 read_obp_memory("available", &pavail[0], &pavail_ents);
David S. Miller0836a0e2005-09-28 21:38:08 -07001814
1815 phys_base = 0xffffffffffffffffUL;
David S. Miller3b2a7e22008-02-13 18:13:20 -08001816 for (i = 0; i < pavail_ents; i++) {
David S. Miller13edad72005-09-29 17:58:26 -07001817 phys_base = min(phys_base, pavail[i].phys_addr);
Yinghai Lu95f72d12010-07-12 14:36:09 +10001818 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
David S. Miller3b2a7e22008-02-13 18:13:20 -08001819 }
1820
Yinghai Lu95f72d12010-07-12 14:36:09 +10001821 memblock_reserve(kern_base, kern_size);
David S. Miller0836a0e2005-09-28 21:38:08 -07001822
David S. Miller4e82c9a2008-02-13 18:00:03 -08001823 find_ramdisk(phys_base);
1824
Yinghai Lu95f72d12010-07-12 14:36:09 +10001825 memblock_enforce_memory_limit(cmdline_memory_size);
David S. Miller25b0c652008-02-13 18:20:14 -08001826
Tejun Heo1aadc052011-12-08 10:22:08 -08001827 memblock_allow_resize();
Yinghai Lu95f72d12010-07-12 14:36:09 +10001828 memblock_dump_all();
David S. Miller3b2a7e22008-02-13 18:13:20 -08001829
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 set_bit(0, mmu_context_bmap);
1831
David S. Miller2bdb3cb2005-09-22 01:08:57 -07001832 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
1833
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 real_end = (unsigned long)_end;
David S. Miller64658742008-03-21 17:01:38 -07001835 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
1836 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
1837 num_kernel_image_mappings);
David S. Miller2bdb3cb2005-09-22 01:08:57 -07001838
1839 /* Set kernel pgd to upper alias so physical page computations
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 * work.
1841 */
1842 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
1843
David S. Miller56425302005-09-25 16:46:57 -07001844 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845
1846 /* Now can init the kernel/bad page tables. */
1847 pud_set(pud_offset(&swapper_pg_dir[0], 0),
David S. Miller56425302005-09-25 16:46:57 -07001848 swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849
David S. Millerc9c10832005-10-12 12:22:46 -07001850 inherit_prom_mappings();
David S. Miller5085b4a2005-09-22 00:45:41 -07001851
David S. Miller8f3614532007-12-13 06:13:38 -08001852 init_kpte_bitmap();
1853
David S. Millera8b900d2006-01-31 18:33:37 -08001854 /* Ok, we can use our TLB miss and window trap handlers safely. */
1855 setup_tba();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856
David S. Millerc9c10832005-10-12 12:22:46 -07001857 __flush_tlb_all();
David S. Miller9ad98c52005-10-05 15:12:00 -07001858
David S. Millerad072002008-02-13 19:21:51 -08001859 prom_build_devicetree();
David S. Millerb696fdc2009-05-26 22:37:25 -07001860 of_populate_present_mask();
David S. Millerb99c6eb2009-06-18 01:44:19 -07001861#ifndef CONFIG_SMP
1862 of_fill_in_cpu_data();
1863#endif
David S. Millerad072002008-02-13 19:21:51 -08001864
David S. Miller890db402009-04-01 03:13:15 -07001865 if (tlb_type == hypervisor) {
David S. Miller4a283332008-02-13 19:22:23 -08001866 sun4v_mdesc_init();
Stephen Rothwell6ac5c612009-06-15 03:06:18 -07001867 mdesc_populate_present_mask(cpu_all_mask);
David S. Millerb99c6eb2009-06-18 01:44:19 -07001868#ifndef CONFIG_SMP
1869 mdesc_fill_in_cpu_data(cpu_all_mask);
1870#endif
David S. Millerce33fdc2012-09-06 19:01:25 -07001871 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
David S. Millerc69ad0a2012-09-06 20:35:36 -07001872
1873 sun4v_linear_pte_xor_finalize();
1874
1875 sun4v_ktsb_init();
1876 sun4v_ktsb_register();
David S. Millerce33fdc2012-09-06 19:01:25 -07001877 } else {
1878 unsigned long impl, ver;
1879
1880 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
1881 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
1882
1883 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
1884 impl = ((ver >> 32) & 0xffff);
1885 if (impl == PANTHER_IMPL)
1886 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
1887 HV_PGSZ_MASK_256MB);
David S. Millerc69ad0a2012-09-06 20:35:36 -07001888
1889 sun4u_linear_pte_xor_finalize();
David S. Miller890db402009-04-01 03:13:15 -07001890 }
David S. Miller4a283332008-02-13 19:22:23 -08001891
David S. Millerc69ad0a2012-09-06 20:35:36 -07001892 /* Flush the TLBs and the 4M TSB so that the updated linear
1893 * pte XOR settings are realized for all mappings.
1894 */
1895 __flush_tlb_all();
1896#ifndef CONFIG_DEBUG_PAGEALLOC
1897 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
1898#endif
1899 __flush_tlb_all();
1900
David S. Miller2bdb3cb2005-09-22 01:08:57 -07001901 /* Setup bootmem... */
David S. Miller919ee672008-04-23 05:40:25 -07001902 last_valid_pfn = end_pfn = bootmem_init(phys_base);
David S. Millerd1112012006-03-08 02:16:07 -08001903
David S. Miller5ed56f12012-04-26 20:50:34 -07001904 /* Once the OF device tree and MDESC have been setup, we know
1905 * the list of possible cpus. Therefore we can allocate the
1906 * IRQ stacks.
1907 */
1908 for_each_possible_cpu(i) {
Paul Gortmakeraa6f0792012-05-09 20:44:29 -04001909 node = cpu_to_node(i);
David S. Miller5ed56f12012-04-26 20:50:34 -07001910
1911 softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
1912 THREAD_SIZE,
1913 THREAD_SIZE, 0);
1914 hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
1915 THREAD_SIZE,
1916 THREAD_SIZE, 0);
1917 }
1918
David S. Miller56425302005-09-25 16:46:57 -07001919 kernel_physical_mapping_init();
David S. Miller56425302005-09-25 16:46:57 -07001920
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 {
David S. Miller919ee672008-04-23 05:40:25 -07001922 unsigned long max_zone_pfns[MAX_NR_ZONES];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923
David S. Miller919ee672008-04-23 05:40:25 -07001924 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925
David S. Miller919ee672008-04-23 05:40:25 -07001926 max_zone_pfns[ZONE_NORMAL] = end_pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927
David S. Miller919ee672008-04-23 05:40:25 -07001928 free_area_init_nodes(max_zone_pfns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 }
1930
David S. Miller3c62a2d2008-02-17 23:22:50 -08001931 printk("Booting Linux...\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932}
1933
David S. Miller9a2ed5c2009-04-07 01:03:58 -07001934int __devinit page_in_phys_avail(unsigned long paddr)
David S. Miller919ee672008-04-23 05:40:25 -07001935{
1936 int i;
1937
1938 paddr &= PAGE_MASK;
1939
1940 for (i = 0; i < pavail_ents; i++) {
1941 unsigned long start, end;
1942
1943 start = pavail[i].phys_addr;
1944 end = start + pavail[i].reg_size;
1945
1946 if (paddr >= start && paddr < end)
1947 return 1;
1948 }
1949 if (paddr >= kern_base && paddr < (kern_base + kern_size))
1950 return 1;
1951#ifdef CONFIG_BLK_DEV_INITRD
1952 if (paddr >= __pa(initrd_start) &&
1953 paddr < __pa(PAGE_ALIGN(initrd_end)))
1954 return 1;
1955#endif
1956
1957 return 0;
1958}
1959
1960static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
1961static int pavail_rescan_ents __initdata;
1962
1963/* Certain OBP calls, such as fetching "available" properties, can
1964 * claim physical memory. So, along with initializing the valid
1965 * address bitmap, we refetch the physical available memory
1966 * list here and make sure it provides at least as much
1967 * memory as 'pavail' does.
1968 */
David S. Millerd8ed1d42009-08-25 16:47:46 -07001969static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 int i;
1972
David S. Miller13edad72005-09-29 17:58:26 -07001973 read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974
David S. Miller13edad72005-09-29 17:58:26 -07001975 for (i = 0; i < pavail_ents; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 unsigned long old_start, old_end;
1977
David S. Miller13edad72005-09-29 17:58:26 -07001978 old_start = pavail[i].phys_addr;
David S. Miller919ee672008-04-23 05:40:25 -07001979 old_end = old_start + pavail[i].reg_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 while (old_start < old_end) {
1981 int n;
1982
David S. Millerc2a5a462006-06-22 00:01:56 -07001983 for (n = 0; n < pavail_rescan_ents; n++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 unsigned long new_start, new_end;
1985
David S. Miller13edad72005-09-29 17:58:26 -07001986 new_start = pavail_rescan[n].phys_addr;
1987 new_end = new_start +
1988 pavail_rescan[n].reg_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989
1990 if (new_start <= old_start &&
1991 new_end >= (old_start + PAGE_SIZE)) {
David S. Millerd8ed1d42009-08-25 16:47:46 -07001992 set_bit(old_start >> 22, bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 goto do_next_page;
1994 }
1995 }
David S. Miller919ee672008-04-23 05:40:25 -07001996
1997 prom_printf("mem_init: Lost memory in pavail\n");
1998 prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
1999 pavail[i].phys_addr,
2000 pavail[i].reg_size);
2001 prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
2002 pavail_rescan[i].phys_addr,
2003 pavail_rescan[i].reg_size);
2004 prom_printf("mem_init: Cannot continue, aborting.\n");
2005 prom_halt();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006
2007 do_next_page:
2008 old_start += PAGE_SIZE;
2009 }
2010 }
2011}
2012
David S. Millerd8ed1d42009-08-25 16:47:46 -07002013static void __init patch_tlb_miss_handler_bitmap(void)
2014{
2015 extern unsigned int valid_addr_bitmap_insn[];
2016 extern unsigned int valid_addr_bitmap_patch[];
2017
2018 valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
2019 mb();
2020 valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
2021 flushi(&valid_addr_bitmap_insn[0]);
2022}
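/* The patch order above is deliberate: the second instruction word
 * is stored first, mb() orders the two stores, then the first word
 * is stored and flushed.  Presumably this guarantees that a cpu
 * racing through the miss handler never observes the new first
 * instruction paired with the stale second one.
 */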
2023
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024void __init mem_init(void)
2025{
2026 unsigned long codepages, datapages, initpages;
2027 unsigned long addr, last;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028
2029 addr = PAGE_OFFSET + kern_base;
2030 last = PAGE_ALIGN(kern_size) + addr;
2031 while (addr < last) {
2032 set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
2033 addr += PAGE_SIZE;
2034 }
2035
David S. Millerd8ed1d42009-08-25 16:47:46 -07002036 setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
2037 patch_tlb_miss_handler_bitmap();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2040
David S. Miller919ee672008-04-23 05:40:25 -07002041#ifdef CONFIG_NEED_MULTIPLE_NODES
David S. Millerd8ed1d42009-08-25 16:47:46 -07002042 {
2043 int i;
2044 for_each_online_node(i) {
2045 if (NODE_DATA(i)->node_spanned_pages != 0) {
2046 totalram_pages +=
2047 free_all_bootmem_node(NODE_DATA(i));
2048 }
David S. Miller919ee672008-04-23 05:40:25 -07002049 }
David S. Miller625d6932012-04-25 13:13:43 -07002050 totalram_pages += free_low_memory_core_early(MAX_NUMNODES);
David S. Miller919ee672008-04-23 05:40:25 -07002051 }
2052#else
2053 totalram_pages = free_all_bootmem();
2054#endif
2055
David S. Millerf1cfdb52007-03-15 22:52:18 -07002056 /* We subtract one to account for the mem_map_zero page
2057 * allocated below.
2058 */
David S. Miller919ee672008-04-23 05:40:25 -07002059 totalram_pages -= 1;
2060 num_physpages = totalram_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061
2062 /*
2063 * Set up the zero page, mark it reserved, so that page count
2064 * is not manipulated when freeing the page from user ptes.
2065 */
2066 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2067 if (mem_map_zero == NULL) {
2068 prom_printf("paging_init: Cannot alloc zero page.\n");
2069 prom_halt();
2070 }
2071 SetPageReserved(mem_map_zero);
2072
2073 codepages = (((unsigned long) _etext) - ((unsigned long) _start));
2074 codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
2075 datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
2076 datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
2077 initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
2078 initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
2079
Christoph Lameter96177292007-02-10 01:43:03 -08002080 printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 nr_free_pages() << (PAGE_SHIFT-10),
2082 codepages << (PAGE_SHIFT-10),
2083 datapages << (PAGE_SHIFT-10),
2084 initpages << (PAGE_SHIFT-10),
2085 PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
2086
2087 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2088 cheetah_ecache_flush_init();
2089}
2090
David S. Miller898cf0e2005-09-23 11:59:44 -07002091void free_initmem(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092{
2093 unsigned long addr, initend;
David S. Millerf2b60792008-08-14 01:45:41 -07002094 int do_free = 1;
2095
2096 /* If the physical memory maps were trimmed by kernel command
2097 * line options, don't even try freeing this initmem stuff up.
2098 * The kernel image could have been in the trimmed out region
2099 * and if so the freeing below will free invalid page structs.
2100 */
2101 if (cmdline_memory_size)
2102 do_free = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103
2104 /*
2105	 * The init section is aligned to 8k in vmlinux.lds.  Page align it for page sizes larger than 8k.
2106 */
2107 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2108 initend = (unsigned long)(__init_end) & PAGE_MASK;
2109 for (; addr < initend; addr += PAGE_SIZE) {
2110 unsigned long page;
2111 struct page *p;
2112
2113 page = (addr +
2114 ((unsigned long) __va(kern_base)) -
2115 ((unsigned long) KERNBASE));
Randy Dunlapc9cf5522006-06-27 02:53:52 -07002116 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
David S. Millerf2b60792008-08-14 01:45:41 -07002118 if (do_free) {
2119 p = virt_to_page(page);
2120
2121 ClearPageReserved(p);
2122 init_page_count(p);
2123 __free_page(p);
2124 num_physpages++;
2125 totalram_pages++;
2126 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 }
2128}
2129
2130#ifdef CONFIG_BLK_DEV_INITRD
2131void free_initrd_mem(unsigned long start, unsigned long end)
2132{
2133 if (start < end)
2134		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
2135 for (; start < end; start += PAGE_SIZE) {
2136 struct page *p = virt_to_page(start);
2137
2138 ClearPageReserved(p);
Nick Piggin7835e982006-03-22 00:08:40 -08002139 init_page_count(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 __free_page(p);
2141 num_physpages++;
2142 totalram_pages++;
2143 }
2144}
2145#endif
David S. Millerc4bce902006-02-11 21:57:54 -08002146
David S. Millerc4bce902006-02-11 21:57:54 -08002147#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2148#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2149#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2150#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2151#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2152#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2153
2154pgprot_t PAGE_KERNEL __read_mostly;
2155EXPORT_SYMBOL(PAGE_KERNEL);
2156
2157pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2158pgprot_t PAGE_COPY __read_mostly;
David S. Miller0f159522006-02-18 12:43:16 -08002159
2160pgprot_t PAGE_SHARED __read_mostly;
2161EXPORT_SYMBOL(PAGE_SHARED);
2162
David S. Millerc4bce902006-02-11 21:57:54 -08002163unsigned long pg_iobits __read_mostly;
2164
2165unsigned long _PAGE_IE __read_mostly;
David S. Miller987c74f2006-06-25 01:34:43 -07002166EXPORT_SYMBOL(_PAGE_IE);
David S. Millerb2bef442006-02-23 01:55:55 -08002167
David S. Millerc4bce902006-02-11 21:57:54 -08002168unsigned long _PAGE_E __read_mostly;
David S. Millerb2bef442006-02-23 01:55:55 -08002169EXPORT_SYMBOL(_PAGE_E);
2170
David S. Millerc4bce902006-02-11 21:57:54 -08002171unsigned long _PAGE_CACHE __read_mostly;
David S. Millerb2bef442006-02-23 01:55:55 -08002172EXPORT_SYMBOL(_PAGE_CACHE);
David S. Millerc4bce902006-02-11 21:57:54 -08002173
David Miller46644c22007-10-16 01:24:16 -07002174#ifdef CONFIG_SPARSEMEM_VMEMMAP
David Miller46644c22007-10-16 01:24:16 -07002175unsigned long vmemmap_table[VMEMMAP_SIZE];
2176
David S. Miller2856cc22012-08-15 00:37:29 -07002177static long __meminitdata addr_start, addr_end;
2178static int __meminitdata node_start;
2179
David Miller46644c22007-10-16 01:24:16 -07002180int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
2181{
2182 unsigned long vstart = (unsigned long) start;
2183 unsigned long vend = (unsigned long) (start + nr);
2184 unsigned long phys_start = (vstart - VMEMMAP_BASE);
2185 unsigned long phys_end = (vend - VMEMMAP_BASE);
2186 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
2187 unsigned long end = VMEMMAP_ALIGN(phys_end);
2188 unsigned long pte_base;
2189
2190 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2191 _PAGE_CP_4U | _PAGE_CV_4U |
2192 _PAGE_P_4U | _PAGE_W_4U);
2193 if (tlb_type == hypervisor)
2194 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2195 _PAGE_CP_4V | _PAGE_CV_4V |
2196 _PAGE_P_4V | _PAGE_W_4V);
2197
2198 for (; addr < end; addr += VMEMMAP_CHUNK) {
2199 unsigned long *vmem_pp =
2200 vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
2201 void *block;
2202
2203 if (!(*vmem_pp & _PAGE_VALID)) {
2204 block = vmemmap_alloc_block(1UL << 22, node);
2205 if (!block)
2206 return -ENOMEM;
2207
2208 *vmem_pp = pte_base | __pa(block);
2209
David S. Miller2856cc22012-08-15 00:37:29 -07002210 /* check to see if we have contiguous blocks */
2211 if (addr_end != addr || node_start != node) {
2212 if (addr_start)
2213 printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
2214 addr_start, addr_end-1, node_start);
2215 addr_start = addr;
2216 node_start = node;
2217 }
2218 addr_end = addr + VMEMMAP_CHUNK;
David Miller46644c22007-10-16 01:24:16 -07002219 }
2220 }
2221 return 0;
2222}
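/* Illustrative numbers for the mapping above (assuming VMEMMAP_CHUNK
 * matches the 4MB blocks obtained from vmemmap_alloc_block): the
 * struct page range is rebased against VMEMMAP_BASE, rounded out to
 * whole 4MB chunks, and each chunk gets one huge TTE
 * (pte_base | __pa(block)) parked in
 * vmemmap_table[addr >> VMEMMAP_CHUNK_SHIFT].  A vmemmap TLB miss is
 * then resolved with a single indexed load from that table.
 */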
David S. Miller2856cc22012-08-15 00:37:29 -07002223
2224void __meminit vmemmap_populate_print_last(void)
2225{
2226 if (addr_start) {
2227 printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
2228 addr_start, addr_end-1, node_start);
2229 addr_start = 0;
2230 addr_end = 0;
2231 node_start = 0;
2232 }
2233}
David Miller46644c22007-10-16 01:24:16 -07002234#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2235
David S. Millerc4bce902006-02-11 21:57:54 -08002236static void prot_init_common(unsigned long page_none,
2237 unsigned long page_shared,
2238 unsigned long page_copy,
2239 unsigned long page_readonly,
2240 unsigned long page_exec_bit)
2241{
2242 PAGE_COPY = __pgprot(page_copy);
David S. Miller0f159522006-02-18 12:43:16 -08002243 PAGE_SHARED = __pgprot(page_shared);
David S. Millerc4bce902006-02-11 21:57:54 -08002244
2245 protection_map[0x0] = __pgprot(page_none);
2246 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2247 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2248 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2249 protection_map[0x4] = __pgprot(page_readonly);
2250 protection_map[0x5] = __pgprot(page_readonly);
2251 protection_map[0x6] = __pgprot(page_copy);
2252 protection_map[0x7] = __pgprot(page_copy);
2253 protection_map[0x8] = __pgprot(page_none);
2254 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2255 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2256 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2257 protection_map[0xc] = __pgprot(page_readonly);
2258 protection_map[0xd] = __pgprot(page_readonly);
2259 protection_map[0xe] = __pgprot(page_shared);
2260 protection_map[0xf] = __pgprot(page_shared);
2261}
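/* The protection_map[] index above is the low nibble of vm_flags:
 * bit 0 is VM_READ, bit 1 VM_WRITE, bit 2 VM_EXEC, bit 3 VM_SHARED.
 * Hence slot 0x3 (private read+write) gets page_copy, the
 * copy-on-write protection, while slot 0xb (shared read+write) gets
 * page_shared, which carries a real write bit; and the exec bit is
 * stripped from every slot whose index lacks VM_EXEC.
 */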
2262
2263static void __init sun4u_pgprot_init(void)
2264{
2265 unsigned long page_none, page_shared, page_copy, page_readonly;
2266 unsigned long page_exec_bit;
David S. Miller4f93d212012-09-06 18:13:58 -07002267 int i;
David S. Millerc4bce902006-02-11 21:57:54 -08002268
2269 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2270 _PAGE_CACHE_4U | _PAGE_P_4U |
2271 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2272 _PAGE_EXEC_4U);
2273 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2274 _PAGE_CACHE_4U | _PAGE_P_4U |
2275 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2276 _PAGE_EXEC_4U | _PAGE_L_4U);
David S. Millerc4bce902006-02-11 21:57:54 -08002277
2278 _PAGE_IE = _PAGE_IE_4U;
2279 _PAGE_E = _PAGE_E_4U;
2280 _PAGE_CACHE = _PAGE_CACHE_4U;
2281
2282 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2283 __ACCESS_BITS_4U | _PAGE_E_4U);
2284
David S. Millerd1acb422007-03-16 17:20:28 -07002285#ifdef CONFIG_DEBUG_PAGEALLOC
David Miller15b93502012-10-08 16:34:19 -07002286 kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL;
David S. Millerd1acb422007-03-16 17:20:28 -07002287#else
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002288 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
David S. Milleraf1ee562008-09-12 00:19:21 -07002289 0xfffff80000000000UL;
David S. Millerd1acb422007-03-16 17:20:28 -07002290#endif
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002291 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2292 _PAGE_P_4U | _PAGE_W_4U);
2293
David S. Miller4f93d212012-09-06 18:13:58 -07002294 for (i = 1; i < 4; i++)
2295 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
David S. Millerc4bce902006-02-11 21:57:54 -08002296
David S. Millerc4bce902006-02-11 21:57:54 -08002297 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2298 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2299 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2300
2301
2302 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2303 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2304 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2305 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2306 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2307 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2308 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2309
2310 page_exec_bit = _PAGE_EXEC_4U;
2311
2312 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2313 page_exec_bit);
2314}
2315
2316static void __init sun4v_pgprot_init(void)
2317{
2318 unsigned long page_none, page_shared, page_copy, page_readonly;
2319 unsigned long page_exec_bit;
David S. Miller4f93d212012-09-06 18:13:58 -07002320 int i;
David S. Millerc4bce902006-02-11 21:57:54 -08002321
2322 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2323 _PAGE_CACHE_4V | _PAGE_P_4V |
2324 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2325 _PAGE_EXEC_4V);
2326 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
David S. Millerc4bce902006-02-11 21:57:54 -08002327
2328 _PAGE_IE = _PAGE_IE_4V;
2329 _PAGE_E = _PAGE_E_4V;
2330 _PAGE_CACHE = _PAGE_CACHE_4V;
2331
David S. Millerd1acb422007-03-16 17:20:28 -07002332#ifdef CONFIG_DEBUG_PAGEALLOC
David Miller15b93502012-10-08 16:34:19 -07002333 kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL;
David S. Millerd1acb422007-03-16 17:20:28 -07002334#else
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002335 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
David S. Milleraf1ee562008-09-12 00:19:21 -07002336 0xfffff80000000000UL;
David S. Millerd1acb422007-03-16 17:20:28 -07002337#endif
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002338 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2339 _PAGE_P_4V | _PAGE_W_4V);
2340
David S. Millerc69ad0a2012-09-06 20:35:36 -07002341 for (i = 1; i < 4; i++)
2342 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
David S. Miller4f93d212012-09-06 18:13:58 -07002343
David S. Millerc4bce902006-02-11 21:57:54 -08002344 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2345 __ACCESS_BITS_4V | _PAGE_E_4V);
2346
David S. Millerc4bce902006-02-11 21:57:54 -08002347 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2348 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2349 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2350 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2351
2352 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
2353 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2354 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2355 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2356 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2357 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2358 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2359
2360 page_exec_bit = _PAGE_EXEC_4V;
2361
2362 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2363 page_exec_bit);
2364}
2365
2366unsigned long pte_sz_bits(unsigned long sz)
2367{
2368 if (tlb_type == hypervisor) {
2369 switch (sz) {
2370 case 8 * 1024:
2371 default:
2372 return _PAGE_SZ8K_4V;
2373 case 64 * 1024:
2374 return _PAGE_SZ64K_4V;
2375 case 512 * 1024:
2376 return _PAGE_SZ512K_4V;
2377 case 4 * 1024 * 1024:
2378 return _PAGE_SZ4MB_4V;
Joe Perches6cb79b32011-06-03 14:45:23 +00002379 }
David S. Millerc4bce902006-02-11 21:57:54 -08002380 } else {
2381 switch (sz) {
2382 case 8 * 1024:
2383 default:
2384 return _PAGE_SZ8K_4U;
2385 case 64 * 1024:
2386 return _PAGE_SZ64K_4U;
2387 case 512 * 1024:
2388 return _PAGE_SZ512K_4U;
2389 case 4 * 1024 * 1024:
2390 return _PAGE_SZ4MB_4U;
Joe Perches6cb79b32011-06-03 14:45:23 +00002391 }
David S. Millerc4bce902006-02-11 21:57:54 -08002392 }
2393}
2394
2395pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2396{
2397 pte_t pte;
David S. Millercf627152006-02-12 21:10:07 -08002398
2399 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
David S. Millerc4bce902006-02-11 21:57:54 -08002400 pte_val(pte) |= (((unsigned long)space) << 32);
2401 pte_val(pte) |= pte_sz_bits(page_size);
David S. Millercf627152006-02-12 21:10:07 -08002402
David S. Millerc4bce902006-02-11 21:57:54 -08002403 return pte;
2404}
2405
David S. Millerc4bce902006-02-11 21:57:54 -08002406static unsigned long kern_large_tte(unsigned long paddr)
2407{
2408 unsigned long val;
2409
2410 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2411 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2412 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2413 if (tlb_type == hypervisor)
2414 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2415 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
2416 _PAGE_EXEC_4V | _PAGE_W_4V);
2417
2418 return val | paddr;
2419}
2420
David S. Millerc4bce902006-02-11 21:57:54 -08002421/* If not locked, zap it. */
2422void __flush_tlb_all(void)
2423{
2424 unsigned long pstate;
2425 int i;
2426
2427 __asm__ __volatile__("flushw\n\t"
2428 "rdpr %%pstate, %0\n\t"
2429 "wrpr %0, %1, %%pstate"
2430 : "=r" (pstate)
2431 : "i" (PSTATE_IE));
David S. Miller8f3614532007-12-13 06:13:38 -08002432 if (tlb_type == hypervisor) {
2433 sun4v_mmu_demap_all();
2434 } else if (tlb_type == spitfire) {
David S. Millerc4bce902006-02-11 21:57:54 -08002435 for (i = 0; i < 64; i++) {
2436 /* Spitfire Errata #32 workaround */
2437 /* NOTE: Always runs on spitfire, so no
2438 * cheetah+ page size encodings.
2439 */
2440 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2441 "flush %%g6"
2442 : /* No outputs */
2443 : "r" (0),
2444 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2445
2446 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2447 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2448 "membar #Sync"
2449 : /* no outputs */
2450 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2451 spitfire_put_dtlb_data(i, 0x0UL);
2452 }
2453
2454 /* Spitfire Errata #32 workaround */
2455 /* NOTE: Always runs on spitfire, so no
2456 * cheetah+ page size encodings.
2457 */
2458 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2459 "flush %%g6"
2460 : /* No outputs */
2461 : "r" (0),
2462 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2463
2464 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2465 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2466 "membar #Sync"
2467 : /* no outputs */
2468 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2469 spitfire_put_itlb_data(i, 0x0UL);
2470 }
2471 }
2472 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2473 cheetah_flush_dtlb_all();
2474 cheetah_flush_itlb_all();
2475 }
2476 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2477 : : "r" (pstate));
2478}
David Millerc460bec2012-10-08 16:34:22 -07002479
2480static pte_t *get_from_cache(struct mm_struct *mm)
2481{
2482 struct page *page;
2483 pte_t *ret;
2484
2485 spin_lock(&mm->page_table_lock);
2486 page = mm->context.pgtable_page;
2487 ret = NULL;
2488 if (page) {
2489 void *p = page_address(page);
2490
2491 mm->context.pgtable_page = NULL;
2492
2493 ret = (pte_t *) (p + (PAGE_SIZE / 2));
2494 }
2495 spin_unlock(&mm->page_table_lock);
2496
2497 return ret;
2498}
2499
2500static struct page *__alloc_for_cache(struct mm_struct *mm)
2501{
2502 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2503 __GFP_REPEAT | __GFP_ZERO);
2504
2505 if (page) {
2506 spin_lock(&mm->page_table_lock);
2507 if (!mm->context.pgtable_page) {
2508 atomic_set(&page->_count, 2);
2509 mm->context.pgtable_page = page;
2510 }
2511 spin_unlock(&mm->page_table_lock);
2512 }
2513 return page;
2514}
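/* The two helpers above split one PAGE_SIZE page into two half-page
 * pte tables: __alloc_for_cache() hands out the first half and parks
 * the page in mm->context.pgtable_page, and get_from_cache()
 * satisfies the next request from the second half.  Setting _count
 * to 2 gives each half its own reference, so the page only returns
 * to the allocator after both halves pass put_page_testzero() in the
 * free paths below.
 */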
2515
2516pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2517 unsigned long address)
2518{
2519 struct page *page;
2520 pte_t *pte;
2521
2522 pte = get_from_cache(mm);
2523 if (pte)
2524 return pte;
2525
2526 page = __alloc_for_cache(mm);
2527 if (page)
2528 pte = (pte_t *) page_address(page);
2529
2530 return pte;
2531}
2532
2533pgtable_t pte_alloc_one(struct mm_struct *mm,
2534 unsigned long address)
2535{
2536 struct page *page;
2537 pte_t *pte;
2538
2539 pte = get_from_cache(mm);
2540 if (pte)
2541 return pte;
2542
2543 page = __alloc_for_cache(mm);
2544 if (page) {
2545 pgtable_page_ctor(page);
2546 pte = (pte_t *) page_address(page);
2547 }
2548
2549 return pte;
2550}
2551
2552void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2553{
2554 struct page *page = virt_to_page(pte);
2555 if (put_page_testzero(page))
2556 free_hot_cold_page(page, 0);
2557}
2558
2559static void __pte_free(pgtable_t pte)
2560{
2561 struct page *page = virt_to_page(pte);
2562 if (put_page_testzero(page)) {
2563 pgtable_page_dtor(page);
2564 free_hot_cold_page(page, 0);
2565 }
2566}
2567
2568void pte_free(struct mm_struct *mm, pgtable_t pte)
2569{
2570 __pte_free(pte);
2571}
2572
2573void pgtable_free(void *table, bool is_page)
2574{
2575 if (is_page)
2576 __pte_free(table);
2577 else
2578 kmem_cache_free(pgtable_cache, table);
2579}
David Miller9e695d22012-10-08 16:34:29 -07002580
2581#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2582static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify)
2583{
2584 if (pgprot_val(pgprot) & _PAGE_VALID)
2585 pmd_val(pmd) |= PMD_HUGE_PRESENT;
2586 if (tlb_type == hypervisor) {
2587 if (pgprot_val(pgprot) & _PAGE_WRITE_4V)
2588 pmd_val(pmd) |= PMD_HUGE_WRITE;
2589 if (pgprot_val(pgprot) & _PAGE_EXEC_4V)
2590 pmd_val(pmd) |= PMD_HUGE_EXEC;
2591
2592 if (!for_modify) {
2593 if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V)
2594 pmd_val(pmd) |= PMD_HUGE_ACCESSED;
2595 if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V)
2596 pmd_val(pmd) |= PMD_HUGE_DIRTY;
2597 }
2598 } else {
2599 if (pgprot_val(pgprot) & _PAGE_WRITE_4U)
2600 pmd_val(pmd) |= PMD_HUGE_WRITE;
2601 if (pgprot_val(pgprot) & _PAGE_EXEC_4U)
2602 pmd_val(pmd) |= PMD_HUGE_EXEC;
2603
2604 if (!for_modify) {
2605 if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U)
2606 pmd_val(pmd) |= PMD_HUGE_ACCESSED;
2607 if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U)
2608 pmd_val(pmd) |= PMD_HUGE_DIRTY;
2609 }
2610 }
2611
2612 return pmd;
2613}
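/* pmd_set_protbits() above compresses the relevant 4U/4V pte
 * protection bits into the few PMD_HUGE_* flags a huge pmd can
 * carry; pmd_pgprot() below performs the inverse expansion, and
 * update_mmu_cache_pmd() relies on that round trip to rebuild a
 * full TTE when inserting the mapping into the huge-page TSB.
 */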
2614
2615pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
2616{
2617 pmd_t pmd;
2618
2619 pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT)));
2620 pmd_val(pmd) |= PMD_ISHUGE;
2621 pmd = pmd_set_protbits(pmd, pgprot, false);
2622 return pmd;
2623}
2624
2625pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
2626{
2627 pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
2628 PMD_HUGE_WRITE |
2629 PMD_HUGE_EXEC);
2630 pmd = pmd_set_protbits(pmd, newprot, true);
2631 return pmd;
2632}
2633
2634pgprot_t pmd_pgprot(pmd_t entry)
2635{
2636 unsigned long pte = 0;
2637
2638 if (pmd_val(entry) & PMD_HUGE_PRESENT)
2639 pte |= _PAGE_VALID;
2640
2641 if (tlb_type == hypervisor) {
2642 if (pmd_val(entry) & PMD_HUGE_PRESENT)
2643 pte |= _PAGE_PRESENT_4V;
2644 if (pmd_val(entry) & PMD_HUGE_EXEC)
2645 pte |= _PAGE_EXEC_4V;
2646 if (pmd_val(entry) & PMD_HUGE_WRITE)
2647 pte |= _PAGE_W_4V;
2648 if (pmd_val(entry) & PMD_HUGE_ACCESSED)
2649 pte |= _PAGE_ACCESSED_4V;
2650 if (pmd_val(entry) & PMD_HUGE_DIRTY)
2651 pte |= _PAGE_MODIFIED_4V;
2652 pte |= _PAGE_CP_4V|_PAGE_CV_4V;
2653 } else {
2654 if (pmd_val(entry) & PMD_HUGE_PRESENT)
2655 pte |= _PAGE_PRESENT_4U;
2656 if (pmd_val(entry) & PMD_HUGE_EXEC)
2657 pte |= _PAGE_EXEC_4U;
2658 if (pmd_val(entry) & PMD_HUGE_WRITE)
2659 pte |= _PAGE_W_4U;
2660 if (pmd_val(entry) & PMD_HUGE_ACCESSED)
2661 pte |= _PAGE_ACCESSED_4U;
2662 if (pmd_val(entry) & PMD_HUGE_DIRTY)
2663 pte |= _PAGE_MODIFIED_4U;
2664 pte |= _PAGE_CP_4U|_PAGE_CV_4U;
2665 }
2666
2667 return __pgprot(pte);
2668}
2669
2670void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2671 pmd_t *pmd)
2672{
2673 unsigned long pte, flags;
2674 struct mm_struct *mm;
2675 pmd_t entry = *pmd;
2676 pgprot_t prot;
2677
2678 if (!pmd_large(entry) || !pmd_young(entry))
2679 return;
2680
2681 pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS);
2682 pte <<= PMD_PADDR_SHIFT;
2683 pte |= _PAGE_VALID;
2684
2685 prot = pmd_pgprot(entry);
2686
2687 if (tlb_type == hypervisor)
2688 pgprot_val(prot) |= _PAGE_SZHUGE_4V;
2689 else
2690 pgprot_val(prot) |= _PAGE_SZHUGE_4U;
2691
2692 pte |= pgprot_val(prot);
2693
2694 mm = vma->vm_mm;
2695
2696 spin_lock_irqsave(&mm->context.lock, flags);
2697
2698 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
2699 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
2700 addr, pte);
2701
2702 spin_unlock_irqrestore(&mm->context.lock, flags);
2703}
2704#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2705
2706#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2707static void context_reload(void *__data)
2708{
2709 struct mm_struct *mm = __data;
2710
2711 if (mm == current->mm)
2712 load_secondary_context(mm);
2713}
2714
2715void hugetlb_setup(struct mm_struct *mm)
2716{
2717 struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
2718
2719 if (likely(tp->tsb != NULL))
2720 return;
2721
2722 tsb_grow(mm, MM_TSB_HUGE, 0);
2723 tsb_context_switch(mm);
2724 smp_tsb_sync(mm);
2725
2726 /* On UltraSPARC-III+ and later, configure the second half of
2727 * the Data-TLB for huge pages.
2728 */
2729 if (tlb_type == cheetah_plus) {
2730 unsigned long ctx;
2731
2732 spin_lock(&ctx_alloc_lock);
2733 ctx = mm->context.sparc64_ctx_val;
2734 ctx &= ~CTX_PGSZ_MASK;
2735 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
2736 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
2737
2738 if (ctx != mm->context.sparc64_ctx_val) {
2739 /* When changing the page size fields, we
2740 * must perform a context flush so that no
2741 * stale entries match. This flush must
2742 * occur with the original context register
2743 * settings.
2744 */
2745 do_flush_tlb_mm(mm);
2746
2747 /* Reload the context register of all processors
2748 * also executing in this address space.
2749 */
2750 mm->context.sparc64_ctx_val = ctx;
2751 on_each_cpu(context_reload, mm, 0);
2752 }
2753 spin_unlock(&ctx_alloc_lock);
2754 }
2755}
2756#endif