/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[4] __read_mostly;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *	0	==>	4MB
 *	1	==>	256MB
 *	2	==>	2GB
 *	3	==>	16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
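
/* For example: a physical address PA is covered by two-bit entry
 * number PA >> 28, since each entry describes one naturally-aligned
 * 256MB region; kpte_set_val() further below packs these entries,
 * 32 per unsigned long.
 */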

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}
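
/* Worked example of the sanitization above, with 8KB pages: a bank
 * reported as base 0x20000400/size 0x100000 keeps its page-multiple
 * size, then base rounds up to 0x20002000 and the 0x1c00 skipped
 * bytes are subtracted, leaving size 0xfe400; a bank whose size
 * underflows to zero is dropped from the array entirely.
 */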

unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
					sizeof(unsigned long)];
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
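
/* Packing illustration: with NR_CPUS rounded up to 64, the mask is
 * 0x3f and the number of the cpu that dirtied a page sits in bits
 * 32-37 of page->flags, alongside the PG_dcache_dirty flag itself,
 * so one atomic word records both "dirty" and "whose D-cache".
 */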

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
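
/* Both helpers above are lock-free: they retry the casx until the
 * compare-and-swap of page->flags succeeds, and the clear variant
 * branches out early (to 2:) when the recorded dirtying cpu no
 * longer matches, so no page lock is ever taken on this path.
 */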

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

/* mm->context.lock must be held */
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
				    unsigned long tsb_hash_shift, unsigned long address,
				    unsigned long tte)
{
	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
	unsigned long tag;

	if (unlikely(!tsb))
		return;

	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, tte);
}
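
/* Indexing example for the function above, assuming a 512-entry base
 * TSB and 8KB pages (tsb_hash_shift == PAGE_SHIFT == 13): a fault at
 * address A selects entry (A >> 13) & 511 and stores tag A >> 22,
 * which the TLB miss handlers compare on lookup.
 */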

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline bool is_hugetlb_pte(pte_t pte)
{
	if ((tlb_type == hypervisor &&
	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
	    (tlb_type != hypervisor &&
	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
		return true;
	return false;
}
#endif

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	unsigned long flags;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
	if (!pte_accessible(mm, pte))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					address, pte_val(pte));
	else
#endif
		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
					address, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certain to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	static const char *pgsz_strings[] = {
		"8K", "64K", "512K", "4MB", "32MB",
		"256MB", "2GB", "16GB",
	};
	int i, printed;

	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

	seq_printf(m, "MMU PGSZs\t: ");
	printed = 0;
	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
		if (cpu_pgsz_mask & (1UL << i)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", pgsz_strings[i]);
			printed++;
		}
	}
	seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}
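
/* After the sort and compaction above, prom_trans[] holds only the
 * translations that fall inside [LOW_OBP_ADDRESS, HI_OBP_ADDRESS),
 * packed at the front in ascending virtual address order with the
 * tail slots zeroed, and prom_trans_ents counts the live entries.
 */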

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}


static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs(get_fs());

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	int new_version;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}
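
/* Encoding example for the allocator above: the low CTX_NR_BITS of a
 * context value index mmu_context_bmap, and the bits above them hold
 * a version number.  When the bitmap fills, the version is bumped and
 * the map reset with only contexts 0 and 1 reserved (the initial
 * value 3); mms still holding an old version fail CTX_VALID() and
 * come back here on their next activation.
 */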

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

#ifdef CONFIG_NEED_MULTIPLE_NODES

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}

static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
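
/* memblock_nid_range() scans a physical range one page at a time and
 * returns the first boundary where the owning node changes, which
 * add_node_ranges() below uses to split memblock regions straddling
 * nodes.  E.g. if node 0 ends at 0x80000000, a range starting at
 * 0x7fffe000 comes back clamped to 0x80000000 with *nid == 0.
 */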
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	struct pglist_data *p;
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->node_id = nid;
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;
#endif

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
#endif
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void __init add_node_ranges(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start,
					  &memblock.memory, nid);
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);

		/* The address-congruence-offset property is optional.
		 * Explicitly zero it to identify this case.
		 */
		if (val)
			m->offset = *val;
		else
			m->offset = 0UL;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}
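
/* JBUS encoding example: with the mask/val pairs set up above, a
 * physical address belongs to the node of cpu N exactly when its
 * bits 36 and up equal N, e.g. addresses 0x1000000000-0x1fffffffff
 * all match node_masks[1].val == (1UL << 36UL).
 */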

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
	allocate_node_data(0);
	node_set_online(0);
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* Dump memblock with node info. */
	memblock_dump_all();

	/* XXX cpu notifier XXX */

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

static void __init kpte_set_val(unsigned long index, unsigned long val)
{
	unsigned long *ptr = kpte_linear_bitmap;

	val <<= ((index % (BITS_PER_LONG / 2)) * 2);
	ptr += (index / (BITS_PER_LONG / 2));

	*ptr |= val;
}
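
/* Packing example for kpte_set_val(): with BITS_PER_LONG == 64 each
 * long holds 32 two-bit entries, so index 16 lands in
 * kpte_linear_bitmap[0] at bits 32-33 and index 33 lands in
 * kpte_linear_bitmap[1] at bits 2-3.
 */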

static const unsigned long kpte_shift_min = 28; /* 256MB */
static const unsigned long kpte_shift_max = 34; /* 16GB */
static const unsigned long kpte_shift_incr = 3;

static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
					   unsigned long shift)
{
	unsigned long size = (1UL << shift);
	unsigned long mask = (size - 1UL);
	unsigned long remains = end - start;
	unsigned long val;

	if (remains < size || (start & mask))
		return start;

	/* VAL maps:
	 *
	 *	shift 28 --> kern_linear_pte_xor index 1
	 *	shift 31 --> kern_linear_pte_xor index 2
	 *	shift 34 --> kern_linear_pte_xor index 3
	 */
	val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;

	remains &= ~mask;
	if (shift != kpte_shift_max)
		remains = size;

	while (remains) {
		unsigned long index = start >> kpte_shift_min;

		kpte_set_val(index, val);

		start += 1UL << kpte_shift_min;
		remains -= 1UL << kpte_shift_min;
	}

	return start;
}
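
/* Worked example: a 2GB-aligned chunk of at least 2GB passed to
 * kpte_mark_using_shift() with shift == 31 yields val == 2 (the
 * kern_linear_pte_xor index for 2GB pages), marks the eight 256MB
 * bitmap slots covering it with that value, and returns start
 * advanced by 2GB.
 */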
1475
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001476static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
1477{
David S. Miller4f93d212012-09-06 18:13:58 -07001478 unsigned long smallest_size, smallest_mask;
1479 unsigned long s;
1480
1481 smallest_size = (1UL << kpte_shift_min);
1482 smallest_mask = (smallest_size - 1UL);
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001483
1484 while (start < end) {
David S. Miller4f93d212012-09-06 18:13:58 -07001485 unsigned long orig_start = start;
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001486
David S. Miller4f93d212012-09-06 18:13:58 -07001487 for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
1488 start = kpte_mark_using_shift(start, end, s);
David S. Millerf7c00332006-03-05 22:18:50 -08001489
David S. Miller4f93d212012-09-06 18:13:58 -07001490 if (start != orig_start)
1491 break;
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001492 }
1493
David S. Miller4f93d212012-09-06 18:13:58 -07001494 if (start == orig_start)
1495 start = (start + smallest_size) & ~smallest_mask;
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001496 }
1497}
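/* Illustrative walk (not in the original source): for a 4GB region
 * at physical 0, the loop tries 16GB first (too small, start is
 * unchanged), then marks [0,2GB) and [2GB,4GB) with the 2GB shift
 * over two iterations.  When even the 256MB shift makes no progress
 * (an unaligned or sub-256MB tail), start is bumped to the next
 * 256MB boundary and those slots stay 0, i.e. they keep 4MB pages.
 */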
David S. Miller56425302005-09-25 16:46:57 -07001498
David S. Miller8f3614532007-12-13 06:13:38 -08001499static void __init init_kpte_bitmap(void)
David S. Miller56425302005-09-25 16:46:57 -07001500{
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001501 unsigned long i;
David S. Miller13edad72005-09-29 17:58:26 -07001502
1503 for (i = 0; i < pall_ents; i++) {
David S. Miller56425302005-09-25 16:46:57 -07001504 unsigned long phys_start, phys_end;
1505
David S. Miller13edad72005-09-29 17:58:26 -07001506 phys_start = pall[i].phys_addr;
1507 phys_end = phys_start + pall[i].reg_size;
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001508
1509 mark_kpte_bitmap(phys_start, phys_end);
David S. Miller8f3614532007-12-13 06:13:38 -08001510 }
1511}
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001512
David S. Miller8f3614532007-12-13 06:13:38 -08001513static void __init kernel_physical_mapping_init(void)
1514{
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001515#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller8f3614532007-12-13 06:13:38 -08001516 unsigned long i, mem_alloced = 0UL;
1517
1518 for (i = 0; i < pall_ents; i++) {
1519 unsigned long phys_start, phys_end;
1520
1521 phys_start = pall[i].phys_addr;
1522 phys_end = phys_start + pall[i].reg_size;
1523
David S. Miller56425302005-09-25 16:46:57 -07001524 mem_alloced += kernel_map_range(phys_start, phys_end,
1525 PAGE_KERNEL);
David S. Miller56425302005-09-25 16:46:57 -07001526 }
1527
1528 printk("Allocated %ld bytes for kernel page tables.\n",
1529 mem_alloced);
1530
1531 kvmap_linear_patch[0] = 0x01000000; /* nop */
1532 flushi(&kvmap_linear_patch[0]);
1533
1534 __flush_tlb_all();
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001535#endif
David S. Miller56425302005-09-25 16:46:57 -07001536}
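/* Note (illustrative): 0x01000000 is the SPARC encoding of "nop"
 * (sethi %hi(0), %g0).  Patching it over kvmap_linear_patch[]
 * disables the fast linear-mapping TLB miss path, forcing misses
 * through the page tables built by kernel_map_range() above, which
 * is what lets kernel_map_pages() below work at page granularity.
 */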
1537
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001538#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller56425302005-09-25 16:46:57 -07001539void kernel_map_pages(struct page *page, int numpages, int enable)
1540{
1541 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1542 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1543
1544 kernel_map_range(phys_start, phys_end,
1545 (enable ? PAGE_KERNEL : __pgprot(0)));
1546
David S. Miller74bf4312006-01-31 18:29:18 -08001547 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1548 PAGE_OFFSET + phys_end);
1549
David S. Miller56425302005-09-25 16:46:57 -07001550	/* Ideally we would IPI and flush all TLBs, but that can
1551	 * deadlock here, so only the current cpu's TLB is flushed.
1552	 */
1553 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1554 PAGE_OFFSET + phys_end);
1555}
1556#endif
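/* Usage sketch (illustrative only): with CONFIG_DEBUG_PAGEALLOC the
 * page allocator invokes this hook on every allocation and free,
 * roughly:
 *
 *	kernel_map_pages(page, 1 << order, 1);	 alloc: map
 *	kernel_map_pages(page, 1 << order, 0);	 free:  unmap
 *
 * so a use-after-free through the linear mapping faults immediately
 * instead of silently corrupting freed memory.
 */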
1557
David S. Miller10147572005-09-28 21:46:43 -07001558unsigned long __init find_ecache_flush_span(unsigned long size)
1559{
David S. Miller13edad72005-09-29 17:58:26 -07001560 int i;
David S. Miller10147572005-09-28 21:46:43 -07001561
David S. Miller13edad72005-09-29 17:58:26 -07001562 for (i = 0; i < pavail_ents; i++) {
1563 if (pavail[i].reg_size >= size)
1564 return pavail[i].phys_addr;
David S. Miller10147572005-09-28 21:46:43 -07001565 }
1566
1567 return ~0UL;
1568}
1569
David S. Millerb2d43832013-09-20 21:50:41 -07001570unsigned long PAGE_OFFSET;
1571EXPORT_SYMBOL(PAGE_OFFSET);
1572
1573static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits)
1574{
1575 unsigned long final_shift;
1576 unsigned int val = *insn;
1577 unsigned int cnt;
1578
1579 /* We are patching in ilog2(max_supported_phys_address), and
1580 * we are doing so in a manner similar to a relocation addend.
1581 * That is, we are adding the shift value to whatever value
1582 * is in the shift instruction count field already.
1583 */
1584 cnt = (val & 0x3f);
1585 val &= ~0x3f;
1586
1587 /* If we are trying to shift >= 64 bits, clear the destination
1588 * register. This can happen when phys_bits ends up being equal
1589 * to MAX_PHYS_ADDRESS_BITS.
1590 */
1591 final_shift = (cnt + (64 - phys_bits));
1592 if (final_shift >= 64) {
1593 unsigned int rd = (val >> 25) & 0x1f;
1594
1595 val = 0x80100000 | (rd << 25);
1596 } else {
1597 val |= final_shift;
1598 }
1599 *insn = val;
1600
1601 __asm__ __volatile__("flush %0"
1602 : /* no outputs */
1603 : "r" (insn));
1604}
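/* Worked example (illustrative only): with max_phys_bits == 43, an
 * instruction assembled with cnt == 0 is patched to shift by
 * 64 - 43 = 21.  When max_phys_bits == MAX_PHYS_ADDRESS_BITS the
 * final shift would be 64, which cannot be encoded, so the insn is
 * rewritten as "clr %rd" (0x80100000 | rd << 25, an "or %g0, %g0"
 * form) -- the fully shifted-out result must be zero anyway.
 */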
1605
1606static void __init page_offset_shift_patch(unsigned long phys_bits)
1607{
1608 extern unsigned int __page_offset_shift_patch;
1609 extern unsigned int __page_offset_shift_patch_end;
1610 unsigned int *p;
1611
1612 p = &__page_offset_shift_patch;
1613 while (p < &__page_offset_shift_patch_end) {
1614 unsigned int *insn = (unsigned int *)(unsigned long)*p;
1615
1616 page_offset_shift_patch_one(insn, phys_bits);
1617
1618 p++;
1619 }
1620}
1621
1622static void __init setup_page_offset(void)
1623{
1624 unsigned long max_phys_bits = 40;
1625
1626 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1627 max_phys_bits = 42;
1628 } else if (tlb_type == hypervisor) {
1629 switch (sun4v_chip_type) {
1630 case SUN4V_CHIP_NIAGARA1:
1631 case SUN4V_CHIP_NIAGARA2:
1632 max_phys_bits = 39;
1633 break;
1634 case SUN4V_CHIP_NIAGARA3:
1635 max_phys_bits = 43;
1636 break;
1637 case SUN4V_CHIP_NIAGARA4:
1638 case SUN4V_CHIP_NIAGARA5:
1639 case SUN4V_CHIP_SPARC64X:
1640 default:
1641 max_phys_bits = 47;
1642 break;
1643 }
1644 }
1645
1646 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
1647 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
1648 max_phys_bits);
1649 prom_halt();
1650 }
1651
1652 PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits);
1653
1654 pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
1655 PAGE_OFFSET, max_phys_bits);
1656
1657 page_offset_shift_patch(max_phys_bits);
1658}
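/* Worked example (illustrative; assumes PAGE_OFFSET_BY_BITS(n)
 * expands to -(1UL << n)): on a Niagara-4 class cpu max_phys_bits
 * is 47, so:
 *
 *	PAGE_OFFSET = -(1UL << 47) = 0xffff800000000000
 *
 * and the pr_info() above would print
 * "PAGE_OFFSET is 0xffff800000000000 (max_phys_bits == 47)".
 */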
1659
David S. Miller517af332006-02-01 15:55:21 -08001660static void __init tsb_phys_patch(void)
1661{
David S. Millerd257d5d2006-02-06 23:44:37 -08001662 struct tsb_ldquad_phys_patch_entry *pquad;
David S. Miller517af332006-02-01 15:55:21 -08001663 struct tsb_phys_patch_entry *p;
1664
David S. Millerd257d5d2006-02-06 23:44:37 -08001665 pquad = &__tsb_ldquad_phys_patch;
1666 while (pquad < &__tsb_ldquad_phys_patch_end) {
1667 unsigned long addr = pquad->addr;
1668
1669 if (tlb_type == hypervisor)
1670 *(unsigned int *) addr = pquad->sun4v_insn;
1671 else
1672 *(unsigned int *) addr = pquad->sun4u_insn;
1673 wmb();
1674 __asm__ __volatile__("flush %0"
1675 : /* no outputs */
1676 : "r" (addr));
1677
1678 pquad++;
1679 }
1680
David S. Miller517af332006-02-01 15:55:21 -08001681 p = &__tsb_phys_patch;
1682 while (p < &__tsb_phys_patch_end) {
1683 unsigned long addr = p->addr;
1684
1685 *(unsigned int *) addr = p->insn;
1686 wmb();
1687 __asm__ __volatile__("flush %0"
1688 : /* no outputs */
1689 : "r" (addr));
1690
1691 p++;
1692 }
1693}
1694
David S. Miller490384e2006-02-11 14:41:18 -08001695/* Don't mark as init; we give this memory to the Hypervisor. */
David S. Millerd1acb422007-03-16 17:20:28 -07001696#ifndef CONFIG_DEBUG_PAGEALLOC
1697#define NUM_KTSB_DESCR 2
1698#else
1699#define NUM_KTSB_DESCR 1
1700#endif
1701static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
David S. Miller490384e2006-02-11 14:41:18 -08001702extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
1703
David S. Miller9076d0e2011-08-05 00:53:57 -07001704static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
1705{
1706 pa >>= KTSB_PHYS_SHIFT;
1707
1708 while (start < end) {
1709 unsigned int *ia = (unsigned int *)(unsigned long)*start;
1710
1711 ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
1712 __asm__ __volatile__("flush %0" : : "r" (ia));
1713
1714 ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
1715 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
1716
1717 start++;
1718 }
1719}
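/* Sketch of a patch site (illustrative only): each table entry
 * points at a sethi/or pair materializing the TSB physical address,
 * pre-shifted right by KTSB_PHYS_SHIFT so it fits in 32 bits:
 *
 *	sethi	%hi(pa), %reg		imm22 <-- pa >> 10   (ia[0])
 *	or	%reg, %lo(pa), %reg	imm10 <-- pa & 0x3ff (ia[1])
 *
 * The flush after each store keeps the instruction cache coherent
 * with the just-rewritten instruction.
 */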
1720
1721static void ktsb_phys_patch(void)
1722{
1723 extern unsigned int __swapper_tsb_phys_patch;
1724 extern unsigned int __swapper_tsb_phys_patch_end;
David S. Miller9076d0e2011-08-05 00:53:57 -07001725 unsigned long ktsb_pa;
1726
1727 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1728 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
1729 &__swapper_tsb_phys_patch_end, ktsb_pa);
1730#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Miller0785a8e2011-08-06 05:26:35 -07001731 {
1732 extern unsigned int __swapper_4m_tsb_phys_patch;
1733 extern unsigned int __swapper_4m_tsb_phys_patch_end;
David S. Miller9076d0e2011-08-05 00:53:57 -07001734 ktsb_pa = (kern_base +
1735 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1736 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
1737 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
David S. Miller0785a8e2011-08-06 05:26:35 -07001738 }
David S. Miller9076d0e2011-08-05 00:53:57 -07001739#endif
1740}
1741
David S. Miller490384e2006-02-11 14:41:18 -08001742static void __init sun4v_ktsb_init(void)
1743{
1744 unsigned long ktsb_pa;
1745
David S. Millerd7744a02006-02-21 22:31:11 -08001746 /* First KTSB for PAGE_SIZE mappings. */
David S. Miller490384e2006-02-11 14:41:18 -08001747 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1748
1749 switch (PAGE_SIZE) {
1750 case 8 * 1024:
1751 default:
1752 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1753 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1754 break;
1755
1756 case 64 * 1024:
1757 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1758 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1759 break;
1760
1761 case 512 * 1024:
1762 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1763 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
1764 break;
1765
1766 case 4 * 1024 * 1024:
1767 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1768 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1769 break;
Joe Perches6cb79b32011-06-03 14:45:23 +00001770 }
David S. Miller490384e2006-02-11 14:41:18 -08001771
David S. Miller3f19a842006-02-17 12:03:20 -08001772 ktsb_descr[0].assoc = 1;
David S. Miller490384e2006-02-11 14:41:18 -08001773 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
1774 ktsb_descr[0].ctx_idx = 0;
1775 ktsb_descr[0].tsb_base = ktsb_pa;
1776 ktsb_descr[0].resv = 0;
1777
David S. Millerd1acb422007-03-16 17:20:28 -07001778#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Miller4f93d212012-09-06 18:13:58 -07001779 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
David S. Millerd7744a02006-02-21 22:31:11 -08001780 ktsb_pa = (kern_base +
1781 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1782
1783 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
David S. Millerc69ad0a2012-09-06 20:35:36 -07001784 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
1785 HV_PGSZ_MASK_256MB |
1786 HV_PGSZ_MASK_2GB |
1787 HV_PGSZ_MASK_16GB) &
1788 cpu_pgsz_mask);
David S. Millerd7744a02006-02-21 22:31:11 -08001789 ktsb_descr[1].assoc = 1;
1790 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
1791 ktsb_descr[1].ctx_idx = 0;
1792 ktsb_descr[1].tsb_base = ktsb_pa;
1793 ktsb_descr[1].resv = 0;
David S. Millerd1acb422007-03-16 17:20:28 -07001794#endif
David S. Miller490384e2006-02-11 14:41:18 -08001795}
1796
Paul Gortmaker2066aad2013-06-17 15:43:14 -04001797void sun4v_ktsb_register(void)
David S. Miller490384e2006-02-11 14:41:18 -08001798{
David S. Miller7db35f32007-05-29 02:22:14 -07001799 unsigned long pa, ret;
David S. Miller490384e2006-02-11 14:41:18 -08001800
1801 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1802
David S. Miller7db35f32007-05-29 02:22:14 -07001803 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
1804 if (ret != 0) {
1805 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
1806 "errors with %lx\n", pa, ret);
1807 prom_halt();
1808 }
David S. Miller490384e2006-02-11 14:41:18 -08001809}
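/* Note (illustrative): ktsb_descr[] is a kernel-image symbol mapped
 * at KERNBASE rather than in the PAGE_OFFSET linear mapping, so its
 * real address is computed by hand above instead of with __pa().
 * sun4v_mmu_tsb_ctx0() registers the descriptors for context 0; a
 * nonzero return is a hypervisor error code and is fatal at boot.
 */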
1810
David S. Millerc69ad0a2012-09-06 20:35:36 -07001811static void __init sun4u_linear_pte_xor_finalize(void)
1812{
1813#ifndef CONFIG_DEBUG_PAGEALLOC
1814 /* This is where we would add Panther support for
1815 * 32MB and 256MB pages.
1816 */
1817#endif
1818}
1819
1820static void __init sun4v_linear_pte_xor_finalize(void)
1821{
1822#ifndef CONFIG_DEBUG_PAGEALLOC
1823 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
1824 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07001825 PAGE_OFFSET;
David S. Millerc69ad0a2012-09-06 20:35:36 -07001826 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1827 _PAGE_P_4V | _PAGE_W_4V);
1828 } else {
1829 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
1830 }
1831
1832 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
1833 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07001834 PAGE_OFFSET;
David S. Millerc69ad0a2012-09-06 20:35:36 -07001835 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1836 _PAGE_P_4V | _PAGE_W_4V);
1837 } else {
1838 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
1839 }
1840
1841 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
1842 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07001843 PAGE_OFFSET;
David S. Millerc69ad0a2012-09-06 20:35:36 -07001844 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1845 _PAGE_P_4V | _PAGE_W_4V);
1846 } else {
1847 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
1848 }
1849#endif
1850}
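/* Usage sketch (illustrative only): the TLB miss handler forms a
 * linear-mapping TTE by XOR, e.g. for a 256MB slot:
 *
 *	tte = vaddr ^ kern_linear_pte_xor[1];
 *
 * Since each entry was built as (pte bits ^ PAGE_OFFSET), the
 * PAGE_OFFSET component of the virtual address cancels, leaving in
 * effect the physical address combined with the pte bits.  The
 * fallback assignments above (slot n = slot n-1) keep every index
 * valid when the cpu lacks a larger page size.
 */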
1851
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852/* paging_init() sets up the page tables */
1853
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854static unsigned long last_valid_pfn;
David S. Miller2b779332013-09-25 14:33:16 -07001855pgd_t swapper_pg_dir[PTRS_PER_PGD];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856
David S. Millerc4bce902006-02-11 21:57:54 -08001857static void sun4u_pgprot_init(void);
1858static void sun4v_pgprot_init(void);
1859
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860void __init paging_init(void)
1861{
David S. Miller919ee672008-04-23 05:40:25 -07001862 unsigned long end_pfn, shift, phys_base;
David S. Miller0836a0e2005-09-28 21:38:08 -07001863 unsigned long real_end, i;
Paul Gortmakeraa6f0792012-05-09 20:44:29 -04001864 int node;
David S. Miller0836a0e2005-09-28 21:38:08 -07001865
David S. Millerb2d43832013-09-20 21:50:41 -07001866 setup_page_offset();
1867
David S. Miller22adb352007-05-26 01:14:43 -07001868	/* These build-time checks make sure that the dcache_dirty_cpu()
1869 * page->flags usage will work.
1870 *
1871 * When a page gets marked as dcache-dirty, we store the
1872 * cpu number starting at bit 32 in the page->flags. Also,
1873 * functions like clear_dcache_dirty_cpu use the cpu mask
1874 * in 13-bit signed-immediate instruction fields.
1875 */
Christoph Lameter9223b4192008-04-28 02:12:48 -07001876
1877 /*
1878 * Page flags must not reach into upper 32 bits that are used
1879 * for the cpu number
1880 */
1881 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
1882
1883 /*
1884 * The bit fields placed in the high range must not reach below
1885 * the 32 bit boundary. Otherwise we cannot place the cpu field
1886 * at the 32 bit boundary.
1887 */
David S. Miller22adb352007-05-26 01:14:43 -07001888 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
Christoph Lameter9223b4192008-04-28 02:12:48 -07001889 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
1890
David S. Miller22adb352007-05-26 01:14:43 -07001891 BUILD_BUG_ON(NR_CPUS > 4096);
1892
David S. Miller0eef3312014-05-03 22:52:50 -07001893 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
David S. Miller481295f2006-02-07 21:51:08 -08001894 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1895
David S. Millerd7744a02006-02-21 22:31:11 -08001896 /* Invalidate both kernel TSBs. */
David S. Miller8b234272006-02-17 18:01:02 -08001897 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
David S. Millerd1acb422007-03-16 17:20:28 -07001898#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Millerd7744a02006-02-21 22:31:11 -08001899 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
David S. Millerd1acb422007-03-16 17:20:28 -07001900#endif
David S. Miller8b234272006-02-17 18:01:02 -08001901
David S. Millerc4bce902006-02-11 21:57:54 -08001902 if (tlb_type == hypervisor)
1903 sun4v_pgprot_init();
1904 else
1905 sun4u_pgprot_init();
1906
David S. Millerd257d5d2006-02-06 23:44:37 -08001907 if (tlb_type == cheetah_plus ||
David S. Miller9076d0e2011-08-05 00:53:57 -07001908 tlb_type == hypervisor) {
David S. Miller517af332006-02-01 15:55:21 -08001909 tsb_phys_patch();
David S. Miller9076d0e2011-08-05 00:53:57 -07001910 ktsb_phys_patch();
1911 }
David S. Miller517af332006-02-01 15:55:21 -08001912
David S. Millerc69ad0a2012-09-06 20:35:36 -07001913 if (tlb_type == hypervisor)
David S. Millerd257d5d2006-02-06 23:44:37 -08001914 sun4v_patch_tlb_handlers();
1915
David S. Millera94a1722008-05-11 21:04:48 -07001916 /* Find available physical memory...
1917 *
1918 * Read it twice in order to work around a bug in openfirmware.
1919 * The call to grab this table itself can cause openfirmware to
1920 * allocate memory, which in turn can take away some space from
1921 * the list of available memory. Reading it twice makes sure
1922 * we really do get the final value.
1923 */
1924 read_obp_translations();
1925 read_obp_memory("reg", &pall[0], &pall_ents);
1926 read_obp_memory("available", &pavail[0], &pavail_ents);
David S. Miller13edad72005-09-29 17:58:26 -07001927 read_obp_memory("available", &pavail[0], &pavail_ents);
David S. Miller0836a0e2005-09-28 21:38:08 -07001928
1929 phys_base = 0xffffffffffffffffUL;
David S. Miller3b2a7e22008-02-13 18:13:20 -08001930 for (i = 0; i < pavail_ents; i++) {
David S. Miller13edad72005-09-29 17:58:26 -07001931 phys_base = min(phys_base, pavail[i].phys_addr);
Yinghai Lu95f72d12010-07-12 14:36:09 +10001932 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
David S. Miller3b2a7e22008-02-13 18:13:20 -08001933 }
1934
Yinghai Lu95f72d12010-07-12 14:36:09 +10001935 memblock_reserve(kern_base, kern_size);
David S. Miller0836a0e2005-09-28 21:38:08 -07001936
David S. Miller4e82c9a2008-02-13 18:00:03 -08001937 find_ramdisk(phys_base);
1938
Yinghai Lu95f72d12010-07-12 14:36:09 +10001939 memblock_enforce_memory_limit(cmdline_memory_size);
David S. Miller25b0c652008-02-13 18:20:14 -08001940
Tejun Heo1aadc052011-12-08 10:22:08 -08001941 memblock_allow_resize();
Yinghai Lu95f72d12010-07-12 14:36:09 +10001942 memblock_dump_all();
David S. Miller3b2a7e22008-02-13 18:13:20 -08001943
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 set_bit(0, mmu_context_bmap);
1945
David S. Miller2bdb3cb2005-09-22 01:08:57 -07001946 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
1947
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 real_end = (unsigned long)_end;
David S. Miller0eef3312014-05-03 22:52:50 -07001949 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
David S. Miller64658742008-03-21 17:01:38 -07001950 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
1951 num_kernel_image_mappings);
David S. Miller2bdb3cb2005-09-22 01:08:57 -07001952
1953 /* Set kernel pgd to upper alias so physical page computations
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 * work.
1955 */
1956 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
1957
David S. Miller56425302005-09-25 16:46:57 -07001958 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
1960 /* Now can init the kernel/bad page tables. */
1961 pud_set(pud_offset(&swapper_pg_dir[0], 0),
David S. Miller56425302005-09-25 16:46:57 -07001962 swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
David S. Millerc9c10832005-10-12 12:22:46 -07001964 inherit_prom_mappings();
David S. Miller5085b4a2005-09-22 00:45:41 -07001965
David S. Miller8f3614532007-12-13 06:13:38 -08001966 init_kpte_bitmap();
1967
David S. Millera8b900d2006-01-31 18:33:37 -08001968 /* Ok, we can use our TLB miss and window trap handlers safely. */
1969 setup_tba();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
David S. Millerc9c10832005-10-12 12:22:46 -07001971 __flush_tlb_all();
David S. Miller9ad98c52005-10-05 15:12:00 -07001972
David S. Millerad072002008-02-13 19:21:51 -08001973 prom_build_devicetree();
David S. Millerb696fdc2009-05-26 22:37:25 -07001974 of_populate_present_mask();
David S. Millerb99c6eb2009-06-18 01:44:19 -07001975#ifndef CONFIG_SMP
1976 of_fill_in_cpu_data();
1977#endif
David S. Millerad072002008-02-13 19:21:51 -08001978
David S. Miller890db402009-04-01 03:13:15 -07001979 if (tlb_type == hypervisor) {
David S. Miller4a283332008-02-13 19:22:23 -08001980 sun4v_mdesc_init();
Stephen Rothwell6ac5c612009-06-15 03:06:18 -07001981 mdesc_populate_present_mask(cpu_all_mask);
David S. Millerb99c6eb2009-06-18 01:44:19 -07001982#ifndef CONFIG_SMP
1983 mdesc_fill_in_cpu_data(cpu_all_mask);
1984#endif
David S. Millerce33fdc2012-09-06 19:01:25 -07001985 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
David S. Millerc69ad0a2012-09-06 20:35:36 -07001986
1987 sun4v_linear_pte_xor_finalize();
1988
1989 sun4v_ktsb_init();
1990 sun4v_ktsb_register();
David S. Millerce33fdc2012-09-06 19:01:25 -07001991 } else {
1992 unsigned long impl, ver;
1993
1994 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
1995 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
1996
1997 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
1998 impl = ((ver >> 32) & 0xffff);
1999 if (impl == PANTHER_IMPL)
2000 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2001 HV_PGSZ_MASK_256MB);
David S. Millerc69ad0a2012-09-06 20:35:36 -07002002
2003 sun4u_linear_pte_xor_finalize();
David S. Miller890db402009-04-01 03:13:15 -07002004 }
David S. Miller4a283332008-02-13 19:22:23 -08002005
David S. Millerc69ad0a2012-09-06 20:35:36 -07002006 /* Flush the TLBs and the 4M TSB so that the updated linear
2007 * pte XOR settings are realized for all mappings.
2008 */
2009 __flush_tlb_all();
2010#ifndef CONFIG_DEBUG_PAGEALLOC
2011 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2012#endif
2013 __flush_tlb_all();
2014
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002015 /* Setup bootmem... */
David S. Miller919ee672008-04-23 05:40:25 -07002016 last_valid_pfn = end_pfn = bootmem_init(phys_base);
David S. Millerd1112012006-03-08 02:16:07 -08002017
David S. Miller5ed56f12012-04-26 20:50:34 -07002018	/* Once the OF device tree and MDESC have been set up, we know
2019 * the list of possible cpus. Therefore we can allocate the
2020 * IRQ stacks.
2021 */
2022 for_each_possible_cpu(i) {
Paul Gortmakeraa6f0792012-05-09 20:44:29 -04002023 node = cpu_to_node(i);
David S. Miller5ed56f12012-04-26 20:50:34 -07002024
2025 softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2026 THREAD_SIZE,
2027 THREAD_SIZE, 0);
2028 hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2029 THREAD_SIZE,
2030 THREAD_SIZE, 0);
2031 }
2032
David S. Miller56425302005-09-25 16:46:57 -07002033 kernel_physical_mapping_init();
David S. Miller56425302005-09-25 16:46:57 -07002034
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 {
David S. Miller919ee672008-04-23 05:40:25 -07002036 unsigned long max_zone_pfns[MAX_NR_ZONES];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037
David S. Miller919ee672008-04-23 05:40:25 -07002038 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039
David S. Miller919ee672008-04-23 05:40:25 -07002040 max_zone_pfns[ZONE_NORMAL] = end_pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
David S. Miller919ee672008-04-23 05:40:25 -07002042 free_area_init_nodes(max_zone_pfns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 }
2044
David S. Miller3c62a2d2008-02-17 23:22:50 -08002045 printk("Booting Linux...\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046}
2047
Greg Kroah-Hartman7c9503b2012-12-21 14:03:26 -08002048int page_in_phys_avail(unsigned long paddr)
David S. Miller919ee672008-04-23 05:40:25 -07002049{
2050 int i;
2051
2052 paddr &= PAGE_MASK;
2053
2054 for (i = 0; i < pavail_ents; i++) {
2055 unsigned long start, end;
2056
2057 start = pavail[i].phys_addr;
2058 end = start + pavail[i].reg_size;
2059
2060 if (paddr >= start && paddr < end)
2061 return 1;
2062 }
2063 if (paddr >= kern_base && paddr < (kern_base + kern_size))
2064 return 1;
2065#ifdef CONFIG_BLK_DEV_INITRD
2066 if (paddr >= __pa(initrd_start) &&
2067 paddr < __pa(PAGE_ALIGN(initrd_end)))
2068 return 1;
2069#endif
2070
2071 return 0;
2072}
2073
2074static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
2075static int pavail_rescan_ents __initdata;
2076
2077/* Certain OBP calls, such as fetching "available" properties, can
2078 * claim physical memory. So, along with initializing the valid
2079 * address bitmap, we refetch the physical available memory
2080 * list here and make sure it provides at least as much
2081 * memory as 'pavail' does.
2082 */
David S. Millerd8ed1d42009-08-25 16:47:46 -07002083static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 int i;
2086
David S. Miller13edad72005-09-29 17:58:26 -07002087 read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088
David S. Miller13edad72005-09-29 17:58:26 -07002089 for (i = 0; i < pavail_ents; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 unsigned long old_start, old_end;
2091
David S. Miller13edad72005-09-29 17:58:26 -07002092 old_start = pavail[i].phys_addr;
David S. Miller919ee672008-04-23 05:40:25 -07002093 old_end = old_start + pavail[i].reg_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 while (old_start < old_end) {
2095 int n;
2096
David S. Millerc2a5a462006-06-22 00:01:56 -07002097 for (n = 0; n < pavail_rescan_ents; n++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 unsigned long new_start, new_end;
2099
David S. Miller13edad72005-09-29 17:58:26 -07002100 new_start = pavail_rescan[n].phys_addr;
2101 new_end = new_start +
2102 pavail_rescan[n].reg_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103
2104 if (new_start <= old_start &&
2105 new_end >= (old_start + PAGE_SIZE)) {
David S. Miller0eef3312014-05-03 22:52:50 -07002106 set_bit(old_start >> ILOG2_4MB, bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 goto do_next_page;
2108 }
2109 }
David S. Miller919ee672008-04-23 05:40:25 -07002110
2111 prom_printf("mem_init: Lost memory in pavail\n");
2112 prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
2113 pavail[i].phys_addr,
2114 pavail[i].reg_size);
2115 prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
2116 pavail_rescan[i].phys_addr,
2117 pavail_rescan[i].reg_size);
2118 prom_printf("mem_init: Cannot continue, aborting.\n");
2119 prom_halt();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120
2121 do_next_page:
2122 old_start += PAGE_SIZE;
2123 }
2124 }
2125}
2126
David S. Millerd8ed1d42009-08-25 16:47:46 -07002127static void __init patch_tlb_miss_handler_bitmap(void)
2128{
2129 extern unsigned int valid_addr_bitmap_insn[];
2130 extern unsigned int valid_addr_bitmap_patch[];
2131
2132 valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
2133 mb();
2134 valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
2135 flushi(&valid_addr_bitmap_insn[0]);
2136}
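/* Ordering note (illustrative): the second instruction of the patch
 * site is rewritten before the first, with mb() in between, so a
 * cpu racing through the sequence never executes the new first
 * instruction paired with the stale second one.
 */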
2137
Yinghai Lu961f8fa2012-11-16 19:39:21 -08002138static void __init register_page_bootmem_info(void)
2139{
2140#ifdef CONFIG_NEED_MULTIPLE_NODES
2141 int i;
2142
2143 for_each_online_node(i)
2144 if (NODE_DATA(i)->node_spanned_pages)
2145 register_page_bootmem_info_node(NODE_DATA(i));
2146#endif
2147}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148void __init mem_init(void)
2149{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 unsigned long addr, last;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151
2152 addr = PAGE_OFFSET + kern_base;
2153 last = PAGE_ALIGN(kern_size) + addr;
2154 while (addr < last) {
David S. Miller0eef3312014-05-03 22:52:50 -07002155 set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 addr += PAGE_SIZE;
2157 }
2158
David S. Millerd8ed1d42009-08-25 16:47:46 -07002159 setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
2160 patch_tlb_miss_handler_bitmap();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2163
Yinghai Lu961f8fa2012-11-16 19:39:21 -08002164 register_page_bootmem_info();
Jiang Liu0c988532013-07-03 15:03:24 -07002165 free_all_bootmem();
David S. Miller919ee672008-04-23 05:40:25 -07002166
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 /*
2168 * Set up the zero page, mark it reserved, so that page count
2169 * is not manipulated when freeing the page from user ptes.
2170 */
2171 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2172 if (mem_map_zero == NULL) {
2173 prom_printf("paging_init: Cannot alloc zero page.\n");
2174 prom_halt();
2175 }
Jiang Liu70affe42013-05-07 16:18:08 -07002176 mark_page_reserved(mem_map_zero);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177
Jiang Liudceccbe2013-07-03 15:04:14 -07002178 mem_init_print_info(NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179
2180 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2181 cheetah_ecache_flush_init();
2182}
2183
David S. Miller898cf0e2005-09-23 11:59:44 -07002184void free_initmem(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185{
2186 unsigned long addr, initend;
David S. Millerf2b60792008-08-14 01:45:41 -07002187 int do_free = 1;
2188
2189 /* If the physical memory maps were trimmed by kernel command
2190 * line options, don't even try freeing this initmem stuff up.
2191 * The kernel image could have been in the trimmed out region
2192 * and if so the freeing below will free invalid page structs.
2193 */
2194 if (cmdline_memory_size)
2195 do_free = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196
2197 /*
2198	 * The init section is aligned to 8k in vmlinux.lds.  Page-align it here for page sizes larger than 8k.
2199 */
2200 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2201 initend = (unsigned long)(__init_end) & PAGE_MASK;
2202 for (; addr < initend; addr += PAGE_SIZE) {
2203 unsigned long page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204
2205 page = (addr +
2206 ((unsigned long) __va(kern_base)) -
2207 ((unsigned long) KERNBASE));
Randy Dunlapc9cf5522006-06-27 02:53:52 -07002208 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
Jiang Liu70affe42013-05-07 16:18:08 -07002210 if (do_free)
2211 free_reserved_page(virt_to_page(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 }
2213}
2214
2215#ifdef CONFIG_BLK_DEV_INITRD
2216void free_initrd_mem(unsigned long start, unsigned long end)
2217{
Jiang Liudceccbe2013-07-03 15:04:14 -07002218 free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
2219 "initrd");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220}
2221#endif
David S. Millerc4bce902006-02-11 21:57:54 -08002222
David S. Millerc4bce902006-02-11 21:57:54 -08002223#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2224#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2225#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2226#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2227#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2228#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2229
2230pgprot_t PAGE_KERNEL __read_mostly;
2231EXPORT_SYMBOL(PAGE_KERNEL);
2232
2233pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2234pgprot_t PAGE_COPY __read_mostly;
David S. Miller0f159522006-02-18 12:43:16 -08002235
2236pgprot_t PAGE_SHARED __read_mostly;
2237EXPORT_SYMBOL(PAGE_SHARED);
2238
David S. Millerc4bce902006-02-11 21:57:54 -08002239unsigned long pg_iobits __read_mostly;
2240
2241unsigned long _PAGE_IE __read_mostly;
David S. Miller987c74f2006-06-25 01:34:43 -07002242EXPORT_SYMBOL(_PAGE_IE);
David S. Millerb2bef442006-02-23 01:55:55 -08002243
David S. Millerc4bce902006-02-11 21:57:54 -08002244unsigned long _PAGE_E __read_mostly;
David S. Millerb2bef442006-02-23 01:55:55 -08002245EXPORT_SYMBOL(_PAGE_E);
2246
David S. Millerc4bce902006-02-11 21:57:54 -08002247unsigned long _PAGE_CACHE __read_mostly;
David S. Millerb2bef442006-02-23 01:55:55 -08002248EXPORT_SYMBOL(_PAGE_CACHE);
David S. Millerc4bce902006-02-11 21:57:54 -08002249
David Miller46644c22007-10-16 01:24:16 -07002250#ifdef CONFIG_SPARSEMEM_VMEMMAP
David Miller46644c22007-10-16 01:24:16 -07002251unsigned long vmemmap_table[VMEMMAP_SIZE];
2252
David S. Miller2856cc22012-08-15 00:37:29 -07002253static long __meminitdata addr_start, addr_end;
2254static int __meminitdata node_start;
2255
Johannes Weiner0aad8182013-04-29 15:07:50 -07002256int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2257 int node)
David Miller46644c22007-10-16 01:24:16 -07002258{
David Miller46644c22007-10-16 01:24:16 -07002259 unsigned long phys_start = (vstart - VMEMMAP_BASE);
2260 unsigned long phys_end = (vend - VMEMMAP_BASE);
2261 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
2262 unsigned long end = VMEMMAP_ALIGN(phys_end);
2263 unsigned long pte_base;
2264
2265 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2266 _PAGE_CP_4U | _PAGE_CV_4U |
2267 _PAGE_P_4U | _PAGE_W_4U);
2268 if (tlb_type == hypervisor)
2269 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2270 _PAGE_CP_4V | _PAGE_CV_4V |
2271 _PAGE_P_4V | _PAGE_W_4V);
2272
2273 for (; addr < end; addr += VMEMMAP_CHUNK) {
2274 unsigned long *vmem_pp =
2275 vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
2276 void *block;
2277
2278 if (!(*vmem_pp & _PAGE_VALID)) {
David S. Miller0eef3312014-05-03 22:52:50 -07002279 block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
David Miller46644c22007-10-16 01:24:16 -07002280 if (!block)
2281 return -ENOMEM;
2282
2283 *vmem_pp = pte_base | __pa(block);
2284
David S. Miller2856cc22012-08-15 00:37:29 -07002285 /* check to see if we have contiguous blocks */
2286 if (addr_end != addr || node_start != node) {
2287 if (addr_start)
2288 printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
2289 addr_start, addr_end-1, node_start);
2290 addr_start = addr;
2291 node_start = node;
2292 }
2293 addr_end = addr + VMEMMAP_CHUNK;
David Miller46644c22007-10-16 01:24:16 -07002294 }
2295 }
2296 return 0;
2297}
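/* Worked sketch (illustrative; assumes VMEMMAP_CHUNK is the 4MB
 * granule used above): each 4MB-aligned chunk of the vmemmap gets
 * one vmemmap_table[] entry pointing at a freshly allocated block:
 *
 *	*vmem_pp = pte_base | __pa(block);
 *
 * The intent is that the vmemmap TLB miss path can index this table
 * by chunk and load the TTE directly, with no page table walk.
 */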
David S. Miller2856cc22012-08-15 00:37:29 -07002298
2299void __meminit vmemmap_populate_print_last(void)
2300{
2301 if (addr_start) {
2302 printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
2303 addr_start, addr_end-1, node_start);
2304 addr_start = 0;
2305 addr_end = 0;
2306 node_start = 0;
2307 }
2308}
Yasuaki Ishimatsu46723bf2013-02-22 16:33:00 -08002309
Johannes Weiner0aad8182013-04-29 15:07:50 -07002310void vmemmap_free(unsigned long start, unsigned long end)
Tang Chen01975182013-02-22 16:33:08 -08002311{
2312}
2313
David Miller46644c22007-10-16 01:24:16 -07002314#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2315
David S. Millerc4bce902006-02-11 21:57:54 -08002316static void prot_init_common(unsigned long page_none,
2317 unsigned long page_shared,
2318 unsigned long page_copy,
2319 unsigned long page_readonly,
2320 unsigned long page_exec_bit)
2321{
2322 PAGE_COPY = __pgprot(page_copy);
David S. Miller0f159522006-02-18 12:43:16 -08002323 PAGE_SHARED = __pgprot(page_shared);
David S. Millerc4bce902006-02-11 21:57:54 -08002324
2325 protection_map[0x0] = __pgprot(page_none);
2326 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2327 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2328 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2329 protection_map[0x4] = __pgprot(page_readonly);
2330 protection_map[0x5] = __pgprot(page_readonly);
2331 protection_map[0x6] = __pgprot(page_copy);
2332 protection_map[0x7] = __pgprot(page_copy);
2333 protection_map[0x8] = __pgprot(page_none);
2334 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2335 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2336 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2337 protection_map[0xc] = __pgprot(page_readonly);
2338 protection_map[0xd] = __pgprot(page_readonly);
2339 protection_map[0xe] = __pgprot(page_shared);
2340 protection_map[0xf] = __pgprot(page_shared);
2341}
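/* Indexing sketch (illustrative only): protection_map[] is indexed
 * by the low four vm_flags bits (bit 0 VM_READ, bit 1 VM_WRITE,
 * bit 2 VM_EXEC, bit 3 VM_SHARED).  So index 0x3, a private
 * read+write mapping, gets page_copy with the exec bit stripped
 * (copy-on-write), while 0xb, shared read+write, gets page_shared
 * and is genuinely writable.
 */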
2342
2343static void __init sun4u_pgprot_init(void)
2344{
2345 unsigned long page_none, page_shared, page_copy, page_readonly;
2346 unsigned long page_exec_bit;
David S. Miller4f93d212012-09-06 18:13:58 -07002347 int i;
David S. Millerc4bce902006-02-11 21:57:54 -08002348
2349 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2350 _PAGE_CACHE_4U | _PAGE_P_4U |
2351 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2352 _PAGE_EXEC_4U);
2353 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2354 _PAGE_CACHE_4U | _PAGE_P_4U |
2355 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2356 _PAGE_EXEC_4U | _PAGE_L_4U);
David S. Millerc4bce902006-02-11 21:57:54 -08002357
2358 _PAGE_IE = _PAGE_IE_4U;
2359 _PAGE_E = _PAGE_E_4U;
2360 _PAGE_CACHE = _PAGE_CACHE_4U;
2361
2362 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2363 __ACCESS_BITS_4U | _PAGE_E_4U);
2364
David S. Millerd1acb422007-03-16 17:20:28 -07002365#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller922631b2013-09-18 12:00:00 -07002366 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002367#else
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002368 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
David S. Miller922631b2013-09-18 12:00:00 -07002369 PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002370#endif
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002371 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2372 _PAGE_P_4U | _PAGE_W_4U);
2373
David S. Miller4f93d212012-09-06 18:13:58 -07002374 for (i = 1; i < 4; i++)
2375 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
David S. Millerc4bce902006-02-11 21:57:54 -08002376
David S. Millerc4bce902006-02-11 21:57:54 -08002377 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2378 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2379 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2380
2381
2382 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2383 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2384 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2385 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2386 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2387 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2388 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2389
2390 page_exec_bit = _PAGE_EXEC_4U;
2391
2392 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2393 page_exec_bit);
2394}
2395
2396static void __init sun4v_pgprot_init(void)
2397{
2398 unsigned long page_none, page_shared, page_copy, page_readonly;
2399 unsigned long page_exec_bit;
David S. Miller4f93d212012-09-06 18:13:58 -07002400 int i;
David S. Millerc4bce902006-02-11 21:57:54 -08002401
2402 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2403 _PAGE_CACHE_4V | _PAGE_P_4V |
2404 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2405 _PAGE_EXEC_4V);
2406 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
David S. Millerc4bce902006-02-11 21:57:54 -08002407
2408 _PAGE_IE = _PAGE_IE_4V;
2409 _PAGE_E = _PAGE_E_4V;
2410 _PAGE_CACHE = _PAGE_CACHE_4V;
2411
David S. Millerd1acb422007-03-16 17:20:28 -07002412#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller922631b2013-09-18 12:00:00 -07002413 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002414#else
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002415 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07002416 PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002417#endif
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002418 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2419 _PAGE_P_4V | _PAGE_W_4V);
2420
David S. Millerc69ad0a2012-09-06 20:35:36 -07002421 for (i = 1; i < 4; i++)
2422 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
David S. Miller4f93d212012-09-06 18:13:58 -07002423
David S. Millerc4bce902006-02-11 21:57:54 -08002424 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2425 __ACCESS_BITS_4V | _PAGE_E_4V);
2426
David S. Millerc4bce902006-02-11 21:57:54 -08002427 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2428 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2429 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2430 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2431
2432 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
2433 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2434 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2435 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2436 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2437 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2438 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2439
2440 page_exec_bit = _PAGE_EXEC_4V;
2441
2442 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2443 page_exec_bit);
2444}
2445
2446unsigned long pte_sz_bits(unsigned long sz)
2447{
2448 if (tlb_type == hypervisor) {
2449 switch (sz) {
2450 case 8 * 1024:
2451 default:
2452 return _PAGE_SZ8K_4V;
2453 case 64 * 1024:
2454 return _PAGE_SZ64K_4V;
2455 case 512 * 1024:
2456 return _PAGE_SZ512K_4V;
2457 case 4 * 1024 * 1024:
2458 return _PAGE_SZ4MB_4V;
Joe Perches6cb79b32011-06-03 14:45:23 +00002459 }
David S. Millerc4bce902006-02-11 21:57:54 -08002460 } else {
2461 switch (sz) {
2462 case 8 * 1024:
2463 default:
2464 return _PAGE_SZ8K_4U;
2465 case 64 * 1024:
2466 return _PAGE_SZ64K_4U;
2467 case 512 * 1024:
2468 return _PAGE_SZ512K_4U;
2469 case 4 * 1024 * 1024:
2470 return _PAGE_SZ4MB_4U;
Joe Perches6cb79b32011-06-03 14:45:23 +00002471 }
David S. Millerc4bce902006-02-11 21:57:54 -08002472 }
2473}
2474
2475pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2476{
2477 pte_t pte;
David S. Millercf627152006-02-12 21:10:07 -08002478
2479 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
David S. Millerc4bce902006-02-11 21:57:54 -08002480 pte_val(pte) |= (((unsigned long)space) << 32);
2481 pte_val(pte) |= pte_sz_bits(page_size);
David S. Millercf627152006-02-12 21:10:07 -08002482
David S. Millerc4bce902006-02-11 21:57:54 -08002483 return pte;
2484}
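/* Usage sketch (illustrative only; "bus" is a made-up space cookie):
 *
 *	pte_t pte = mk_pte_io(paddr, PAGE_KERNEL, bus, 8 * 1024);
 *
 * builds a non-cacheable I/O pte: pgprot_noncached() is applied
 * internally, the space cookie lands in bits 32 and up, and the
 * final argument selects the pte size-field encoding.
 */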
2485
David S. Millerc4bce902006-02-11 21:57:54 -08002486static unsigned long kern_large_tte(unsigned long paddr)
2487{
2488 unsigned long val;
2489
2490 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2491 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2492 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2493 if (tlb_type == hypervisor)
2494 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2495 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
2496 _PAGE_EXEC_4V | _PAGE_W_4V);
2497
2498 return val | paddr;
2499}
2500
David S. Millerc4bce902006-02-11 21:57:54 -08002501/* If not locked, zap it. */
2502void __flush_tlb_all(void)
2503{
2504 unsigned long pstate;
2505 int i;
2506
2507 __asm__ __volatile__("flushw\n\t"
2508 "rdpr %%pstate, %0\n\t"
2509 "wrpr %0, %1, %%pstate"
2510 : "=r" (pstate)
2511 : "i" (PSTATE_IE));
David S. Miller8f3614532007-12-13 06:13:38 -08002512 if (tlb_type == hypervisor) {
2513 sun4v_mmu_demap_all();
2514 } else if (tlb_type == spitfire) {
David S. Millerc4bce902006-02-11 21:57:54 -08002515 for (i = 0; i < 64; i++) {
2516 /* Spitfire Errata #32 workaround */
2517 /* NOTE: Always runs on spitfire, so no
2518 * cheetah+ page size encodings.
2519 */
2520 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2521 "flush %%g6"
2522 : /* No outputs */
2523 : "r" (0),
2524 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2525
2526 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2527 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2528 "membar #Sync"
2529 : /* no outputs */
2530 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2531 spitfire_put_dtlb_data(i, 0x0UL);
2532 }
2533
2534 /* Spitfire Errata #32 workaround */
2535 /* NOTE: Always runs on spitfire, so no
2536 * cheetah+ page size encodings.
2537 */
2538 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2539 "flush %%g6"
2540 : /* No outputs */
2541 : "r" (0),
2542 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2543
2544 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2545 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2546 "membar #Sync"
2547 : /* no outputs */
2548 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2549 spitfire_put_itlb_data(i, 0x0UL);
2550 }
2551 }
2552 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2553 cheetah_flush_dtlb_all();
2554 cheetah_flush_itlb_all();
2555 }
2556 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2557 : : "r" (pstate));
2558}
David Millerc460bec2012-10-08 16:34:22 -07002559
David Millerc460bec2012-10-08 16:34:22 -07002560pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2561 unsigned long address)
2562{
David S. Miller37b3a8f2013-09-25 13:48:49 -07002563 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2564 __GFP_REPEAT | __GFP_ZERO);
2565 pte_t *pte = NULL;
David Millerc460bec2012-10-08 16:34:22 -07002566
David Millerc460bec2012-10-08 16:34:22 -07002567 if (page)
2568 pte = (pte_t *) page_address(page);
2569
2570 return pte;
2571}
2572
2573pgtable_t pte_alloc_one(struct mm_struct *mm,
2574 unsigned long address)
2575{
David S. Miller37b3a8f2013-09-25 13:48:49 -07002576 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2577 __GFP_REPEAT | __GFP_ZERO);
Kirill A. Shutemov1ae9ae52013-11-14 14:31:42 -08002578 if (!page)
2579 return NULL;
2580 if (!pgtable_page_ctor(page)) {
2581 free_hot_cold_page(page, 0);
2582 return NULL;
David Millerc460bec2012-10-08 16:34:22 -07002583 }
Kirill A. Shutemov1ae9ae52013-11-14 14:31:42 -08002584 return (pte_t *) page_address(page);
David Millerc460bec2012-10-08 16:34:22 -07002585}
2586
2587void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2588{
David S. Miller37b3a8f2013-09-25 13:48:49 -07002589 free_page((unsigned long)pte);
David Millerc460bec2012-10-08 16:34:22 -07002590}
2591
2592static void __pte_free(pgtable_t pte)
2593{
2594 struct page *page = virt_to_page(pte);
David S. Miller37b3a8f2013-09-25 13:48:49 -07002595
2596 pgtable_page_dtor(page);
2597 __free_page(page);
David Millerc460bec2012-10-08 16:34:22 -07002598}
2599
2600void pte_free(struct mm_struct *mm, pgtable_t pte)
2601{
2602 __pte_free(pte);
2603}
2604
2605void pgtable_free(void *table, bool is_page)
2606{
2607 if (is_page)
2608 __pte_free(table);
2609 else
2610 kmem_cache_free(pgtable_cache, table);
2611}
David Miller9e695d22012-10-08 16:34:29 -07002612
2613#ifdef CONFIG_TRANSPARENT_HUGEPAGE
David Miller9e695d22012-10-08 16:34:29 -07002614void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2615 pmd_t *pmd)
2616{
2617 unsigned long pte, flags;
2618 struct mm_struct *mm;
2619 pmd_t entry = *pmd;
David Miller9e695d22012-10-08 16:34:29 -07002620
2621 if (!pmd_large(entry) || !pmd_young(entry))
2622 return;
2623
David S. Millera7b94032013-09-26 13:45:15 -07002624 pte = pmd_val(entry);
David Miller9e695d22012-10-08 16:34:29 -07002625
David S. Miller18f38132014-08-04 16:34:01 -07002626 /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
2627 if (!(pte & _PAGE_VALID))
2628 return;
2629
David S. Miller37b3a8f2013-09-25 13:48:49 -07002630 /* We are fabricating 8MB pages using 4MB real hw pages. */
2631 pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
David Miller9e695d22012-10-08 16:34:29 -07002632
2633 mm = vma->vm_mm;
2634
2635 spin_lock_irqsave(&mm->context.lock, flags);
2636
2637 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
David S. Miller37b3a8f2013-09-25 13:48:49 -07002638 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
David Miller9e695d22012-10-08 16:34:29 -07002639 addr, pte);
2640
2641 spin_unlock_irqrestore(&mm->context.lock, flags);
2642}
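/* Note (illustrative): the (addr & (1UL << REAL_HPAGE_SHIFT)) OR
 * above selects which half of the software 8MB huge page faulted,
 * so the TSB entry inserted here describes the matching real 4MB
 * hardware TTE -- each 8MB PMD mapping is backed by two 4MB TLB
 * entries.
 */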
2643#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2644
2645#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2646static void context_reload(void *__data)
2647{
2648 struct mm_struct *mm = __data;
2649
2650 if (mm == current->mm)
2651 load_secondary_context(mm);
2652}
2653
David S. Miller0fbebed2013-02-19 22:34:10 -08002654void hugetlb_setup(struct pt_regs *regs)
David Miller9e695d22012-10-08 16:34:29 -07002655{
David S. Miller0fbebed2013-02-19 22:34:10 -08002656 struct mm_struct *mm = current->mm;
2657 struct tsb_config *tp;
David Miller9e695d22012-10-08 16:34:29 -07002658
David S. Miller0fbebed2013-02-19 22:34:10 -08002659 if (in_atomic() || !mm) {
2660 const struct exception_table_entry *entry;
David Miller9e695d22012-10-08 16:34:29 -07002661
David S. Miller0fbebed2013-02-19 22:34:10 -08002662 entry = search_exception_tables(regs->tpc);
2663 if (entry) {
2664 regs->tpc = entry->fixup;
2665 regs->tnpc = regs->tpc + 4;
2666 return;
2667 }
2668 pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2669 die_if_kernel("HugeTSB in atomic", regs);
2670 }
2671
2672 tp = &mm->context.tsb_block[MM_TSB_HUGE];
2673 if (likely(tp->tsb == NULL))
2674 tsb_grow(mm, MM_TSB_HUGE, 0);
2675
David Miller9e695d22012-10-08 16:34:29 -07002676 tsb_context_switch(mm);
2677 smp_tsb_sync(mm);
2678
2679 /* On UltraSPARC-III+ and later, configure the second half of
2680 * the Data-TLB for huge pages.
2681 */
2682 if (tlb_type == cheetah_plus) {
2683 unsigned long ctx;
2684
2685 spin_lock(&ctx_alloc_lock);
2686 ctx = mm->context.sparc64_ctx_val;
2687 ctx &= ~CTX_PGSZ_MASK;
2688 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
2689 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
2690
2691 if (ctx != mm->context.sparc64_ctx_val) {
2692 /* When changing the page size fields, we
2693 * must perform a context flush so that no
2694 * stale entries match. This flush must
2695 * occur with the original context register
2696 * settings.
2697 */
2698 do_flush_tlb_mm(mm);
2699
2700 /* Reload the context register of all processors
2701 * also executing in this address space.
2702 */
2703 mm->context.sparc64_ctx_val = ctx;
2704 on_each_cpu(context_reload, mm, 0);
2705 }
2706 spin_unlock(&ctx_alloc_lock);
2707 }
2708}
2709#endif