/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/ioport.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[4] __read_mostly;
static unsigned long page_cache4v_flag;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *	0	==>	4MB
 *	1	==>	256MB
 *	2	==>	2GB
 *	3	==>	16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */
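
/* Indexing sketch (illustrative only, names are placeholders): with
 * the layout described above, the two selector bits for physical
 * address 'paddr' sit at bit offset bit = 2 * (paddr >> 28) in the
 * bitmap, since each 256MB granule is 1UL << 28 bytes:
 *
 *	sel = (bitmap[bit / 64] >> (bit % 64)) & 0x3;
 *	tte = vaddr ^ kern_linear_pte_xor[sel];
 */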

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS	1024

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}
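
/* Worked example (illustrative, assuming 8K pages): a firmware bank
 * of { base = 0x1f000000, size = 0x4c00 } has its size masked down to
 * 0x4000, i.e. two whole pages; a bank whose size collapses to zero
 * is memmove()'d out so later passes never see empty entries, and the
 * surviving banks leave this function sorted by ascending phys_addr.
 */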

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
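
/* Equivalent C sketch of the casx loops above (illustrative only,
 * using the generic cmpxchg() rather than the hand-written assembly):
 *
 *	do {
 *		old = page->flags;
 *		new = (old & non_cpu_bits) | mask;
 *	} while (cmpxchg(&page->flags, old, new) != old);
 */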

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

/* mm->context.lock must be held */
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
				    unsigned long tsb_hash_shift, unsigned long address,
				    unsigned long tte)
{
	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
	unsigned long tag;

	if (unlikely(!tsb))
		return;

	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, tte);
}
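
/* Indexing sketch (illustrative, assuming a 512-entry base TSB and
 * 8K pages): the bucket is (address >> PAGE_SHIFT) & 511 and the
 * stored tag is address >> 22, the same pair the TLB miss handlers
 * recompute when probing the TSB for a match.
 */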

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	unsigned long flags;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
	if (!pte_accessible(mm, pte))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
	    is_hugetlb_pte(pte))
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					address, pte_val(pte));
	else
#endif
		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
					address, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	static const char *pgsz_strings[] = {
		"8K", "64K", "512K", "4MB", "32MB",
		"256MB", "2GB", "16GB",
	};
	int i, printed;

	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

	seq_printf(m, "MMU PGSZs\t: ");
	printed = 0;
	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
		if (cpu_pgsz_mask & (1UL << i)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", pgsz_strings[i]);
			printed++;
		}
	}
	seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'. */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}


static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs(get_fs());

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	int new_version;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}
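
/* Layout sketch of a context value (illustrative): the low
 * CTX_NR_BITS hold the hardware context number and the bits above
 * them hold the allocator's generation.  On wrap-around the
 * generation is bumped and the bitmap reset, so every mm still
 * holding an old-generation value fails CTX_VALID() and re-enters
 * this allocator on its next context switch.
 */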

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

#ifdef CONFIG_NEED_MULTIPLE_NODES

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	/* The following condition has been observed on LDOM guests. */
	WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
		  " rule. Some physical memory will be owned by node 0.");
	return 0;
}

static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
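
/* Matching sketch (illustrative values): with node_masks[1] =
 * { .mask = ~((1UL << 36) - 1UL), .val = 1UL << 36 }, every address
 * whose bits 36 and up equal 1 resolves to node 1.
 * memblock_nid_range() then advances page by page from 'start' and
 * stops at the first page on a different node, so the caller can
 * assign [start, returned end) to a single node.
 */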
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	struct pglist_data *p;
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->node_id = nid;
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;
#endif

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
#endif
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void __init add_node_ranges(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start,
					  &memblock.memory, nid);
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);

1105 * Explicity zero it be identifty this.
1106 */
1107 if (val)
1108 m->offset = *val;
1109 else
1110 m->offset = 0UL;
David S. Miller919ee672008-04-23 05:40:25 -07001111
Sam Ravnborg90181132009-01-06 13:19:28 -08001112 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
David S. Miller919ee672008-04-23 05:40:25 -07001113 count - 1, m->base, m->size, m->offset);
1114 }
1115
1116 return 0;
1117}
1118
1119static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1120 u64 grp, cpumask_t *mask)
1121{
1122 u64 arc;
1123
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001124 cpumask_clear(mask);
David S. Miller919ee672008-04-23 05:40:25 -07001125
1126 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1127 u64 target = mdesc_arc_target(md, arc);
1128 const char *name = mdesc_node_name(md, target);
1129 const u64 *id;
1130
1131 if (strcmp(name, "cpu"))
1132 continue;
1133 id = mdesc_get_property(md, target, "id", NULL);
Rusty Russelle305cb8f2009-03-16 14:40:23 +10301134 if (*id < nr_cpu_ids)
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001135 cpumask_set_cpu(*id, mask);
David S. Miller919ee672008-04-23 05:40:25 -07001136 }
1137}
1138
1139static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1140{
1141 int i;
1142
1143 for (i = 0; i < num_mlgroups; i++) {
1144 struct mdesc_mlgroup *m = &mlgroups[i];
1145 if (m->node == node)
1146 return m;
1147 }
1148 return NULL;
1149}
1150
Nitin Gupta52708d62015-11-02 16:30:24 -05001151int __node_distance(int from, int to)
1152{
1153 if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
1154 pr_warn("Returning default NUMA distance value for %d->%d\n",
1155 from, to);
1156 return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
1157 }
1158 return numa_latency[from][to];
1159}
1160
1161static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
1162{
1163 int i;
1164
1165 for (i = 0; i < MAX_NUMNODES; i++) {
1166 struct node_mem_mask *n = &node_masks[i];
1167
1168 if ((grp->mask == n->mask) && (grp->match == n->val))
1169 break;
1170 }
1171 return i;
1172}
1173
1174static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp,
1175 int index)
1176{
1177 u64 arc;
1178
1179 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1180 int tnode;
1181 u64 target = mdesc_arc_target(md, arc);
1182 struct mdesc_mlgroup *m = find_mlgroup(target);
1183
1184 if (!m)
1185 continue;
1186 tnode = find_best_numa_node_for_mlgroup(m);
1187 if (tnode == MAX_NUMNODES)
1188 continue;
1189 numa_latency[index][tnode] = m->latency;
1190 }
1191}
1192
David S. Miller919ee672008-04-23 05:40:25 -07001193static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1194 int index)
1195{
1196 struct mdesc_mlgroup *candidate = NULL;
1197 u64 arc, best_latency = ~(u64)0;
1198 struct node_mem_mask *n;
1199
1200 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1201 u64 target = mdesc_arc_target(md, arc);
1202 struct mdesc_mlgroup *m = find_mlgroup(target);
1203 if (!m)
1204 continue;
1205 if (m->latency < best_latency) {
1206 candidate = m;
1207 best_latency = m->latency;
1208 }
1209 }
1210 if (!candidate)
1211 return -ENOENT;
1212
1213 if (num_node_masks != index) {
1214 printk(KERN_ERR "Inconsistent NUMA state, "
1215 "index[%d] != num_node_masks[%d]\n",
1216 index, num_node_masks);
1217 return -EINVAL;
1218 }
1219
1220 n = &node_masks[num_node_masks++];
1221
1222 n->mask = candidate->mask;
1223 n->val = candidate->match;
1224
Sam Ravnborg90181132009-01-06 13:19:28 -08001225 numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
David S. Miller919ee672008-04-23 05:40:25 -07001226 index, n->mask, n->val, candidate->latency);
1227
1228 return 0;
1229}
1230
1231static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1232 int index)
1233{
1234 cpumask_t mask;
1235 int cpu;
1236
1237 numa_parse_mdesc_group_cpus(md, grp, &mask);
1238
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001239 for_each_cpu(cpu, &mask)
David S. Miller919ee672008-04-23 05:40:25 -07001240 numa_cpu_lookup_table[cpu] = index;
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001241 cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
David S. Miller919ee672008-04-23 05:40:25 -07001242
1243 if (numa_debug) {
1244 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001245 for_each_cpu(cpu, &mask)
David S. Miller919ee672008-04-23 05:40:25 -07001246 printk("%d ", cpu);
1247 printk("]\n");
1248 }
1249
1250 return numa_attach_mlgroup(md, grp, index);
1251}
1252
1253static int __init numa_parse_mdesc(void)
1254{
1255 struct mdesc_handle *md = mdesc_grab();
Nitin Gupta52708d62015-11-02 16:30:24 -05001256 int i, j, err, count;
David S. Miller919ee672008-04-23 05:40:25 -07001257 u64 node;
1258
1259 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1260 if (node == MDESC_NODE_NULL) {
1261 mdesc_release(md);
1262 return -ENOENT;
1263 }
1264
1265 err = grab_mblocks(md);
1266 if (err < 0)
1267 goto out;
1268
1269 err = grab_mlgroups(md);
1270 if (err < 0)
1271 goto out;
1272
1273 count = 0;
1274 mdesc_for_each_node_by_name(md, node, "group") {
1275 err = numa_parse_mdesc_group(md, node, count);
1276 if (err < 0)
1277 break;
1278 count++;
1279 }
1280
Nitin Gupta52708d62015-11-02 16:30:24 -05001281 count = 0;
1282 mdesc_for_each_node_by_name(md, node, "group") {
1283 find_numa_latencies_for_group(md, node, count);
1284 count++;
1285 }
1286
1287 /* Normalize numa latency matrix according to ACPI SLIT spec. */
1288 for (i = 0; i < MAX_NUMNODES; i++) {
1289 u64 self_latency = numa_latency[i][i];
1290
1291 for (j = 0; j < MAX_NUMNODES; j++) {
1292 numa_latency[i][j] =
1293 (numa_latency[i][j] * LOCAL_DISTANCE) /
1294 self_latency;
1295 }
1296 }
1297
David S. Miller919ee672008-04-23 05:40:25 -07001298 add_node_ranges();
1299
1300 for (i = 0; i < num_node_masks; i++) {
1301 allocate_node_data(i);
1302 node_set_online(i);
1303 }
1304
1305 err = 0;
1306out:
1307 mdesc_release(md);
1308 return err;
1309}
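
/* Normalization sketch: if a node reports self-latency 20 and
 * latency 42 to a remote node, its matrix entry becomes
 * (42 * LOCAL_DISTANCE) / 20 = 21 with LOCAL_DISTANCE == 10.  Each
 * row is scaled so the diagonal reads LOCAL_DISTANCE, which is what
 * __node_distance() hands back to the scheduler.
 */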

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}
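
/* Encoding sketch: with the masks built above, cpu 2's local memory
 * is every physical address whose bits [63:36] equal 2 (val ==
 * 2UL << 36), so find_node() resolves an address to its node with a
 * single mask-and-compare.
 */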

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int i, j;
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	/* Some sane defaults for numa latency values */
	for (i = 0; i < MAX_NUMNODES; i++) {
		for (j = 0; j < MAX_NUMNODES; j++)
			numa_latency[i][j] = (i == j) ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	}

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
	allocate_node_data(0);
	node_set_online(0);
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* Dump memblock with node info. */
	memblock_dump_all();

	/* XXX cpu notifier XXX */

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

static unsigned long max_phys_bits = 40;

bool kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((long)addr < 0L) {
		unsigned long pa = __pa(addr);

		if ((addr >> max_phys_bits) != 0UL)
			return false;

		return pfn_valid(pa >> PAGE_SHIFT);
	}

	if (addr >= (unsigned long) KERNBASE &&
	    addr < (unsigned long)&_end)
		return true;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
EXPORT_SYMBOL(kern_addr_valid);
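
/* Walk sketch: an address backed by a huge linear mapping returns
 * from the pud_large()/pmd_large() tests above without descending
 * further, which is why those checks precede the next-level offset
 * lookups.
 */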

static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
					      unsigned long vend,
					      pud_t *pud)
{
	const unsigned long mask16gb = (1UL << 34) - 1UL;
	u64 pte_val = vstart;

	/* Each PUD is 8GB */
	if ((vstart & mask16gb) ||
	    (vend - vstart <= mask16gb)) {
		pte_val ^= kern_linear_pte_xor[2];
		pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;

		return vstart + PUD_SIZE;
	}

	pte_val ^= kern_linear_pte_xor[3];
	pte_val |= _PAGE_PUD_HUGE;

	vend = vstart + mask16gb + 1UL;
	while (vstart < vend) {
		pud_val(*pud) = pte_val;

		pte_val += PUD_SIZE;
		vstart += PUD_SIZE;
		pud++;
	}
	return vstart;
}
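
/* Example (illustrative): PUD_SIZE is 8GB here, so the 16GB path
 * above writes two consecutive PUD entries covered by one 16GB
 * hardware page (kern_linear_pte_xor[3]); the fallback path writes a
 * single 8GB PUD entry backed by 2GB hardware pages
 * (kern_linear_pte_xor[2]).
 */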

static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
				   bool guard)
{
	if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
		return true;

	return false;
}

static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
					      unsigned long vend,
					      pmd_t *pmd)
{
	const unsigned long mask256mb = (1UL << 28) - 1UL;
	const unsigned long mask2gb = (1UL << 31) - 1UL;
	u64 pte_val = vstart;

	/* Each PMD is 8MB */
	if ((vstart & mask256mb) ||
	    (vend - vstart <= mask256mb)) {
		pte_val ^= kern_linear_pte_xor[0];
		pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;

		return vstart + PMD_SIZE;
	}

	if ((vstart & mask2gb) ||
	    (vend - vstart <= mask2gb)) {
		pte_val ^= kern_linear_pte_xor[1];
		pte_val |= _PAGE_PMD_HUGE;
		vend = vstart + mask256mb + 1UL;
	} else {
		pte_val ^= kern_linear_pte_xor[2];
		pte_val |= _PAGE_PMD_HUGE;
		vend = vstart + mask2gb + 1UL;
	}

	while (vstart < vend) {
		pmd_val(*pmd) = pte_val;

		pte_val += PMD_SIZE;
		vstart += PMD_SIZE;
		pmd++;
	}

	return vstart;
}

static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
				   bool guard)
{
	if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
		return true;

	return false;
}
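
/* Both predicates require natural alignment and a large-enough
 * remainder: !(vstart & ~PMD_MASK) (or ~PUD_MASK) is true only when
 * vstart sits on the respective boundary.  The "guard" argument is
 * how kernel_physical_mapping_init() disables huge mappings entirely
 * under CONFIG_DEBUG_PAGEALLOC.
 */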

static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot,
					    bool use_huge)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd)) {
			pud_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pgd_populate(&init_mm, pgd, new);
		}
		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
				vstart = kernel_map_hugepud(vstart, vend, pud);
				continue;
			}
			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (pmd_none(*pmd)) {
			pte_t *new;

			if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
				vstart = kernel_map_hugepmd(vstart, vend, pmd);
				continue;
			}
			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}
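
/* kernel_map_range() walks [PAGE_OFFSET + pstart, PAGE_OFFSET + pend)
 * top-down, allocating missing intermediate tables from bootmem and
 * installing huge PUD/PMD entries whenever use_huge and alignment
 * permit; the returned byte count reflects only the page-table pages
 * allocated, not the memory being mapped.
 */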

static void __init flush_all_kernel_tsbs(void)
{
	int i;

	for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
		struct tsb *ent = &swapper_tsb[i];

		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
#ifndef CONFIG_DEBUG_PAGEALLOC
	for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
		struct tsb *ent = &swapper_4m_tsb[i];

		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
#endif
}

extern unsigned int kvmap_linear_patch[1];

static void __init kernel_physical_mapping_init(void)
{
	unsigned long i, mem_alloced = 0UL;
	bool use_huge = true;

#ifdef CONFIG_DEBUG_PAGEALLOC
	use_huge = false;
#endif
	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL, use_huge);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	flush_all_kernel_tsbs();

	__flush_tlb_all();
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)), false);

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* We should perform an IPI and flush all TLBs here, but that
	 * can deadlock, so we only flush the current cpu's TLB.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

unsigned long PAGE_OFFSET;
EXPORT_SYMBOL(PAGE_OFFSET);

unsigned long VMALLOC_END = 0x0000010000000000UL;
EXPORT_SYMBOL(VMALLOC_END);

unsigned long sparc64_va_hole_top =    0xfffff80000000000UL;
unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;

static void __init setup_page_offset(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		/* Cheetah/Panther support a full 64-bit virtual
		 * address, so we can use all that our page tables
		 * support.
		 */
		sparc64_va_hole_top =    0xfff0000000000000UL;
		sparc64_va_hole_bottom = 0x0010000000000000UL;

		max_phys_bits = 42;
	} else if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
		case SUN4V_CHIP_NIAGARA2:
			/* T1 and T2 support 48-bit virtual addresses.  */
			sparc64_va_hole_top =    0xffff800000000000UL;
			sparc64_va_hole_bottom = 0x0000800000000000UL;

			max_phys_bits = 39;
			break;
		case SUN4V_CHIP_NIAGARA3:
			/* T3 supports 48-bit virtual addresses.  */
			sparc64_va_hole_top =    0xffff800000000000UL;
			sparc64_va_hole_bottom = 0x0000800000000000UL;

			max_phys_bits = 43;
			break;
		case SUN4V_CHIP_NIAGARA4:
		case SUN4V_CHIP_NIAGARA5:
		case SUN4V_CHIP_SPARC64X:
		case SUN4V_CHIP_SPARC_M6:
			/* T4 and later support 52-bit virtual addresses.  */
			sparc64_va_hole_top =    0xfff8000000000000UL;
			sparc64_va_hole_bottom = 0x0008000000000000UL;
			max_phys_bits = 47;
			break;
		case SUN4V_CHIP_SPARC_M7:
		case SUN4V_CHIP_SPARC_SN:
		default:
			/* M7 and later support 52-bit virtual addresses.  */
			sparc64_va_hole_top =    0xfff8000000000000UL;
			sparc64_va_hole_bottom = 0x0008000000000000UL;
			max_phys_bits = 49;
			break;
		}
	}

	if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
		prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
			    max_phys_bits);
		prom_halt();
	}

	PAGE_OFFSET = sparc64_va_hole_top;
	VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
		       (sparc64_va_hole_bottom >> 2));

	pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
		PAGE_OFFSET, max_phys_bits);
	pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
		VMALLOC_START, VMALLOC_END);
	pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
		VMEMMAP_BASE, VMEMMAP_BASE << 1);
}
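
/* Example (illustrative): on an M7 the switch above yields
 * max_phys_bits == 49 with the VA hole spanning
 * [0x0008000000000000, 0xfff8000000000000), so PAGE_OFFSET becomes
 * 0xfff8000000000000 and VMALLOC_END becomes 0x0006000000000000
 * (half plus a quarter of the hole bottom).
 */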

static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];

/* The swapper TSBs are loaded with a base sequence of:
 *
 *	sethi	%uhi(SYMBOL), REG1
 *	sethi	%hi(SYMBOL), REG2
 *	or	REG1, %ulo(SYMBOL), REG1
 *	or	REG2, %lo(SYMBOL), REG2
 *	sllx	REG1, 32, REG1
 *	or	REG1, REG2, REG1
 *
 * When we use physical addressing for the TSB accesses, we patch the
 * first four instructions in the above sequence.
 */

static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
	unsigned long high_bits, low_bits;

	high_bits = (pa >> 32) & 0xffffffff;
	low_bits = (pa >> 0) & 0xffffffff;

	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
		__asm__ __volatile__("flush	%0" : : "r" (ia));

		ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));

		ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 2));

		ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 3));

		start++;
	}
}
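
/* Example (illustrative): the TSB physical address is split into
 * high_bits = pa[63:32] and low_bits = pa[31:0]; each sethi above
 * receives the upper 22 bits of its half (value >> 10, masked into
 * the 22-bit imm22 field) and each "or" the remaining low 10 bits
 * (value & 0x3ff, masked into the 13-bit simm13 field).
 */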

static void ktsb_phys_patch(void)
{
	extern unsigned int __swapper_tsb_phys_patch;
	extern unsigned int __swapper_tsb_phys_patch_end;
	unsigned long ktsb_pa;

	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
			    &__swapper_tsb_phys_patch_end, ktsb_pa);
#ifndef CONFIG_DEBUG_PAGEALLOC
	{
	extern unsigned int __swapper_4m_tsb_phys_patch;
	extern unsigned int __swapper_4m_tsb_phys_patch_end;
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
	}
#endif
}

static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB/2GB/16GB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
				    HV_PGSZ_MASK_256MB |
				    HV_PGSZ_MASK_2GB |
				    HV_PGSZ_MASK_16GB) &
				   cpu_pgsz_mask);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}
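
/* Note: ktsb_descr[] is handed to the hypervisor by
 * sun4v_ktsb_register() below, which is why it is a normal static
 * array rather than __initdata; the hypervisor may consult it long
 * after init memory has been freed.
 */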

void sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}

static void __init sun4u_linear_pte_xor_finalize(void)
{
#ifndef CONFIG_DEBUG_PAGEALLOC
	/* This is where we would add Panther support for
	 * 32MB and 256MB pages.
	 */
#endif
}

static void __init sun4v_linear_pte_xor_finalize(void)
{
	unsigned long pagecv_flag;

	/* Bit 9 of the TTE is no longer the CV bit on the M7 processor;
	 * it instead enables MCD errors.  Do not set bit 9 on M7.
	 */
	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
	case SUN4V_CHIP_SPARC_SN:
		pagecv_flag = 0x00;
		break;
	default:
		pagecv_flag = _PAGE_CV_4V;
		break;
	}
#ifndef CONFIG_DEBUG_PAGEALLOC
	if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
		kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
			PAGE_OFFSET;
		kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
	}

	if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
		kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
			PAGE_OFFSET;
		kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
	}

	if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
		kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
			PAGE_OFFSET;
		kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
	}
#endif
}
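
/* The kern_linear_pte_xor[] values are constructed so that XOR-ing a
 * linear virtual address with the selected entry simultaneously
 * strips PAGE_OFFSET (yielding the physical address) and merges in
 * the TTE valid/size/attribute bits in the TLB miss handler.  Page
 * sizes the cpu does not support simply alias the next smaller
 * supported size.
 */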

/* paging_init() sets up the page tables */

static unsigned long last_valid_pfn;

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

static phys_addr_t __init available_memory(void)
{
	phys_addr_t available = 0ULL;
	phys_addr_t pa_start, pa_end;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
				&pa_end, NULL)
		available = available + (pa_end - pa_start);

	return available;
}

#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	(_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	(_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

/* We need to exclude reserved regions; this exclusion includes vmlinux
 * and the initrd.  To be more precise, the initrd size could be used to
 * compute a new lower limit, because it is freed later during
 * initialization.
 */
static void __init reduce_memory(phys_addr_t limit_ram)
{
	phys_addr_t avail_ram = available_memory();
	phys_addr_t pa_start, pa_end;
	u64 i;

	if (limit_ram >= avail_ram)
		return;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
				&pa_end, NULL) {
		phys_addr_t region_size = pa_end - pa_start;
		phys_addr_t clip_start = pa_start;

		avail_ram = avail_ram - region_size;
		/* Are we consuming too much? */
		if (avail_ram < limit_ram) {
			phys_addr_t give_back = limit_ram - avail_ram;

			region_size = region_size - give_back;
			clip_start = clip_start + give_back;
		}

		memblock_remove(clip_start, region_size);

		if (avail_ram <= limit_ram)
			break;
		i = 0UL;
	}
}
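
/* Example (illustrative): with a single free 4GB region and a 1GB
 * cmdline_memory_size limit, the loop above keeps the first 1GB and
 * memblock_remove()s the remaining 3GB.  The "i = 0UL" restart is
 * needed because a removal can split a region, invalidating the
 * iterator over the memblock arrays.
 */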

void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;
	int node;

	setup_page_offset();

	/* These build time checks make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary. Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	/* The TTE.cv bit on sparc v9 occupies the same position as the
	 * TTE.mcde bit on the M7 processor.  This is a conflicting usage
	 * of the same bit.  Enabling TTE.cv on M7 would turn on Memory
	 * Corruption Detection errors on all pages, which would lead to
	 * problems later.  The kernel does not run with MCD enabled, and
	 * hence the rest of the steps required to fully configure memory
	 * corruption detection are not taken.  We need to ensure TTE.mcde
	 * is not set on the M7 processor, so compute the value of the
	 * cacheability flag for later use with this taken into account.
	 */
	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
	case SUN4V_CHIP_SPARC_SN:
		page_cache4v_flag = _PAGE_CP_4V;
		break;
	default:
		page_cache4v_flag = _PAGE_CACHE_4V;
		break;
	}

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor) {
		tsb_phys_patch();
		ktsb_phys_patch();
	}

	if (tlb_type == hypervisor)
		sun4v_patch_tlb_handlers();

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	memblock_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	if (cmdline_memory_size)
		reduce_memory(cmdline_memory_size);

	memblock_allow_resize();
	memblock_dump_all();

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	prom_build_devicetree();
	of_populate_present_mask();
#ifndef CONFIG_SMP
	of_fill_in_cpu_data();
#endif

	if (tlb_type == hypervisor) {
		sun4v_mdesc_init();
		mdesc_populate_present_mask(cpu_all_mask);
#ifndef CONFIG_SMP
		mdesc_fill_in_cpu_data(cpu_all_mask);
#endif
		mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);

		sun4v_linear_pte_xor_finalize();

		sun4v_ktsb_init();
		sun4v_ktsb_register();
	} else {
		unsigned long impl, ver;

		cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
				 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);

		__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
		impl = ((ver >> 32) & 0xffff);
		if (impl == PANTHER_IMPL)
			cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
					  HV_PGSZ_MASK_256MB);

		sun4u_linear_pte_xor_finalize();
	}

	/* Flush the TLBs and the 4M TSB so that the updated linear
	 * pte XOR settings are realized for all mappings.
	 */
	__flush_tlb_all();
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif
	__flush_tlb_all();

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		node = cpu_to_node(i);

		softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
		hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
	}

	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}

int page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;

	for_each_online_node(i)
		if (NODE_DATA(i)->node_spanned_pages)
			register_page_bootmem_info_node(NODE_DATA(i));
#endif
}
void __init mem_init(void)
{
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

	register_page_bootmem_info();
	free_all_bootmem();

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	mark_page_reserved(mem_map_zero);

	mem_init_print_info(NULL);

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free)
			free_reserved_page(virt_to_page(page));
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
			       int node)
{
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);

	pte_base |= _PAGE_PMD_HUGE;

	vstart = vstart & PMD_MASK;
	vend = ALIGN(vend, PMD_SIZE);
	for (; vstart < vend; vstart += PMD_SIZE) {
		pgd_t *pgd = pgd_offset_k(vstart);
		unsigned long pte;
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd)) {
			pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);

			if (!new)
				return -ENOMEM;
			pgd_populate(&init_mm, pgd, new);
		}

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);

			if (!new)
				return -ENOMEM;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);

		pte = pmd_val(*pmd);
		if (!(pte & _PAGE_VALID)) {
			void *block = vmemmap_alloc_block(PMD_SIZE, node);

			if (!block)
				return -ENOMEM;

			pmd_val(*pmd) = pte_base | __pa(block);
		}
	}

	return 0;
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}
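
/* protection_map[] is indexed by the low four vm_flags bits
 * (read, write, exec, shared): entries 0x0-0x7 cover private (COW)
 * mappings and 0x8-0xf their shared counterparts, which is why a
 * writable shared mapping gets page_shared while a writable private
 * one only gets page_copy.
 */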

static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		PAGE_OFFSET;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				page_cache4v_flag | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = page_cache4v_flag;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		PAGE_OFFSET;
#endif
	kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
				   _PAGE_W_4V);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}
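
/* Usage: callers such as mk_pte_io() below pass the mapping size in
 * bytes; any size without a dedicated TTE encoding falls back to the
 * base 8K case via the default label.
 */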

pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}

static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       page_cache4v_flag | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
			    unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pte_t *pte = NULL;

	if (page)
		pte = (pte_t *) page_address(page);

	return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm,
			unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		free_hot_cold_page(page, 0);
		return NULL;
	}
	return (pte_t *) page_address(page);
}

void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static void __pte_free(pgtable_t pte)
{
	struct page *page = virt_to_page(pte);

	pgtable_page_dtor(page);
	__free_page(page);
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	__pte_free(pte);
}

void pgtable_free(void *table, bool is_page)
{
	if (is_page)
		__pte_free(table);
	else
		kmem_cache_free(pgtable_cache, table);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	unsigned long pte, flags;
	struct mm_struct *mm;
	pmd_t entry = *pmd;

	if (!pmd_large(entry) || !pmd_young(entry))
		return;

	pte = pmd_val(entry);

	/* Don't insert a non-valid PMD into the TSB, we'll deadlock.  */
	if (!(pte & _PAGE_VALID))
		return;

	/* We are fabricating 8MB pages using 4MB real hw pages.  */
	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));

	mm = vma->vm_mm;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					addr, pte);

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
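
/* The (addr & (1UL << REAL_HPAGE_SHIFT)) adjustment above selects
 * which of the two real 4MB hardware pages backs the faulting half
 * of the fabricated 8MB page, so the TSB entry inserted here matches
 * the TTE the hardware will actually load.
 */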

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

void hugetlb_setup(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct tsb_config *tp;

	if (faulthandler_disabled() || !mm) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
		die_if_kernel("HugeTSB in atomic", regs);
	}

	tp = &mm->context.tsb_block[MM_TSB_HUGE];
	if (likely(tp->tsb == NULL))
		tsb_grow(mm, MM_TSB_HUGE, 0);

	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		bool need_context_reload = false;
		unsigned long ctx;

		spin_lock_irq(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			need_context_reload = true;
		}
		spin_unlock_irq(&ctx_alloc_lock);

		if (need_context_reload)
			on_each_cpu(context_reload, mm, 0);
	}
}
#endif

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static inline resource_size_t compute_kern_paddr(void *addr)
{
	return (resource_size_t) (addr - KERNBASE + kern_base);
}

static void __init kernel_lds_init(void)
{
	code_resource.start = compute_kern_paddr(_text);
	code_resource.end   = compute_kern_paddr(_etext - 1);
	data_resource.start = compute_kern_paddr(_etext);
	data_resource.end   = compute_kern_paddr(_edata - 1);
	bss_resource.start  = compute_kern_paddr(__bss_start);
	bss_resource.end    = compute_kern_paddr(_end - 1);
}

static int __init report_memory(void)
{
	int i;
	struct resource *res;

	kernel_lds_init();

	for (i = 0; i < pavail_ents; i++) {
		res = kzalloc(sizeof(struct resource), GFP_KERNEL);

		if (!res) {
			pr_warn("Failed to allocate resource.\n");
			break;
		}

		res->name = "System RAM";
		res->start = pavail[i].phys_addr;
		res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		if (insert_resource(&iomem_resource, res) < 0) {
			pr_warn("Resource insertion failed.\n");
			break;
		}

		insert_resource(res, &code_resource);
		insert_resource(res, &data_resource);
		insert_resource(res, &bss_resource);
	}

	return 0;
}
arch_initcall(report_memory);

#ifdef CONFIG_SMP
#define do_flush_tlb_kernel_range	smp_flush_tlb_kernel_range
#else
#define do_flush_tlb_kernel_range	__flush_tlb_kernel_range
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
		if (start < LOW_OBP_ADDRESS) {
			flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
		}
		if (end > HI_OBP_ADDRESS) {
			flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
			do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
		}
	} else {
		flush_tsb_kernel_range(start, end);
		do_flush_tlb_kernel_range(start, end);
	}
}
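
/* The OBP (firmware) mappings in [LOW_OBP_ADDRESS, HI_OBP_ADDRESS)
 * must never be demapped, so a kernel range straddling that window
 * is flushed as two pieces with the hole skipped.
 */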