/*
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/ioport.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[4] __read_mostly;
static unsigned long page_cache4v_flag;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *	0	==>	4MB
 *	1	==>	256MB
 *	2	==>	2GB
 *	3	==>	16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */

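/* Illustrative sketch (added; not part of the original file): how a
 * linear-area miss address and the two-bit page-size code described
 * above combine into a TTE.  kern_linear_pte_xor[] is the real table;
 * the helper name and its use here are hypothetical, shown only to
 * make the XOR scheme concrete.
 */
static inline unsigned long example_linear_tte(unsigned long vaddr,
					       unsigned int pgsz_code)
{
	/* pgsz_code is the two-bit index (0=4MB, 1=256MB, 2=2GB, 3=16GB). */
	return vaddr ^ kern_linear_pte_xor[pgsz_code & 3];
}
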
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS	1024

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

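/* Descriptive note (added; not in the original source): on
 * cheetah_plus and sun4v the kernel hands the TSB entry address to
 * __tsb_insert() as a physical address, hence the __pa() conversion
 * below; on older chips the virtual address is used directly.
 */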
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

/* mm->context.lock must be held */
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
				    unsigned long tsb_hash_shift, unsigned long address,
				    unsigned long tte)
{
	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
	unsigned long tag;

	if (unlikely(!tsb))
		return;

	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, tte);
}

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline bool is_hugetlb_pte(pte_t pte)
{
	if ((tlb_type == hypervisor &&
	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
	    (tlb_type != hypervisor &&
	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
		return true;
	return false;
}
#endif

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	unsigned long flags;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
	if (!pte_accessible(mm, pte))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					address, pte_val(pte));
	else
#endif
		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
					address, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	static const char *pgsz_strings[] = {
		"8K", "64K", "512K", "4MB", "32MB",
		"256MB", "2GB", "16GB",
	};
	int i, printed;

	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

	seq_printf(m, "MMU PGSZs\t: ");
	printed = 0;
	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
		if (cpu_pgsz_mask & (1UL << i)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", pgsz_strings[i]);
			printed++;
		}
	}
	seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}


static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs(get_fs());

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	int new_version;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);
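/* Usage note (added; not in the original source): via the
 * early_param() hook above, booting with "numa=off" disables NUMA
 * setup and "numa=debug" enables the numadbg() messages defined
 * below.
 */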

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

#ifdef CONFIG_NEED_MULTIPLE_NODES

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

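/* Descriptive note (added; not in the original source): a physical
 * address belongs to NUMA node i when (addr & node_masks[i].mask)
 * equals node_masks[i].val.  As a purely hypothetical example, a
 * mask of ~((1UL << 36UL) - 1UL) with val 0 would place the first
 * 64GB of the address space on node 0.
 */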
static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	/* The following condition has been observed on LDOM guests. */
	WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
		  " rule. Some physical memory will be owned by node 0.");
	return 0;
}

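/* Descriptive note (added; not in the original source):
 * memblock_nid_range() scans [start, end) a page at a time and
 * returns the end of the leading run that lies on a single node,
 * storing that node's id in *nid.
 */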
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	struct pglist_data *p;
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->node_id = nid;
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;
#endif

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
#endif
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void __init add_node_ranges(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start,
					  &memblock.memory, nid);
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);

		/* The address-congruence-offset property is optional.
		 * Explicitly zero the offset to identify this case.
		 */
		if (val)
			m->offset = *val;
		else
			m->offset = 0UL;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
	allocate_node_data(0);
	node_set_online(0);
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* Dump memblock with node info. */
	memblock_dump_all();

	/* XXX cpu notifier XXX */

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

static unsigned long max_phys_bits = 40;

bool kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((long)addr < 0L) {
		unsigned long pa = __pa(addr);

		if ((addr >> max_phys_bits) != 0UL)
			return false;

		return pfn_valid(pa >> PAGE_SHIFT);
	}

	if (addr >= (unsigned long) KERNBASE &&
	    addr < (unsigned long)&_end)
		return true;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
EXPORT_SYMBOL(kern_addr_valid);

static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
					      unsigned long vend,
					      pud_t *pud)
{
	const unsigned long mask16gb = (1UL << 34) - 1UL;
	u64 pte_val = vstart;

	/* Each PUD is 8GB */
	if ((vstart & mask16gb) ||
	    (vend - vstart <= mask16gb)) {
		pte_val ^= kern_linear_pte_xor[2];
		pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;

		return vstart + PUD_SIZE;
	}

	pte_val ^= kern_linear_pte_xor[3];
	pte_val |= _PAGE_PUD_HUGE;

	vend = vstart + mask16gb + 1UL;
	while (vstart < vend) {
		pud_val(*pud) = pte_val;

		pte_val += PUD_SIZE;
		vstart += PUD_SIZE;
		pud++;
	}
	return vstart;
}

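/* Descriptive note (added; not in the original source): the
 * kernel_can_map_huge*() helpers below gate the huge-mapping paths.
 * A huge mapping is attempted only when 'guard' allows it, vstart is
 * aligned to the PUD/PMD boundary, and the remaining range covers at
 * least one full PUD_SIZE/PMD_SIZE step.
 */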
static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
				   bool guard)
{
	if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
		return true;

	return false;
}

static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
					      unsigned long vend,
					      pmd_t *pmd)
{
	const unsigned long mask256mb = (1UL << 28) - 1UL;
	const unsigned long mask2gb = (1UL << 31) - 1UL;
	u64 pte_val = vstart;

	/* Each PMD is 8MB */
	if ((vstart & mask256mb) ||
	    (vend - vstart <= mask256mb)) {
		pte_val ^= kern_linear_pte_xor[0];
		pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;

		return vstart + PMD_SIZE;
	}

	if ((vstart & mask2gb) ||
	    (vend - vstart <= mask2gb)) {
		pte_val ^= kern_linear_pte_xor[1];
		pte_val |= _PAGE_PMD_HUGE;
		vend = vstart + mask256mb + 1UL;
	} else {
		pte_val ^= kern_linear_pte_xor[2];
		pte_val |= _PAGE_PMD_HUGE;
		vend = vstart + mask2gb + 1UL;
	}

	while (vstart < vend) {
		pmd_val(*pmd) = pte_val;

		pte_val += PMD_SIZE;
		vstart += PMD_SIZE;
		pmd++;
	}

	return vstart;
}

static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
				   bool guard)
{
	if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
		return true;

	return false;
}

Sam Ravnborg896aef42008-02-24 19:49:52 -08001504static unsigned long __ref kernel_map_range(unsigned long pstart,
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001505 unsigned long pend, pgprot_t prot,
1506 bool use_huge)
David S. Miller56425302005-09-25 16:46:57 -07001507{
1508 unsigned long vstart = PAGE_OFFSET + pstart;
1509 unsigned long vend = PAGE_OFFSET + pend;
1510 unsigned long alloc_bytes = 0UL;
1511
1512 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
David S. Miller13edad72005-09-29 17:58:26 -07001513 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
David S. Miller56425302005-09-25 16:46:57 -07001514 vstart, vend);
1515 prom_halt();
1516 }
1517
1518 while (vstart < vend) {
1519 unsigned long this_end, paddr = __pa(vstart);
1520 pgd_t *pgd = pgd_offset_k(vstart);
1521 pud_t *pud;
1522 pmd_t *pmd;
1523 pte_t *pte;
1524
David S. Millerac55c762014-09-26 21:19:46 -07001525 if (pgd_none(*pgd)) {
1526 pud_t *new;
1527
1528 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1529 alloc_bytes += PAGE_SIZE;
1530 pgd_populate(&init_mm, pgd, new);
1531 }
David S. Miller56425302005-09-25 16:46:57 -07001532 pud = pud_offset(pgd, vstart);
1533 if (pud_none(*pud)) {
1534 pmd_t *new;
1535
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001536 if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1537 vstart = kernel_map_hugepud(vstart, vend, pud);
1538 continue;
1539 }
David S. Miller56425302005-09-25 16:46:57 -07001540 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1541 alloc_bytes += PAGE_SIZE;
1542 pud_populate(&init_mm, pud, new);
1543 }
1544
1545 pmd = pmd_offset(pud, vstart);
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001546 if (pmd_none(*pmd)) {
David S. Miller56425302005-09-25 16:46:57 -07001547 pte_t *new;
1548
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001549 if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1550 vstart = kernel_map_hugepmd(vstart, vend, pmd);
1551 continue;
1552 }
David S. Miller56425302005-09-25 16:46:57 -07001553 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1554 alloc_bytes += PAGE_SIZE;
1555 pmd_populate_kernel(&init_mm, pmd, new);
1556 }
1557
1558 pte = pte_offset_kernel(pmd, vstart);
1559 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1560 if (this_end > vend)
1561 this_end = vend;
1562
1563 while (vstart < this_end) {
1564 pte_val(*pte) = (paddr | pgprot_val(prot));
1565
1566 vstart += PAGE_SIZE;
1567 paddr += PAGE_SIZE;
1568 pte++;
1569 }
1570 }
1571
1572 return alloc_bytes;
1573}
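
/* Illustrative sketch, not part of the kernel build: the PMD-span clamp
 * used by the PTE loop above.  For any vstart it computes the next PMD
 * boundary and caps it at vend, so the inner loop never walks past the
 * current PMD's PTE page.  demo_pmd_span_end() is a hypothetical name.
 */
static unsigned long demo_pmd_span_end(unsigned long vstart,
				       unsigned long vend)
{
	unsigned long this_end = (vstart + PMD_SIZE) & PMD_MASK;

	return this_end > vend ? vend : this_end;
}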
1574
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001575static void __init flush_all_kernel_tsbs(void)
1576{
1577 int i;
1578
1579 for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1580 struct tsb *ent = &swapper_tsb[i];
1581
1582 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1583 }
1584#ifndef CONFIG_DEBUG_PAGEALLOC
1585 for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1586 struct tsb *ent = &swapper_4m_tsb[i];
1587
1588 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1589 }
1590#endif
1591}
1592
David S. Miller56425302005-09-25 16:46:57 -07001593extern unsigned int kvmap_linear_patch[1];
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001594
David S. Miller8f3614532007-12-13 06:13:38 -08001595static void __init kernel_physical_mapping_init(void)
1596{
David S. Miller8f3614532007-12-13 06:13:38 -08001597 unsigned long i, mem_alloced = 0UL;
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001598 bool use_huge = true;
David S. Miller8f3614532007-12-13 06:13:38 -08001599
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001600#ifdef CONFIG_DEBUG_PAGEALLOC
1601 use_huge = false;
1602#endif
David S. Miller8f3614532007-12-13 06:13:38 -08001603 for (i = 0; i < pall_ents; i++) {
1604 unsigned long phys_start, phys_end;
1605
1606 phys_start = pall[i].phys_addr;
1607 phys_end = phys_start + pall[i].reg_size;
1608
David S. Miller56425302005-09-25 16:46:57 -07001609 mem_alloced += kernel_map_range(phys_start, phys_end,
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001610 PAGE_KERNEL, use_huge);
David S. Miller56425302005-09-25 16:46:57 -07001611 }
1612
1613 printk("Allocated %ld bytes for kernel page tables.\n",
1614 mem_alloced);
1615
1616 kvmap_linear_patch[0] = 0x01000000; /* nop */
1617 flushi(&kvmap_linear_patch[0]);
1618
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001619 flush_all_kernel_tsbs();
1620
David S. Miller56425302005-09-25 16:46:57 -07001621 __flush_tlb_all();
1622}
1623
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001624#ifdef CONFIG_DEBUG_PAGEALLOC
Joonsoo Kim031bc572014-12-12 16:55:52 -08001625void __kernel_map_pages(struct page *page, int numpages, int enable)
David S. Miller56425302005-09-25 16:46:57 -07001626{
1627 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1628 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1629
1630 kernel_map_range(phys_start, phys_end,
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001631 (enable ? PAGE_KERNEL : __pgprot(0)), false);
David S. Miller56425302005-09-25 16:46:57 -07001632
David S. Miller74bf4312006-01-31 18:29:18 -08001633 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1634 PAGE_OFFSET + phys_end);
1635
David S. Miller56425302005-09-25 16:46:57 -07001636	/* We should perform an IPI and flush all TLBs,
 1637	 * but that can deadlock, so we only flush the current cpu's TLB.
1638 */
1639 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1640 PAGE_OFFSET + phys_end);
1641}
1642#endif
1643
David S. Miller10147572005-09-28 21:46:43 -07001644unsigned long __init find_ecache_flush_span(unsigned long size)
1645{
David S. Miller13edad72005-09-29 17:58:26 -07001646 int i;
David S. Miller10147572005-09-28 21:46:43 -07001647
David S. Miller13edad72005-09-29 17:58:26 -07001648 for (i = 0; i < pavail_ents; i++) {
1649 if (pavail[i].reg_size >= size)
1650 return pavail[i].phys_addr;
David S. Miller10147572005-09-28 21:46:43 -07001651 }
1652
1653 return ~0UL;
1654}
1655
David S. Millerb2d43832013-09-20 21:50:41 -07001656unsigned long PAGE_OFFSET;
1657EXPORT_SYMBOL(PAGE_OFFSET);
1658
David S. Millerbb4e6e82014-09-27 11:05:21 -07001659unsigned long VMALLOC_END = 0x0000010000000000UL;
1660EXPORT_SYMBOL(VMALLOC_END);
1661
David S. Miller4397bed2014-09-26 21:58:33 -07001662unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
1663unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1664
David S. Millerb2d43832013-09-20 21:50:41 -07001665static void __init setup_page_offset(void)
1666{
David S. Millerb2d43832013-09-20 21:50:41 -07001667 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
David S. Miller4397bed2014-09-26 21:58:33 -07001668 /* Cheetah/Panther support a full 64-bit virtual
1669 * address, so we can use all that our page tables
1670 * support.
1671 */
1672 sparc64_va_hole_top = 0xfff0000000000000UL;
1673 sparc64_va_hole_bottom = 0x0010000000000000UL;
1674
David S. Millerb2d43832013-09-20 21:50:41 -07001675 max_phys_bits = 42;
1676 } else if (tlb_type == hypervisor) {
1677 switch (sun4v_chip_type) {
1678 case SUN4V_CHIP_NIAGARA1:
1679 case SUN4V_CHIP_NIAGARA2:
David S. Miller4397bed2014-09-26 21:58:33 -07001680 /* T1 and T2 support 48-bit virtual addresses. */
1681 sparc64_va_hole_top = 0xffff800000000000UL;
1682 sparc64_va_hole_bottom = 0x0000800000000000UL;
1683
David S. Millerb2d43832013-09-20 21:50:41 -07001684 max_phys_bits = 39;
1685 break;
1686 case SUN4V_CHIP_NIAGARA3:
David S. Miller4397bed2014-09-26 21:58:33 -07001687 /* T3 supports 48-bit virtual addresses. */
1688 sparc64_va_hole_top = 0xffff800000000000UL;
1689 sparc64_va_hole_bottom = 0x0000800000000000UL;
1690
David S. Millerb2d43832013-09-20 21:50:41 -07001691 max_phys_bits = 43;
1692 break;
1693 case SUN4V_CHIP_NIAGARA4:
1694 case SUN4V_CHIP_NIAGARA5:
1695 case SUN4V_CHIP_SPARC64X:
David S. Miller7c0fa0f2014-09-24 21:49:29 -07001696 case SUN4V_CHIP_SPARC_M6:
David S. Miller4397bed2014-09-26 21:58:33 -07001697 /* T4 and later support 52-bit virtual addresses. */
1698 sparc64_va_hole_top = 0xfff8000000000000UL;
1699 sparc64_va_hole_bottom = 0x0008000000000000UL;
David S. Millerb2d43832013-09-20 21:50:41 -07001700 max_phys_bits = 47;
1701 break;
David S. Miller7c0fa0f2014-09-24 21:49:29 -07001702 case SUN4V_CHIP_SPARC_M7:
1703 default:
1704 /* M7 and later support 52-bit virtual addresses. */
1705 sparc64_va_hole_top = 0xfff8000000000000UL;
1706 sparc64_va_hole_bottom = 0x0008000000000000UL;
1707 max_phys_bits = 49;
1708 break;
David S. Millerb2d43832013-09-20 21:50:41 -07001709 }
1710 }
1711
1712 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
1713 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
1714 max_phys_bits);
1715 prom_halt();
1716 }
1717
David S. Millerbb4e6e82014-09-27 11:05:21 -07001718 PAGE_OFFSET = sparc64_va_hole_top;
1719 VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
1720 (sparc64_va_hole_bottom >> 2));
David S. Millerb2d43832013-09-20 21:50:41 -07001721
David S. Millerbb4e6e82014-09-27 11:05:21 -07001722 pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
David S. Millerb2d43832013-09-20 21:50:41 -07001723 PAGE_OFFSET, max_phys_bits);
David S. Millerbb4e6e82014-09-27 11:05:21 -07001724 pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
1725 VMALLOC_START, VMALLOC_END);
1726 pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
1727 VMEMMAP_BASE, VMEMMAP_BASE << 1);
David S. Millerb2d43832013-09-20 21:50:41 -07001728}
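
/* Worked example, illustrative only (values assume the 52-bit VA case
 * above): with sparc64_va_hole_bottom == 0x0008000000000000UL, the
 * VMALLOC_END expression (bottom >> 1) + (bottom >> 2) places the end
 * of the vmalloc area at three quarters of the positive half, i.e.
 * 0x0006000000000000UL.  demo_vmalloc_end() is a hypothetical helper.
 */
static inline unsigned long demo_vmalloc_end(unsigned long hole_bottom)
{
	return (hole_bottom >> 1) + (hole_bottom >> 2);
}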
1729
David S. Miller517af332006-02-01 15:55:21 -08001730static void __init tsb_phys_patch(void)
1731{
David S. Millerd257d5d2006-02-06 23:44:37 -08001732 struct tsb_ldquad_phys_patch_entry *pquad;
David S. Miller517af332006-02-01 15:55:21 -08001733 struct tsb_phys_patch_entry *p;
1734
David S. Millerd257d5d2006-02-06 23:44:37 -08001735 pquad = &__tsb_ldquad_phys_patch;
1736 while (pquad < &__tsb_ldquad_phys_patch_end) {
1737 unsigned long addr = pquad->addr;
1738
1739 if (tlb_type == hypervisor)
1740 *(unsigned int *) addr = pquad->sun4v_insn;
1741 else
1742 *(unsigned int *) addr = pquad->sun4u_insn;
1743 wmb();
1744 __asm__ __volatile__("flush %0"
1745 : /* no outputs */
1746 : "r" (addr));
1747
1748 pquad++;
1749 }
1750
David S. Miller517af332006-02-01 15:55:21 -08001751 p = &__tsb_phys_patch;
1752 while (p < &__tsb_phys_patch_end) {
1753 unsigned long addr = p->addr;
1754
1755 *(unsigned int *) addr = p->insn;
1756 wmb();
1757 __asm__ __volatile__("flush %0"
1758 : /* no outputs */
1759 : "r" (addr));
1760
1761 p++;
1762 }
1763}
1764
David S. Miller490384e2006-02-11 14:41:18 -08001765/* Don't mark as init, we give this to the Hypervisor. */
David S. Millerd1acb422007-03-16 17:20:28 -07001766#ifndef CONFIG_DEBUG_PAGEALLOC
1767#define NUM_KTSB_DESCR 2
1768#else
1769#define NUM_KTSB_DESCR 1
1770#endif
1771static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
David S. Miller490384e2006-02-11 14:41:18 -08001772
David S. Miller8c82dc02014-09-17 10:14:56 -07001773/* The swapper TSBs are loaded with a base sequence of:
1774 *
1775 * sethi %uhi(SYMBOL), REG1
1776 * sethi %hi(SYMBOL), REG2
1777 * or REG1, %ulo(SYMBOL), REG1
1778 * or REG2, %lo(SYMBOL), REG2
1779 * sllx REG1, 32, REG1
1780 * or REG1, REG2, REG1
1781 *
1782 * When we use physical addressing for the TSB accesses, we patch the
1783 * first four instructions in the above sequence.
1784 */
1785
David S. Miller9076d0e2011-08-05 00:53:57 -07001786static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
1787{
David S. Miller8c82dc02014-09-17 10:14:56 -07001788 unsigned long high_bits, low_bits;
1789
1790 high_bits = (pa >> 32) & 0xffffffff;
1791 low_bits = (pa >> 0) & 0xffffffff;
David S. Miller9076d0e2011-08-05 00:53:57 -07001792
1793 while (start < end) {
1794 unsigned int *ia = (unsigned int *)(unsigned long)*start;
1795
David S. Miller8c82dc02014-09-17 10:14:56 -07001796 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
David S. Miller9076d0e2011-08-05 00:53:57 -07001797 __asm__ __volatile__("flush %0" : : "r" (ia));
1798
David S. Miller8c82dc02014-09-17 10:14:56 -07001799 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
David S. Miller9076d0e2011-08-05 00:53:57 -07001800 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
1801
David S. Miller8c82dc02014-09-17 10:14:56 -07001802 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
1803 __asm__ __volatile__("flush %0" : : "r" (ia + 2));
1804
1805 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
1806 __asm__ __volatile__("flush %0" : : "r" (ia + 3));
1807
David S. Miller9076d0e2011-08-05 00:53:57 -07001808 start++;
1809 }
1810}
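
/* Illustrative sketch, not part of the kernel build: why the 22-bit/
 * 10-bit split above is lossless.  Each 32-bit half of the physical
 * address is rebuilt at runtime from a sethi immediate (bits 31:10)
 * and an or immediate (bits 9:0); recombining the patched fields
 * reproduces the original address exactly.  demo_patched_pa() is a
 * hypothetical name.
 */
static unsigned long demo_patched_pa(unsigned long pa)
{
	unsigned long high_bits = (pa >> 32) & 0xffffffff;
	unsigned long low_bits = pa & 0xffffffff;
	unsigned long hi = ((high_bits >> 10) << 10) | (high_bits & 0x3ff);
	unsigned long lo = ((low_bits >> 10) << 10) | (low_bits & 0x3ff);

	return (hi << 32) | lo;	/* == pa for any 64-bit input */
}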
1811
1812static void ktsb_phys_patch(void)
1813{
1814 extern unsigned int __swapper_tsb_phys_patch;
1815 extern unsigned int __swapper_tsb_phys_patch_end;
David S. Miller9076d0e2011-08-05 00:53:57 -07001816 unsigned long ktsb_pa;
1817
1818 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1819 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
1820 &__swapper_tsb_phys_patch_end, ktsb_pa);
1821#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Miller0785a8e2011-08-06 05:26:35 -07001822 {
1823 extern unsigned int __swapper_4m_tsb_phys_patch;
1824 extern unsigned int __swapper_4m_tsb_phys_patch_end;
David S. Miller9076d0e2011-08-05 00:53:57 -07001825 ktsb_pa = (kern_base +
1826 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1827 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
1828 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
David S. Miller0785a8e2011-08-06 05:26:35 -07001829 }
David S. Miller9076d0e2011-08-05 00:53:57 -07001830#endif
1831}
1832
David S. Miller490384e2006-02-11 14:41:18 -08001833static void __init sun4v_ktsb_init(void)
1834{
1835 unsigned long ktsb_pa;
1836
David S. Millerd7744a02006-02-21 22:31:11 -08001837 /* First KTSB for PAGE_SIZE mappings. */
David S. Miller490384e2006-02-11 14:41:18 -08001838 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1839
1840 switch (PAGE_SIZE) {
1841 case 8 * 1024:
1842 default:
1843 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1844 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1845 break;
1846
1847 case 64 * 1024:
1848 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1849 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1850 break;
1851
1852 case 512 * 1024:
1853 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1854 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
1855 break;
1856
1857 case 4 * 1024 * 1024:
1858 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1859 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1860 break;
Joe Perches6cb79b32011-06-03 14:45:23 +00001861 }
David S. Miller490384e2006-02-11 14:41:18 -08001862
David S. Miller3f19a842006-02-17 12:03:20 -08001863 ktsb_descr[0].assoc = 1;
David S. Miller490384e2006-02-11 14:41:18 -08001864 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
1865 ktsb_descr[0].ctx_idx = 0;
1866 ktsb_descr[0].tsb_base = ktsb_pa;
1867 ktsb_descr[0].resv = 0;
1868
David S. Millerd1acb422007-03-16 17:20:28 -07001869#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Miller4f93d212012-09-06 18:13:58 -07001870 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
David S. Millerd7744a02006-02-21 22:31:11 -08001871 ktsb_pa = (kern_base +
1872 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1873
1874 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
David S. Millerc69ad0a2012-09-06 20:35:36 -07001875 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
1876 HV_PGSZ_MASK_256MB |
1877 HV_PGSZ_MASK_2GB |
1878 HV_PGSZ_MASK_16GB) &
1879 cpu_pgsz_mask);
David S. Millerd7744a02006-02-21 22:31:11 -08001880 ktsb_descr[1].assoc = 1;
1881 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
1882 ktsb_descr[1].ctx_idx = 0;
1883 ktsb_descr[1].tsb_base = ktsb_pa;
1884 ktsb_descr[1].resv = 0;
David S. Millerd1acb422007-03-16 17:20:28 -07001885#endif
David S. Miller490384e2006-02-11 14:41:18 -08001886}
1887
Paul Gortmaker2066aad2013-06-17 15:43:14 -04001888void sun4v_ktsb_register(void)
David S. Miller490384e2006-02-11 14:41:18 -08001889{
David S. Miller7db35f32007-05-29 02:22:14 -07001890 unsigned long pa, ret;
David S. Miller490384e2006-02-11 14:41:18 -08001891
1892 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1893
David S. Miller7db35f32007-05-29 02:22:14 -07001894 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
1895 if (ret != 0) {
1896 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
 1897			    "failed with error %lx\n", pa, ret);
1898 prom_halt();
1899 }
David S. Miller490384e2006-02-11 14:41:18 -08001900}
1901
David S. Millerc69ad0a2012-09-06 20:35:36 -07001902static void __init sun4u_linear_pte_xor_finalize(void)
1903{
1904#ifndef CONFIG_DEBUG_PAGEALLOC
1905 /* This is where we would add Panther support for
1906 * 32MB and 256MB pages.
1907 */
1908#endif
1909}
1910
1911static void __init sun4v_linear_pte_xor_finalize(void)
1912{
Khalid Aziz494e5b62015-05-27 10:00:46 -06001913 unsigned long pagecv_flag;
1914
 1915	/* On the M7 processor, bit 9 of the TTE is no longer the CV bit;
 1916	 * it instead enables MCD error reporting. Do not set bit 9 on M7.
1917 */
1918 switch (sun4v_chip_type) {
1919 case SUN4V_CHIP_SPARC_M7:
1920 pagecv_flag = 0x00;
1921 break;
1922 default:
1923 pagecv_flag = _PAGE_CV_4V;
1924 break;
1925 }
David S. Millerc69ad0a2012-09-06 20:35:36 -07001926#ifndef CONFIG_DEBUG_PAGEALLOC
1927 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
1928 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07001929 PAGE_OFFSET;
Khalid Aziz494e5b62015-05-27 10:00:46 -06001930 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
David S. Millerc69ad0a2012-09-06 20:35:36 -07001931 _PAGE_P_4V | _PAGE_W_4V);
1932 } else {
1933 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
1934 }
1935
1936 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
1937 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07001938 PAGE_OFFSET;
Khalid Aziz494e5b62015-05-27 10:00:46 -06001939 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
David S. Millerc69ad0a2012-09-06 20:35:36 -07001940 _PAGE_P_4V | _PAGE_W_4V);
1941 } else {
1942 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
1943 }
1944
1945 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
1946 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07001947 PAGE_OFFSET;
Khalid Aziz494e5b62015-05-27 10:00:46 -06001948 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
David S. Millerc69ad0a2012-09-06 20:35:36 -07001949 _PAGE_P_4V | _PAGE_W_4V);
1950 } else {
1951 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
1952 }
1953#endif
1954}
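
/* Illustrative sketch (assumption: it restates the fallback rule of the
 * chained if/else blocks above): any large-page slot the cpu does not
 * support inherits the next smaller mapping, so a TLB miss never sees
 * an uninitialized kern_linear_pte_xor[] entry.  demo_fill_slots() and
 * its supported[] argument are hypothetical.
 */
static void demo_fill_slots(unsigned long xor_vals[4], bool supported[4])
{
	int i;

	for (i = 1; i < 4; i++)
		if (!supported[i])
			xor_vals[i] = xor_vals[i - 1];
}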
1955
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956/* paging_init() sets up the page tables */
1957
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958static unsigned long last_valid_pfn;
David S. Millerac55c762014-09-26 21:19:46 -07001959
David S. Millerc4bce902006-02-11 21:57:54 -08001960static void sun4u_pgprot_init(void);
1961static void sun4v_pgprot_init(void);
1962
bob picco7c21d532014-09-16 09:29:54 -04001963static phys_addr_t __init available_memory(void)
1964{
1965 phys_addr_t available = 0ULL;
1966 phys_addr_t pa_start, pa_end;
1967 u64 i;
1968
1969 for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL)
1970 available = available + (pa_end - pa_start);
1971
1972 return available;
1973}
1974
Khalid Aziz494e5b62015-05-27 10:00:46 -06001975#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
1976#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
1977#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
1978#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
1979#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
1980#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
1981
bob picco7c21d532014-09-16 09:29:54 -04001982/* We need to exclude reserved regions. This exclusion will include
 1983 * vmlinux and initrd. To be more precise, the initrd size could be used to
1984 * compute a new lower limit because it is freed later during initialization.
1985 */
1986static void __init reduce_memory(phys_addr_t limit_ram)
1987{
1988 phys_addr_t avail_ram = available_memory();
1989 phys_addr_t pa_start, pa_end;
1990 u64 i;
1991
1992 if (limit_ram >= avail_ram)
1993 return;
1994
1995 for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL) {
1996 phys_addr_t region_size = pa_end - pa_start;
1997 phys_addr_t clip_start = pa_start;
1998
1999 avail_ram = avail_ram - region_size;
2000 /* Are we consuming too much? */
2001 if (avail_ram < limit_ram) {
2002 phys_addr_t give_back = limit_ram - avail_ram;
2003
2004 region_size = region_size - give_back;
2005 clip_start = clip_start + give_back;
2006 }
2007
2008 memblock_remove(clip_start, region_size);
2009
2010 if (avail_ram <= limit_ram)
2011 break;
2012 i = 0UL;
2013 }
2014}
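
/* Illustrative sketch, outside the kernel: the clipping rule above.
 * When removing a whole free region would overshoot the mem= limit,
 * keep its first give_back bytes and remove only the tail, which is
 * what the clip_start/region_size adjustment does before
 * memblock_remove().  demo_clip() is a hypothetical helper.
 */
static void demo_clip(phys_addr_t *remove_start, phys_addr_t *remove_size,
		      phys_addr_t give_back)
{
	if (give_back > *remove_size)
		give_back = *remove_size;
	*remove_start += give_back;	/* keep [start, start + give_back) */
	*remove_size -= give_back;	/* remove only the tail */
}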
2015
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016void __init paging_init(void)
2017{
David S. Miller919ee672008-04-23 05:40:25 -07002018 unsigned long end_pfn, shift, phys_base;
David S. Miller0836a0e2005-09-28 21:38:08 -07002019 unsigned long real_end, i;
Paul Gortmakeraa6f0792012-05-09 20:44:29 -04002020 int node;
David S. Miller0836a0e2005-09-28 21:38:08 -07002021
David S. Millerb2d43832013-09-20 21:50:41 -07002022 setup_page_offset();
2023
David S. Miller22adb352007-05-26 01:14:43 -07002024	/* These build time checks make sure that the dcache_dirty_cpu()
2025 * page->flags usage will work.
2026 *
2027 * When a page gets marked as dcache-dirty, we store the
2028 * cpu number starting at bit 32 in the page->flags. Also,
2029 * functions like clear_dcache_dirty_cpu use the cpu mask
2030 * in 13-bit signed-immediate instruction fields.
2031 */
Christoph Lameter9223b412008-04-28 02:12:48 -07002032
2033 /*
2034 * Page flags must not reach into upper 32 bits that are used
2035 * for the cpu number
2036 */
2037 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2038
2039 /*
2040 * The bit fields placed in the high range must not reach below
2041 * the 32 bit boundary. Otherwise we cannot place the cpu field
2042 * at the 32 bit boundary.
2043 */
David S. Miller22adb352007-05-26 01:14:43 -07002044 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
Christoph Lameter9223b412008-04-28 02:12:48 -07002045 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2046
David S. Miller22adb352007-05-26 01:14:43 -07002047 BUILD_BUG_ON(NR_CPUS > 4096);
2048
David S. Miller0eef3312014-05-03 22:52:50 -07002049 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
David S. Miller481295f2006-02-07 21:51:08 -08002050 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2051
David S. Millerd7744a02006-02-21 22:31:11 -08002052 /* Invalidate both kernel TSBs. */
David S. Miller8b234272006-02-17 18:01:02 -08002053 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
David S. Millerd1acb422007-03-16 17:20:28 -07002054#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Millerd7744a02006-02-21 22:31:11 -08002055 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
David S. Millerd1acb422007-03-16 17:20:28 -07002056#endif
David S. Miller8b234272006-02-17 18:01:02 -08002057
Khalid Aziz494e5b62015-05-27 10:00:46 -06002058	/* The TTE.cv bit on sparc v9 occupies the same position as the
 2059	 * TTE.mcde bit on the M7 processor, a conflicting use of the
 2060	 * same bit. Enabling TTE.cv on M7 would turn on Memory
 2061	 * Corruption Detection errors on all pages, which would cause
 2062	 * problems later: the kernel does not run with MCD enabled, so
 2063	 * the rest of the steps required to fully configure memory
 2064	 * corruption detection are never taken. We must therefore
 2065	 * ensure TTE.mcde is not set on the M7 processor. Compute the
 2066	 * cacheability flag value for later use with this in mind.
 2067	 */
2068 switch (sun4v_chip_type) {
2069 case SUN4V_CHIP_SPARC_M7:
2070 page_cache4v_flag = _PAGE_CP_4V;
2071 break;
2072 default:
2073 page_cache4v_flag = _PAGE_CACHE_4V;
2074 break;
2075 }
2076
David S. Millerc4bce902006-02-11 21:57:54 -08002077 if (tlb_type == hypervisor)
2078 sun4v_pgprot_init();
2079 else
2080 sun4u_pgprot_init();
2081
David S. Millerd257d5d2006-02-06 23:44:37 -08002082 if (tlb_type == cheetah_plus ||
David S. Miller9076d0e2011-08-05 00:53:57 -07002083 tlb_type == hypervisor) {
David S. Miller517af332006-02-01 15:55:21 -08002084 tsb_phys_patch();
David S. Miller9076d0e2011-08-05 00:53:57 -07002085 ktsb_phys_patch();
2086 }
David S. Miller517af332006-02-01 15:55:21 -08002087
David S. Millerc69ad0a2012-09-06 20:35:36 -07002088 if (tlb_type == hypervisor)
David S. Millerd257d5d2006-02-06 23:44:37 -08002089 sun4v_patch_tlb_handlers();
2090
David S. Millera94a1722008-05-11 21:04:48 -07002091 /* Find available physical memory...
2092 *
2093 * Read it twice in order to work around a bug in openfirmware.
2094 * The call to grab this table itself can cause openfirmware to
2095 * allocate memory, which in turn can take away some space from
2096 * the list of available memory. Reading it twice makes sure
2097 * we really do get the final value.
2098 */
2099 read_obp_translations();
2100 read_obp_memory("reg", &pall[0], &pall_ents);
2101 read_obp_memory("available", &pavail[0], &pavail_ents);
David S. Miller13edad72005-09-29 17:58:26 -07002102 read_obp_memory("available", &pavail[0], &pavail_ents);
David S. Miller0836a0e2005-09-28 21:38:08 -07002103
2104 phys_base = 0xffffffffffffffffUL;
David S. Miller3b2a7e22008-02-13 18:13:20 -08002105 for (i = 0; i < pavail_ents; i++) {
David S. Miller13edad72005-09-29 17:58:26 -07002106 phys_base = min(phys_base, pavail[i].phys_addr);
Yinghai Lu95f72d12010-07-12 14:36:09 +10002107 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
David S. Miller3b2a7e22008-02-13 18:13:20 -08002108 }
2109
Yinghai Lu95f72d12010-07-12 14:36:09 +10002110 memblock_reserve(kern_base, kern_size);
David S. Miller0836a0e2005-09-28 21:38:08 -07002111
David S. Miller4e82c9a2008-02-13 18:00:03 -08002112 find_ramdisk(phys_base);
2113
bob picco7c21d532014-09-16 09:29:54 -04002114 if (cmdline_memory_size)
2115 reduce_memory(cmdline_memory_size);
David S. Miller25b0c652008-02-13 18:20:14 -08002116
Tejun Heo1aadc052011-12-08 10:22:08 -08002117 memblock_allow_resize();
Yinghai Lu95f72d12010-07-12 14:36:09 +10002118 memblock_dump_all();
David S. Miller3b2a7e22008-02-13 18:13:20 -08002119
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 set_bit(0, mmu_context_bmap);
2121
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002122 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2123
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 real_end = (unsigned long)_end;
David S. Miller0eef3312014-05-03 22:52:50 -07002125 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
David S. Miller64658742008-03-21 17:01:38 -07002126 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2127 num_kernel_image_mappings);
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002128
2129 /* Set kernel pgd to upper alias so physical page computations
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 * work.
2131 */
2132 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2133
David S. Millerd195b712014-09-27 21:30:57 -07002134 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
David S. Miller0dd5b7b2014-09-24 20:56:11 -07002135
David S. Millerc9c10832005-10-12 12:22:46 -07002136 inherit_prom_mappings();
David S. Miller5085b4a2005-09-22 00:45:41 -07002137
David S. Millera8b900d2006-01-31 18:33:37 -08002138 /* Ok, we can use our TLB miss and window trap handlers safely. */
2139 setup_tba();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140
David S. Millerc9c10832005-10-12 12:22:46 -07002141 __flush_tlb_all();
David S. Miller9ad98c52005-10-05 15:12:00 -07002142
David S. Millerad072002008-02-13 19:21:51 -08002143 prom_build_devicetree();
David S. Millerb696fdc2009-05-26 22:37:25 -07002144 of_populate_present_mask();
David S. Millerb99c6eb2009-06-18 01:44:19 -07002145#ifndef CONFIG_SMP
2146 of_fill_in_cpu_data();
2147#endif
David S. Millerad072002008-02-13 19:21:51 -08002148
David S. Miller890db402009-04-01 03:13:15 -07002149 if (tlb_type == hypervisor) {
David S. Miller4a283332008-02-13 19:22:23 -08002150 sun4v_mdesc_init();
Stephen Rothwell6ac5c612009-06-15 03:06:18 -07002151 mdesc_populate_present_mask(cpu_all_mask);
David S. Millerb99c6eb2009-06-18 01:44:19 -07002152#ifndef CONFIG_SMP
2153 mdesc_fill_in_cpu_data(cpu_all_mask);
2154#endif
David S. Millerce33fdc2012-09-06 19:01:25 -07002155 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
David S. Millerc69ad0a2012-09-06 20:35:36 -07002156
2157 sun4v_linear_pte_xor_finalize();
2158
2159 sun4v_ktsb_init();
2160 sun4v_ktsb_register();
David S. Millerce33fdc2012-09-06 19:01:25 -07002161 } else {
2162 unsigned long impl, ver;
2163
2164 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2165 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2166
2167 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2168 impl = ((ver >> 32) & 0xffff);
2169 if (impl == PANTHER_IMPL)
2170 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2171 HV_PGSZ_MASK_256MB);
David S. Millerc69ad0a2012-09-06 20:35:36 -07002172
2173 sun4u_linear_pte_xor_finalize();
David S. Miller890db402009-04-01 03:13:15 -07002174 }
David S. Miller4a283332008-02-13 19:22:23 -08002175
David S. Millerc69ad0a2012-09-06 20:35:36 -07002176 /* Flush the TLBs and the 4M TSB so that the updated linear
2177 * pte XOR settings are realized for all mappings.
2178 */
2179 __flush_tlb_all();
2180#ifndef CONFIG_DEBUG_PAGEALLOC
2181 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2182#endif
2183 __flush_tlb_all();
2184
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002185 /* Setup bootmem... */
David S. Miller919ee672008-04-23 05:40:25 -07002186 last_valid_pfn = end_pfn = bootmem_init(phys_base);
David S. Millerd1112012006-03-08 02:16:07 -08002187
David S. Miller5ed56f12012-04-26 20:50:34 -07002188	/* Once the OF device tree and MDESC have been set up, we know
2189 * the list of possible cpus. Therefore we can allocate the
2190 * IRQ stacks.
2191 */
2192 for_each_possible_cpu(i) {
Paul Gortmakeraa6f0792012-05-09 20:44:29 -04002193 node = cpu_to_node(i);
David S. Miller5ed56f12012-04-26 20:50:34 -07002194
2195 softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2196 THREAD_SIZE,
2197 THREAD_SIZE, 0);
2198 hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2199 THREAD_SIZE,
2200 THREAD_SIZE, 0);
2201 }
2202
David S. Miller56425302005-09-25 16:46:57 -07002203 kernel_physical_mapping_init();
David S. Miller56425302005-09-25 16:46:57 -07002204
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 {
David S. Miller919ee672008-04-23 05:40:25 -07002206 unsigned long max_zone_pfns[MAX_NR_ZONES];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207
David S. Miller919ee672008-04-23 05:40:25 -07002208 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
David S. Miller919ee672008-04-23 05:40:25 -07002210 max_zone_pfns[ZONE_NORMAL] = end_pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211
David S. Miller919ee672008-04-23 05:40:25 -07002212 free_area_init_nodes(max_zone_pfns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213 }
2214
David S. Miller3c62a2d2008-02-17 23:22:50 -08002215 printk("Booting Linux...\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216}
2217
Greg Kroah-Hartman7c9503b2012-12-21 14:03:26 -08002218int page_in_phys_avail(unsigned long paddr)
David S. Miller919ee672008-04-23 05:40:25 -07002219{
2220 int i;
2221
2222 paddr &= PAGE_MASK;
2223
2224 for (i = 0; i < pavail_ents; i++) {
2225 unsigned long start, end;
2226
2227 start = pavail[i].phys_addr;
2228 end = start + pavail[i].reg_size;
2229
2230 if (paddr >= start && paddr < end)
2231 return 1;
2232 }
2233 if (paddr >= kern_base && paddr < (kern_base + kern_size))
2234 return 1;
2235#ifdef CONFIG_BLK_DEV_INITRD
2236 if (paddr >= __pa(initrd_start) &&
2237 paddr < __pa(PAGE_ALIGN(initrd_end)))
2238 return 1;
2239#endif
2240
2241 return 0;
2242}
2243
Yinghai Lu961f8fa2012-11-16 19:39:21 -08002244static void __init register_page_bootmem_info(void)
2245{
2246#ifdef CONFIG_NEED_MULTIPLE_NODES
2247 int i;
2248
2249 for_each_online_node(i)
2250 if (NODE_DATA(i)->node_spanned_pages)
2251 register_page_bootmem_info_node(NODE_DATA(i));
2252#endif
2253}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254void __init mem_init(void)
2255{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2257
Yinghai Lu961f8fa2012-11-16 19:39:21 -08002258 register_page_bootmem_info();
Jiang Liu0c988532013-07-03 15:03:24 -07002259 free_all_bootmem();
David S. Miller919ee672008-04-23 05:40:25 -07002260
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 /*
2262 * Set up the zero page, mark it reserved, so that page count
2263 * is not manipulated when freeing the page from user ptes.
2264 */
2265 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2266 if (mem_map_zero == NULL) {
 2267		prom_printf("mem_init: Cannot alloc zero page.\n");
2268 prom_halt();
2269 }
Jiang Liu70affe42013-05-07 16:18:08 -07002270 mark_page_reserved(mem_map_zero);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
Jiang Liudceccbe2013-07-03 15:04:14 -07002272 mem_init_print_info(NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273
2274 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2275 cheetah_ecache_flush_init();
2276}
2277
David S. Miller898cf0e2005-09-23 11:59:44 -07002278void free_initmem(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279{
2280 unsigned long addr, initend;
David S. Millerf2b60792008-08-14 01:45:41 -07002281 int do_free = 1;
2282
2283 /* If the physical memory maps were trimmed by kernel command
2284 * line options, don't even try freeing this initmem stuff up.
2285 * The kernel image could have been in the trimmed out region
2286 * and if so the freeing below will free invalid page structs.
2287 */
2288 if (cmdline_memory_size)
2289 do_free = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290
2291 /*
 2292	 * The init section is aligned to 8k in vmlinux.lds. Page-align for >8k page sizes.
2293 */
2294 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2295 initend = (unsigned long)(__init_end) & PAGE_MASK;
2296 for (; addr < initend; addr += PAGE_SIZE) {
2297 unsigned long page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298
2299 page = (addr +
2300 ((unsigned long) __va(kern_base)) -
2301 ((unsigned long) KERNBASE));
Randy Dunlapc9cf5522006-06-27 02:53:52 -07002302 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
Jiang Liu70affe42013-05-07 16:18:08 -07002304 if (do_free)
2305 free_reserved_page(virt_to_page(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 }
2307}
2308
2309#ifdef CONFIG_BLK_DEV_INITRD
2310void free_initrd_mem(unsigned long start, unsigned long end)
2311{
Jiang Liudceccbe2013-07-03 15:04:14 -07002312 free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
2313 "initrd");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314}
2315#endif
David S. Millerc4bce902006-02-11 21:57:54 -08002316
David S. Millerc4bce902006-02-11 21:57:54 -08002317pgprot_t PAGE_KERNEL __read_mostly;
2318EXPORT_SYMBOL(PAGE_KERNEL);
2319
2320pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2321pgprot_t PAGE_COPY __read_mostly;
David S. Miller0f159522006-02-18 12:43:16 -08002322
2323pgprot_t PAGE_SHARED __read_mostly;
2324EXPORT_SYMBOL(PAGE_SHARED);
2325
David S. Millerc4bce902006-02-11 21:57:54 -08002326unsigned long pg_iobits __read_mostly;
2327
2328unsigned long _PAGE_IE __read_mostly;
David S. Miller987c74f2006-06-25 01:34:43 -07002329EXPORT_SYMBOL(_PAGE_IE);
David S. Millerb2bef442006-02-23 01:55:55 -08002330
David S. Millerc4bce902006-02-11 21:57:54 -08002331unsigned long _PAGE_E __read_mostly;
David S. Millerb2bef442006-02-23 01:55:55 -08002332EXPORT_SYMBOL(_PAGE_E);
2333
David S. Millerc4bce902006-02-11 21:57:54 -08002334unsigned long _PAGE_CACHE __read_mostly;
David S. Millerb2bef442006-02-23 01:55:55 -08002335EXPORT_SYMBOL(_PAGE_CACHE);
David S. Millerc4bce902006-02-11 21:57:54 -08002336
David Miller46644c22007-10-16 01:24:16 -07002337#ifdef CONFIG_SPARSEMEM_VMEMMAP
Johannes Weiner0aad8182013-04-29 15:07:50 -07002338int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2339 int node)
David Miller46644c22007-10-16 01:24:16 -07002340{
David Miller46644c22007-10-16 01:24:16 -07002341 unsigned long pte_base;
2342
2343 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2344 _PAGE_CP_4U | _PAGE_CV_4U |
2345 _PAGE_P_4U | _PAGE_W_4U);
2346 if (tlb_type == hypervisor)
2347 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
Khalid Aziz494e5b62015-05-27 10:00:46 -06002348 page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
David Miller46644c22007-10-16 01:24:16 -07002349
David S. Millerc06240c2014-09-24 21:20:14 -07002350 pte_base |= _PAGE_PMD_HUGE;
David Miller46644c22007-10-16 01:24:16 -07002351
David S. Millerc06240c2014-09-24 21:20:14 -07002352 vstart = vstart & PMD_MASK;
2353 vend = ALIGN(vend, PMD_SIZE);
2354 for (; vstart < vend; vstart += PMD_SIZE) {
2355 pgd_t *pgd = pgd_offset_k(vstart);
2356 unsigned long pte;
2357 pud_t *pud;
2358 pmd_t *pmd;
2359
2360 if (pgd_none(*pgd)) {
2361 pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2362
2363 if (!new)
2364 return -ENOMEM;
2365 pgd_populate(&init_mm, pgd, new);
2366 }
2367
2368 pud = pud_offset(pgd, vstart);
2369 if (pud_none(*pud)) {
2370 pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2371
2372 if (!new)
2373 return -ENOMEM;
2374 pud_populate(&init_mm, pud, new);
2375 }
2376
2377 pmd = pmd_offset(pud, vstart);
2378
2379 pte = pmd_val(*pmd);
2380 if (!(pte & _PAGE_VALID)) {
2381 void *block = vmemmap_alloc_block(PMD_SIZE, node);
2382
David Miller46644c22007-10-16 01:24:16 -07002383 if (!block)
2384 return -ENOMEM;
2385
David S. Millerc06240c2014-09-24 21:20:14 -07002386 pmd_val(*pmd) = pte_base | __pa(block);
David Miller46644c22007-10-16 01:24:16 -07002387 }
2388 }
David S. Miller2856cc22012-08-15 00:37:29 -07002389
David S. Millerc06240c2014-09-24 21:20:14 -07002390 return 0;
David S. Miller2856cc22012-08-15 00:37:29 -07002391}
Yasuaki Ishimatsu46723bf2013-02-22 16:33:00 -08002392
Johannes Weiner0aad8182013-04-29 15:07:50 -07002393void vmemmap_free(unsigned long start, unsigned long end)
Tang Chen01975182013-02-22 16:33:08 -08002394{
2395}
David Miller46644c22007-10-16 01:24:16 -07002396#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2397
David S. Millerc4bce902006-02-11 21:57:54 -08002398static void prot_init_common(unsigned long page_none,
2399 unsigned long page_shared,
2400 unsigned long page_copy,
2401 unsigned long page_readonly,
2402 unsigned long page_exec_bit)
2403{
2404 PAGE_COPY = __pgprot(page_copy);
David S. Miller0f159522006-02-18 12:43:16 -08002405 PAGE_SHARED = __pgprot(page_shared);
David S. Millerc4bce902006-02-11 21:57:54 -08002406
2407 protection_map[0x0] = __pgprot(page_none);
2408 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2409 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2410 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2411 protection_map[0x4] = __pgprot(page_readonly);
2412 protection_map[0x5] = __pgprot(page_readonly);
2413 protection_map[0x6] = __pgprot(page_copy);
2414 protection_map[0x7] = __pgprot(page_copy);
2415 protection_map[0x8] = __pgprot(page_none);
2416 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2417 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2418 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2419 protection_map[0xc] = __pgprot(page_readonly);
2420 protection_map[0xd] = __pgprot(page_readonly);
2421 protection_map[0xe] = __pgprot(page_shared);
2422 protection_map[0xf] = __pgprot(page_shared);
2423}
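
/* Hedged note: in the generic mm code the table above is indexed with
 * vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED), which is why the
 * private-writable entries 0x2/0x3 get page_copy (COW) while the
 * shared-writable entries 0xa/0xb get page_shared.  A minimal sketch
 * of that lookup; demo_vm_page_prot() is a hypothetical name.
 */
static pgprot_t demo_vm_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}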
2424
2425static void __init sun4u_pgprot_init(void)
2426{
2427 unsigned long page_none, page_shared, page_copy, page_readonly;
2428 unsigned long page_exec_bit;
David S. Miller4f93d212012-09-06 18:13:58 -07002429 int i;
David S. Millerc4bce902006-02-11 21:57:54 -08002430
2431 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2432 _PAGE_CACHE_4U | _PAGE_P_4U |
2433 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2434 _PAGE_EXEC_4U);
2435 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2436 _PAGE_CACHE_4U | _PAGE_P_4U |
2437 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2438 _PAGE_EXEC_4U | _PAGE_L_4U);
David S. Millerc4bce902006-02-11 21:57:54 -08002439
2440 _PAGE_IE = _PAGE_IE_4U;
2441 _PAGE_E = _PAGE_E_4U;
2442 _PAGE_CACHE = _PAGE_CACHE_4U;
2443
2444 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2445 __ACCESS_BITS_4U | _PAGE_E_4U);
2446
David S. Millerd1acb422007-03-16 17:20:28 -07002447#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller922631b2013-09-18 12:00:00 -07002448 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002449#else
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002450 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
David S. Miller922631b2013-09-18 12:00:00 -07002451 PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002452#endif
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002453 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2454 _PAGE_P_4U | _PAGE_W_4U);
2455
David S. Miller4f93d212012-09-06 18:13:58 -07002456 for (i = 1; i < 4; i++)
2457 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
David S. Millerc4bce902006-02-11 21:57:54 -08002458
David S. Millerc4bce902006-02-11 21:57:54 -08002459 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2460 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2461 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2462
2463
2464 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2465 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2466 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2467 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2468 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2469 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2470 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2471
2472 page_exec_bit = _PAGE_EXEC_4U;
2473
2474 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2475 page_exec_bit);
2476}
2477
2478static void __init sun4v_pgprot_init(void)
2479{
2480 unsigned long page_none, page_shared, page_copy, page_readonly;
2481 unsigned long page_exec_bit;
David S. Miller4f93d212012-09-06 18:13:58 -07002482 int i;
David S. Millerc4bce902006-02-11 21:57:54 -08002483
2484 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
Khalid Aziz494e5b62015-05-27 10:00:46 -06002485 page_cache4v_flag | _PAGE_P_4V |
David S. Millerc4bce902006-02-11 21:57:54 -08002486 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2487 _PAGE_EXEC_4V);
2488 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
David S. Millerc4bce902006-02-11 21:57:54 -08002489
2490 _PAGE_IE = _PAGE_IE_4V;
2491 _PAGE_E = _PAGE_E_4V;
Khalid Aziz494e5b62015-05-27 10:00:46 -06002492 _PAGE_CACHE = page_cache4v_flag;
David S. Millerc4bce902006-02-11 21:57:54 -08002493
David S. Millerd1acb422007-03-16 17:20:28 -07002494#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller922631b2013-09-18 12:00:00 -07002495 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002496#else
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002497 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07002498 PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002499#endif
Khalid Aziz494e5b62015-05-27 10:00:46 -06002500 kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
2501 _PAGE_W_4V);
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002502
David S. Millerc69ad0a2012-09-06 20:35:36 -07002503 for (i = 1; i < 4; i++)
2504 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
David S. Miller4f93d212012-09-06 18:13:58 -07002505
David S. Millerc4bce902006-02-11 21:57:54 -08002506 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2507 __ACCESS_BITS_4V | _PAGE_E_4V);
2508
David S. Millerc4bce902006-02-11 21:57:54 -08002509 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2510 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2511 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2512 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2513
Khalid Aziz494e5b62015-05-27 10:00:46 -06002514 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
2515 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
David S. Millerc4bce902006-02-11 21:57:54 -08002516 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
Khalid Aziz494e5b62015-05-27 10:00:46 -06002517 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
David S. Millerc4bce902006-02-11 21:57:54 -08002518 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
Khalid Aziz494e5b62015-05-27 10:00:46 -06002519 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
David S. Millerc4bce902006-02-11 21:57:54 -08002520 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2521
2522 page_exec_bit = _PAGE_EXEC_4V;
2523
2524 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2525 page_exec_bit);
2526}
2527
2528unsigned long pte_sz_bits(unsigned long sz)
2529{
2530 if (tlb_type == hypervisor) {
2531 switch (sz) {
2532 case 8 * 1024:
2533 default:
2534 return _PAGE_SZ8K_4V;
2535 case 64 * 1024:
2536 return _PAGE_SZ64K_4V;
2537 case 512 * 1024:
2538 return _PAGE_SZ512K_4V;
2539 case 4 * 1024 * 1024:
2540 return _PAGE_SZ4MB_4V;
Joe Perches6cb79b32011-06-03 14:45:23 +00002541 }
David S. Millerc4bce902006-02-11 21:57:54 -08002542 } else {
2543 switch (sz) {
2544 case 8 * 1024:
2545 default:
2546 return _PAGE_SZ8K_4U;
2547 case 64 * 1024:
2548 return _PAGE_SZ64K_4U;
2549 case 512 * 1024:
2550 return _PAGE_SZ512K_4U;
2551 case 4 * 1024 * 1024:
2552 return _PAGE_SZ4MB_4U;
Joe Perches6cb79b32011-06-03 14:45:23 +00002553 }
David S. Millerc4bce902006-02-11 21:57:54 -08002554 }
2555}
2556
2557pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2558{
2559 pte_t pte;
David S. Millercf627152006-02-12 21:10:07 -08002560
2561 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
David S. Millerc4bce902006-02-11 21:57:54 -08002562 pte_val(pte) |= (((unsigned long)space) << 32);
2563 pte_val(pte) |= pte_sz_bits(page_size);
David S. Millercf627152006-02-12 21:10:07 -08002564
David S. Millerc4bce902006-02-11 21:57:54 -08002565 return pte;
2566}
2567
David S. Millerc4bce902006-02-11 21:57:54 -08002568static unsigned long kern_large_tte(unsigned long paddr)
2569{
2570 unsigned long val;
2571
2572 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2573 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2574 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2575 if (tlb_type == hypervisor)
2576 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
Khalid Aziz494e5b62015-05-27 10:00:46 -06002577 page_cache4v_flag | _PAGE_P_4V |
David S. Millerc4bce902006-02-11 21:57:54 -08002578 _PAGE_EXEC_4V | _PAGE_W_4V);
2579
2580 return val | paddr;
2581}
2582
David S. Millerc4bce902006-02-11 21:57:54 -08002583/* If not locked, zap it. */
2584void __flush_tlb_all(void)
2585{
2586 unsigned long pstate;
2587 int i;
2588
2589 __asm__ __volatile__("flushw\n\t"
2590 "rdpr %%pstate, %0\n\t"
2591 "wrpr %0, %1, %%pstate"
2592 : "=r" (pstate)
2593 : "i" (PSTATE_IE));
David S. Miller8f3614532007-12-13 06:13:38 -08002594 if (tlb_type == hypervisor) {
2595 sun4v_mmu_demap_all();
2596 } else if (tlb_type == spitfire) {
David S. Millerc4bce902006-02-11 21:57:54 -08002597 for (i = 0; i < 64; i++) {
2598 /* Spitfire Errata #32 workaround */
2599 /* NOTE: Always runs on spitfire, so no
2600 * cheetah+ page size encodings.
2601 */
2602 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2603 "flush %%g6"
2604 : /* No outputs */
2605 : "r" (0),
2606 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2607
2608 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2609 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2610 "membar #Sync"
2611 : /* no outputs */
2612 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2613 spitfire_put_dtlb_data(i, 0x0UL);
2614 }
2615
2616 /* Spitfire Errata #32 workaround */
2617 /* NOTE: Always runs on spitfire, so no
2618 * cheetah+ page size encodings.
2619 */
2620 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2621 "flush %%g6"
2622 : /* No outputs */
2623 : "r" (0),
2624 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2625
2626 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2627 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2628 "membar #Sync"
2629 : /* no outputs */
2630 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2631 spitfire_put_itlb_data(i, 0x0UL);
2632 }
2633 }
2634 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2635 cheetah_flush_dtlb_all();
2636 cheetah_flush_itlb_all();
2637 }
2638 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2639 : : "r" (pstate));
2640}
David Millerc460bec2012-10-08 16:34:22 -07002641
David Millerc460bec2012-10-08 16:34:22 -07002642pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2643 unsigned long address)
2644{
David S. Miller37b3a8f2013-09-25 13:48:49 -07002645 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2646 __GFP_REPEAT | __GFP_ZERO);
2647 pte_t *pte = NULL;
David Millerc460bec2012-10-08 16:34:22 -07002648
David Millerc460bec2012-10-08 16:34:22 -07002649 if (page)
2650 pte = (pte_t *) page_address(page);
2651
2652 return pte;
2653}
2654
2655pgtable_t pte_alloc_one(struct mm_struct *mm,
2656 unsigned long address)
2657{
David S. Miller37b3a8f2013-09-25 13:48:49 -07002658 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2659 __GFP_REPEAT | __GFP_ZERO);
Kirill A. Shutemov1ae9ae52013-11-14 14:31:42 -08002660 if (!page)
2661 return NULL;
2662 if (!pgtable_page_ctor(page)) {
2663 free_hot_cold_page(page, 0);
2664 return NULL;
David Millerc460bec2012-10-08 16:34:22 -07002665 }
Kirill A. Shutemov1ae9ae52013-11-14 14:31:42 -08002666 return (pte_t *) page_address(page);
David Millerc460bec2012-10-08 16:34:22 -07002667}
2668
2669void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2670{
David S. Miller37b3a8f2013-09-25 13:48:49 -07002671 free_page((unsigned long)pte);
David Millerc460bec2012-10-08 16:34:22 -07002672}
2673
2674static void __pte_free(pgtable_t pte)
2675{
2676 struct page *page = virt_to_page(pte);
David S. Miller37b3a8f2013-09-25 13:48:49 -07002677
2678 pgtable_page_dtor(page);
2679 __free_page(page);
David Millerc460bec2012-10-08 16:34:22 -07002680}
2681
2682void pte_free(struct mm_struct *mm, pgtable_t pte)
2683{
2684 __pte_free(pte);
2685}
2686
2687void pgtable_free(void *table, bool is_page)
2688{
2689 if (is_page)
2690 __pte_free(table);
2691 else
2692 kmem_cache_free(pgtable_cache, table);
2693}
David Miller9e695d22012-10-08 16:34:29 -07002694
2695#ifdef CONFIG_TRANSPARENT_HUGEPAGE
David Miller9e695d22012-10-08 16:34:29 -07002696void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2697 pmd_t *pmd)
2698{
2699 unsigned long pte, flags;
2700 struct mm_struct *mm;
2701 pmd_t entry = *pmd;
David Miller9e695d22012-10-08 16:34:29 -07002702
2703 if (!pmd_large(entry) || !pmd_young(entry))
2704 return;
2705
David S. Millera7b94032013-09-26 13:45:15 -07002706 pte = pmd_val(entry);
David Miller9e695d22012-10-08 16:34:29 -07002707
David S. Miller18f38132014-08-04 16:34:01 -07002708 /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
2709 if (!(pte & _PAGE_VALID))
2710 return;
2711
David S. Miller37b3a8f2013-09-25 13:48:49 -07002712 /* We are fabricating 8MB pages using 4MB real hw pages. */
2713 pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
David Miller9e695d22012-10-08 16:34:29 -07002714
2715 mm = vma->vm_mm;
2716
2717 spin_lock_irqsave(&mm->context.lock, flags);
2718
2719 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
David S. Miller37b3a8f2013-09-25 13:48:49 -07002720 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
David Miller9e695d22012-10-08 16:34:29 -07002721 addr, pte);
2722
2723 spin_unlock_irqrestore(&mm->context.lock, flags);
2724}
2725#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2726
2727#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2728static void context_reload(void *__data)
2729{
2730 struct mm_struct *mm = __data;
2731
2732 if (mm == current->mm)
2733 load_secondary_context(mm);
2734}
2735
David S. Miller0fbebed2013-02-19 22:34:10 -08002736void hugetlb_setup(struct pt_regs *regs)
David Miller9e695d22012-10-08 16:34:29 -07002737{
David S. Miller0fbebed2013-02-19 22:34:10 -08002738 struct mm_struct *mm = current->mm;
2739 struct tsb_config *tp;
David Miller9e695d22012-10-08 16:34:29 -07002740
David S. Miller0fbebed2013-02-19 22:34:10 -08002741 if (in_atomic() || !mm) {
2742 const struct exception_table_entry *entry;
David Miller9e695d22012-10-08 16:34:29 -07002743
David S. Miller0fbebed2013-02-19 22:34:10 -08002744 entry = search_exception_tables(regs->tpc);
2745 if (entry) {
2746 regs->tpc = entry->fixup;
2747 regs->tnpc = regs->tpc + 4;
2748 return;
2749 }
2750 pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2751 die_if_kernel("HugeTSB in atomic", regs);
2752 }
2753
2754 tp = &mm->context.tsb_block[MM_TSB_HUGE];
2755 if (likely(tp->tsb == NULL))
2756 tsb_grow(mm, MM_TSB_HUGE, 0);
2757
David Miller9e695d22012-10-08 16:34:29 -07002758 tsb_context_switch(mm);
2759 smp_tsb_sync(mm);
2760
2761 /* On UltraSPARC-III+ and later, configure the second half of
2762 * the Data-TLB for huge pages.
2763 */
2764 if (tlb_type == cheetah_plus) {
2765 unsigned long ctx;
2766
2767 spin_lock(&ctx_alloc_lock);
2768 ctx = mm->context.sparc64_ctx_val;
2769 ctx &= ~CTX_PGSZ_MASK;
2770 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
2771 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
2772
2773 if (ctx != mm->context.sparc64_ctx_val) {
2774 /* When changing the page size fields, we
2775 * must perform a context flush so that no
2776 * stale entries match. This flush must
2777 * occur with the original context register
2778 * settings.
2779 */
2780 do_flush_tlb_mm(mm);
2781
2782 /* Reload the context register of all processors
2783 * also executing in this address space.
2784 */
2785 mm->context.sparc64_ctx_val = ctx;
2786 on_each_cpu(context_reload, mm, 0);
2787 }
2788 spin_unlock(&ctx_alloc_lock);
2789 }
2790}
2791#endif
bob piccof6d4fb52014-03-03 11:54:42 -05002792
2793static struct resource code_resource = {
2794 .name = "Kernel code",
2795 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
2796};
2797
2798static struct resource data_resource = {
2799 .name = "Kernel data",
2800 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
2801};
2802
2803static struct resource bss_resource = {
2804 .name = "Kernel bss",
2805 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
2806};
2807
2808static inline resource_size_t compute_kern_paddr(void *addr)
2809{
2810 return (resource_size_t) (addr - KERNBASE + kern_base);
2811}
2812
2813static void __init kernel_lds_init(void)
2814{
2815 code_resource.start = compute_kern_paddr(_text);
2816 code_resource.end = compute_kern_paddr(_etext - 1);
2817 data_resource.start = compute_kern_paddr(_etext);
2818 data_resource.end = compute_kern_paddr(_edata - 1);
2819 bss_resource.start = compute_kern_paddr(__bss_start);
2820 bss_resource.end = compute_kern_paddr(_end - 1);
2821}
2822
2823static int __init report_memory(void)
2824{
2825 int i;
2826 struct resource *res;
2827
2828 kernel_lds_init();
2829
2830 for (i = 0; i < pavail_ents; i++) {
2831 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
2832
2833 if (!res) {
 2834			pr_warn("Failed to allocate resource.\n");
2835 break;
2836 }
2837
2838 res->name = "System RAM";
2839 res->start = pavail[i].phys_addr;
2840 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
2841 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
2842
2843 if (insert_resource(&iomem_resource, res) < 0) {
2844 pr_warn("Resource insertion failed.\n");
2845 break;
2846 }
2847
2848 insert_resource(res, &code_resource);
2849 insert_resource(res, &data_resource);
2850 insert_resource(res, &bss_resource);
2851 }
2852
2853 return 0;
2854}
David S. Miller3c081582015-03-18 19:15:28 -07002855arch_initcall(report_memory);
David S. Millere9011d02014-08-05 18:57:18 -07002856
David S. Miller4ca9a232014-08-04 20:07:37 -07002857#ifdef CONFIG_SMP
2858#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
2859#else
2860#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
2861#endif
2862
2863void flush_tlb_kernel_range(unsigned long start, unsigned long end)
2864{
2865 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
2866 if (start < LOW_OBP_ADDRESS) {
2867 flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
2868 do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
2869 }
2870 if (end > HI_OBP_ADDRESS) {
David S. Miller473ad7f2014-10-04 21:05:14 -07002871 flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
2872 do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
David S. Miller4ca9a232014-08-04 20:07:37 -07002873 }
2874 } else {
2875 flush_tsb_kernel_range(start, end);
2876 do_flush_tlb_kernel_range(start, end);
2877 }
2878}
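
/* Illustrative sketch, not part of the kernel build: how the function
 * above splits a flush around the OBP window.  A range overlapping
 * [LOW_OBP_ADDRESS, HI_OBP_ADDRESS) is flushed as up to two pieces
 * that skip the window; any other range is flushed whole.
 * demo_obp_pieces() is a hypothetical name.
 */
static int demo_obp_pieces(unsigned long start, unsigned long end)
{
	int pieces = 0;

	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
		if (start < LOW_OBP_ADDRESS)
			pieces++;	/* [start, LOW_OBP_ADDRESS) */
		if (end > HI_OBP_ADDRESS)
			pieces++;	/* [HI_OBP_ADDRESS, end) */
	} else {
		pieces = 1;		/* [start, end) misses the window */
	}
	return pieces;
}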