/*
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/ioport.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[4] __read_mostly;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *	0	==>	4MB
 *	1	==>	256MB
 *	2	==>	2GB
 *	3	==>	16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];

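/* As a concrete illustration of the encoding described above (a
 * sketch, not part of the original file): reading a two-bit slot
 * back for a given physical address mirrors what kpte_set_val()
 * stores later in this file.
 */
#if 0	/* illustrative only */
static unsigned long kpte_get_val(unsigned long paddr)
{
	unsigned long index = paddr >> 28;	/* one slot per 256MB */
	unsigned long *ptr = kpte_linear_bitmap +
			     (index / (BITS_PER_LONG / 2));

	/* Two bits per slot, 32 slots packed into each 64-bit word. */
	return (*ptr >> ((index % (BITS_PER_LONG / 2)) * 2)) & 3UL;
}
#endif
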
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
					sizeof(unsigned long)];
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

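/* In plain C, the casx loop above amounts to the following sketch
 * (illustrative only, not part of the original file; it uses the
 * generic cmpxchg() instead of the raw casx instruction):
 */
#if 0	/* illustrative only */
static inline void set_dcache_dirty_c(struct page *page, int this_cpu)
{
	unsigned long old, new, mask, non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = ((unsigned long) this_cpu << PG_dcache_cpu_shift) |
		(1UL << PG_dcache_dirty);

	/* Atomically replace the cpu field and set the dirty bit. */
	do {
		old = page->flags;
		new = (old & non_cpu_bits) | mask;
	} while (cmpxchg(&page->flags, old, new) != old);
}
#endif
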
static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

/* mm->context.lock must be held */
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
				    unsigned long tsb_hash_shift, unsigned long address,
				    unsigned long tte)
{
	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
	unsigned long tag;

	if (unlikely(!tsb))
		return;

	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, tte);
}

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline bool is_hugetlb_pte(pte_t pte)
{
	if ((tlb_type == hypervisor &&
	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
	    (tlb_type != hypervisor &&
	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
		return true;
	return false;
}
#endif

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	unsigned long flags;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
	if (!pte_accessible(mm, pte))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					address, pte_val(pte));
	else
#endif
		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
					address, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	static const char *pgsz_strings[] = {
		"8K", "64K", "512K", "4MB", "32MB",
		"256MB", "2GB", "16GB",
	};
	int i, printed;

	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

	seq_printf(m, "MMU PGSZs\t: ");
	printed = 0;
	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
		if (cpu_pgsz_mask & (1UL << i)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", pgsz_strings[i]);
			printed++;
		}
	}
	seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}


static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs(get_fs());

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	int new_version;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

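/* A context value thus packs a generation ("version") in the high
 * bits and a hardware context number in the low CTX_NR_BITS.  A
 * sketch of the decomposition (illustrative only, not part of the
 * original file):
 */
#if 0	/* illustrative only */
static void decompose_ctx(unsigned long ctx_val)
{
	unsigned long nr      = ctx_val & CTX_NR_MASK;      /* HW context */
	unsigned long version = ctx_val & CTX_VERSION_MASK; /* generation */

	/* CTX_VALID() in mmu_context.h compares the version field
	 * against tlb_context_cache's version to detect stale contexts.
	 */
	pr_info("ctx %lx: nr=%lx version=%lx\n", ctx_val, nr, version);
}
#endif
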
static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

#ifdef CONFIG_NEED_MULTIPLE_NODES

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	/* The following condition has been observed on LDOM guests. */
	WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
		  " rule. Some physical memory will be owned by node 0.");
	return 0;
}
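
/* A worked example of the mask/val matching above (illustrative
 * only, with made-up values; numa_parse_jbus() later in this file
 * builds masks of exactly this shape):
 */
#if 0	/* illustrative only */
static void find_node_example(void)
{
	/* Suppose bits 36 and up encode the node id: */
	node_masks[0].mask = ~((1UL << 36UL) - 1UL);
	node_masks[0].val  = 0UL << 36UL;		/* node 0 */
	node_masks[1].mask = ~((1UL << 36UL) - 1UL);
	node_masks[1].val  = 1UL << 36UL;		/* node 1 */
	num_node_masks = 2;

	/* 0x1000005000 & mask == (1UL << 36), so this returns 1. */
	BUG_ON(find_node(0x1000005000UL) != 1);
}
#endif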
848
Tejun Heof9b18db2011-07-12 10:46:32 +0200849static u64 memblock_nid_range(u64 start, u64 end, int *nid)
David S. Miller919ee672008-04-23 05:40:25 -0700850{
851 *nid = find_node(start);
852 start += PAGE_SIZE;
853 while (start < end) {
854 int n = find_node(start);
855
856 if (n != *nid)
857 break;
858 start += PAGE_SIZE;
859 }
860
David S. Millerc918dcc2008-08-14 01:41:39 -0700861 if (start > end)
862 start = end;
863
David S. Miller919ee672008-04-23 05:40:25 -0700864 return start;
865}
David S. Miller919ee672008-04-23 05:40:25 -0700866#endif
867
868/* This must be invoked after performing all of the necessary
Tejun Heo2a4814d2011-12-08 10:22:08 -0800869 * memblock_set_node() calls for 'nid'. We need to be able to get
David S. Miller919ee672008-04-23 05:40:25 -0700870 * correct data from get_pfn_range_for_nid().
871 */
872static void __init allocate_node_data(int nid)
873{
David S. Miller919ee672008-04-23 05:40:25 -0700874 struct pglist_data *p;
Paul Gortmakeraa6f0792012-05-09 20:44:29 -0400875 unsigned long start_pfn, end_pfn;
David S. Miller919ee672008-04-23 05:40:25 -0700876#ifdef CONFIG_NEED_MULTIPLE_NODES
Paul Gortmakeraa6f0792012-05-09 20:44:29 -0400877 unsigned long paddr;
878
Benjamin Herrenschmidt9d1e2492010-07-06 15:39:17 -0700879 paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
David S. Miller919ee672008-04-23 05:40:25 -0700880 if (!paddr) {
881 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
882 prom_halt();
883 }
884 NODE_DATA(nid) = __va(paddr);
885 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
886
David S. Miller625d6932012-04-25 13:13:43 -0700887 NODE_DATA(nid)->node_id = nid;
David S. Miller919ee672008-04-23 05:40:25 -0700888#endif
889
890 p = NODE_DATA(nid);
891
892 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
893 p->node_start_pfn = start_pfn;
894 p->node_spanned_pages = end_pfn - start_pfn;
David S. Miller919ee672008-04-23 05:40:25 -0700895}
896
897static void init_node_masks_nonnuma(void)
898{
Sam Ravnborg48d37212014-05-16 23:26:12 +0200899#ifdef CONFIG_NEED_MULTIPLE_NODES
David S. Miller919ee672008-04-23 05:40:25 -0700900 int i;
Sam Ravnborg48d37212014-05-16 23:26:12 +0200901#endif
David S. Miller919ee672008-04-23 05:40:25 -0700902
903 numadbg("Initializing tables for non-numa.\n");
904
905 node_masks[0].mask = node_masks[0].val = 0;
906 num_node_masks = 1;
907
Sam Ravnborg48d37212014-05-16 23:26:12 +0200908#ifdef CONFIG_NEED_MULTIPLE_NODES
David S. Miller919ee672008-04-23 05:40:25 -0700909 for (i = 0; i < NR_CPUS; i++)
910 numa_cpu_lookup_table[i] = 0;
911
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -0700912 cpumask_setall(&numa_cpumask_lookup_table[0]);
Sam Ravnborg48d37212014-05-16 23:26:12 +0200913#endif
David S. Miller919ee672008-04-23 05:40:25 -0700914}
915
916#ifdef CONFIG_NEED_MULTIPLE_NODES
917struct pglist_data *node_data[MAX_NUMNODES];
918
919EXPORT_SYMBOL(numa_cpu_lookup_table);
920EXPORT_SYMBOL(numa_cpumask_lookup_table);
921EXPORT_SYMBOL(node_data);
922
923struct mdesc_mlgroup {
924 u64 node;
925 u64 latency;
926 u64 match;
927 u64 mask;
928};
929static struct mdesc_mlgroup *mlgroups;
930static int num_mlgroups;
931
932static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
933 u32 cfg_handle)
934{
935 u64 arc;
936
937 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
938 u64 target = mdesc_arc_target(md, arc);
939 const u64 *val;
940
941 val = mdesc_get_property(md, target,
942 "cfg-handle", NULL);
943 if (val && *val == cfg_handle)
944 return 0;
945 }
946 return -ENODEV;
947}
948
949static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
950 u32 cfg_handle)
951{
952 u64 arc, candidate, best_latency = ~(u64)0;
953
954 candidate = MDESC_NODE_NULL;
955 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
956 u64 target = mdesc_arc_target(md, arc);
957 const char *name = mdesc_node_name(md, target);
958 const u64 *val;
959
960 if (strcmp(name, "pio-latency-group"))
961 continue;
962
963 val = mdesc_get_property(md, target, "latency", NULL);
964 if (!val)
965 continue;
966
967 if (*val < best_latency) {
968 candidate = target;
969 best_latency = *val;
970 }
971 }
972
973 if (candidate == MDESC_NODE_NULL)
974 return -ENODEV;
975
976 return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
977}
978
979int of_node_to_nid(struct device_node *dp)
980{
981 const struct linux_prom64_registers *regs;
982 struct mdesc_handle *md;
983 u32 cfg_handle;
984 int count, nid;
985 u64 grp;
986
David S. Miller072bd412008-08-18 20:36:17 -0700987 /* This is the right thing to do on currently supported
988 * SUN4U NUMA platforms as well, as the PCI controller does
989 * not sit behind any particular memory controller.
990 */
David S. Miller919ee672008-04-23 05:40:25 -0700991 if (!mlgroups)
992 return -1;
993
994 regs = of_get_property(dp, "reg", NULL);
995 if (!regs)
996 return -1;
997
998 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
999
1000 md = mdesc_grab();
1001
1002 count = 0;
1003 nid = -1;
1004 mdesc_for_each_node_by_name(md, grp, "group") {
1005 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
1006 nid = count;
1007 break;
1008 }
1009 count++;
1010 }
1011
1012 mdesc_release(md);
1013
1014 return nid;
1015}
1016
David S. Miller01c453812009-04-07 01:05:22 -07001017static void __init add_node_ranges(void)
David S. Miller919ee672008-04-23 05:40:25 -07001018{
Benjamin Herrenschmidt08b84792010-08-04 13:43:31 +10001019 struct memblock_region *reg;
David S. Miller919ee672008-04-23 05:40:25 -07001020
Benjamin Herrenschmidt08b84792010-08-04 13:43:31 +10001021 for_each_memblock(memory, reg) {
1022 unsigned long size = reg->size;
David S. Miller919ee672008-04-23 05:40:25 -07001023 unsigned long start, end;
1024
Benjamin Herrenschmidt08b84792010-08-04 13:43:31 +10001025 start = reg->base;
David S. Miller919ee672008-04-23 05:40:25 -07001026 end = start + size;
1027 while (start < end) {
1028 unsigned long this_end;
1029 int nid;
1030
Benjamin Herrenschmidt35a1f0b2010-07-06 15:38:58 -07001031 this_end = memblock_nid_range(start, end, &nid);
David S. Miller919ee672008-04-23 05:40:25 -07001032
Tejun Heo2a4814d2011-12-08 10:22:08 -08001033 numadbg("Setting memblock NUMA node nid[%d] "
David S. Miller919ee672008-04-23 05:40:25 -07001034 "start[%lx] end[%lx]\n",
1035 nid, start, this_end);
1036
Tang Chene7e8de52014-01-21 15:49:26 -08001037 memblock_set_node(start, this_end - start,
1038 &memblock.memory, nid);
David S. Miller919ee672008-04-23 05:40:25 -07001039 start = this_end;
1040 }
1041 }
1042}
1043
1044static int __init grab_mlgroups(struct mdesc_handle *md)
1045{
1046 unsigned long paddr;
1047 int count = 0;
1048 u64 node;
1049
1050 mdesc_for_each_node_by_name(md, node, "memory-latency-group")
1051 count++;
1052 if (!count)
1053 return -ENOENT;
1054
Yinghai Lu95f72d12010-07-12 14:36:09 +10001055 paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
David S. Miller919ee672008-04-23 05:40:25 -07001056 SMP_CACHE_BYTES);
1057 if (!paddr)
1058 return -ENOMEM;
1059
1060 mlgroups = __va(paddr);
1061 num_mlgroups = count;
1062
1063 count = 0;
1064 mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
1065 struct mdesc_mlgroup *m = &mlgroups[count++];
1066 const u64 *val;
1067
1068 m->node = node;
1069
1070 val = mdesc_get_property(md, node, "latency", NULL);
1071 m->latency = *val;
1072 val = mdesc_get_property(md, node, "address-match", NULL);
1073 m->match = *val;
1074 val = mdesc_get_property(md, node, "address-mask", NULL);
1075 m->mask = *val;
1076
Sam Ravnborg90181132009-01-06 13:19:28 -08001077 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1078 "match[%llx] mask[%llx]\n",
David S. Miller919ee672008-04-23 05:40:25 -07001079 count - 1, m->node, m->latency, m->match, m->mask);
1080 }
1081
1082 return 0;
1083}
1084
1085static int __init grab_mblocks(struct mdesc_handle *md)
1086{
1087 unsigned long paddr;
1088 int count = 0;
1089 u64 node;
1090
1091 mdesc_for_each_node_by_name(md, node, "mblock")
1092 count++;
1093 if (!count)
1094 return -ENOENT;
1095
Yinghai Lu95f72d12010-07-12 14:36:09 +10001096 paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
David S. Miller919ee672008-04-23 05:40:25 -07001097 SMP_CACHE_BYTES);
1098 if (!paddr)
1099 return -ENOMEM;
1100
1101 mblocks = __va(paddr);
1102 num_mblocks = count;
1103
1104 count = 0;
1105 mdesc_for_each_node_by_name(md, node, "mblock") {
1106 struct mdesc_mblock *m = &mblocks[count++];
1107 const u64 *val;
1108
1109 val = mdesc_get_property(md, node, "base", NULL);
1110 m->base = *val;
1111 val = mdesc_get_property(md, node, "size", NULL);
1112 m->size = *val;
1113 val = mdesc_get_property(md, node,
1114 "address-congruence-offset", NULL);

		/* The address-congruence-offset property is optional.
		 * Explicitly zero it to identify this case.
		 */
		if (val)
			m->offset = *val;
		else
			m->offset = 0UL;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
	allocate_node_data(0);
	node_set_online(0);
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* Dump memblock with node info. */
	memblock_dump_all();

	/* XXX cpu notifier XXX */

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd)) {
			pud_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pgd_populate(&init_mm, pgd, new);
		}
		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

static void __init kpte_set_val(unsigned long index, unsigned long val)
{
	unsigned long *ptr = kpte_linear_bitmap;

	val <<= ((index % (BITS_PER_LONG / 2)) * 2);
	ptr += (index / (BITS_PER_LONG / 2));

	*ptr |= val;
}

static const unsigned long kpte_shift_min = 28;	/* 256MB */
static const unsigned long kpte_shift_max = 34;	/* 16GB */
static const unsigned long kpte_shift_incr = 3;

static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
					   unsigned long shift)
{
	unsigned long size = (1UL << shift);
	unsigned long mask = (size - 1UL);
	unsigned long remains = end - start;
	unsigned long val;

	if (remains < size || (start & mask))
		return start;

	/* VAL maps:
	 *
	 *	shift 28 --> kern_linear_pte_xor index 1
	 *	shift 31 --> kern_linear_pte_xor index 2
	 *	shift 34 --> kern_linear_pte_xor index 3
	 */
	val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;

	remains &= ~mask;
	if (shift != kpte_shift_max)
		remains = size;

	while (remains) {
		unsigned long index = start >> kpte_shift_min;

		kpte_set_val(index, val);

		start += 1UL << kpte_shift_min;
		remains -= 1UL << kpte_shift_min;
	}

	return start;
}

David S. Miller9cc3a1a2006-02-21 20:51:13 -08001487static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
1488{
David S. Miller4f93d212012-09-06 18:13:58 -07001489 unsigned long smallest_size, smallest_mask;
1490 unsigned long s;
1491
1492 smallest_size = (1UL << kpte_shift_min);
1493 smallest_mask = (smallest_size - 1UL);
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001494
1495 while (start < end) {
David S. Miller4f93d212012-09-06 18:13:58 -07001496 unsigned long orig_start = start;
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001497
David S. Miller4f93d212012-09-06 18:13:58 -07001498 for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
1499 start = kpte_mark_using_shift(start, end, s);
David S. Millerf7c00332006-03-05 22:18:50 -08001500
David S. Miller4f93d212012-09-06 18:13:58 -07001501 if (start != orig_start)
1502 break;
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001503 }
1504
David S. Miller4f93d212012-09-06 18:13:58 -07001505 if (start == orig_start)
1506 start = (start + smallest_size) & ~smallest_mask;
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001507 }
1508}
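
/* Worked example: marking [0x70000000, 0x200000000), i.e. 1.75GB to
 * 8GB.  A 16GB chunk never fits, and 2GB is initially misaligned, so
 * the 256MB chunk at 1.75GB is marked with code 1 first.  Once start
 * reaches 2GB, the three 2GB chunks at 2GB, 4GB and 6GB are marked
 * with code 2 (written into each of their eight 256MB slots).  Every
 * outer iteration thus consumes the largest naturally-aligned piece
 * that still fits.
 */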
David S. Miller56425302005-09-25 16:46:57 -07001509
David S. Miller8f3614532007-12-13 06:13:38 -08001510static void __init init_kpte_bitmap(void)
David S. Miller56425302005-09-25 16:46:57 -07001511{
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001512 unsigned long i;
David S. Miller13edad72005-09-29 17:58:26 -07001513
1514 for (i = 0; i < pall_ents; i++) {
David S. Miller56425302005-09-25 16:46:57 -07001515 unsigned long phys_start, phys_end;
1516
David S. Miller13edad72005-09-29 17:58:26 -07001517 phys_start = pall[i].phys_addr;
1518 phys_end = phys_start + pall[i].reg_size;
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001519
1520 mark_kpte_bitmap(phys_start, phys_end);
David S. Miller8f3614532007-12-13 06:13:38 -08001521 }
1522}
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001523
David S. Miller8f3614532007-12-13 06:13:38 -08001524static void __init kernel_physical_mapping_init(void)
1525{
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001526#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller8f3614532007-12-13 06:13:38 -08001527 unsigned long i, mem_alloced = 0UL;
1528
1529 for (i = 0; i < pall_ents; i++) {
1530 unsigned long phys_start, phys_end;
1531
1532 phys_start = pall[i].phys_addr;
1533 phys_end = phys_start + pall[i].reg_size;
1534
David S. Miller56425302005-09-25 16:46:57 -07001535 mem_alloced += kernel_map_range(phys_start, phys_end,
1536 PAGE_KERNEL);
David S. Miller56425302005-09-25 16:46:57 -07001537 }
1538
1539 printk("Allocated %ld bytes for kernel page tables.\n",
1540 mem_alloced);
1541
1542 kvmap_linear_patch[0] = 0x01000000; /* nop */
1543 flushi(&kvmap_linear_patch[0]);
1544
1545 __flush_tlb_all();
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001546#endif
David S. Miller56425302005-09-25 16:46:57 -07001547}
1548
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001549#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller56425302005-09-25 16:46:57 -07001550void kernel_map_pages(struct page *page, int numpages, int enable)
1551{
1552 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1553 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1554
1555 kernel_map_range(phys_start, phys_end,
1556 (enable ? PAGE_KERNEL : __pgprot(0)));
1557
David S. Miller74bf4312006-01-31 18:29:18 -08001558 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1559 PAGE_OFFSET + phys_end);
1560
David S. Miller56425302005-09-25 16:46:57 -07001561	/* We should perform an IPI and flush all TLBs here,
1562	 * but that can deadlock, so flush only the current cpu.
1563 */
1564 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1565 PAGE_OFFSET + phys_end);
1566}
1567#endif
1568
David S. Miller10147572005-09-28 21:46:43 -07001569unsigned long __init find_ecache_flush_span(unsigned long size)
1570{
David S. Miller13edad72005-09-29 17:58:26 -07001571 int i;
David S. Miller10147572005-09-28 21:46:43 -07001572
David S. Miller13edad72005-09-29 17:58:26 -07001573 for (i = 0; i < pavail_ents; i++) {
1574 if (pavail[i].reg_size >= size)
1575 return pavail[i].phys_addr;
David S. Miller10147572005-09-28 21:46:43 -07001576 }
1577
1578 return ~0UL;
1579}
1580
David S. Millerb2d43832013-09-20 21:50:41 -07001581unsigned long PAGE_OFFSET;
1582EXPORT_SYMBOL(PAGE_OFFSET);
1583
1584static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits)
1585{
1586 unsigned long final_shift;
1587 unsigned int val = *insn;
1588 unsigned int cnt;
1589
1590 /* We are patching in ilog2(max_supported_phys_address), and
1591 * we are doing so in a manner similar to a relocation addend.
1592 * That is, we are adding the shift value to whatever value
1593 * is in the shift instruction count field already.
1594 */
1595 cnt = (val & 0x3f);
1596 val &= ~0x3f;
1597
1598 /* If we are trying to shift >= 64 bits, clear the destination
1599 * register. This can happen when phys_bits ends up being equal
1600 * to MAX_PHYS_ADDRESS_BITS.
1601 */
1602 final_shift = (cnt + (64 - phys_bits));
1603 if (final_shift >= 64) {
1604 unsigned int rd = (val >> 25) & 0x1f;
1605
1606 val = 0x80100000 | (rd << 25);
1607 } else {
1608 val |= final_shift;
1609 }
1610 *insn = val;
1611
1612 __asm__ __volatile__("flush %0"
1613 : /* no outputs */
1614 : "r" (insn));
1615}
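
/* Example: with phys_bits = 43 the addend is 64 - 43 = 21, so a shift
 * slot assembled with a count of 0 is patched to shift by 21.  When
 * cnt + (64 - phys_bits) reaches 64 (phys_bits == MAX_PHYS_ADDRESS_BITS,
 * per the comment above), no SPARC shift instruction can encode it, so
 * the slot becomes 0x80100000 | (rd << 25), i.e. "or %g0, %g0, RD",
 * which simply clears RD.
 */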
1616
1617static void __init page_offset_shift_patch(unsigned long phys_bits)
1618{
1619 extern unsigned int __page_offset_shift_patch;
1620 extern unsigned int __page_offset_shift_patch_end;
1621 unsigned int *p;
1622
1623 p = &__page_offset_shift_patch;
1624 while (p < &__page_offset_shift_patch_end) {
1625 unsigned int *insn = (unsigned int *)(unsigned long)*p;
1626
1627 page_offset_shift_patch_one(insn, phys_bits);
1628
1629 p++;
1630 }
1631}
1632
David S. Miller4397bed2014-09-26 21:58:33 -07001633unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
1634unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1635
David S. Millerb2d43832013-09-20 21:50:41 -07001636static void __init setup_page_offset(void)
1637{
1638 unsigned long max_phys_bits = 40;
1639
1640 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
David S. Miller4397bed2014-09-26 21:58:33 -07001641 /* Cheetah/Panther support a full 64-bit virtual
1642 * address, so we can use all that our page tables
1643 * support.
1644 */
1645 sparc64_va_hole_top = 0xfff0000000000000UL;
1646 sparc64_va_hole_bottom = 0x0010000000000000UL;
1647
David S. Millerb2d43832013-09-20 21:50:41 -07001648 max_phys_bits = 42;
1649 } else if (tlb_type == hypervisor) {
1650 switch (sun4v_chip_type) {
1651 case SUN4V_CHIP_NIAGARA1:
1652 case SUN4V_CHIP_NIAGARA2:
David S. Miller4397bed2014-09-26 21:58:33 -07001653 /* T1 and T2 support 48-bit virtual addresses. */
1654 sparc64_va_hole_top = 0xffff800000000000UL;
1655 sparc64_va_hole_bottom = 0x0000800000000000UL;
1656
David S. Millerb2d43832013-09-20 21:50:41 -07001657 max_phys_bits = 39;
1658 break;
1659 case SUN4V_CHIP_NIAGARA3:
David S. Miller4397bed2014-09-26 21:58:33 -07001660 /* T3 supports 48-bit virtual addresses. */
1661 sparc64_va_hole_top = 0xffff800000000000UL;
1662 sparc64_va_hole_bottom = 0x0000800000000000UL;
1663
David S. Millerb2d43832013-09-20 21:50:41 -07001664 max_phys_bits = 43;
1665 break;
1666 case SUN4V_CHIP_NIAGARA4:
1667 case SUN4V_CHIP_NIAGARA5:
1668 case SUN4V_CHIP_SPARC64X:
1669 default:
David S. Miller4397bed2014-09-26 21:58:33 -07001670 /* T4 and later support 52-bit virtual addresses. */
1671 sparc64_va_hole_top = 0xfff8000000000000UL;
1672 sparc64_va_hole_bottom = 0x0008000000000000UL;
David S. Millerb2d43832013-09-20 21:50:41 -07001673 max_phys_bits = 47;
1674 break;
1675 }
1676 }
1677
1678 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
1679 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
1680 max_phys_bits);
1681 prom_halt();
1682 }
1683
1684 PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits);
1685
1686 pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
1687 PAGE_OFFSET, max_phys_bits);
1688
1689 page_offset_shift_patch(max_phys_bits);
1690}
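
/* Example, assuming PAGE_OFFSET_BY_BITS(X) expands to -(1UL << X) as
 * in this kernel's page_64.h: a T4 with max_phys_bits = 47 gets
 * PAGE_OFFSET = 0xffff800000000000UL, i.e. the linear mapping claims
 * the top 2^47 bytes of the virtual address space.
 */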
1691
David S. Miller517af332006-02-01 15:55:21 -08001692static void __init tsb_phys_patch(void)
1693{
David S. Millerd257d5d2006-02-06 23:44:37 -08001694 struct tsb_ldquad_phys_patch_entry *pquad;
David S. Miller517af332006-02-01 15:55:21 -08001695 struct tsb_phys_patch_entry *p;
1696
David S. Millerd257d5d2006-02-06 23:44:37 -08001697 pquad = &__tsb_ldquad_phys_patch;
1698 while (pquad < &__tsb_ldquad_phys_patch_end) {
1699 unsigned long addr = pquad->addr;
1700
1701 if (tlb_type == hypervisor)
1702 *(unsigned int *) addr = pquad->sun4v_insn;
1703 else
1704 *(unsigned int *) addr = pquad->sun4u_insn;
1705 wmb();
1706 __asm__ __volatile__("flush %0"
1707 : /* no outputs */
1708 : "r" (addr));
1709
1710 pquad++;
1711 }
1712
David S. Miller517af332006-02-01 15:55:21 -08001713 p = &__tsb_phys_patch;
1714 while (p < &__tsb_phys_patch_end) {
1715 unsigned long addr = p->addr;
1716
1717 *(unsigned int *) addr = p->insn;
1718 wmb();
1719 __asm__ __volatile__("flush %0"
1720 : /* no outputs */
1721 : "r" (addr));
1722
1723 p++;
1724 }
1725}
1726
David S. Miller490384e2006-02-11 14:41:18 -08001727/* Don't mark as init, we give this to the Hypervisor. */
David S. Millerd1acb422007-03-16 17:20:28 -07001728#ifndef CONFIG_DEBUG_PAGEALLOC
1729#define NUM_KTSB_DESCR 2
1730#else
1731#define NUM_KTSB_DESCR 1
1732#endif
1733static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
David S. Miller490384e2006-02-11 14:41:18 -08001734extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
1735
David S. Miller8c82dc02014-09-17 10:14:56 -07001736/* The swapper TSBs are loaded with a base sequence of:
1737 *
1738 * sethi %uhi(SYMBOL), REG1
1739 * sethi %hi(SYMBOL), REG2
1740 * or REG1, %ulo(SYMBOL), REG1
1741 * or REG2, %lo(SYMBOL), REG2
1742 * sllx REG1, 32, REG1
1743 * or REG1, REG2, REG1
1744 *
1745 * When we use physical addressing for the TSB accesses, we patch the
1746 * first four instructions in the above sequence.
1747 */
1748
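/* Worked example: for ktsb_pa = 0x0000004600800000, high_bits = 0x46
 * and low_bits = 0x00800000, so patch_one_ktsb_phys() below writes
 * 0x46 >> 10 = 0 into the first sethi, 0x00800000 >> 10 = 0x2000 into
 * the second sethi, 0x46 & 0x3ff = 0x46 into the first or, and
 * 0x00800000 & 0x3ff = 0 into the second or.
 */
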
David S. Miller9076d0e2011-08-05 00:53:57 -07001749static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
1750{
David S. Miller8c82dc02014-09-17 10:14:56 -07001751 unsigned long high_bits, low_bits;
1752
1753 high_bits = (pa >> 32) & 0xffffffff;
1754 low_bits = (pa >> 0) & 0xffffffff;
David S. Miller9076d0e2011-08-05 00:53:57 -07001755
1756 while (start < end) {
1757 unsigned int *ia = (unsigned int *)(unsigned long)*start;
1758
David S. Miller8c82dc02014-09-17 10:14:56 -07001759 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
David S. Miller9076d0e2011-08-05 00:53:57 -07001760 __asm__ __volatile__("flush %0" : : "r" (ia));
1761
David S. Miller8c82dc02014-09-17 10:14:56 -07001762 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
David S. Miller9076d0e2011-08-05 00:53:57 -07001763 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
1764
David S. Miller8c82dc02014-09-17 10:14:56 -07001765 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
1766 __asm__ __volatile__("flush %0" : : "r" (ia + 2));
1767
1768 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
1769 __asm__ __volatile__("flush %0" : : "r" (ia + 3));
1770
David S. Miller9076d0e2011-08-05 00:53:57 -07001771 start++;
1772 }
1773}
1774
1775static void ktsb_phys_patch(void)
1776{
1777 extern unsigned int __swapper_tsb_phys_patch;
1778 extern unsigned int __swapper_tsb_phys_patch_end;
David S. Miller9076d0e2011-08-05 00:53:57 -07001779 unsigned long ktsb_pa;
1780
1781 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1782 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
1783 &__swapper_tsb_phys_patch_end, ktsb_pa);
1784#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Miller0785a8e2011-08-06 05:26:35 -07001785 {
1786 extern unsigned int __swapper_4m_tsb_phys_patch;
1787 extern unsigned int __swapper_4m_tsb_phys_patch_end;
David S. Miller9076d0e2011-08-05 00:53:57 -07001788 ktsb_pa = (kern_base +
1789 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1790 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
1791 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
David S. Miller0785a8e2011-08-06 05:26:35 -07001792 }
David S. Miller9076d0e2011-08-05 00:53:57 -07001793#endif
1794}
1795
David S. Miller490384e2006-02-11 14:41:18 -08001796static void __init sun4v_ktsb_init(void)
1797{
1798 unsigned long ktsb_pa;
1799
David S. Millerd7744a02006-02-21 22:31:11 -08001800 /* First KTSB for PAGE_SIZE mappings. */
David S. Miller490384e2006-02-11 14:41:18 -08001801 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1802
1803 switch (PAGE_SIZE) {
1804 case 8 * 1024:
1805 default:
1806 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1807 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1808 break;
1809
1810 case 64 * 1024:
1811 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1812 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1813 break;
1814
1815 case 512 * 1024:
1816 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1817 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
1818 break;
1819
1820 case 4 * 1024 * 1024:
1821 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1822 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1823 break;
Joe Perches6cb79b32011-06-03 14:45:23 +00001824 }
David S. Miller490384e2006-02-11 14:41:18 -08001825
David S. Miller3f19a842006-02-17 12:03:20 -08001826 ktsb_descr[0].assoc = 1;
David S. Miller490384e2006-02-11 14:41:18 -08001827 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
1828 ktsb_descr[0].ctx_idx = 0;
1829 ktsb_descr[0].tsb_base = ktsb_pa;
1830 ktsb_descr[0].resv = 0;
1831
David S. Millerd1acb422007-03-16 17:20:28 -07001832#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Miller4f93d212012-09-06 18:13:58 -07001833 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
David S. Millerd7744a02006-02-21 22:31:11 -08001834 ktsb_pa = (kern_base +
1835 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1836
1837 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
David S. Millerc69ad0a2012-09-06 20:35:36 -07001838 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
1839 HV_PGSZ_MASK_256MB |
1840 HV_PGSZ_MASK_2GB |
1841 HV_PGSZ_MASK_16GB) &
1842 cpu_pgsz_mask);
David S. Millerd7744a02006-02-21 22:31:11 -08001843 ktsb_descr[1].assoc = 1;
1844 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
1845 ktsb_descr[1].ctx_idx = 0;
1846 ktsb_descr[1].tsb_base = ktsb_pa;
1847 ktsb_descr[1].resv = 0;
David S. Millerd1acb422007-03-16 17:20:28 -07001848#endif
David S. Miller490384e2006-02-11 14:41:18 -08001849}
1850
Paul Gortmaker2066aad2013-06-17 15:43:14 -04001851void sun4v_ktsb_register(void)
David S. Miller490384e2006-02-11 14:41:18 -08001852{
David S. Miller7db35f32007-05-29 02:22:14 -07001853 unsigned long pa, ret;
David S. Miller490384e2006-02-11 14:41:18 -08001854
1855 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1856
David S. Miller7db35f32007-05-29 02:22:14 -07001857 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
1858 if (ret != 0) {
1859 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
1860			    "failed with error %lx\n", pa, ret);
1861 prom_halt();
1862 }
David S. Miller490384e2006-02-11 14:41:18 -08001863}
1864
David S. Millerc69ad0a2012-09-06 20:35:36 -07001865static void __init sun4u_linear_pte_xor_finalize(void)
1866{
1867#ifndef CONFIG_DEBUG_PAGEALLOC
1868 /* This is where we would add Panther support for
1869 * 32MB and 256MB pages.
1870 */
1871#endif
1872}
1873
1874static void __init sun4v_linear_pte_xor_finalize(void)
1875{
1876#ifndef CONFIG_DEBUG_PAGEALLOC
1877 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
1878 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07001879 PAGE_OFFSET;
David S. Millerc69ad0a2012-09-06 20:35:36 -07001880 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1881 _PAGE_P_4V | _PAGE_W_4V);
1882 } else {
1883 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
1884 }
1885
1886 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
1887 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07001888 PAGE_OFFSET;
David S. Millerc69ad0a2012-09-06 20:35:36 -07001889 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1890 _PAGE_P_4V | _PAGE_W_4V);
1891 } else {
1892 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
1893 }
1894
1895 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
1896 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07001897 PAGE_OFFSET;
David S. Millerc69ad0a2012-09-06 20:35:36 -07001898 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1899 _PAGE_P_4V | _PAGE_W_4V);
1900 } else {
1901 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
1902 }
1903#endif
1904}
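
/* The xor trick in one line: on a linear-area TLB miss the handler
 * computes
 *
 *	tte = vaddr ^ kern_linear_pte_xor[i];
 *
 * Since vaddr = PAGE_OFFSET + paddr, the PAGE_OFFSET term folded into
 * each xor value above cancels the high bits of vaddr and the
 * attribute bits (valid, size, cacheable, priv, writable) come out
 * set, leaving tte = attrs | paddr.  The fallthrough assignments keep
 * unsupported slots pointing at the next smaller supported size.
 */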
1905
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906/* paging_init() sets up the page tables */
1907
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908static unsigned long last_valid_pfn;
David S. Millerac55c762014-09-26 21:19:46 -07001909
1910/* These must be page aligned so as not to trigger the
1911 * alignment tests of pgd_bad() and pud_bad().
1912 */
1913pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned (PAGE_SIZE)));
1914static pud_t swapper_pud_dir[PTRS_PER_PUD] __attribute__ ((aligned (PAGE_SIZE)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915
David S. Millerc4bce902006-02-11 21:57:54 -08001916static void sun4u_pgprot_init(void);
1917static void sun4v_pgprot_init(void);
1918
bob picco7c21d532014-09-16 09:29:54 -04001919static phys_addr_t __init available_memory(void)
1920{
1921 phys_addr_t available = 0ULL;
1922 phys_addr_t pa_start, pa_end;
1923 u64 i;
1924
1925 for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL)
1926 available = available + (pa_end - pa_start);
1927
1928 return available;
1929}
1930
1931/* We need to exclude reserved regions such as vmlinux and the
1932 * initrd.  Strictly speaking, the initrd size could be used to compute a
1933 * new lower limit, because the initrd is freed later during initialization.
1934 */
1935static void __init reduce_memory(phys_addr_t limit_ram)
1936{
1937 phys_addr_t avail_ram = available_memory();
1938 phys_addr_t pa_start, pa_end;
1939 u64 i;
1940
1941 if (limit_ram >= avail_ram)
1942 return;
1943
1944 for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL) {
1945 phys_addr_t region_size = pa_end - pa_start;
1946 phys_addr_t clip_start = pa_start;
1947
1948 avail_ram = avail_ram - region_size;
1949 /* Are we consuming too much? */
1950 if (avail_ram < limit_ram) {
1951 phys_addr_t give_back = limit_ram - avail_ram;
1952
1953 region_size = region_size - give_back;
1954 clip_start = clip_start + give_back;
1955 }
1956
1957 memblock_remove(clip_start, region_size);
1958
1959 if (avail_ram <= limit_ram)
1960 break;
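		/* memblock_remove() above invalidated the free-range
		 * iterator, so restart the scan from the beginning.
		 */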
1961 i = 0UL;
1962 }
1963}
1964
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965void __init paging_init(void)
1966{
David S. Miller919ee672008-04-23 05:40:25 -07001967 unsigned long end_pfn, shift, phys_base;
David S. Miller0836a0e2005-09-28 21:38:08 -07001968 unsigned long real_end, i;
David S. Millerac55c762014-09-26 21:19:46 -07001969 pud_t *pud;
1970 pmd_t *pmd;
Paul Gortmakeraa6f0792012-05-09 20:44:29 -04001971 int node;
David S. Miller0836a0e2005-09-28 21:38:08 -07001972
David S. Millerb2d43832013-09-20 21:50:41 -07001973 setup_page_offset();
1974
David S. Miller22adb352007-05-26 01:14:43 -07001975	/* These build time checks make sure that the dcache_dirty_cpu()
1976 * page->flags usage will work.
1977 *
1978 * When a page gets marked as dcache-dirty, we store the
1979 * cpu number starting at bit 32 in the page->flags. Also,
1980 * functions like clear_dcache_dirty_cpu use the cpu mask
1981 * in 13-bit signed-immediate instruction fields.
1982 */
Christoph Lameter9223b4192008-04-28 02:12:48 -07001983
1984 /*
1985 * Page flags must not reach into upper 32 bits that are used
1986 * for the cpu number
1987 */
1988 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
1989
1990 /*
1991 * The bit fields placed in the high range must not reach below
1992 * the 32 bit boundary. Otherwise we cannot place the cpu field
1993 * at the 32 bit boundary.
1994 */
David S. Miller22adb352007-05-26 01:14:43 -07001995 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
Christoph Lameter9223b4192008-04-28 02:12:48 -07001996 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
1997
David S. Miller22adb352007-05-26 01:14:43 -07001998 BUILD_BUG_ON(NR_CPUS > 4096);
1999
David S. Miller0eef3312014-05-03 22:52:50 -07002000 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
David S. Miller481295f2006-02-07 21:51:08 -08002001 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2002
David S. Millerd7744a02006-02-21 22:31:11 -08002003 /* Invalidate both kernel TSBs. */
David S. Miller8b234272006-02-17 18:01:02 -08002004 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
David S. Millerd1acb422007-03-16 17:20:28 -07002005#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Millerd7744a02006-02-21 22:31:11 -08002006 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
David S. Millerd1acb422007-03-16 17:20:28 -07002007#endif
David S. Miller8b234272006-02-17 18:01:02 -08002008
David S. Millerc4bce902006-02-11 21:57:54 -08002009 if (tlb_type == hypervisor)
2010 sun4v_pgprot_init();
2011 else
2012 sun4u_pgprot_init();
2013
David S. Millerd257d5d2006-02-06 23:44:37 -08002014 if (tlb_type == cheetah_plus ||
David S. Miller9076d0e2011-08-05 00:53:57 -07002015 tlb_type == hypervisor) {
David S. Miller517af332006-02-01 15:55:21 -08002016 tsb_phys_patch();
David S. Miller9076d0e2011-08-05 00:53:57 -07002017 ktsb_phys_patch();
2018 }
David S. Miller517af332006-02-01 15:55:21 -08002019
David S. Millerc69ad0a2012-09-06 20:35:36 -07002020 if (tlb_type == hypervisor)
David S. Millerd257d5d2006-02-06 23:44:37 -08002021 sun4v_patch_tlb_handlers();
2022
David S. Millera94a1722008-05-11 21:04:48 -07002023 /* Find available physical memory...
2024 *
2025 * Read it twice in order to work around a bug in openfirmware.
2026 * The call to grab this table itself can cause openfirmware to
2027 * allocate memory, which in turn can take away some space from
2028 * the list of available memory. Reading it twice makes sure
2029 * we really do get the final value.
2030 */
2031 read_obp_translations();
2032 read_obp_memory("reg", &pall[0], &pall_ents);
2033 read_obp_memory("available", &pavail[0], &pavail_ents);
David S. Miller13edad72005-09-29 17:58:26 -07002034 read_obp_memory("available", &pavail[0], &pavail_ents);
David S. Miller0836a0e2005-09-28 21:38:08 -07002035
2036 phys_base = 0xffffffffffffffffUL;
David S. Miller3b2a7e22008-02-13 18:13:20 -08002037 for (i = 0; i < pavail_ents; i++) {
David S. Miller13edad72005-09-29 17:58:26 -07002038 phys_base = min(phys_base, pavail[i].phys_addr);
Yinghai Lu95f72d12010-07-12 14:36:09 +10002039 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
David S. Miller3b2a7e22008-02-13 18:13:20 -08002040 }
2041
Yinghai Lu95f72d12010-07-12 14:36:09 +10002042 memblock_reserve(kern_base, kern_size);
David S. Miller0836a0e2005-09-28 21:38:08 -07002043
David S. Miller4e82c9a2008-02-13 18:00:03 -08002044 find_ramdisk(phys_base);
2045
bob picco7c21d532014-09-16 09:29:54 -04002046 if (cmdline_memory_size)
2047 reduce_memory(cmdline_memory_size);
David S. Miller25b0c652008-02-13 18:20:14 -08002048
Tejun Heo1aadc052011-12-08 10:22:08 -08002049 memblock_allow_resize();
Yinghai Lu95f72d12010-07-12 14:36:09 +10002050 memblock_dump_all();
David S. Miller3b2a7e22008-02-13 18:13:20 -08002051
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 set_bit(0, mmu_context_bmap);
2053
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002054 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2055
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 real_end = (unsigned long)_end;
David S. Miller0eef3312014-05-03 22:52:50 -07002057 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
David S. Miller64658742008-03-21 17:01:38 -07002058 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2059 num_kernel_image_mappings);
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002060
2061 /* Set kernel pgd to upper alias so physical page computations
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 * work.
2063 */
2064 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2065
David S. Miller56425302005-09-25 16:46:57 -07002066 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
David S. Millerac55c762014-09-26 21:19:46 -07002068 /* The kernel page tables we publish into what the rest of the
2069 * world sees must be adjusted so that they see the PAGE_OFFSET
2070	 * address of these in-kernel data structures. However right
2071 * here we must access them from the kernel image side, because
2072 * the trap tables haven't been taken over and therefore we cannot
2073 * take TLB misses in the PAGE_OFFSET linear mappings yet.
2074 */
2075 pud = swapper_pud_dir + (shift / sizeof(pud_t));
2076 pgd_set(&swapper_pg_dir[0], pud);
2077
2078 pmd = swapper_low_pmd_dir + (shift / sizeof(pmd_t));
2079 pud_set(&swapper_pud_dir[0], pmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080
David S. Millerc9c10832005-10-12 12:22:46 -07002081 inherit_prom_mappings();
David S. Miller5085b4a2005-09-22 00:45:41 -07002082
David S. Miller8f3614532007-12-13 06:13:38 -08002083 init_kpte_bitmap();
2084
David S. Millera8b900d2006-01-31 18:33:37 -08002085 /* Ok, we can use our TLB miss and window trap handlers safely. */
2086 setup_tba();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
David S. Millerc9c10832005-10-12 12:22:46 -07002088 __flush_tlb_all();
David S. Miller9ad98c52005-10-05 15:12:00 -07002089
David S. Millerad072002008-02-13 19:21:51 -08002090 prom_build_devicetree();
David S. Millerb696fdc2009-05-26 22:37:25 -07002091 of_populate_present_mask();
David S. Millerb99c6eb2009-06-18 01:44:19 -07002092#ifndef CONFIG_SMP
2093 of_fill_in_cpu_data();
2094#endif
David S. Millerad072002008-02-13 19:21:51 -08002095
David S. Miller890db402009-04-01 03:13:15 -07002096 if (tlb_type == hypervisor) {
David S. Miller4a283332008-02-13 19:22:23 -08002097 sun4v_mdesc_init();
Stephen Rothwell6ac5c612009-06-15 03:06:18 -07002098 mdesc_populate_present_mask(cpu_all_mask);
David S. Millerb99c6eb2009-06-18 01:44:19 -07002099#ifndef CONFIG_SMP
2100 mdesc_fill_in_cpu_data(cpu_all_mask);
2101#endif
David S. Millerce33fdc2012-09-06 19:01:25 -07002102 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
David S. Millerc69ad0a2012-09-06 20:35:36 -07002103
2104 sun4v_linear_pte_xor_finalize();
2105
2106 sun4v_ktsb_init();
2107 sun4v_ktsb_register();
David S. Millerce33fdc2012-09-06 19:01:25 -07002108 } else {
2109 unsigned long impl, ver;
2110
2111 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2112 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2113
2114 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2115 impl = ((ver >> 32) & 0xffff);
2116 if (impl == PANTHER_IMPL)
2117 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2118 HV_PGSZ_MASK_256MB);
David S. Millerc69ad0a2012-09-06 20:35:36 -07002119
2120 sun4u_linear_pte_xor_finalize();
David S. Miller890db402009-04-01 03:13:15 -07002121 }
David S. Miller4a283332008-02-13 19:22:23 -08002122
David S. Millerc69ad0a2012-09-06 20:35:36 -07002123 /* Flush the TLBs and the 4M TSB so that the updated linear
2124 * pte XOR settings are realized for all mappings.
2125 */
2126 __flush_tlb_all();
2127#ifndef CONFIG_DEBUG_PAGEALLOC
2128 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2129#endif
2130 __flush_tlb_all();
2131
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002132 /* Setup bootmem... */
David S. Miller919ee672008-04-23 05:40:25 -07002133 last_valid_pfn = end_pfn = bootmem_init(phys_base);
David S. Millerd1112012006-03-08 02:16:07 -08002134
David S. Miller5ed56f12012-04-26 20:50:34 -07002135	/* Once the OF device tree and MDESC have been set up, we know
2136 * the list of possible cpus. Therefore we can allocate the
2137 * IRQ stacks.
2138 */
2139 for_each_possible_cpu(i) {
Paul Gortmakeraa6f0792012-05-09 20:44:29 -04002140 node = cpu_to_node(i);
David S. Miller5ed56f12012-04-26 20:50:34 -07002141
2142 softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2143 THREAD_SIZE,
2144 THREAD_SIZE, 0);
2145 hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2146 THREAD_SIZE,
2147 THREAD_SIZE, 0);
2148 }
2149
David S. Miller56425302005-09-25 16:46:57 -07002150 kernel_physical_mapping_init();
David S. Miller56425302005-09-25 16:46:57 -07002151
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 {
David S. Miller919ee672008-04-23 05:40:25 -07002153 unsigned long max_zone_pfns[MAX_NR_ZONES];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154
David S. Miller919ee672008-04-23 05:40:25 -07002155 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156
David S. Miller919ee672008-04-23 05:40:25 -07002157 max_zone_pfns[ZONE_NORMAL] = end_pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158
David S. Miller919ee672008-04-23 05:40:25 -07002159 free_area_init_nodes(max_zone_pfns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 }
2161
David S. Miller3c62a2d2008-02-17 23:22:50 -08002162 printk("Booting Linux...\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163}
2164
Greg Kroah-Hartman7c9503b2012-12-21 14:03:26 -08002165int page_in_phys_avail(unsigned long paddr)
David S. Miller919ee672008-04-23 05:40:25 -07002166{
2167 int i;
2168
2169 paddr &= PAGE_MASK;
2170
2171 for (i = 0; i < pavail_ents; i++) {
2172 unsigned long start, end;
2173
2174 start = pavail[i].phys_addr;
2175 end = start + pavail[i].reg_size;
2176
2177 if (paddr >= start && paddr < end)
2178 return 1;
2179 }
2180 if (paddr >= kern_base && paddr < (kern_base + kern_size))
2181 return 1;
2182#ifdef CONFIG_BLK_DEV_INITRD
2183 if (paddr >= __pa(initrd_start) &&
2184 paddr < __pa(PAGE_ALIGN(initrd_end)))
2185 return 1;
2186#endif
2187
2188 return 0;
2189}
2190
2191static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
2192static int pavail_rescan_ents __initdata;
2193
2194/* Certain OBP calls, such as fetching "available" properties, can
2195 * claim physical memory. So, along with initializing the valid
2196 * address bitmap, we refetch the physical available memory
2197 * list and make sure it provides at least as much
2198 * memory as 'pavail' does.
2199 */
David S. Millerd8ed1d42009-08-25 16:47:46 -07002200static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 int i;
2203
David S. Miller13edad72005-09-29 17:58:26 -07002204 read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
David S. Miller13edad72005-09-29 17:58:26 -07002206 for (i = 0; i < pavail_ents; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 unsigned long old_start, old_end;
2208
David S. Miller13edad72005-09-29 17:58:26 -07002209 old_start = pavail[i].phys_addr;
David S. Miller919ee672008-04-23 05:40:25 -07002210 old_end = old_start + pavail[i].reg_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 while (old_start < old_end) {
2212 int n;
2213
David S. Millerc2a5a462006-06-22 00:01:56 -07002214 for (n = 0; n < pavail_rescan_ents; n++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215 unsigned long new_start, new_end;
2216
David S. Miller13edad72005-09-29 17:58:26 -07002217 new_start = pavail_rescan[n].phys_addr;
2218 new_end = new_start +
2219 pavail_rescan[n].reg_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220
2221 if (new_start <= old_start &&
2222 new_end >= (old_start + PAGE_SIZE)) {
David S. Miller0eef3312014-05-03 22:52:50 -07002223 set_bit(old_start >> ILOG2_4MB, bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 goto do_next_page;
2225 }
2226 }
David S. Miller919ee672008-04-23 05:40:25 -07002227
2228 prom_printf("mem_init: Lost memory in pavail\n");
2229 prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
2230 pavail[i].phys_addr,
2231 pavail[i].reg_size);
2232 prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
2233 pavail_rescan[i].phys_addr,
2234 pavail_rescan[i].reg_size);
2235 prom_printf("mem_init: Cannot continue, aborting.\n");
2236 prom_halt();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237
2238 do_next_page:
2239 old_start += PAGE_SIZE;
2240 }
2241 }
2242}
2243
David S. Millerd8ed1d42009-08-25 16:47:46 -07002244static void __init patch_tlb_miss_handler_bitmap(void)
2245{
2246 extern unsigned int valid_addr_bitmap_insn[];
2247 extern unsigned int valid_addr_bitmap_patch[];
2248
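	/* Write the second word first: until the first word is replaced
	 * the old code is still fetched, so the pair is never observed
	 * half-patched.  The mb() orders the two stores.
	 */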
2249 valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
2250 mb();
2251 valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
2252 flushi(&valid_addr_bitmap_insn[0]);
2253}
2254
Yinghai Lu961f8fa2012-11-16 19:39:21 -08002255static void __init register_page_bootmem_info(void)
2256{
2257#ifdef CONFIG_NEED_MULTIPLE_NODES
2258 int i;
2259
2260 for_each_online_node(i)
2261 if (NODE_DATA(i)->node_spanned_pages)
2262 register_page_bootmem_info_node(NODE_DATA(i));
2263#endif
2264}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265void __init mem_init(void)
2266{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 unsigned long addr, last;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268
2269 addr = PAGE_OFFSET + kern_base;
2270 last = PAGE_ALIGN(kern_size) + addr;
2271 while (addr < last) {
David S. Miller0eef3312014-05-03 22:52:50 -07002272 set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 addr += PAGE_SIZE;
2274 }
2275
David S. Millerd8ed1d42009-08-25 16:47:46 -07002276 setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
2277 patch_tlb_miss_handler_bitmap();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2280
Yinghai Lu961f8fa2012-11-16 19:39:21 -08002281 register_page_bootmem_info();
Jiang Liu0c988532013-07-03 15:03:24 -07002282 free_all_bootmem();
David S. Miller919ee672008-04-23 05:40:25 -07002283
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 /*
2285 * Set up the zero page, mark it reserved, so that page count
2286 * is not manipulated when freeing the page from user ptes.
2287 */
2288 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2289 if (mem_map_zero == NULL) {
2290 prom_printf("paging_init: Cannot alloc zero page.\n");
2291 prom_halt();
2292 }
Jiang Liu70affe42013-05-07 16:18:08 -07002293 mark_page_reserved(mem_map_zero);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294
Jiang Liudceccbe2013-07-03 15:04:14 -07002295 mem_init_print_info(NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296
2297 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2298 cheetah_ecache_flush_init();
2299}
2300
David S. Miller898cf0e2005-09-23 11:59:44 -07002301void free_initmem(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302{
2303 unsigned long addr, initend;
David S. Millerf2b60792008-08-14 01:45:41 -07002304 int do_free = 1;
2305
2306 /* If the physical memory maps were trimmed by kernel command
2307 * line options, don't even try freeing this initmem stuff up.
2308 * The kernel image could have been in the trimmed out region
2309 * and if so the freeing below will free invalid page structs.
2310 */
2311 if (cmdline_memory_size)
2312 do_free = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
2314 /*
2315	 * The init section is aligned to 8k in vmlinux.lds.  Page align for >8k page sizes.
2316 */
2317 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2318 initend = (unsigned long)(__init_end) & PAGE_MASK;
2319 for (; addr < initend; addr += PAGE_SIZE) {
2320 unsigned long page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321
2322 page = (addr +
2323 ((unsigned long) __va(kern_base)) -
2324 ((unsigned long) KERNBASE));
Randy Dunlapc9cf5522006-06-27 02:53:52 -07002325 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326
Jiang Liu70affe42013-05-07 16:18:08 -07002327 if (do_free)
2328 free_reserved_page(virt_to_page(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 }
2330}
2331
2332#ifdef CONFIG_BLK_DEV_INITRD
2333void free_initrd_mem(unsigned long start, unsigned long end)
2334{
Jiang Liudceccbe2013-07-03 15:04:14 -07002335 free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
2336 "initrd");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337}
2338#endif
David S. Millerc4bce902006-02-11 21:57:54 -08002339
David S. Millerc4bce902006-02-11 21:57:54 -08002340#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2341#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2342#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2343#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2344#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2345#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2346
2347pgprot_t PAGE_KERNEL __read_mostly;
2348EXPORT_SYMBOL(PAGE_KERNEL);
2349
2350pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2351pgprot_t PAGE_COPY __read_mostly;
David S. Miller0f159522006-02-18 12:43:16 -08002352
2353pgprot_t PAGE_SHARED __read_mostly;
2354EXPORT_SYMBOL(PAGE_SHARED);
2355
David S. Millerc4bce902006-02-11 21:57:54 -08002356unsigned long pg_iobits __read_mostly;
2357
2358unsigned long _PAGE_IE __read_mostly;
David S. Miller987c74f2006-06-25 01:34:43 -07002359EXPORT_SYMBOL(_PAGE_IE);
David S. Millerb2bef442006-02-23 01:55:55 -08002360
David S. Millerc4bce902006-02-11 21:57:54 -08002361unsigned long _PAGE_E __read_mostly;
David S. Millerb2bef442006-02-23 01:55:55 -08002362EXPORT_SYMBOL(_PAGE_E);
2363
David S. Millerc4bce902006-02-11 21:57:54 -08002364unsigned long _PAGE_CACHE __read_mostly;
David S. Millerb2bef442006-02-23 01:55:55 -08002365EXPORT_SYMBOL(_PAGE_CACHE);
David S. Millerc4bce902006-02-11 21:57:54 -08002366
David Miller46644c22007-10-16 01:24:16 -07002367#ifdef CONFIG_SPARSEMEM_VMEMMAP
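/* One entry per 4MB (VMEMMAP_CHUNK) slice of the virtual memmap; each
 * entry caches the huge TTE backing that slice, filled in on demand
 * by vmemmap_populate() below.
 */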
David Miller46644c22007-10-16 01:24:16 -07002368unsigned long vmemmap_table[VMEMMAP_SIZE];
2369
David S. Miller2856cc22012-08-15 00:37:29 -07002370static long __meminitdata addr_start, addr_end;
2371static int __meminitdata node_start;
2372
Johannes Weiner0aad8182013-04-29 15:07:50 -07002373int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2374 int node)
David Miller46644c22007-10-16 01:24:16 -07002375{
David Miller46644c22007-10-16 01:24:16 -07002376 unsigned long phys_start = (vstart - VMEMMAP_BASE);
2377 unsigned long phys_end = (vend - VMEMMAP_BASE);
2378 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
2379 unsigned long end = VMEMMAP_ALIGN(phys_end);
2380 unsigned long pte_base;
2381
2382 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2383 _PAGE_CP_4U | _PAGE_CV_4U |
2384 _PAGE_P_4U | _PAGE_W_4U);
2385 if (tlb_type == hypervisor)
2386 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2387 _PAGE_CP_4V | _PAGE_CV_4V |
2388 _PAGE_P_4V | _PAGE_W_4V);
2389
2390 for (; addr < end; addr += VMEMMAP_CHUNK) {
2391 unsigned long *vmem_pp =
2392 vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
2393 void *block;
2394
2395 if (!(*vmem_pp & _PAGE_VALID)) {
David S. Miller0eef3312014-05-03 22:52:50 -07002396 block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
David Miller46644c22007-10-16 01:24:16 -07002397 if (!block)
2398 return -ENOMEM;
2399
2400 *vmem_pp = pte_base | __pa(block);
2401
David S. Miller2856cc22012-08-15 00:37:29 -07002402 /* check to see if we have contiguous blocks */
2403 if (addr_end != addr || node_start != node) {
2404 if (addr_start)
2405 printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
2406 addr_start, addr_end-1, node_start);
2407 addr_start = addr;
2408 node_start = node;
2409 }
2410 addr_end = addr + VMEMMAP_CHUNK;
David Miller46644c22007-10-16 01:24:16 -07002411 }
2412 }
2413 return 0;
2414}
David S. Miller2856cc22012-08-15 00:37:29 -07002415
2416void __meminit vmemmap_populate_print_last(void)
2417{
2418 if (addr_start) {
2419 printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
2420 addr_start, addr_end-1, node_start);
2421 addr_start = 0;
2422 addr_end = 0;
2423 node_start = 0;
2424 }
2425}
Yasuaki Ishimatsu46723bf2013-02-22 16:33:00 -08002426
Johannes Weiner0aad8182013-04-29 15:07:50 -07002427void vmemmap_free(unsigned long start, unsigned long end)
Tang Chen01975182013-02-22 16:33:08 -08002428{
2429}
2430
David Miller46644c22007-10-16 01:24:16 -07002431#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2432
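/* protection_map[] is indexed by the low four vm_flags bits
 * (VM_READ = 1, VM_WRITE = 2, VM_EXEC = 4, VM_SHARED = 8): private
 * writable mappings get the copy-on-write protection, shared writable
 * mappings get page_shared, and entries lacking VM_EXEC have the exec
 * bit masked off.
 */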
David S. Millerc4bce902006-02-11 21:57:54 -08002433static void prot_init_common(unsigned long page_none,
2434 unsigned long page_shared,
2435 unsigned long page_copy,
2436 unsigned long page_readonly,
2437 unsigned long page_exec_bit)
2438{
2439 PAGE_COPY = __pgprot(page_copy);
David S. Miller0f159522006-02-18 12:43:16 -08002440 PAGE_SHARED = __pgprot(page_shared);
David S. Millerc4bce902006-02-11 21:57:54 -08002441
2442 protection_map[0x0] = __pgprot(page_none);
2443 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2444 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2445 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2446 protection_map[0x4] = __pgprot(page_readonly);
2447 protection_map[0x5] = __pgprot(page_readonly);
2448 protection_map[0x6] = __pgprot(page_copy);
2449 protection_map[0x7] = __pgprot(page_copy);
2450 protection_map[0x8] = __pgprot(page_none);
2451 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2452 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2453 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2454 protection_map[0xc] = __pgprot(page_readonly);
2455 protection_map[0xd] = __pgprot(page_readonly);
2456 protection_map[0xe] = __pgprot(page_shared);
2457 protection_map[0xf] = __pgprot(page_shared);
2458}
2459
2460static void __init sun4u_pgprot_init(void)
2461{
2462 unsigned long page_none, page_shared, page_copy, page_readonly;
2463 unsigned long page_exec_bit;
David S. Miller4f93d212012-09-06 18:13:58 -07002464 int i;
David S. Millerc4bce902006-02-11 21:57:54 -08002465
2466 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2467 _PAGE_CACHE_4U | _PAGE_P_4U |
2468 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2469 _PAGE_EXEC_4U);
2470 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2471 _PAGE_CACHE_4U | _PAGE_P_4U |
2472 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2473 _PAGE_EXEC_4U | _PAGE_L_4U);
David S. Millerc4bce902006-02-11 21:57:54 -08002474
2475 _PAGE_IE = _PAGE_IE_4U;
2476 _PAGE_E = _PAGE_E_4U;
2477 _PAGE_CACHE = _PAGE_CACHE_4U;
2478
2479 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2480 __ACCESS_BITS_4U | _PAGE_E_4U);
2481
David S. Millerd1acb422007-03-16 17:20:28 -07002482#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller922631b2013-09-18 12:00:00 -07002483 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002484#else
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002485 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
David S. Miller922631b2013-09-18 12:00:00 -07002486 PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002487#endif
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002488 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2489 _PAGE_P_4U | _PAGE_W_4U);
2490
David S. Miller4f93d212012-09-06 18:13:58 -07002491 for (i = 1; i < 4; i++)
2492 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
David S. Millerc4bce902006-02-11 21:57:54 -08002493
David S. Millerc4bce902006-02-11 21:57:54 -08002494 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2495 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2496 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2497
2498
2499 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2500 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2501 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2502 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2503 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2504 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2505 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2506
2507 page_exec_bit = _PAGE_EXEC_4U;
2508
2509 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2510 page_exec_bit);
2511}
2512
2513static void __init sun4v_pgprot_init(void)
2514{
2515 unsigned long page_none, page_shared, page_copy, page_readonly;
2516 unsigned long page_exec_bit;
David S. Miller4f93d212012-09-06 18:13:58 -07002517 int i;
David S. Millerc4bce902006-02-11 21:57:54 -08002518
2519 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2520 _PAGE_CACHE_4V | _PAGE_P_4V |
2521 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2522 _PAGE_EXEC_4V);
2523 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
David S. Millerc4bce902006-02-11 21:57:54 -08002524
2525 _PAGE_IE = _PAGE_IE_4V;
2526 _PAGE_E = _PAGE_E_4V;
2527 _PAGE_CACHE = _PAGE_CACHE_4V;
2528
David S. Millerd1acb422007-03-16 17:20:28 -07002529#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller922631b2013-09-18 12:00:00 -07002530 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002531#else
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002532 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07002533 PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002534#endif
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002535 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2536 _PAGE_P_4V | _PAGE_W_4V);
2537
David S. Millerc69ad0a2012-09-06 20:35:36 -07002538 for (i = 1; i < 4; i++)
2539 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
David S. Miller4f93d212012-09-06 18:13:58 -07002540
David S. Millerc4bce902006-02-11 21:57:54 -08002541 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2542 __ACCESS_BITS_4V | _PAGE_E_4V);
2543
David S. Millerc4bce902006-02-11 21:57:54 -08002544 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2545 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2546 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2547 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2548
2549 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
2550 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2551 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2552 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2553 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2554 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2555 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2556
2557 page_exec_bit = _PAGE_EXEC_4V;
2558
2559 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2560 page_exec_bit);
2561}
2562
2563unsigned long pte_sz_bits(unsigned long sz)
2564{
2565 if (tlb_type == hypervisor) {
2566 switch (sz) {
2567 case 8 * 1024:
2568 default:
2569 return _PAGE_SZ8K_4V;
2570 case 64 * 1024:
2571 return _PAGE_SZ64K_4V;
2572 case 512 * 1024:
2573 return _PAGE_SZ512K_4V;
2574 case 4 * 1024 * 1024:
2575 return _PAGE_SZ4MB_4V;
Joe Perches6cb79b32011-06-03 14:45:23 +00002576 }
David S. Millerc4bce902006-02-11 21:57:54 -08002577 } else {
2578 switch (sz) {
2579 case 8 * 1024:
2580 default:
2581 return _PAGE_SZ8K_4U;
2582 case 64 * 1024:
2583 return _PAGE_SZ64K_4U;
2584 case 512 * 1024:
2585 return _PAGE_SZ512K_4U;
2586 case 4 * 1024 * 1024:
2587 return _PAGE_SZ4MB_4U;
Joe Perches6cb79b32011-06-03 14:45:23 +00002588 }
David S. Millerc4bce902006-02-11 21:57:54 -08002589 }
2590}
2591
2592pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2593{
2594 pte_t pte;
David S. Millercf627152006-02-12 21:10:07 -08002595
2596 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
David S. Millerc4bce902006-02-11 21:57:54 -08002597 pte_val(pte) |= (((unsigned long)space) << 32);
2598 pte_val(pte) |= pte_sz_bits(page_size);
David S. Millercf627152006-02-12 21:10:07 -08002599
David S. Millerc4bce902006-02-11 21:57:54 -08002600 return pte;
2601}
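
/* Illustrative use, with hypothetical values: build a non-cached pte
 * for a 64KB page at offset 0x10000 within IO space 0x7f:
 *
 *	pte_t pte = mk_pte_io(0x10000UL, PAGE_KERNEL, 0x7f, 64 * 1024);
 *
 * pgprot_noncached() strips the cacheable bits, the space id lands in
 * bits 32 and up, and pte_sz_bits() supplies the matching page-size
 * encoding.
 */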
2602
David S. Millerc4bce902006-02-11 21:57:54 -08002603static unsigned long kern_large_tte(unsigned long paddr)
2604{
2605 unsigned long val;
2606
2607 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2608 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2609 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2610 if (tlb_type == hypervisor)
2611 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2612 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
2613 _PAGE_EXEC_4V | _PAGE_W_4V);
2614
2615 return val | paddr;
2616}
2617
David S. Millerc4bce902006-02-11 21:57:54 -08002618/* If not locked, zap it. */
2619void __flush_tlb_all(void)
2620{
2621 unsigned long pstate;
2622 int i;
2623
2624 __asm__ __volatile__("flushw\n\t"
2625 "rdpr %%pstate, %0\n\t"
2626 "wrpr %0, %1, %%pstate"
2627 : "=r" (pstate)
2628 : "i" (PSTATE_IE));
David S. Miller8f3614532007-12-13 06:13:38 -08002629 if (tlb_type == hypervisor) {
2630 sun4v_mmu_demap_all();
2631 } else if (tlb_type == spitfire) {
David S. Millerc4bce902006-02-11 21:57:54 -08002632 for (i = 0; i < 64; i++) {
2633 /* Spitfire Errata #32 workaround */
2634 /* NOTE: Always runs on spitfire, so no
2635 * cheetah+ page size encodings.
2636 */
2637 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2638 "flush %%g6"
2639 : /* No outputs */
2640 : "r" (0),
2641 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2642
2643 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2644 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2645 "membar #Sync"
2646 : /* no outputs */
2647 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2648 spitfire_put_dtlb_data(i, 0x0UL);
2649 }
2650
2651 /* Spitfire Errata #32 workaround */
2652 /* NOTE: Always runs on spitfire, so no
2653 * cheetah+ page size encodings.
2654 */
2655 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2656 "flush %%g6"
2657 : /* No outputs */
2658 : "r" (0),
2659 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2660
2661 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2662 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2663 "membar #Sync"
2664 : /* no outputs */
2665 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2666 spitfire_put_itlb_data(i, 0x0UL);
2667 }
2668 }
2669 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2670 cheetah_flush_dtlb_all();
2671 cheetah_flush_itlb_all();
2672 }
2673 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2674 : : "r" (pstate));
2675}
David Millerc460bec2012-10-08 16:34:22 -07002676
David Millerc460bec2012-10-08 16:34:22 -07002677pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2678 unsigned long address)
2679{
David S. Miller37b3a8f2013-09-25 13:48:49 -07002680 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2681 __GFP_REPEAT | __GFP_ZERO);
2682 pte_t *pte = NULL;
David Millerc460bec2012-10-08 16:34:22 -07002683
David Millerc460bec2012-10-08 16:34:22 -07002684 if (page)
2685 pte = (pte_t *) page_address(page);
2686
2687 return pte;
2688}
2689
2690pgtable_t pte_alloc_one(struct mm_struct *mm,
2691 unsigned long address)
2692{
David S. Miller37b3a8f2013-09-25 13:48:49 -07002693 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2694 __GFP_REPEAT | __GFP_ZERO);
Kirill A. Shutemov1ae9ae52013-11-14 14:31:42 -08002695 if (!page)
2696 return NULL;
2697 if (!pgtable_page_ctor(page)) {
2698 free_hot_cold_page(page, 0);
2699 return NULL;
David Millerc460bec2012-10-08 16:34:22 -07002700 }
Kirill A. Shutemov1ae9ae52013-11-14 14:31:42 -08002701 return (pte_t *) page_address(page);
David Millerc460bec2012-10-08 16:34:22 -07002702}
2703
2704void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2705{
David S. Miller37b3a8f2013-09-25 13:48:49 -07002706 free_page((unsigned long)pte);
David Millerc460bec2012-10-08 16:34:22 -07002707}
2708
2709static void __pte_free(pgtable_t pte)
2710{
2711 struct page *page = virt_to_page(pte);
David S. Miller37b3a8f2013-09-25 13:48:49 -07002712
2713 pgtable_page_dtor(page);
2714 __free_page(page);
David Millerc460bec2012-10-08 16:34:22 -07002715}
2716
2717void pte_free(struct mm_struct *mm, pgtable_t pte)
2718{
2719 __pte_free(pte);
2720}
2721
2722void pgtable_free(void *table, bool is_page)
2723{
2724 if (is_page)
2725 __pte_free(table);
2726 else
2727 kmem_cache_free(pgtable_cache, table);
2728}
David Miller9e695d22012-10-08 16:34:29 -07002729
2730#ifdef CONFIG_TRANSPARENT_HUGEPAGE
David Miller9e695d22012-10-08 16:34:29 -07002731void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2732 pmd_t *pmd)
2733{
2734 unsigned long pte, flags;
2735 struct mm_struct *mm;
2736 pmd_t entry = *pmd;
David Miller9e695d22012-10-08 16:34:29 -07002737
2738 if (!pmd_large(entry) || !pmd_young(entry))
2739 return;
2740
David S. Millera7b94032013-09-26 13:45:15 -07002741 pte = pmd_val(entry);
David Miller9e695d22012-10-08 16:34:29 -07002742
David S. Miller18f38132014-08-04 16:34:01 -07002743	/* Don't insert a non-valid PMD into the TSB; we'll deadlock.  */
2744 if (!(pte & _PAGE_VALID))
2745 return;
2746
David S. Miller37b3a8f2013-09-25 13:48:49 -07002747 /* We are fabricating 8MB pages using 4MB real hw pages. */
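	/* Bit REAL_HPAGE_SHIFT of the faulting address selects which
	 * 4MB half was touched; folding it in below makes the TSB entry
	 * resolve to the matching physical half.
	 */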
2748 pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
David Miller9e695d22012-10-08 16:34:29 -07002749
2750 mm = vma->vm_mm;
2751
2752 spin_lock_irqsave(&mm->context.lock, flags);
2753
2754 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
David S. Miller37b3a8f2013-09-25 13:48:49 -07002755 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
David Miller9e695d22012-10-08 16:34:29 -07002756 addr, pte);
2757
2758 spin_unlock_irqrestore(&mm->context.lock, flags);
2759}
2760#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2761
2762#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2763static void context_reload(void *__data)
2764{
2765 struct mm_struct *mm = __data;
2766
2767 if (mm == current->mm)
2768 load_secondary_context(mm);
2769}
2770
David S. Miller0fbebed2013-02-19 22:34:10 -08002771void hugetlb_setup(struct pt_regs *regs)
David Miller9e695d22012-10-08 16:34:29 -07002772{
David S. Miller0fbebed2013-02-19 22:34:10 -08002773 struct mm_struct *mm = current->mm;
2774 struct tsb_config *tp;
David Miller9e695d22012-10-08 16:34:29 -07002775
David S. Miller0fbebed2013-02-19 22:34:10 -08002776 if (in_atomic() || !mm) {
2777 const struct exception_table_entry *entry;
David Miller9e695d22012-10-08 16:34:29 -07002778
David S. Miller0fbebed2013-02-19 22:34:10 -08002779 entry = search_exception_tables(regs->tpc);
2780 if (entry) {
2781 regs->tpc = entry->fixup;
2782 regs->tnpc = regs->tpc + 4;
2783 return;
2784 }
2785 pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2786 die_if_kernel("HugeTSB in atomic", regs);
2787 }
2788
2789 tp = &mm->context.tsb_block[MM_TSB_HUGE];
2790 if (likely(tp->tsb == NULL))
2791 tsb_grow(mm, MM_TSB_HUGE, 0);
2792
David Miller9e695d22012-10-08 16:34:29 -07002793 tsb_context_switch(mm);
2794 smp_tsb_sync(mm);
2795
2796 /* On UltraSPARC-III+ and later, configure the second half of
2797 * the Data-TLB for huge pages.
2798 */
2799 if (tlb_type == cheetah_plus) {
2800 unsigned long ctx;
2801
2802 spin_lock(&ctx_alloc_lock);
2803 ctx = mm->context.sparc64_ctx_val;
2804 ctx &= ~CTX_PGSZ_MASK;
2805 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
2806 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
2807
2808 if (ctx != mm->context.sparc64_ctx_val) {
2809 /* When changing the page size fields, we
2810 * must perform a context flush so that no
2811 * stale entries match. This flush must
2812 * occur with the original context register
2813 * settings.
2814 */
2815 do_flush_tlb_mm(mm);
2816
2817 /* Reload the context register of all processors
2818 * also executing in this address space.
2819 */
2820 mm->context.sparc64_ctx_val = ctx;
2821 on_each_cpu(context_reload, mm, 0);
2822 }
2823 spin_unlock(&ctx_alloc_lock);
2824 }
2825}
2826#endif
bob piccof6d4fb52014-03-03 11:54:42 -05002827
2828static struct resource code_resource = {
2829 .name = "Kernel code",
2830 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
2831};
2832
2833static struct resource data_resource = {
2834 .name = "Kernel data",
2835 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
2836};
2837
2838static struct resource bss_resource = {
2839 .name = "Kernel bss",
2840 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
2841};
2842
2843static inline resource_size_t compute_kern_paddr(void *addr)
2844{
2845 return (resource_size_t) (addr - KERNBASE + kern_base);
2846}
2847
2848static void __init kernel_lds_init(void)
2849{
2850 code_resource.start = compute_kern_paddr(_text);
2851 code_resource.end = compute_kern_paddr(_etext - 1);
2852 data_resource.start = compute_kern_paddr(_etext);
2853 data_resource.end = compute_kern_paddr(_edata - 1);
2854 bss_resource.start = compute_kern_paddr(__bss_start);
2855 bss_resource.end = compute_kern_paddr(_end - 1);
2856}
2857
2858static int __init report_memory(void)
2859{
2860 int i;
2861 struct resource *res;
2862
2863 kernel_lds_init();
2864
2865 for (i = 0; i < pavail_ents; i++) {
2866 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
2867
2868 if (!res) {
2869			pr_warn("Failed to allocate resource.\n");
2870 break;
2871 }
2872
2873 res->name = "System RAM";
2874 res->start = pavail[i].phys_addr;
2875 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
2876 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
2877
2878 if (insert_resource(&iomem_resource, res) < 0) {
2879 pr_warn("Resource insertion failed.\n");
2880 break;
2881 }
2882
2883 insert_resource(res, &code_resource);
2884 insert_resource(res, &data_resource);
2885 insert_resource(res, &bss_resource);
2886 }
2887
2888 return 0;
2889}
2890device_initcall(report_memory);
David S. Millere9011d02014-08-05 18:57:18 -07002891
David S. Miller4ca9a232014-08-04 20:07:37 -07002892#ifdef CONFIG_SMP
2893#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
2894#else
2895#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
2896#endif
2897
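/* Firmware (OBP) translations live in [LOW_OBP_ADDRESS, HI_OBP_ADDRESS)
 * and must never be flushed.  A range overlapping that window is split
 * so only the portions below and above it are processed.
 */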
2898void flush_tlb_kernel_range(unsigned long start, unsigned long end)
2899{
2900 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
2901 if (start < LOW_OBP_ADDRESS) {
2902 flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
2903 do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
2904 }
2905 if (end > HI_OBP_ADDRESS) {
David S. Miller473ad7f2014-10-04 21:05:14 -07002906 flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
2907 do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
David S. Miller4ca9a232014-08-04 20:07:37 -07002908 }
2909 } else {
2910 flush_tsb_kernel_range(start, end);
2911 do_flush_tlb_kernel_range(start, end);
2912 }
2913}