/*
 * Copyright (C) 1995  Linus Torvalds
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>

#include "migrate.h"

#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))

#ifndef __tilegx__
unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
EXPORT_SYMBOL(VMALLOC_RESERVE);
#endif

/* Create an L2 page table */
static pte_t * __init alloc_pte(void)
{
	return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}

/*
 * L2 page tables per controller.  We allocate these all at once from
 * the bootmem allocator and store them here.  This saves on kernel L2
 * page table memory, compared to allocating a full 64K page per L2
 * page table, and also means that in cases where we use huge pages,
 * we are guaranteed to later be able to shatter those huge pages and
 * switch to using these page tables instead, without requiring
 * further allocation.  Each l2_ptes[] entry points to the first page
 * table for the first hugepage-size piece of memory on the
 * controller; other page tables are just indexed directly, i.e. the
 * L2 page tables are contiguous in memory for each controller.
 */
static pte_t *l2_ptes[MAX_NUMNODES];
static int num_l2_ptes[MAX_NUMNODES];

static void init_prealloc_ptes(int node, int pages)
{
	BUG_ON(pages & (PTRS_PER_PTE - 1));
	if (pages) {
		num_l2_ptes[node] = pages;
		l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
						HV_PAGE_TABLE_ALIGN, 0);
	}
}

pte_t *get_prealloc_pte(unsigned long pfn)
{
	int node = pfn_to_nid(pfn);
	pfn &= ~(-1UL << (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT));
	BUG_ON(node >= MAX_NUMNODES);
	BUG_ON(pfn >= num_l2_ptes[node]);
	return &l2_ptes[node][pfn];
}
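
/*
 * A note on the pfn masking in get_prealloc_pte(): physical-address
 * bits at or above NR_PA_HIGHBIT_SHIFT select the memory controller,
 * so (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT) is the first pfn bit that
 * belongs to the node number.  Clearing those high bits leaves the
 * pfn offset within the controller, which indexes directly into the
 * contiguous l2_ptes[] array set up by init_prealloc_ptes().  For
 * illustration only (the constants are configuration-dependent): with
 * a 64 KB PAGE_SIZE (PAGE_SHIFT == 16) and NR_PA_HIGHBIT_SHIFT == 36,
 * the low 20 pfn bits would be kept and the rest dropped.
 */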

/*
 * What caching do we expect pages from the heap to have when
 * they are allocated during bootup?  (Once we've installed the
 * "real" swapper_pg_dir.)
 */
static int initial_heap_home(void)
{
	if (hash_default)
		return PAGE_HOME_HASH;
	return smp_processor_id();
}

/*
 * Place a pointer to an L2 page table in a middle page
 * directory entry.
 */
static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
{
	phys_addr_t pa = __pa(page_table);
	unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN;
	pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn);
	BUG_ON((pa & (HV_PAGE_TABLE_ALIGN-1)) != 0);
	pteval = pte_set_home(pteval, initial_heap_home());
	*(pte_t *)pmd = pteval;
	if (page_table != (pte_t *)pmd_page_vaddr(*pmd))
		BUG();
}
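
/*
 * The "ptfn" written by assign_pte() is a page-table frame number:
 * the physical address in units of HV_PAGE_TABLE_ALIGN rather than
 * PAGE_SIZE, hence the shift by HV_LOG2_PAGE_TABLE_ALIGN and the
 * BUG_ON that the table is suitably aligned.  The final comparison
 * simply checks that pmd_page_vaddr() decodes back the entry we
 * just wrote.
 */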

#ifdef __tilegx__

static inline pmd_t *alloc_pmd(void)
{
	return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}

static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
{
	assign_pte((pmd_t *)pud, (pte_t *)pmd);
}

#endif /* __tilegx__ */

/* Replace the given pmd with a full PTE table. */
void __init shatter_pmd(pmd_t *pmd)
{
	pte_t *pte = get_prealloc_pte(pte_pfn(*(pte_t *)pmd));
	assign_pte(pmd, pte);
}
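
/*
 * Note that shattering never allocates: init_prealloc_ptes() reserved
 * a contiguous run of L2 tables covering all of lowmem, so the huge
 * entry's pfn is enough to locate the matching preallocated table,
 * which assign_pte() then swaps in for the huge mapping.
 */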

#ifdef __tilegx__
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
	pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va);
	if (pud_none(*pud))
		assign_pmd(pud, alloc_pmd());
	return pmd_offset(pud, va);
}
#else
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
	return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va);
}
#endif

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start,
					 unsigned long end, pgd_t *pgd)
{
	unsigned long vaddr;
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
	for (vaddr = start; vaddr < end; vaddr += PMD_SIZE) {
		pmd_t *pmd = get_pmd(pgd, vaddr);
		if (pmd_none(*pmd))
			assign_pte(pmd, alloc_pte());
	}
}
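
/*
 * Typical use (see paging_init() below): pre-populating page tables
 * for the fixmap region, e.g.
 *
 *	page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1),
 *			      FIXADDR_TOP, pgd_base);
 *
 * so that later fixmap (and, with CONFIG_HIGHMEM, pkmap) users never
 * need to allocate page-table pages themselves.
 */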


static int __initdata ktext_hash = 1;  /* .text pages */
static int __initdata kdata_hash = 1;  /* .data and .bss pages */
int __write_once hash_default = 1;     /* kernel allocator pages */
EXPORT_SYMBOL(hash_default);
int __write_once kstack_hash = 1;      /* if no homecaching, use h4h */

/*
 * CPUs to use for striping the pages of kernel data.  If hash-for-home
 * is available, this is only relevant if kcache_hash sets up the
 * .data and .bss to be page-homed, and we don't want the default mode
 * of using the full set of kernel cpus for the striping.
 */
static __initdata struct cpumask kdata_mask;
static __initdata int kdata_arg_seen;

int __write_once kdata_huge;    /* if no homecaching, small pages */


/* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
{
	prot = pte_set_home(prot, home);
	if (home == PAGE_HOME_IMMUTABLE) {
		if (ktext_hash)
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
		else
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
	}
	return prot;
}

/*
 * For a given kernel data VA, how should it be cached?
 * We return the complete pgprot_t with caching bits set.
 */
static pgprot_t __init init_pgprot(ulong address)
{
	int cpu;
	unsigned long page;
	enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };

	/* For kdata=huge, everything is just hash-for-home. */
	if (kdata_huge)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);

	/* We map the aliased pages of permanent text inaccessible. */
	if (address < (ulong) _sinittext - CODE_DELTA)
		return PAGE_NONE;

	/* We map read-only data non-coherent for performance. */
	if ((address >= (ulong) __start_rodata &&
	     address < (ulong) __end_rodata) ||
	    address == (ulong) empty_zero_page) {
		return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
	}

#ifndef __tilegx__
	/* Force the atomic_locks[] array page to be hash-for-home. */
	if (address == (ulong) atomic_locks)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/*
	 * Everything else that isn't data or bss is heap, so mark it
	 * with the initial heap home (hash-for-home, or this cpu).  This
	 * includes any addresses after the loaded image and any address
	 * before __init_end, since we already captured the case of text
	 * before _sinittext, and __pa(einittext) is approximately
	 * __pa(__init_begin).
	 *
	 * All the LOWMEM pages that we mark this way will get their
	 * struct page homecache properly marked later, in set_page_homes().
	 * The HIGHMEM pages we leave with a default zero for their
	 * homes, but with a zero free_time we don't have to actually
	 * do a flush action the first time we use them, either.
	 */
	if (address >= (ulong) _end || address < (ulong) __init_end)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

	/* Use hash-for-home if requested for data/bss. */
	if (kdata_hash)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);

	/*
	 * Otherwise we just hand out consecutive cpus.  To avoid
	 * requiring this function to hold state, we just walk forward from
	 * __end_rodata by PAGE_SIZE, skipping the readonly and init data, to
	 * reach the requested address, while walking cpu home around
	 * kdata_mask.  This is typically no more than a dozen or so
	 * iterations.
	 */
	page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK;
	BUG_ON(address < page || address >= (ulong)_end);
	cpu = cpumask_first(&kdata_mask);
	for (; page < address; page += PAGE_SIZE) {
		if (page >= (ulong)&init_thread_union &&
		    page < (ulong)&init_thread_union + THREAD_SIZE)
			continue;
		if (page == (ulong)empty_zero_page)
			continue;
#ifndef __tilegx__
		if (page == (ulong)atomic_locks)
			continue;
#endif
		cpu = cpumask_next(cpu, &kdata_mask);
		if (cpu == NR_CPUS)
			cpu = cpumask_first(&kdata_mask);
	}
	return construct_pgprot(PAGE_KERNEL, cpu);
}
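
/*
 * Illustration of the striping walk above (mask values hypothetical):
 * with kdata_mask = 1-3, successive data/bss pages after __end_rodata
 * are homed on cpus 1, 2, 3, 1, 2, ..., with the init stack, the zero
 * page, and (on tilepro) the atomic_locks page skipped so they keep
 * their special homes.  The walk is deterministic, so calling
 * init_pgprot() twice for the same address yields the same home.
 */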

/*
 * This function sets up how we cache the kernel text.  If we have
 * hash-for-home support, normally that is used instead (see the
 * kcache_hash boot flag for more information).  But if we end up
 * using a page-based caching technique, this option sets up the
 * details of that.  In addition, the "ktext=nocache" option may
 * always be used to disable local caching of text pages, if desired.
 */

static int __initdata ktext_arg_seen;
static int __initdata ktext_small;
static int __initdata ktext_local;
static int __initdata ktext_all;
static int __initdata ktext_nondataplane;
static int __initdata ktext_nocache;
static struct cpumask __initdata ktext_mask;

static int __init setup_ktext(char *str)
{
	if (str == NULL)
		return -EINVAL;

	/* If you have a leading "nocache", turn off ktext caching */
	if (strncmp(str, "nocache", 7) == 0) {
		ktext_nocache = 1;
		pr_info("ktext: disabling local caching of kernel text\n");
		str += 7;
		if (*str == ',')
			++str;
		if (*str == '\0')
			return 0;
	}

	ktext_arg_seen = 1;

	/* Default setting: use a huge page */
	if (strcmp(str, "huge") == 0)
		pr_info("ktext: using one huge locally cached page\n");

	/* Pay TLB cost but get no cache benefit: cache small pages locally */
	else if (strcmp(str, "local") == 0) {
		ktext_small = 1;
		ktext_local = 1;
		pr_info("ktext: using small pages with local caching\n");
	}

	/* Neighborhood cache ktext pages on all cpus. */
	else if (strcmp(str, "all") == 0) {
		ktext_small = 1;
		ktext_all = 1;
		pr_info("ktext: using maximal caching neighborhood\n");
	}


	/* Neighborhood ktext pages on specified mask */
	else if (cpulist_parse(str, &ktext_mask) == 0) {
		if (cpumask_weight(&ktext_mask) > 1) {
			ktext_small = 1;
			pr_info("ktext: using caching neighborhood %*pbl with small pages\n",
				cpumask_pr_args(&ktext_mask));
		} else {
			pr_info("ktext: caching on cpu %*pbl with one huge page\n",
				cpumask_pr_args(&ktext_mask));
		}
	}

	else if (*str)
		return -EINVAL;

	return 0;
}

early_param("ktext", setup_ktext);
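
/*
 * Boot-argument forms accepted by the parser above (cpu lists are
 * illustrative): "ktext=huge" (the default), "ktext=local",
 * "ktext=all", "ktext=0-3" to neighborhood-cache text on cpus 0-3,
 * or a combination such as "ktext=nocache,0-3", which uses the same
 * neighborhood but with ktext_nocache set so that ktext_set_nocache()
 * below disables local L2 allocation for the text pages.
 */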


static inline pgprot_t ktext_set_nocache(pgprot_t prot)
{
	if (!ktext_nocache)
		prot = hv_pte_set_nc(prot);
	else
		prot = hv_pte_set_no_alloc_l2(prot);
	return prot;
}

/* Temporary page table we use for staging. */
static pgd_t pgtables[PTRS_PER_PGD]
 __attribute__((aligned(HV_PAGE_TABLE_ALIGN)));

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START),
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long long irqmask;
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;

	if (ktext_arg_seen && ktext_hash) {
		pr_warn("warning: \"ktext\" boot argument ignored if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warn("warning: \"kdata\" boot argument ignored if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warn("warning: disabling \"kdata=huge\"; requires kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;

	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge page at %#lx\n",
					       address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_map now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad))
			pr_info("ktext: not using unavailable cpus %*pbl\n",
				cpumask_pr_args(&bad));
		if (cpumask_empty(&ktext_mask)) {
			pr_warn("ktext: no valid cpus; caching on %d\n",
				smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}

	address = MEM_SV_START;
	pmd = get_pmd(pgtables, address);
	pfn = 0;  /* code starts at PA 0 */
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_CACHE_NO_L3);
		} else {
			prot = hv_pte_set_mode(prot,
					       HV_PTE_MODE_CACHE_TILE_L3);
			cpu = cpumask_first(&ktext_mask);

			prot = ktext_set_nocache(prot);
		}

		BUG_ON(address != (unsigned long)_text);
		pte = NULL;
		for (; address < (unsigned long)_einittext;
		     pfn++, address += PAGE_SIZE) {
			pte_ofs = pte_index(address);
			if (pte_ofs == 0) {
				if (pte)
					assign_pte(pmd++, pte);
				pte = alloc_pte();
			}
			if (!ktext_local) {
				prot = set_remote_cache_cpu(prot, cpu);
				cpu = cpumask_next(cpu, &ktext_mask);
				if (cpu == NR_CPUS)
					cpu = cpumask_first(&ktext_mask);
			}
			pte[pte_ofs] = pfn_pte(pfn, prot);
		}
		if (pte)
			assign_pte(pmd, pte);
	} else {
		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
		pteval = pte_mkhuge(pteval);
		if (ktext_hash) {
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_HASH_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (cpumask_weight(&ktext_mask) == 1) {
			pteval = set_remote_cache_cpu(pteval,
					cpumask_first(&ktext_mask));
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_TILE_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (ktext_nocache)
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_UNCACHED);
		else
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_NO_L3);
		for (; address < (unsigned long)_einittext;
		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
	}

	/* Set swapper_pgprot here so it is flushed to memory right away. */
	swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

	/*
	 * Since we may be changing the caching of the stack and page
	 * table itself, we invoke an assembly helper to do the
	 * following steps:
	 *
	 *  - flush the cache so we start with an empty slate
	 *  - install pgtables[] as the real page table
	 *  - flush the TLB so the new page table takes effect
	 */
	irqmask = interrupt_mask_save_mask();
	interrupt_mask_set_mask(-1ULL);
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __this_cpu_read(current_asid),
				       cpumask_bits(my_cpu_mask));
	interrupt_mask_restore_mask(irqmask);
	BUG_ON(rc != 0);

	/* Copy the page table back to the normal swapper_pg_dir. */
	memcpy(pgd_base, pgtables, sizeof(pgtables));
	__install_page_table(pgd_base, __this_cpu_read(current_asid),
			     swapper_pgprot);

	/*
	 * We just read swapper_pgprot and thus brought it into the cache,
	 * with its new home & caching mode.  When we start the other CPUs,
	 * they're going to reference swapper_pgprot via their initial fake
	 * VA-is-PA mappings, which cache everything locally.  At that
	 * time, if it's in our cache with a conflicting home, the
	 * simulator's coherence checker will complain.  So, flush it out
	 * of our cache; we're not going to ever use it again anyway.
	 */
	__insn_finv(&swapper_pgprot);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid.  The argument is a physical page number.
 *
 * On Tile, the only valid things for which we can just hand out unchecked
 * PTEs are the kernel code and data.  Anything else might change its
 * homing with time, and we wouldn't know to adjust the /dev/mem PTEs.
 * Note that init_thread_union is released to heap soon after boot,
 * so we include it in the init data.
 *
 * For TILE-Gx, we might want to consider allowing access to PA
 * regions corresponding to PCI space, etc.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	return pagenr < kaddr_to_pfn(_end) &&
		!(pagenr >= kaddr_to_pfn(&init_thread_union) ||
		  pagenr < kaddr_to_pfn(__init_end)) &&
		!(pagenr >= kaddr_to_pfn(_sinittext) ||
		  pagenr <= kaddr_to_pfn(_einittext-1));
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
#endif /* CONFIG_HIGHMEM */


#ifndef CONFIG_64BIT
static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page = pfn_to_page(start);

	for (pfn = start; pfn < end; ) {
		/* Optimize by freeing pages in large batches */
		int order = __ffs(pfn);
		int count, i;
		struct page *p;

		if (order >= MAX_ORDER)
			order = MAX_ORDER-1;
		count = 1 << order;
		while (pfn + count > end) {
			count >>= 1;
			--order;
		}
		for (p = page, i = 0; i < count; ++i, ++p) {
			__ClearPageReserved(p);
			/*
			 * Hacky direct set to avoid unnecessary
			 * lock take/release for EVERY page here.
			 */
			p->_count.counter = 0;
			p->_mapcount.counter = -1;
		}
		init_page_count(page);
		__free_pages(page, order);
		adjust_managed_page_count(page, count);

		page += count;
		pfn += count;
	}
}
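
/*
 * The __ffs(pfn) above picks the largest buddy order the current pfn
 * is naturally aligned to, so pages are returned to the allocator in
 * the biggest legal power-of-two blocks.  E.g. (pfn value illustrative)
 * at pfn 0x1300 the lowest set bit is bit 8, so up to 256 pages are
 * freed at once; the while loop then halves the block as needed so it
 * does not run past 'end'.
 */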

static void __init set_non_bootmem_pages_init(void)
{
	struct zone *z;
	for_each_zone(z) {
		unsigned long start, end;
		int nid = z->zone_pgdat->node_id;
#ifdef CONFIG_HIGHMEM
		int idx = zone_idx(z);
#endif

		start = z->zone_start_pfn;
		end = start + z->spanned_pages;
		start = max(start, node_free_pfn[nid]);
		start = max(start, max_low_pfn);

#ifdef CONFIG_HIGHMEM
		if (idx == ZONE_HIGHMEM)
			totalhigh_pages += z->spanned_pages;
#endif
		if (kdata_huge) {
			unsigned long percpu_pfn = node_percpu_pfn[nid];
			if (start < percpu_pfn && end > percpu_pfn)
				end = percpu_pfn;
		}
#ifdef CONFIG_PCI
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			if (end > pci_reserve_end_pfn)
				init_free_pfn_range(pci_reserve_end_pfn, end);
			end = pci_reserve_start_pfn;
		}
#endif
		init_free_pfn_range(start, end);
	}
}
#endif

/*
 * paging_init() sets up the page tables - note that all of lowmem is
 * already mapped by head.S.
 */
void __init paging_init(void)
{
#ifdef __tilegx__
	pud_t *pud;
#endif
	pgd_t *pgd_base = swapper_pg_dir;

	kernel_physical_mapping_init(pgd_base);

	/* Fixed mappings, only the page table structure has to be created. */
	page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1),
			      FIXADDR_TOP, pgd_base);

#ifdef CONFIG_HIGHMEM
	permanent_kmaps_init(pgd_base);
#endif

#ifdef __tilegx__
	/*
	 * Since GX allocates just one pmd_t array worth of vmalloc space,
	 * we go ahead and allocate it statically here, then share it
	 * globally.  As a result we don't have to worry about any task
	 * changing init_mm once we get up and running, and there's no
	 * need for e.g. vmalloc_sync_all().
	 */
	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
	pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
	assign_pmd(pud, alloc_pmd());
#endif
}


/*
 * Walk the kernel page tables and derive the page_home() from
 * the PTEs, so that set_pte() can properly validate the caching
 * of all PTEs it sees.
 */
void __init set_page_homes(void)
{
}

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_FLATMEM
	max_mapnr = max_low_pfn;
#endif
}

void __init mem_init(void)
{
	int i;
#ifndef __tilegx__
	void *last;
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
		pr_err("fixmap and kmap areas overlap - this will crash\n");
		pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

	/* this will put all bootmem onto the freelists */
	free_all_bootmem();

#ifndef CONFIG_64BIT
	/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
	set_non_bootmem_pages_init();
#endif

	mem_init_print_info(NULL);

	/*
	 * In debug mode, dump some interesting memory mappings.
	 */
#ifdef CONFIG_HIGHMEM
	printk(KERN_DEBUG "  KMAP    %#lx - %#lx\n",
	       FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1);
	printk(KERN_DEBUG "  PKMAP   %#lx - %#lx\n",
	       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
#endif
	printk(KERN_DEBUG "  VMALLOC %#lx - %#lx\n",
	       _VMALLOC_START, _VMALLOC_END - 1);
#ifdef __tilegx__
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		struct pglist_data *node = &node_data[i];
		if (node->node_present_pages) {
			unsigned long start = (unsigned long)
				pfn_to_kaddr(node->node_start_pfn);
			unsigned long end = start +
				(node->node_present_pages << PAGE_SHIFT);
			printk(KERN_DEBUG "  MEM%d    %#lx - %#lx\n",
			       i, start, end - 1);
		}
	}
#else
	last = high_memory;
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		if ((unsigned long)vbase_map[i] != -1UL) {
			printk(KERN_DEBUG "  LOWMEM%d %#lx - %#lx\n",
			       i, (unsigned long) (vbase_map[i]),
			       (unsigned long) (last-1));
			last = vbase_map[i];
		}
	}
#endif

#ifndef __tilegx__
	/*
	 * Convert from using one lock for all atomic operations to
	 * one per cpu.
	 */
	__init_atomic_per_cpu();
#endif
}

/*
 * this is for the non-NUMA, single node SMP system case.
 * Specifically, in the case of x86, we will always add
 * memory to the highmem for now.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
int arch_add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdata = &contig_page_data;
	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	/* TODO */
	return -EBUSY;
}
#endif
#endif

struct kmem_cache *pgd_cache;

void __init pgtable_cache_init(void)
{
	pgd_cache = kmem_cache_create("pgd", SIZEOF_PGD, SIZEOF_PGD, 0, NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static long __write_once initfree;
#else
static long __write_once initfree = 1;
#endif

/* Select whether to free (1) or mark unusable (0) the __init pages. */
static int __init set_initfree(char *str)
{
	long val;
	if (kstrtol(str, 0, &val) == 0) {
		initfree = val;
		pr_info("initfree: %s free init pages\n",
			initfree ? "will" : "won't");
	}
	return 1;
}
__setup("initfree=", set_initfree);
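
/*
 * E.g. booting with "initfree=0" leaves the __init pages allocated
 * but unmapped (via the pte_clear() in free_init_pages() below), so
 * a stray late reference takes a kernel page fault instead of
 * silently reading freed memory; "initfree=1" (the default except
 * under CONFIG_DEBUG_PAGEALLOC) poisons the pages and returns them
 * to the allocator.
 */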

static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = (unsigned long) begin;

	if (kdata_huge && !initfree) {
		pr_warn("Warning: ignoring initfree=0: incompatible with kdata=huge\n");
		initfree = 1;
	}
	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
	local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/*
		 * Note we just reset the home here directly in the
		 * page table.  We know this is safe because our caller
		 * just flushed the caches on all the other cpus,
		 * and they won't be touching any of these pages.
		 */
		int pfn = kaddr_to_pfn((void *)addr);
		struct page *page = pfn_to_page(pfn);
		pte_t *ptep = virt_to_kpte(addr);
		if (!initfree) {
			/*
			 * If debugging page accesses then do not free
			 * this memory but mark it not present - any
			 * buggy init-section access will create a
			 * kernel page fault:
			 */
			pte_clear(&init_mm, addr, ptep);
			continue;
		}
		if (pte_huge(*ptep))
			BUG_ON(!kdata_huge);
		else
			set_pte_at(&init_mm, addr, ptep,
				   pfn_pte(pfn, PAGE_KERNEL));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	const unsigned long text_delta = MEM_SV_START - PAGE_OFFSET;

	/*
	 * Evict the cache on all cores to avoid incoherence.
	 * We are guaranteed that no one will touch the init pages any more.
	 */
	homecache_evict(&cpu_cacheable_map);

	/* Free the data pages that we won't use again after init. */
	free_init_pages("unused kernel data",
			(unsigned long)__init_begin,
			(unsigned long)__init_end);

	/*
	 * Free the pages mapped from 0xc0000000 that correspond to code
	 * pages from MEM_SV_START that we won't use again after init.
	 */
	free_init_pages("unused kernel text",
			(unsigned long)_sinittext - text_delta,
			(unsigned long)_einittext - text_delta);

	/* Do a global TLB flush so everyone sees the changes. */
	flush_tlb_all();
}