/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

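/*
 * TCR_EL1.T0SZ to use while the identity map is active. head.S reduces this
 * below TCR_T0SZ(VA_BITS) at boot if the idmap text lies at a physical
 * address that a VA_BITS-sized input range cannot cover.
 */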
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

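/*
 * Select the memory attributes for a mapping of physical memory (e.g. via
 * /dev/mem): addresses without a valid pfn (i.e. not RAM) are mapped
 * non-cached, O_SYNC mappings of RAM are made write-combine, and everything
 * else keeps the caller's attributes.
 */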
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

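/*
 * Boot-time page table allocator. The page comes straight from memblock and
 * is not yet guaranteed to be reachable via the linear map, so it is zeroed
 * through the (always-mapped) FIX_PTE fixmap slot rather than being touched
 * directly.
 */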
static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!phys);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

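/*
 * Install page-granular mappings for [addr, end) under *pmd. If the pmd is
 * empty, or holds a section mapping that must be broken up, a new pte table
 * is allocated via pgtable_alloc() and hooked in first.
 */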
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

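/*
 * Install mappings for [addr, end) under *pud, using PMD-sized section
 * mappings whenever the virtual/physical alignment and remaining size allow
 * it, and falling back to alloc_init_pte() otherwise.
 */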
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * Need to have the 1G of mappings continue to be
			 * present.
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

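/*
 * Install mappings for [addr, end) under *pgd, using 1GB block entries when
 * use_1G_block() permits and alloc_init_pmd() otherwise.
 */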
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For the 4K granule only, attempt to put down a 1GB block.
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = pud_page_paddr(old_pud);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'phys', 'virt', 'size' and 'prot'.
 */
static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
		     phys_addr_t size, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(void))
{
	unsigned long addr, length, end, next;

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

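/*
 * Page table allocator for use once the buddy allocator is up: pages from
 * __get_free_page(PGALLOC_GFP) come back zeroed and already reachable via
 * the linear map, so no fixmap indirection is needed.
 */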
static phys_addr_t late_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*alloc)(void))
{
	init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
}

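/*
 * The wrappers below choose the target pgd and the allocator:
 * create_mapping() populates init_mm at boot via memblock,
 * create_pgd_mapping() targets another pgd (e.g. the EFI runtime one), and
 * create_mapping_late() modifies init_mm once the buddy allocator is up.
 */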
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     early_pgtable_alloc);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
	unsigned long kernel_start = __pa(_stext);
	unsigned long kernel_end = __pa(_end);

	/*
	 * The kernel itself is mapped at page granularity. Map all other
	 * memory, making sure we don't overwrite the existing kernel mappings.
	 */

	/* No overlap with the kernel. */
	if (end < kernel_start || start >= kernel_end) {
		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
				     end - start, PAGE_KERNEL,
				     early_pgtable_alloc);
		return;
	}

	/*
	 * This block overlaps the kernel mapping. Map the portion(s) which
	 * don't overlap.
	 */
	if (start < kernel_start)
		__create_pgd_mapping(pgd, start,
				     __phys_to_virt(start),
				     kernel_start - start, PAGE_KERNEL,
				     early_pgtable_alloc);
	if (kernel_end < end)
		__create_pgd_mapping(pgd, kernel_end,
				     __phys_to_virt(kernel_end),
				     end - kernel_end, PAGE_KERNEL,
				     early_pgtable_alloc);
}

static void __init map_mem(pgd_t *pgd)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgd, start, end);
	}
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
			    (unsigned long)_etext - (unsigned long)_stext,
			    PAGE_KERNEL_ROX);
}
#endif

void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			    (unsigned long)__init_end - (unsigned long)__init_begin,
			    PAGE_KERNEL);
}

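/*
 * Map a single page-aligned [va_start, va_end) chunk of the kernel image
 * with the given permissions into the pgd being constructed.
 */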
static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
				    pgprot_t prot)
{
	phys_addr_t pa_start = __pa(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgd)
{
	map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC);
	map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC);
	map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL);

	/*
	 * The fixmap falls in a separate pgd to the kernel, and doesn't live
	 * in the carveout for the swapper_pg_dir. We can simply re-use the
	 * existing dir for the fixmap.
	 */
	set_pgd(pgd_offset_raw(pgd, FIXADDR_START), *pgd_offset_k(FIXADDR_START));

	kasan_copy_shadow(pgd);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	phys_addr_t pgd_phys = early_pgtable_alloc();
	pgd_t *pgd = pgd_set_fixmap(pgd_phys);

	map_kernel(pgd);
	map_mem(pgd);

	/*
	 * We want to reuse the original swapper_pg_dir so we don't have to
	 * communicate the new address to non-coherent secondaries in
	 * secondary_entry, and so cpu_switch_mm can generate the address with
	 * adrp+add rather than a load from some global variable.
	 *
	 * To do this we need to go via a temporary pgd.
	 */
	cpu_replace_ttbr1(__va(pgd_phys));
	memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
	cpu_replace_ttbr1(swapper_pg_dir);

	pgd_clear_fixmap();
	memblock_free(pgd_phys, PAGE_SIZE);

	/*
	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
	 * allocated with it.
	 */
	memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
		      SWAPPER_DIR_SIZE - PAGE_SIZE);

	bootmem_init();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

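/*
 * Static tables backing the fixmap region. early_fixmap_init() wires these
 * into swapper_pg_dir before any memory allocator is available; only the
 * levels that exist for the configured CONFIG_PGTABLE_LEVELS are defined.
 */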
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

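/*
 * Map the FDT through the fixmap so that it can be parsed before the linear
 * mapping of RAM exists. Returns the FDT's virtual address, or NULL if the
 * blob is absent, misaligned, fails the header check or exceeds
 * MAX_FDT_SIZE.
 */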
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the size field of the
	 * FDT header after mapping the first chunk, double check here if that
	 * is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
		       SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}