/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

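/*
 * Allocate a zeroed page for use as a page table while the linear map
 * is not yet up. The new page is cleared via the fixmap, as it may not
 * be mapped anywhere else at this point.
 */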
static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!phys);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

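/*
 * Ensure that *pmd refers to a table of ptes, allocating one (and
 * splitting up an existing section mapping) if necessary, then install
 * ptes for [addr, end) starting at the given pfn.
 */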
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

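/*
 * Populate the pmd level for [addr, end) -> phys, using section
 * mappings where the range and physical address are section-aligned
 * and falling back to ptes otherwise.
 */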
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * The existing 1G of mappings must remain
			 * present while we split them up.
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

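/*
 * A 1GB pud-level block can only be used with the 4K granule
 * (PAGE_SHIFT == 12), and only when the range and the physical address
 * cover a full, naturally aligned PUD_SIZE region.
 */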
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

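/*
 * Populate the pud level for [addr, end) -> phys, using 1GB block
 * mappings where possible and descending to the pmd level otherwise.
 */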
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = pud_page_paddr(old_pud);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

/*
 * Create the page directory entries and any necessary page tables for
 * the mapping of [virt, virt + size) to phys.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
			     phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     phys_addr_t (*pgtable_alloc)(void))
{
	unsigned long addr, length, end, next;

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot, pgtable_alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

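/*
 * Allocate a zeroed page-table page from the page allocator; used once
 * the core memory allocators are available.
 */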
static phys_addr_t late_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt), phys, virt,
			 size, prot, early_pgtable_alloc);
}

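/*
 * Create a mapping in an arbitrary mm. Intermediate tables come from
 * the page allocator, so this is presumably only safe to call once the
 * core allocators are up.
 */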
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
			 late_pgtable_alloc);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_mapping(&init_mm, pgd_offset_k(virt),
			 phys, virt, size, prot, late_pgtable_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped.
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			       end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			       end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				       kernel_x_start - start,
				       PAGE_KERNEL);
		create_mapping(kernel_x_start,
			       __phys_to_virt(kernel_x_start),
			       kernel_x_end - kernel_x_start,
			       PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				       __phys_to_virt(kernel_x_end),
				       end - kernel_x_end,
				       PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
		       PAGE_KERNEL_EXEC);
}
#endif

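/* Map all usable memblock regions into the kernel linear mapping. */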
static void __init map_mem(void)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(start, end);
	}
}

static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SWAPPER_BLOCK_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
			       __pa(_stext) - aligned_start,
			       PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
						     SWAPPER_BLOCK_SIZE);
		create_mapping(__pa(__init_end), (unsigned long)__init_end,
			       aligned_end - __pa(__init_end),
			       PAGE_KERNEL);
	}
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
			    (unsigned long)_etext - (unsigned long)_stext,
			    PAGE_KERNEL_ROX);
}
#endif

void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			    (unsigned long)__init_end - (unsigned long)__init_begin,
			    PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	map_mem();
	fixup_executable();

	bootmem_init();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

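/*
 * Wire the fixmap region up to the statically allocated bm_pud/bm_pmd/
 * bm_pte tables so that __set_fixmap() works before any page-table
 * allocator does.
 */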
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

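/*
 * Install the pte for a fixmap slot, or clear it (and invalidate the
 * TLB entry) when called with an empty pgprot.
 */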
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

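/*
 * Map the FDT through the fixmap so it can be parsed this early;
 * returns the virtual address of the blob, or NULL if it is missing,
 * misaligned or larger than MAX_FDT_SIZE.
 */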
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the size field of the
	 * FDT header after mapping the first chunk, double check here if that
	 * is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
		       SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}