/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

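/*
 * Allocate a zeroed page-table page from memblock. This is the allocator
 * used while the kernel is still running off the initial page tables and
 * the slab allocator is not yet available.
 */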
static void __init *early_alloc(unsigned long sz)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(sz, sz);
	BUG_ON(!phys);
	ptr = __va(phys);
	memset(ptr, 0, sz);
	return ptr;
}

/*
 * Remap a PMD section mapping as a table of pages.
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later. Default the new page
		 * range as contiguous ptes.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

/*
 * Given a PTE with the CONT bit set, determine where the CONT range
 * starts, and clear the entire range of PTE CONT bits.
 */
static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
{
	int i;

	pte -= CONT_RANGE_OFFSET(addr);
	for (i = 0; i < CONT_PTES; i++) {
		set_pte(pte, pte_mknoncont(*pte));
		pte++;
	}
	flush_tlb_all();
}

/*
 * Given a range of PTEs, set the pfn and apply the provided page
 * protection flags.
 */
static void __populate_init_pte(pte_t *pte, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot)
{
	unsigned long pfn = __phys_to_pfn(phys);

	do {
		/* clear all the bits except the pfn, then apply the prot */
		set_pte(pte, pfn_pte(pfn, prot));
		pte++;
		pfn++;
		addr += PAGE_SIZE;
	} while (addr != end);
}

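/*
 * Create PTE-level mappings for [addr, end), allocating a new pte page
 * (and splitting an existing section mapping) when necessary. Sub-ranges
 * whose virtual and physical addresses are CONT_SIZE-aligned are mapped
 * with the contiguous hint set.
 */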
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, phys_addr_t phys,
			   pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		flush_tlb_all();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		next = min(end, (addr + CONT_SIZE) & CONT_MASK);
		if (((addr | next | phys) & ~CONT_MASK) == 0) {
			/* a block of CONT_PTES */
			__populate_init_pte(pte, addr, next, phys,
					    __pgprot(pgprot_val(prot) | PTE_CONT));
		} else {
			/*
			 * If the range being split is already inside of a
			 * contiguous range but this PTE isn't going to be
			 * contiguous, then we want to unmark the adjacent
			 * ranges, then update the portion of the range we
			 * are interested in.
			 */
			clear_cont_pte_range(pte, addr);
			__populate_init_pte(pte, addr, next, phys, prot);
		}

		pte += (next - addr) >> PAGE_SHIFT;
		phys += next - addr;
		addr = next;
	} while (addr != end);
}

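/*
 * Remap a PUD section mapping as a table of PMD section mappings covering
 * the same physical range with the same attributes.
 */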
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

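/*
 * Create PMD-level mappings for [addr, end), using section mappings where
 * the virtual range and physical address are section-aligned and falling
 * back to alloc_init_pte() otherwise.
 */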
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * The existing 1G of mappings must remain present
			 * while we repopulate the pud.
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, phys, prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

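/*
 * A 1GB block mapping is only used with 4K pages (PAGE_SHIFT == 12), and
 * only when the virtual range and the physical address are all aligned to
 * PUD_SIZE.
 */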
static inline bool use_1G_block(unsigned long addr, unsigned long next,
			unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

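/*
 * Create PUD-level mappings for [addr, end), using 1GB block mappings
 * where use_1G_block() allows it and descending to alloc_init_pmd()
 * otherwise.
 */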
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping from 'phys' to 'virt' of length 'size'.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
				    phys_addr_t phys, unsigned long virt,
				    phys_addr_t size, pgprot_t prot,
				    void *(*alloc)(unsigned long size))
{
	unsigned long addr, length, end, next;

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

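/*
 * Allocate a page-table page from the page allocator. This is the
 * allocator used for mappings created once the kernel memory allocators
 * are fully up.
 */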
static void *late_alloc(unsigned long size)
{
	void *ptr;

	BUG_ON(size > PAGE_SIZE);
	ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);
	return ptr;
}

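/*
 * Create a kernel mapping early in boot, allocating any page tables from
 * memblock. Only addresses at or above VMALLOC_START are accepted.
 */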
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
			 size, prot, early_alloc);
}

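/*
 * Create a mapping in the page tables of an arbitrary mm, allocating any
 * intermediate tables from the page allocator.
 */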
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
			 late_alloc);
}

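/*
 * Like create_mapping(), but for use once the kernel memory allocators are
 * up, e.g. to change the permissions of existing kernel mappings.
 */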
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
			 phys, virt, size, prot, late_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine-grained later once all memory
	 * is mapped.
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				kernel_x_start - start,
				PAGE_KERNEL);
		create_mapping(kernel_x_start,
				__phys_to_virt(kernel_x_start),
				kernel_x_end - kernel_x_start,
				PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				__phys_to_virt(kernel_x_end),
				end - kernel_x_end,
				PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
			PAGE_KERNEL_EXEC);
}
#endif

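/*
 * Map all memory banks reported by memblock into the linear mapping.
 */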
static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (with section maps) or PMD_SIZE (without section maps)
	 * of memory starting from PHYS_OFFSET (which must be aligned to 2MB
	 * as per Documentation/arm64/booting.txt).
	 */
	limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

		if (ARM64_SWAPPER_USES_SECTION_MAPS) {
			/*
			 * For the first memory bank, align the start address
			 * and the current memblock limit to prevent
			 * create_mapping() from allocating pte page tables
			 * from unmapped memory. With section maps, if the
			 * first block doesn't end on a section-size boundary,
			 * create_mapping() will try to allocate a pte page,
			 * which may be returned from an unmapped area.
			 * When section maps are not used, the pte page table
			 * for the current limit is already present in
			 * swapper_pg_dir.
			 */
			if (start < limit)
				start = ALIGN(start, SECTION_SIZE);
			if (end < limit) {
				limit = end & SECTION_MASK;
				memblock_set_current_limit(limit);
			}
		}
		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

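/*
 * With section mappings, _stext and __init_end may not be block-aligned.
 * Remap the spill-over on either side of the executable region with
 * ordinary (non-executable) page mappings.
 */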
static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are fully mapped, make the start/end more fine-grained */
	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SWAPPER_BLOCK_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
				__pa(_stext) - aligned_start,
				PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
						     SWAPPER_BLOCK_SIZE);

		create_mapping(__pa(__init_end), (unsigned long)__init_end,
				aligned_end - __pa(__init_end),
				PAGE_KERNEL);
	}
#endif
}

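/*
 * Remap the [_stext, _etext) region as read-only and executable, removing
 * the write permission the initial mappings had.
 */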
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
				(unsigned long)_etext - (unsigned long)_stext,
				PAGE_KERNEL_ROX);
}
#endif

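/*
 * Remap the [__init_begin, __init_end) region as non-executable data, in
 * preparation for the init memory being freed.
 */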
void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			    (unsigned long)__init_end - (unsigned long)__init_begin,
			    PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	map_mem();
	fixup_executable();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
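/*
 * vmemmap_populate() backs the vmemmap region with base pages when the
 * swapper uses PTE-level mappings, and with PMD-sized sections otherwise.
 */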
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

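/*
 * The fixmap page tables are statically allocated so that the fixmap
 * region can be wired up before any memory allocator is available.
 */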
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

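/*
 * Wire the statically allocated bm_* tables into the kernel page tables
 * under FIXADDR_START. Individual fixmap entries are installed later via
 * __set_fixmap().
 */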
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

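/*
 * Install the given physical address and protection bits in the fixmap
 * slot 'idx', or clear the slot (and flush the TLB) when the protection
 * value is empty.
 */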
void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

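/*
 * Map the FDT through the fixmap so that it can be parsed before the
 * linear mapping is in place. Returns the virtual address of the FDT, or
 * NULL if the device tree is missing, misaligned, larger than MAX_FDT_SIZE
 * or fails the header check.
 */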
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. We rely on MIN_FDT_ALIGN being at least
	 * 8 bytes so that we can always access the size field of the FDT
	 * header after mapping the first chunk; double check here that this
	 * is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
		       SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}