/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/stop_machine.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_valid(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

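/*
 * Boot-time page table allocator: returns a zeroed block from memblock,
 * naturally aligned to its size, usable before the core page allocator
 * is up.
 */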
static void __init *early_alloc(unsigned long sz)
{
        void *ptr = __va(memblock_alloc(sz, sz));
        BUG_ON(!ptr);
        memset(ptr, 0, sz);
        return ptr;
}

/*
 * Remap a PMD-level section mapping into individual pages.
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
        unsigned long pfn = pmd_pfn(*pmd);
        int i = 0;

        do {
                /*
                 * Need to have the least restrictive permissions available;
                 * permissions will be fixed up later.
                 */
                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                pfn++;
        } while (pte++, i++, i < PTRS_PER_PTE);
}

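/*
 * Populate the PTE level for [addr, end): allocate a pte table if the pmd
 * does not already point at one (splitting an existing section mapping
 * first so nothing is lost), then write the new ptes.
 */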
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  pgprot_t prot,
                                  void *(*alloc)(unsigned long size))
{
        pte_t *pte;

        if (pmd_none(*pmd) || pmd_bad(*pmd)) {
                pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
                if (pmd_sect(*pmd))
                        split_pmd(pmd, pte);
                __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
                flush_tlb_all();
        }

        pte = pte_offset_kernel(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

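/*
 * Remap a PUD-level section mapping as a table of PMD-sized blocks,
 * preserving the original output address and attributes.
 */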
void split_pud(pud_t *old_pud, pmd_t *pmd)
{
        unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
        pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
        int i = 0;

        do {
                set_pmd(pmd, __pmd(addr | prot));
                addr += PMD_SIZE;
        } while (pmd++, i++, i < PTRS_PER_PMD);
}

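/*
 * Populate the PMD level for [addr, end): use section (block) mappings
 * wherever the virtual range and physical address are SECTION_SIZE
 * aligned, otherwise fall back to a pte table via alloc_init_pte().
 */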
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                                  unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
                                  void *(*alloc)(unsigned long size))
{
        pmd_t *pmd;
        unsigned long next;

        /*
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_bad(*pud)) {
                pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
                if (pud_sect(*pud)) {
                        /*
                         * Need to keep the 1G section mapping present
                         * while it is being split.
                         */
                        split_pud(pud, pmd);
                }
                pud_populate(mm, pud, pmd);
                flush_tlb_all();
        }

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0) {
                        pmd_t old_pmd = *pmd;
                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
                        /*
                         * Check for previous table entries created during
                         * boot (__create_page_tables) and flush them.
                         */
                        if (!pmd_none(old_pmd)) {
                                flush_tlb_all();
                                if (pmd_table(old_pmd)) {
                                        phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
                                        BUG_ON(alloc != early_alloc);
                                        memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
                                       prot, alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
}

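/*
 * A 1GB block mapping is only possible with the 4KB granule
 * (PAGE_SHIFT == 12), and only when the virtual range and the physical
 * address are all PUD_SIZE aligned.
 */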
static inline bool use_1G_block(unsigned long addr, unsigned long next,
                        unsigned long phys)
{
        if (PAGE_SHIFT != 12)
                return false;

        if (((addr | next | phys) & ~PUD_MASK) != 0)
                return false;

        return true;
}

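/*
 * Populate the PUD level for [addr, end): put down 1GB block mappings
 * where use_1G_block() allows it, otherwise descend into alloc_init_pmd().
 * A pmd table left over from the boot mappings is freed back to memblock.
 */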
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
                                  unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
                                  void *(*alloc)(unsigned long size))
{
        pud_t *pud;
        unsigned long next;

        if (pgd_none(*pgd)) {
                pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
                pgd_populate(mm, pgd, pud);
        }
        BUG_ON(pgd_bad(*pgd));

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
                if (use_1G_block(addr, next, phys)) {
                        pud_t old_pud = *pud;
                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));

                        /*
                         * If we have an old value for a pud, it will
                         * be pointing to a pmd table that we no longer
                         * need (from swapper_pg_dir).
                         *
                         * Look up the old pmd table and free it.
                         */
                        if (!pud_none(old_pud)) {
                                flush_tlb_all();
                                if (pud_table(old_pud)) {
                                        phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
                                        BUG_ON(alloc != early_alloc);
                                        memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'phys', 'virt' and 'size'.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
                                    phys_addr_t phys, unsigned long virt,
                                    phys_addr_t size, pgprot_t prot,
                                    void *(*alloc)(unsigned long size))
{
        unsigned long addr, length, end, next;

        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
}

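/*
 * Page table allocator for mappings created after the initial boot
 * tables: hands out single pages from the page allocator, at most
 * PAGE_SIZE per request.
 */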
static void *late_alloc(unsigned long size)
{
        void *ptr;

        BUG_ON(size > PAGE_SIZE);
        ptr = (void *)__get_free_page(PGALLOC_GFP);
        BUG_ON(!ptr);
        return ptr;
}

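/*
 * Create a boot-time mapping in the kernel page tables, allocating any
 * intermediate tables from memblock. Addresses below VMALLOC_START are
 * rejected.
 */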
static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
                         size, prot, early_alloc);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
{
        __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
                         late_alloc);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }

        return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
                                phys, virt, size, prot, late_alloc);
}

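/*
 * With CONFIG_DEBUG_RODATA, the region covering the kernel text and init
 * sections is mapped executable and the remainder of each memory bank is
 * mapped PAGE_KERNEL; without it, whole banks are mapped PAGE_KERNEL_EXEC.
 */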
#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        /*
         * Set up the executable regions using the existing section mappings
         * for now. This will get more fine-grained later once all memory
         * is mapped.
         */
        unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
        unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);

        if (end < kernel_x_start) {
                create_mapping(start, __phys_to_virt(start),
                        end - start, PAGE_KERNEL);
        } else if (start >= kernel_x_end) {
                create_mapping(start, __phys_to_virt(start),
                        end - start, PAGE_KERNEL);
        } else {
                if (start < kernel_x_start)
                        create_mapping(start, __phys_to_virt(start),
                                kernel_x_start - start,
                                PAGE_KERNEL);
                create_mapping(kernel_x_start,
                                __phys_to_virt(kernel_x_start),
                                kernel_x_end - kernel_x_start,
                                PAGE_KERNEL_EXEC);
                if (kernel_x_end < end)
                        create_mapping(kernel_x_end,
                                __phys_to_virt(kernel_x_end),
                                end - kernel_x_end,
                                PAGE_KERNEL);
        }
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        create_mapping(start, __phys_to_virt(start), end - start,
                        PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
        struct memblock_region *reg;
        phys_addr_t limit;

        /*
         * Temporarily limit the memblock range. We need to do this as
         * create_mapping requires puds, pmds and ptes to be allocated from
         * memory addressable from the initial direct kernel mapping.
         *
         * The initial direct kernel mapping, located at swapper_pg_dir, gives
         * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
         * PHYS_OFFSET (which must be aligned to 2MB as per
         * Documentation/arm64/booting.txt).
         */
        if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
                limit = PHYS_OFFSET + PMD_SIZE;
        else
                limit = PHYS_OFFSET + PUD_SIZE;
        memblock_set_current_limit(limit);

        /* map all the memory banks */
        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;

                if (start >= end)
                        break;

#ifndef CONFIG_ARM64_64K_PAGES
                /*
                 * For the first memory bank align the start address and
                 * current memblock limit to prevent create_mapping() from
                 * allocating pte page tables from unmapped memory.
                 * When 64K pages are enabled, the pte page table for the
                 * first PGDIR_SIZE is already present in swapper_pg_dir.
                 */
                if (start < limit)
                        start = ALIGN(start, PMD_SIZE);
                if (end < limit) {
                        limit = end & PMD_MASK;
                        memblock_set_current_limit(limit);
                }
#endif
                __map_memblock(start, end);
        }

        /* Limit no longer required. */
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

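/*
 * Remap the slack around _stext and __init_end that was introduced by the
 * SECTION_SIZE rounding in __map_memblock(), using page-granular,
 * non-executable mappings.
 */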
void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
        /* now that we are actually fully mapped, make the start/end more fine grained */
        if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
                unsigned long aligned_start = round_down(__pa(_stext),
                                                         SECTION_SIZE);

                create_mapping(aligned_start, __phys_to_virt(aligned_start),
                                __pa(_stext) - aligned_start,
                                PAGE_KERNEL);
        }

        if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
                unsigned long aligned_end = round_up(__pa(__init_end),
                                                     SECTION_SIZE);
                create_mapping(__pa(__init_end), (unsigned long)__init_end,
                                aligned_end - __pa(__init_end),
                                PAGE_KERNEL);
        }
#endif
}

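/*
 * Remap the kernel text (_stext to _etext) as read-only, executable
 * memory once it no longer needs to be written.
 */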
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
        create_mapping_late(__pa(_stext), (unsigned long)_stext,
                                (unsigned long)_etext - (unsigned long)_stext,
                                PAGE_KERNEL_EXEC | PTE_RDONLY);
}
#endif

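/*
 * Remap the init sections with PAGE_KERNEL so that they are no longer
 * executable once they have served their purpose.
 */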
void fixup_init(void)
{
        create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
                        (unsigned long)__init_end - (unsigned long)__init_begin,
                        PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
        void *zero_page;

        map_mem();
        fixup_executable();

        /* allocate the zero page. */
        zero_page = early_alloc(PAGE_SIZE);

        bootmem_init();

        empty_zero_page = virt_to_page(zero_page);

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
        flush_tlb_all();
}

/*
 * Enable the identity mapping so that the MMU can be disabled.
 */
void setup_mm_for_reboot(void)
{
        cpu_switch_mm(idmap_pg_dir, &init_mm);
        flush_tlb_all();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_sect(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_sect(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}
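
/*
 * The vmemmap is populated with base pages on 64K-page kernels and with
 * PMD-sized blocks from vmemmap_alloc_block_buf() otherwise.
 */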
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        return vmemmap_populate_basepages(start, end, node);
}
#else   /* !CONFIG_ARM64_64K_PAGES */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        do {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
                } else
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
        } while (addr = next, addr != end);

        return 0;
}
#endif  /* CONFIG_ARM64_64K_PAGES */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif  /* CONFIG_SPARSEMEM_VMEMMAP */

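/*
 * Statically allocated page tables backing the fixmap region; only the
 * levels that are not folded for the configured CONFIG_ARM64_PGTABLE_LEVELS
 * are needed.
 */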
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_ARM64_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

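/*
 * Walk init_mm down to the requested level for a fixmap address; the
 * intermediate levels must already have been populated (see
 * early_fixmap_init() below).
 */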
static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);

        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

        return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pud = fixmap_pud(addr);

        BUG_ON(pud_none(*pud) || pud_bad(*pud));

        return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        pmd_t *pmd = fixmap_pmd(addr);

        BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

        return pte_offset_kernel(pmd, addr);
}

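/*
 * Hook the statically allocated bm_* tables into init_mm so that
 * __set_fixmap() can be used before the normal page table allocators are
 * available. The FIX_BTMAP checks verify that the boot-time ioremap slots
 * all fall within the single pmd covered by bm_pte.
 */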
void __init early_fixmap_init(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = FIXADDR_START;

        pgd = pgd_offset_k(addr);
        pgd_populate(&init_mm, pgd, bm_pud);
        pud = pud_offset(pgd, addr);
        pud_populate(&init_mm, pud, bm_pmd);
        pmd = pmd_offset(pud, addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmd %p != %p, %p\n",
                        pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
        }
}

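/*
 * Install or clear a single fixmap entry: a non-zero pgprot maps the slot
 * to the given physical address, an empty pgprot clears the pte and
 * invalidates the corresponding TLB entry.
 */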
void __set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        pte = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, pte);
                flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
        }
}