blob: 3e511bec69b836cb8a59d5dada7b683b346ec69f [file] [log] [blame]
Christoffer Dall9e9a3672013-01-20 18:43:10 -05001#include <linux/module.h>
Russell King614dd052010-11-21 11:41:57 +00002#include <linux/kernel.h>
Christoffer Dall9e9a3672013-01-20 18:43:10 -05003#include <linux/slab.h>
Ingo Molnar589ee622017-02-04 00:16:44 +01004#include <linux/mm_types.h>
Russell King614dd052010-11-21 11:41:57 +00005
6#include <asm/cputype.h>
Will Deacon89038262011-09-30 11:43:29 +01007#include <asm/idmap.h>
Russell King614dd052010-11-21 11:41:57 +00008#include <asm/pgalloc.h>
9#include <asm/pgtable.h>
Will Deacon89038262011-09-30 11:43:29 +010010#include <asm/sections.h>
David Howells9f97da72012-03-28 18:30:01 +010011#include <asm/system_info.h>
Will Deacon89038262011-09-30 11:43:29 +010012
Russell Kingc5cc87f2014-07-29 12:18:34 +010013/*
14 * Note: accesses outside of the kernel image and the identity map area
15 * are not supported on any CPU using the idmap tables as its current
16 * page tables.
17 */
Will Deacon89038262011-09-30 11:43:29 +010018pgd_t *idmap_pgd;
Russell King981b6712016-03-15 14:55:03 +000019long long arch_phys_to_idmap_offset;
Russell King614dd052010-11-21 11:41:57 +000020
#ifdef CONFIG_ARM_LPAE
/*
 * Populate the PMD level of the identity map for [addr, end) with section
 * mappings carrying the given protection bits (LPAE 3-level variant).
 *
 * If the PUD entry is empty, bad, or still points at the swapper's PMD
 * (L_PGD_SWAPPER), a private PMD table is allocated for the idmap so the
 * running kernel's page tables are never modified in place.
 */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			/* Allocation failure leaves this range unmapped. */
			pr_warn("Failed to allocate identity pmd.\n");
			return;
		}
		/*
		 * Copy the original PMD to ensure that the PMD entries for
		 * the kernel image are preserved.
		 */
		if (!pud_none(*pud))
			memcpy(pmd, pmd_offset(pud, 0),
				PTRS_PER_PMD * sizeof(pmd_t));
		/* Hook the new PMD table into the PUD, then index into it. */
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	/* Write one section entry per PMD slot, flushing each to memory. */
	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
52#else /* !CONFIG_ARM_LPAE */
Russell King516295e2010-11-21 16:27:49 +000053static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
Russell Kingaf3813d2010-11-21 11:48:16 +000054 unsigned long prot)
55{
Russell King516295e2010-11-21 16:27:49 +000056 pmd_t *pmd = pmd_offset(pud, addr);
Russell Kingaf3813d2010-11-21 11:48:16 +000057
58 addr = (addr & PMD_MASK) | prot;
59 pmd[0] = __pmd(addr);
60 addr += SECTION_SIZE;
61 pmd[1] = __pmd(addr);
62 flush_pmd_entry(pmd);
63}
Catalin Marinasae2de1012011-11-22 17:30:32 +000064#endif /* CONFIG_ARM_LPAE */
Russell Kingaf3813d2010-11-21 11:48:16 +000065
Russell King516295e2010-11-21 16:27:49 +000066static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
67 unsigned long prot)
68{
69 pud_t *pud = pud_offset(pgd, addr);
70 unsigned long next;
71
72 do {
73 next = pud_addr_end(addr, end);
74 idmap_add_pmd(pud, addr, next, prot);
75 } while (pud++, addr = next, addr != end);
76}
77
Christoffer Dall9e9a3672013-01-20 18:43:10 -050078static void identity_mapping_add(pgd_t *pgd, const char *text_start,
79 const char *text_end, unsigned long prot)
Russell King614dd052010-11-21 11:41:57 +000080{
Christoffer Dall9e9a3672013-01-20 18:43:10 -050081 unsigned long addr, end;
82 unsigned long next;
Russell King614dd052010-11-21 11:41:57 +000083
Santosh Shilimkar4dc9a812013-07-31 12:44:42 -040084 addr = virt_to_idmap(text_start);
85 end = virt_to_idmap(text_end);
Santosh Shilimkarc1a5f4f2013-07-31 12:44:43 -040086 pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);
Christoffer Dall9e9a3672013-01-20 18:43:10 -050087
88 prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
89
Arnd Bergmannd33c43a2014-04-15 15:38:39 +020090 if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale_family())
Russell King614dd052010-11-21 11:41:57 +000091 prot |= PMD_BIT4;
92
Russell Kingaf3813d2010-11-21 11:48:16 +000093 pgd += pgd_index(addr);
94 do {
95 next = pgd_addr_end(addr, end);
Russell King516295e2010-11-21 16:27:49 +000096 idmap_add_pud(pgd, addr, next, prot);
Russell Kingaf3813d2010-11-21 11:48:16 +000097 } while (pgd++, addr = next, addr != end);
Russell King614dd052010-11-21 11:41:57 +000098}
99
Will Deacon89038262011-09-30 11:43:29 +0100100extern char __idmap_text_start[], __idmap_text_end[];
101
102static int __init init_static_idmap(void)
103{
Will Deacon89038262011-09-30 11:43:29 +0100104 idmap_pgd = pgd_alloc(&init_mm);
105 if (!idmap_pgd)
106 return -ENOMEM;
107
Christoffer Dall9e9a3672013-01-20 18:43:10 -0500108 identity_mapping_add(idmap_pgd, __idmap_text_start,
109 __idmap_text_end, 0);
Will Deacon89038262011-09-30 11:43:29 +0100110
Nicolas Pitree4067852012-11-08 19:46:07 +0100111 /* Flush L1 for the hardware to see this page table content */
112 flush_cache_louis();
113
Marc Zyngier2fb41052013-04-12 19:12:03 +0100114 return 0;
Will Deacon89038262011-09-30 11:43:29 +0100115}
Will Deacon4e8ee7d2011-11-23 12:26:25 +0000116early_initcall(init_static_idmap);
Will Deacon89038262011-09-30 11:43:29 +0100117
/*
 * In order to soft-boot, we need to switch to a 1:1 mapping for the
 * cpu_reset functions. This will then ensure that we have predictable
 * results when turning off the mmu.
 *
 * The call order below is deliberate: switch to the idmap tables first,
 * then invalidate the branch predictor, then (on ASID-capable CPUs)
 * the TLB — do not reorder.
 */
void setup_mm_for_reboot(void)
{
	/* Switch to the identity mapping. */
	cpu_switch_mm(idmap_pgd, &init_mm);
	/* Invalidate branch-predictor state built under the old tables. */
	local_flush_bp_all();

#ifdef CONFIG_CPU_HAS_ASID
	/*
	 * We don't have a clean ASID for the identity mapping, which
	 * may clash with virtual addresses of the previous page tables
	 * and therefore potentially in the TLB.
	 */
	local_flush_tlb_all();
#endif
}
Russell King614dd052010-11-21 11:41:57 +0000137}