#include <linux/kernel.h>

#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>

pgd_t *idmap_pgd;

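/*
 * Write a 1:1 section mapping into the PMD.  On the classic ARM 2-level
 * tables a Linux PMD covers 2MB, i.e. two 1MB hardware sections, so both
 * entries are filled in before the PMD is flushed.
 */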
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	addr = (addr & PMD_MASK) | prot;
	pmd[0] = __pmd(addr);
	addr += SECTION_SIZE;
	pmd[1] = __pmd(addr);
	flush_pmd_entry(pmd);
}

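/* Walk the (folded) PUD range and add section mappings for each PMD. */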
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		idmap_add_pmd(pud, addr, next, prot);
	} while (pud++, addr = next, addr != end);
}

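/*
 * Set up a writable 1:1 section mapping for [addr, end) in the given
 * page directory.  Cores up to ARMv5TEJ (other than XScale) also need
 * PMD_BIT4 set in their section descriptors.
 */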
void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long prot, next;

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		idmap_add_pud(pgd, addr, next, prot);
	} while (pgd++, addr = next, addr != end);
}

#ifdef CONFIG_SMP
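/*
 * Teardown of the identity mapping is only needed on SMP.  These helpers
 * mirror the add path above and simply clear the entries again.
 */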
static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	pmd_clear(pmd);
}

static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		idmap_del_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

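/* Remove the identity mapping previously set up for [addr, end). */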
void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		idmap_del_pud(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
#endif

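/* Bounds of the idmap text section, provided by the linker script. */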
extern char __idmap_text_start[], __idmap_text_end[];

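/*
 * Allocate a page directory for the static identity map and populate it
 * with a 1:1 mapping of the idmap text section.
 */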
static int __init init_static_idmap(void)
{
	phys_addr_t idmap_start, idmap_end;

	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	/* Add an identity mapping for the physical address of the idmap text. */
	idmap_start = virt_to_phys((void *)__idmap_text_start);
	idmap_end = virt_to_phys((void *)__idmap_text_end);

	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
		(long long)idmap_start, (long long)idmap_end);
	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);

	return 0;
}
arch_initcall(init_static_idmap);

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(void)
{
	/*
	 * We need to access the user-mode page tables here. For kernel
	 * threads we don't have any user-mode mappings so we use the
	 * context that we "borrowed".
	 */
	identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
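	/* Flush stale TLB entries so the new 1:1 mapping takes effect. */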
	local_flush_tlb_all();
}