/* Copyright (c) 2015, The Linux Foundation. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
 * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following
   disclaimer in the documentation and/or other materials provided
   with the distribution.
 * Neither the name of The Linux Foundation nor the names of its
   contributors may be used to endorse or promote products derived
   from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <debug.h>
#include <sys/types.h>
#include <compiler.h>
#include <arch.h>
#include <arch/arm.h>
#include <arch/ops.h>
#include <arch/defines.h>
#include <arch/arm/mmu.h>
#include <mmu.h>
#include <platform.h>
#include <stdlib.h>

#if ARM_WITH_MMU

#define LPAE_SIZE (1ULL << 40)
#define LPAE_MASK (LPAE_SIZE - 1)
#define L1_PT_INDEX 0x7FC0000000
#define PT_TABLE_DESC_BIT 0x2
#define SIZE_1GB (0x400000000UL)
#define SIZE_2MB (0x200000)
#define MMU_L2_PT_SIZE 512
#define MMU_PT_BLOCK_DESCRIPTOR 0x1
#define MMU_PT_TABLE_DESCRIPTOR 0x3
#define MMU_AP_FLAG (0x1 << 10)
#define L2_PT_MASK 0xFFFFE00000
#define L2_INDEX_MASK 0x3FE00000

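/*
 * Index arithmetic recap (illustrative, with a hypothetical address):
 * within the 40-bit LPAE space, vaddr[38:30] selects the L1 entry (1GB
 * granule) and vaddr[29:21] selects the L2 entry (2MB granule). For
 * example, for vaddr 0x8FE00000:
 *   L1 index = (0x8FE00000 & LPAE_MASK) >> 30      = 2
 *   L2 index = (0x8FE00000 & L2_INDEX_MASK) >> 21  = 0x7F (127)
 */
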
uint64_t mmu_l1_pagetable[ROUNDUP(L1_PT_SZ, CACHE_LINE)] __attribute__ ((aligned(4096))); /* L1_PT_SZ entries, provided by target code; max is 8 */
uint64_t mmu_l2_pagetable[ROUNDUP(L2_PT_SZ*MMU_L2_PT_SIZE, CACHE_LINE)] __attribute__ ((aligned(4096))); /* L2_PT_SZ tables from target code, 512 entries each */
uint64_t avail_l2_pt = L2_PT_SZ;
uint64_t *empty_l2_pt = mmu_l2_pagetable;

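/*
 * avail_l2_pt and empty_l2_pt implement a simple bump allocator over the
 * static mmu_l2_pagetable pool: each allocation hands out the next
 * MMU_L2_PT_SIZE (512) consecutive uint64_t entries, i.e. one 4KB table,
 * and tables are never freed.
 */
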
/************************************************************/
/* MAP 2MB granules in 1GB section in L2 page table         */
/************************************************************/

static void mmu_map_l2_entry(mmu_section_t *block)
{
	uint64_t *l2_pt = NULL;
	uint64_t address_start;
	uint64_t address_end;
	uint64_t p_addr;

	/* First initialize the first level descriptor for each 1 GB.
	 * Bits [39:12] provide the physical base address of the level 2 page table.
	 *
	 * ________________________________________________________________________________
	 * |  |     |  |   |       |        |                            |       |          |
	 * |63|62-61|60| 59|58---52|51----40|39------------------------12|11----2|1------- 0|
	 * |NS| AP  |XN|PXN|Ignored|UNK|SBZP|Next-level table addr[39:12]|Ignored|Descriptor|
	 * |__|_____|__|___|_______|________|____________________________|_______|__________|
	 * NS: Used only in secure state
	 * AP: Access protection
	 */
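
	/* Worked example (hypothetical address): an L2 table placed at
	 * physical 0x88001000 yields the L1 table descriptor
	 * (0x88001000 & 0x0FFFFFFF000) | MMU_PT_TABLE_DESCRIPTOR = 0x88001003.
	 */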

	/* Convert the virtual address[38:30] into an index of the L1 page table */
	address_start = (block->vaddress & LPAE_MASK) >> 30;

	/* Check if this 1GB entry has an L2 page table mapped already;
	 * if the L1 entry hasn't mapped any L2 page table, allocate an L2 page table for it
	 */

	if ((mmu_l1_pagetable[address_start] & PT_TABLE_DESC_BIT) == 0)
	{
		ASSERT(avail_l2_pt);

		/* Get the first empty L2 page table and fill in the L1 PTE with a
		 * table descriptor; bits [39:12] of the l2_pt address form the entry
		 */
		l2_pt = empty_l2_pt;

		/* Bits [39:12] of the page table address are mapped into the L1 PTE entry */
		mmu_l1_pagetable[address_start] = ((uint64_t)(uintptr_t)l2_pt & 0x0FFFFFFF000) | MMU_PT_TABLE_DESCRIPTOR;

		/* Advance pointer to the next empty L2 page table */
		empty_l2_pt += MMU_L2_PT_SIZE;
		avail_l2_pt--;
		arch_clean_invalidate_cache_range((addr_t) mmu_l1_pagetable, L1_PT_SZ);
	}
	else
	{
		/* Entry has an L2 page table mapped already, so just get the existing L2 page table address */
		l2_pt = (uint64_t *) (uintptr_t)(mmu_l1_pagetable[address_start] & 0xFFFFFFF000);
	}

	/* Get the physical address of the 2MB section; bits [39:21] are used to populate the L2 entry */
	p_addr = block->paddress & L2_PT_MASK;

	/* Start index into the L2 page table for this section, using the virtual address[29:21] */
	address_start = (block->vaddress & L2_INDEX_MASK) >> 21;

	/* The end index for the given section. The size is given in MB; convert it to a number of 2MB segments */
	address_end = address_start + ((block->size) >> 1);

	/*
	 * ___________________________________________________________________________________________________________________
	 * |       |        |  |   |    |        |                  |        |  |  |       |       |  |             |          |
	 * |63---59|58----55|54|53 |52  |51----40|39--------------21|20----12|11|10|9     8|7     6|5 |4-----------2|1        0|
	 * |Ignored|Reserved|XN|PXN|Cont|UNK|SBZP|Output addr[39:21]|UNK|SBZP|nG|AF|SH[1:0]|AP[2:1]|NS|AttrIndx[2:0]|Descriptor|
	 * |_______|________|__|___|____|________|__________________|________|__|__|_______|_______|__|_____________|__________|
	 */
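	/* Worked example (hypothetical address): a 2MB block at physical
	 * 0x80000000 with no extra flags becomes
	 * 0x80000000 | MMU_PT_BLOCK_DESCRIPTOR | MMU_AP_FLAG = 0x80000401.
	 */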

	/* Map all the 2MB segments in the 1GB section */
	while (address_start < address_end)
	{
		l2_pt[address_start] = (p_addr) | MMU_PT_BLOCK_DESCRIPTOR | MMU_AP_FLAG | block->flags;
		address_start++;
		/* Increment to the next 2MB segment in the current L2 page table */
		p_addr += SIZE_2MB;
		arm_invalidate_tlb();
	}
	arch_clean_invalidate_cache_range((addr_t) mmu_l2_pagetable, (L2_PT_SZ*MMU_L2_PT_SIZE));
}
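
/*
 * Design note: the tables are cleaned and invalidated after every update
 * because the page-table walker may fetch descriptors from memory rather
 * than from the data cache, depending on how the walk attributes
 * (TTBCR.IRGN0/ORGN0) and the caches are configured at this stage of
 * boot; a stale line here could hand the walker an old descriptor.
 */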

/************************************************************/
/* MAP 1GB granules in L1 page table                        */
/************************************************************/
static void mmu_map_l1_entry(mmu_section_t *block)
{
	uint64_t address_start;
	uint64_t address_end;
	uint64_t p_addr;

	/* Convert our base address into an index into the page table */
	address_start = (block->vaddress & LPAE_MASK) >> 30;

	/* Get the end index into the page table; the size is in MB, so convert it to a number of 1GB sections */
	address_end = address_start + ((block->size) >> 10);

	/* Bits [38:30] provide the physical base address of the section */
	p_addr = block->paddress & L1_PT_INDEX;

	while (address_start < address_end)
	{
		/*
		 * A block descriptor for the first stage, level one, is as follows (Descriptor = 0b01):
		 * ___________________________________________________________________________________________________________________
		 * |       |        |  |   |    |        |                  |        |  |  |       |       |  |             |          |
		 * |63---59|58----55|54|53 |52  |51----40|39--------------30|29----12|11|10|9     8|7     6|5 |4-----------2|1        0|
		 * |Ignored|Reserved|XN|PXN|Cont|UNK/SBZP|Output addr[39:30]|UNK/SBZP|nG|AF|SH[1:0]|AP[2:1]|NS|AttrIndx[2:0]|Descriptor|
		 * |_______|________|__|___|____|________|__________________|________|__|__|_______|_______|__|_____________|__________|
		 */
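
		/* Worked example (hypothetical address): a 1GB block at physical
		 * 0x40000000 with no extra flags becomes
		 * 0x40000000 | MMU_AP_FLAG | MMU_PT_BLOCK_DESCRIPTOR = 0x40000401.
		 */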

		mmu_l1_pagetable[address_start] = (p_addr) | block->flags | MMU_AP_FLAG | MMU_PT_BLOCK_DESCRIPTOR;

		p_addr += SIZE_1GB; /* Advance to the next 1GB section */
		address_start++;
		arm_invalidate_tlb();
	}
	arch_clean_invalidate_cache_range((addr_t) mmu_l1_pagetable, L1_PT_SZ);
}

void arm_mmu_map_entry(mmu_section_t *entry)
{
	ASSERT(entry);

	if (entry->type == MMU_L1_NS_SECTION_MAPPING)
		mmu_map_l1_entry(entry);
	else if (entry->type == MMU_L2_NS_SECTION_MAPPING)
		mmu_map_l2_entry(entry);
	else
		dprintf(CRITICAL, "Invalid mapping type in the mmu table: %d\n", entry->type);
}
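
/*
 * Illustrative caller (a sketch; the field values are hypothetical, and
 * the attribute flags would come from the target's memory map):
 *
 *   mmu_section_t section = {
 *       .paddress = 0x80000000,
 *       .vaddress = 0x80000000,
 *       .type     = MMU_L2_NS_SECTION_MAPPING,
 *       .size     = 512,    // in MB => 256 2MB entries
 *       .flags    = 0,      // plus cacheability/AttrIndx bits as needed
 *   };
 *   arm_mmu_map_entry(&section);
 */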

void arm_mmu_init(void)
{
	/* Set some MMU-specific control bits:
	 * access flag disabled, TEX remap disabled, MMU disabled
	 */
	arm_write_cr1(arm_read_cr1() & ~((1<<29)|(1<<28)|(1<<0)));

	platform_init_mmu_mappings();

	/* Set up the translation table base */
	arm_write_ttbr((uint32_t)mmu_l1_pagetable);

	/* Set up the Memory Attribute Indirection Registers 0 and 1 */
	arm_write_mair0(MAIR0);
	arm_write_mair1(MAIR1);

	/* TTBCR.EAE = 1; IRGN0 bits [9:8] and ORGN0 bits [11:10] set to 01 */
	arm_write_ttbcr(0x80000500);
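
	/* 0x80000500 decomposes as EAE (bit 31) = 1, ORGN0 [11:10] = 0b01,
	 * IRGN0 [9:8] = 0b01: 0x80000000 | (1 << 10) | (1 << 8) = 0x80000500,
	 * i.e. the long-descriptor format with write-back write-allocate
	 * cacheable translation table walks for TTBR0.
	 */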

	/* Enable TRE */
	arm_write_cr1(arm_read_cr1() | (1<<28));

	/* Turn on the MMU */
	arm_write_cr1(arm_read_cr1() | 0x1);
}

void arch_disable_mmu(void)
{
	/* Ensure all memory accesses are complete
	 * before disabling the MMU
	 */
	dsb();
	arm_write_cr1(arm_read_cr1() & ~(1<<0));
	arm_invalidate_tlb();
}

uint64_t virtual_to_physical_mapping(uint32_t vaddr)
{
	uint32_t l1_index;
	uint64_t *l2_pt = NULL;
	uint32_t l2_index;
	uint32_t offset = 0;
	uint64_t paddr = 0;

	/* Find the L1 index from the virtual address */
	l1_index = (vaddr & LPAE_MASK) >> 30;

	if ((mmu_l1_pagetable[l1_index] & MMU_PT_TABLE_DESCRIPTOR) == MMU_PT_TABLE_DESCRIPTOR)
	{
		/* Get the L2 page table address */
		l2_pt = (uint64_t *) (uintptr_t) (mmu_l1_pagetable[l1_index] & 0x0FFFFFFF000);
		/* Get the L2 index from the virtual address */
		l2_index = (vaddr & L2_INDEX_MASK) >> 21;
		/* Calculate the offset within the 2MB block (bits [20:0] of vaddr) */
		offset = vaddr & 0x1FFFFF;
		/* Take the physical address bits [39:21] and add the offset */
		paddr = (l2_pt[l2_index] & L2_PT_MASK) + offset;
	} else if ((mmu_l1_pagetable[l1_index] & MMU_PT_TABLE_DESCRIPTOR) == MMU_PT_BLOCK_DESCRIPTOR)
	{
		/* Calculate the offset within the 1GB block (bits [29:0] of vaddr) */
		offset = vaddr & 0x3FFFFFFF;
		/* Return the output address from the L1 entry plus the offset */
		paddr = (mmu_l1_pagetable[l1_index] & L1_PT_INDEX) + offset;
	} else
	{
		/* Neither a table nor a block descriptor: the address is not mapped */
		ASSERT(0);
	}

	return paddr;
}
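
/*
 * Worked example (hypothetical mapping): if vaddr 0x8FE12345 is covered
 * by a 2MB L2 block, then l1_index = 2, l2_index = 0x7F, and
 * offset = 0x8FE12345 & 0x1FFFFF = 0x12345, so the returned paddr is the
 * block's 2MB-aligned output address plus 0x12345.
 */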

uint32_t physical_to_virtual_mapping(uint64_t paddr)
{
	uint32_t i, j;
	uint32_t vaddr_index = 0;
	uint32_t vaddr = 0;
	uint64_t paddr_base_l1;
	uint64_t paddr_base_l2;
	uint64_t *l2_pt = NULL;
	bool l1_index_found = false;
	uint32_t l1_index = 0;
	uint32_t offset = 0;

	/* The 1GB-aligned base of the physical address, used to match block entries */
	paddr_base_l1 = paddr & L1_PT_INDEX;

	/* Traverse the L1 page table looking for a matching descriptor */
	for (i = 0; i < L1_PT_SZ; i++)
	{
		/* If the L1 index is unused, continue */
		if ((mmu_l1_pagetable[i] & MMU_PT_TABLE_DESCRIPTOR) == 0)
			continue;

		/* If it is a block entry, find the matching entry and return the index */
		if ((mmu_l1_pagetable[i] & MMU_PT_TABLE_DESCRIPTOR) == MMU_PT_BLOCK_DESCRIPTOR)
		{
			if ((mmu_l1_pagetable[i] & L1_PT_INDEX) == paddr_base_l1)
			{
				offset = paddr - paddr_base_l1;
				vaddr_index = i;
				l1_index_found = true;
				goto end;
			}
		} /* If the entry is a table, extract the table address and search it */
		else if ((mmu_l1_pagetable[i] & MMU_PT_TABLE_DESCRIPTOR) == MMU_PT_TABLE_DESCRIPTOR)
		{
			l1_index = i;
			l2_pt = (uint64_t *) (uintptr_t) (mmu_l1_pagetable[l1_index] & 0x0FFFFFFF000);
			paddr_base_l2 = paddr & L2_PT_MASK;
			/* Search the table to find the index in the L2 page table */
			for (j = 0; j < MMU_L2_PT_SIZE; j++)
			{
				if (paddr_base_l2 == (l2_pt[j] & L2_PT_MASK))
				{
					vaddr_index = j;
					offset = paddr - paddr_base_l2;
					goto end;
				}
			}
		}
	}
	/* If we reach here, the address is either not mapped or the request is invalid */
	dprintf(CRITICAL, "The address %llx is not mapped\n", paddr);
	ASSERT(0);

end:
	/* Convert the index into the virtual address */
	if (l1_index_found)
		vaddr = (vaddr_index << 30);
	else
		vaddr = ((vaddr_index << 21) & L2_INDEX_MASK) + (l1_index << 30);

	vaddr += offset;

	return vaddr;
}
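
/*
 * Worked example of the reverse direction (hypothetical mapping): a
 * physical address found in L2 slot j = 0x7F of the table hung off L1
 * slot l1_index = 2 reconstructs as
 * ((0x7F << 21) & L2_INDEX_MASK) + (2 << 30) + offset = 0x8FE00000 + offset.
 */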
#endif // ARM_WITH_MMU