blob: a3a98d1970a9d14efa657ce74b0ded4eb8cad53f [file] [log] [blame]
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +00001/*
2 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
3 *
dp-arm82cb2c12017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +00005 */
6
7#include <arch.h>
8#include <arch_helpers.h>
9#include <assert.h>
10#include <cassert.h>
11#include <common_def.h>
12#include <debug.h>
13#include <errno.h>
14#include <platform_def.h>
15#include <string.h>
16#include <types.h>
17#include <utils.h>
Sandrine Bailleux8933c342017-05-19 09:59:37 +010018#include <xlat_tables_arch.h>
19#include <xlat_tables_defs.h>
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +000020#include <xlat_tables_v2.h>
Sandrine Bailleux8933c342017-05-19 09:59:37 +010021
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +000022#include "xlat_tables_private.h"
23
Antonio Nino Diaz0b64f4e2017-02-27 17:23:54 +000024#if PLAT_XLAT_TABLES_DYNAMIC
25
26/*
27 * The following functions assume that they will be called using subtables only.
28 * The base table can't be unmapped, so it is not needed to do any special
29 * handling for it.
30 */
31
32/*
33 * Returns the index of the array corresponding to the specified translation
34 * table.
35 */
36static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
37{
Varun Wadekar6311f632017-06-07 09:57:42 -070038 for (unsigned int i = 0; i < ctx->tables_num; i++)
Antonio Nino Diaz0b64f4e2017-02-27 17:23:54 +000039 if (ctx->tables[i] == table)
40 return i;
41
42 /*
43 * Maybe we were asked to get the index of the base level table, which
44 * should never happen.
45 */
46 assert(0);
47
48 return -1;
49}
50
51/* Returns a pointer to an empty translation table. */
52static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
53{
Varun Wadekar6311f632017-06-07 09:57:42 -070054 for (unsigned int i = 0; i < ctx->tables_num; i++)
Antonio Nino Diaz0b64f4e2017-02-27 17:23:54 +000055 if (ctx->tables_mapped_regions[i] == 0)
56 return ctx->tables[i];
57
58 return NULL;
59}
60
61/* Increments region count for a given table. */
62static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
63{
64 ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
65}
66
67/* Decrements region count for a given table. */
68static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
69{
70 ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
71}
72
73/* Returns 0 if the speficied table isn't empty, otherwise 1. */
74static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
75{
76 return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
77}
78
79#else /* PLAT_XLAT_TABLES_DYNAMIC */
80
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +000081/* Returns a pointer to the first empty translation table. */
82static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
83{
84 assert(ctx->next_table < ctx->tables_num);
85
86 return ctx->tables[ctx->next_table++];
87}
88
Antonio Nino Diaz0b64f4e2017-02-27 17:23:54 +000089#endif /* PLAT_XLAT_TABLES_DYNAMIC */
90
/*
 * Returns a block/page table descriptor for the given level and attributes.
 *
 * attr               - MT_xxx memory attributes of the region.
 * addr_pa            - Physical address the descriptor maps; must be aligned
 *                      to the block size of 'level' (asserted below).
 * level              - Translation table level the descriptor is meant for.
 * execute_never_mask - Context-specific XN bit(s) to OR in when the mapping
 *                      must not be executable.
 */
static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
			  int level, uint64_t execute_never_mask)
{
	uint64_t desc;
	int mem_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and the
	 * rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as TF doesn't manage access flag faults.
	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
	 * memory region attributes.
	 */
	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
	desc |= LOWER_ATTRS(ACCESS_FLAG);

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these 2 types
	 * of memory, it is not strictly needed to set the shareability field
	 * in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= execute_never_mask;

	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * (Trusted Firmware doesn't self-modify its code, therefore
		 * R/W memory is reserved for data storage, which must not be
		 * executable.)
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which makes any writable memory region to be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit.
		 */
		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
			desc |= execute_never_mask;
		}

		if (mem_type == MT_MEMORY) {
			/* Cacheable normal memory: Inner Shareable. */
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
166
/*
 * Enumeration of actions that can be made when mapping table entries depending
 * on the previous value in that entry and information about the region being
 * mapped.
 */
typedef enum {

	/* Do nothing */
	ACTION_NONE,

	/* Write a block (or page, if in level 3) entry. */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,

} action_t;
193
Antonio Nino Diaz0b64f4e2017-02-27 17:23:54 +0000194#if PLAT_XLAT_TABLES_DYNAMIC
195
/*
 * Recursive function that writes to the translation tables and unmaps the
 * specified region.
 *
 * table_base_va  - VA mapped by the first entry of this table.
 * table_base     - Pointer to the table being walked at this level.
 * table_entries  - Number of entries in this table.
 * level          - Current translation table level.
 */
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				     const uintptr_t table_base_va,
				     uint64_t *const table_base,
				     const int table_entries,
				     const unsigned int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t *subtable;
	uint64_t desc;

	uintptr_t table_idx_va;
	uintptr_t table_idx_end_va; /* End VA of this entry */

	uintptr_t region_end_va = mm->base_va + mm->size - 1;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

	while (table_idx < table_entries) {

		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;

		desc = table_base[table_idx];
		uint64_t desc_type = desc & DESC_MASK;

		action_t action = ACTION_NONE;

		if ((mm->base_va <= table_idx_va) &&
		    (region_end_va >= table_idx_end_va)) {

			/* Region covers all block */

			if (level == 3) {
				/*
				 * Last level, only page descriptors allowed,
				 * erase it.
				 */
				assert(desc_type == PAGE_DESC);

				action = ACTION_WRITE_BLOCK_ENTRY;
			} else {
				/*
				 * Other levels can have table descriptors. If
				 * so, recurse into it and erase descriptors
				 * inside it as needed. If there is a block
				 * descriptor, just erase it. If an invalid
				 * descriptor is found, this table isn't
				 * actually mapped, which shouldn't happen.
				 */
				if (desc_type == TABLE_DESC) {
					action = ACTION_RECURSE_INTO_TABLE;
				} else {
					assert(desc_type == BLOCK_DESC);
					action = ACTION_WRITE_BLOCK_ENTRY;
				}
			}

		} else if ((mm->base_va <= table_idx_end_va) ||
			   (region_end_va >= table_idx_va)) {

			/*
			 * Region partially covers block.
			 *
			 * It can't happen in level 3.
			 *
			 * There must be a table descriptor here, if not there
			 * was a problem when mapping the region.
			 */

			assert(level < 3);

			assert(desc_type == TABLE_DESC);

			action = ACTION_RECURSE_INTO_TABLE;
		}

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			/* Erase the entry and invalidate any cached walk. */
			table_base[table_idx] = INVALID_DESC;
			xlat_arch_tlbi_va(table_idx_va);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			xlat_tables_unmap_region(ctx, mm, table_idx_va,
						 subtable, XLAT_TABLE_ENTRIES,
						 level + 1);

			/*
			 * If the subtable is now empty, remove its reference.
			 */
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
				xlat_arch_tlbi_va(table_idx_va);
			}

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (region_end_va <= table_idx_va)
			break;
	}

	/* This subtable now maps one region fewer (tracked for reclaiming). */
	if (level > ctx->base_level)
		xlat_table_dec_regions_count(ctx, table_base);
}
327
328#endif /* PLAT_XLAT_TABLES_DYNAMIC */
329
/*
 * From the given arguments, it decides which action to take when mapping the
 * specified region.
 *
 * desc_type           - Descriptor type currently present in the table entry.
 * dest_pa             - PA the entry would map if written.
 * table_entry_base_va - First VA covered by the table entry.
 * level               - Translation table level of the entry.
 */
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		const int desc_type, const unsigned long long dest_pa,
		const uintptr_t table_entry_base_va, const int level)
{
	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check if the destination granularity allows
				 * us to use a block descriptor or we need a
				 * finer table for it.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
				    (level < MIN_LVL_BLOCK_DESC))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);

				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) ||
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation, a finer table is needed.
		 *
		 * There cannot be partial block overlaps in level 3. If that
		 * happens, some of the preliminary checks when adding the
		 * mmap region failed to detect that PA and VA must at least be
		 * aligned to PAGE_SIZE.
		 */
		assert(level < 3);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't run on a level 3
			 * table because there can't be overlaps in level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	}

	/*
	 * This table entry is outside of the region specified in the arguments,
	 * don't write anything to it.
	 */
	return ACTION_NONE;
}
458
/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
					const uintptr_t table_base_va,
					uint64_t *const table_base,
					const int table_entries,
					const unsigned int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uintptr_t mm_end_va = mm->base_va + mm->size - 1;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

#if PLAT_XLAT_TABLES_DYNAMIC
	/* Record that this subtable hosts one more region (for reclaiming). */
	if (level > ctx->base_level)
		xlat_table_inc_regions_count(ctx, table_base);
#endif

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		/* PA that corresponds to the VA covered by this entry. */
		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			desc & DESC_MASK, table_idx_pa, table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(mm->attr, table_idx_pa, level,
					  ctx->execute_never_mask);

		} else if (action == ACTION_CREATE_NEW_TABLE) {

			subtable = xlat_table_get_empty(ctx);
			if (subtable == NULL) {
				/* Not enough free tables to map this region */
				return table_idx_va;
			}

			/* Point to new subtable from this one. */
			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;

			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1);
			/* Propagate a partial-mapping failure upwards. */
			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1);
			/* Propagate a partial-mapping failure upwards. */
			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else {

			assert(action == ACTION_NONE);

		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (mm_end_va <= table_idx_va)
			break;
	}

	return table_idx_va - 1;
}
561
562void print_mmap(mmap_region_t *const mmap)
563{
564#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
565 tf_printf("mmap:\n");
566 mmap_region_t *mm = mmap;
567
568 while (mm->size) {
569 tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
570 (void *)mm->base_va, mm->base_pa,
571 mm->size, mm->attr);
572 ++mm;
573 };
574 tf_printf("\n");
575#endif
576}
577
/*
 * Function that verifies that a region can be mapped.
 * Returns (as negative errno values):
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 */
static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
				 uintptr_t base_va, size_t size,
				 mmap_attr_t attr)
{
	mmap_region_t *mm = ctx->mmap;
	unsigned long long end_pa = base_pa + size - 1;
	uintptr_t end_va = base_va + size - 1;

	/* PA, VA and size must all be page-aligned. */
	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
			!IS_PAGE_ALIGNED(size))
		return -EINVAL;

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	/* The region must fit within the context's VA and PA address spaces. */
	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
		return -ERANGE;

	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0)
		return -ENOMEM;

	/* Check for PAs and VAs overlaps with all other regions */
	for (mm = ctx->mmap; mm->size; ++mm) {

		uintptr_t mm_end_va = mm->base_va + mm->size - 1;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		int fully_overlapped_va =
			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
			((mm->base_va >= base_va) && (mm_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {

#if PLAT_XLAT_TABLES_DYNAMIC
			/* Dynamic regions may never fully overlap anything. */
			if ((attr & MT_DYNAMIC) || (mm->attr & MT_DYNAMIC))
				return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			if ((mm->base_va - mm->base_pa) != (base_va - base_pa))
				return -EPERM;

			if ((base_va == mm->base_va) && (size == mm->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed
			 */

			unsigned long long mm_end_pa =
						     mm->base_pa + mm->size - 1;

			int separated_pa =
				(end_pa < mm->base_pa) || (base_pa > mm_end_pa);
			int separated_va =
				(end_va < mm->base_va) || (base_va > mm_end_va);

			if (!(separated_va && separated_pa))
				return -EPERM;
		}
	}

	return 0;
}
666
/*
 * Adds a static memory region to the context's mmap array, keeping the array
 * sorted as required by the mapping loop. On a failed validity check this
 * asserts in debug builds and silently returns in release builds.
 */
void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1;
	uintptr_t end_va = mm->base_va + mm->size - 1;
	int ret;

	/* Ignore empty regions */
	if (!mm->size)
		return;

	/* Static regions must be added before initializing the xlat tables. */
	assert(!ctx->initialized);

	ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size,
				    mm->attr);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(0);
		return;
	}

	/*
	 * Find correct place in mmap to insert new region.
	 *
	 * 1 - Lower region VA end first.
	 * 2 - Smaller region size first.
	 *
	 * VA  0                                   0xFF
	 *
	 * 1st |------|
	 * 2nd |------------|
	 * 3rd                 |------|
	 * 4th                            |---|
	 * 5th                                   |---|
	 * 6th                            |----------|
	 * 7th |-------------------------------------|
	 *
	 * This is required for overlapping regions only. It simplifies adding
	 * regions with the loop in xlat_tables_init_internal because the outer
	 * ones won't overwrite block or page descriptors of regions added
	 * previously.
	 *
	 * Overlapping is only allowed for static regions.
	 */

	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
	       && mm_cursor->size)
		++mm_cursor;

	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
	       && (mm_cursor->size < mm->size))
		++mm_cursor;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm_cursor + 1, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0);

	mm_cursor->base_pa = mm->base_pa;
	mm_cursor->base_va = mm->base_va;
	mm_cursor->size = mm->size;
	mm_cursor->attr = mm->attr;

	/* Keep track of the highest mapped PA and VA in this context. */
	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;
}
743
Antonio Nino Diaz0b64f4e2017-02-27 17:23:54 +0000744#if PLAT_XLAT_TABLES_DYNAMIC
745
746int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
747{
748 mmap_region_t *mm_cursor = ctx->mmap;
749 mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
750 unsigned long long end_pa = mm->base_pa + mm->size - 1;
751 uintptr_t end_va = mm->base_va + mm->size - 1;
752 int ret;
753
754 /* Nothing to do */
755 if (!mm->size)
756 return 0;
757
758 ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size, mm->attr | MT_DYNAMIC);
759 if (ret != 0)
760 return ret;
761
762 /*
763 * Find the adequate entry in the mmap array in the same way done for
764 * static regions in mmap_add_region_ctx().
765 */
766
767 while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va && mm_cursor->size)
768 ++mm_cursor;
769
770 while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va) && (mm_cursor->size < mm->size))
771 ++mm_cursor;
772
773 /* Make room for new region by moving other regions up by one place */
774 memmove(mm_cursor + 1, mm_cursor, (uintptr_t)mm_last - (uintptr_t)mm_cursor);
775
776 /*
777 * Check we haven't lost the empty sentinal from the end of the array.
778 * This shouldn't happen as we have checked in mmap_add_region_check
779 * that there is free space.
780 */
781 assert(mm_last->size == 0);
782
783 mm_cursor->base_pa = mm->base_pa;
784 mm_cursor->base_va = mm->base_va;
785 mm_cursor->size = mm->size;
786 mm_cursor->attr = mm->attr | MT_DYNAMIC;
787
788 /*
789 * Update the translation tables if the xlat tables are initialized. If
790 * not, this region will be mapped when they are initialized.
791 */
792 if (ctx->initialized) {
793 uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor, 0, ctx->base_table,
794 ctx->base_table_entries, ctx->base_level);
795
796 /* Failed to map, remove mmap entry, unmap and return error. */
797 if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
798 memmove(mm_cursor, mm_cursor + 1, (uintptr_t)mm_last - (uintptr_t)mm_cursor);
799
800 /*
801 * Check if the mapping function actually managed to map
802 * anything. If not, just return now.
803 */
804 if (mm_cursor->base_va >= end_va)
805 return -ENOMEM;
806
807 /*
808 * Something went wrong after mapping some table entries,
809 * undo every change done up to this point.
810 */
811 mmap_region_t unmap_mm = {
812 .base_pa = 0,
813 .base_va = mm->base_va,
814 .size = end_va - mm->base_va,
815 .attr = 0
816 };
817 xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
818 ctx->base_table_entries, ctx->base_level);
819
820 return -ENOMEM;
821 }
822
823 /*
824 * Make sure that all entries are written to the memory. There
825 * is no need to invalidate entries when mapping dynamic regions
826 * because new table/block/page descriptors only replace old
827 * invalid descriptors, that aren't TLB cached.
828 */
829 dsbishst();
830 }
831
832 if (end_pa > ctx->max_pa)
833 ctx->max_pa = end_pa;
834 if (end_va > ctx->max_va)
835 ctx->max_va = end_va;
836
837 return 0;
838}
839
840/*
841 * Removes the region with given base Virtual Address and size from the given
842 * context.
843 *
844 * Returns:
845 * 0: Success.
846 * EINVAL: Invalid values were used as arguments (region not found).
847 * EPERM: Tried to remove a static region.
848 */
849int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
850 size_t size)
851{
852 mmap_region_t *mm = ctx->mmap;
853 mmap_region_t *mm_last = mm + ctx->mmap_num;
854 int update_max_va_needed = 0;
855 int update_max_pa_needed = 0;
856
857 /* Check sanity of mmap array. */
858 assert(mm[ctx->mmap_num].size == 0);
859
860 while (mm->size) {
861 if ((mm->base_va == base_va) && (mm->size == size))
862 break;
863 ++mm;
864 }
865
866 /* Check that the region was found */
867 if (mm->size == 0)
868 return -EINVAL;
869
870 /* If the region is static it can't be removed */
871 if (!(mm->attr & MT_DYNAMIC))
872 return -EPERM;
873
874 /* Check if this region is using the top VAs or PAs. */
875 if ((mm->base_va + mm->size - 1) == ctx->max_va)
876 update_max_va_needed = 1;
877 if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
878 update_max_pa_needed = 1;
879
880 /* Update the translation tables if needed */
881 if (ctx->initialized) {
882 xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
883 ctx->base_table_entries,
884 ctx->base_level);
885 xlat_arch_tlbi_va_sync();
886 }
887
888 /* Remove this region by moving the rest down by one place. */
889 memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);
890
891 /* Check if we need to update the max VAs and PAs */
892 if (update_max_va_needed) {
893 ctx->max_va = 0;
894 mm = ctx->mmap;
895 while (mm->size) {
896 if ((mm->base_va + mm->size - 1) > ctx->max_va)
897 ctx->max_va = mm->base_va + mm->size - 1;
898 ++mm;
899 }
900 }
901
902 if (update_max_pa_needed) {
903 ctx->max_pa = 0;
904 mm = ctx->mmap;
905 while (mm->size) {
906 if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
907 ctx->max_pa = mm->base_pa + mm->size - 1;
908 ++mm;
909 }
910 }
911
912 return 0;
913}
914
915#endif /* PLAT_XLAT_TABLES_DYNAMIC */
916
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +0000917#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
918
919/* Print the attributes of the specified block descriptor. */
Antonio Nino Diaza5640252017-04-27 13:30:22 +0100920static void xlat_desc_print(uint64_t desc, uint64_t execute_never_mask)
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +0000921{
922 int mem_type_index = ATTR_INDEX_GET(desc);
923
924 if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
925 tf_printf("MEM");
926 } else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
927 tf_printf("NC");
928 } else {
929 assert(mem_type_index == ATTR_DEVICE_INDEX);
930 tf_printf("DEV");
931 }
932
933 tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
934 tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
Antonio Nino Diaza5640252017-04-27 13:30:22 +0100935 tf_printf(execute_never_mask & desc ? "-XN" : "-EXEC");
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +0000936}
937
/* Per-level indentation prefixes used when dumping the translation tables. */
static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

/* NOTE(review): identifier keeps its historical misspelling ("ommited"). */
static const char *invalid_descriptors_ommited =
	"%s(%d invalid descriptors omitted)\n";
947
/*
 * Recursive function that reads the translation tables passed as an argument
 * and prints their status.
 */
static void xlat_tables_print_internal(const uintptr_t table_base_va,
		uint64_t *const table_base, const int table_entries,
		const unsigned int level, const uint64_t execute_never_mask)
{
	assert(level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t desc;
	uintptr_t table_idx_va = table_base_va;
	int table_idx = 0;

	size_t level_size = XLAT_BLOCK_SIZE(level);

	/*
	 * Keep track of how many invalid descriptors are counted in a row.
	 * Whenever multiple invalid descriptors are found, only the first one
	 * is printed, and a line is added to inform about how many descriptors
	 * have been omitted.
	 */
	int invalid_row_count = 0;

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		if ((desc & DESC_MASK) == INVALID_DESC) {

			/* Only the first invalid entry of a run is printed. */
			if (invalid_row_count == 0) {
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);
			}
			invalid_row_count++;

		} else {

			/* End of an invalid run: report how many were hidden. */
			if (invalid_row_count > 1) {
				tf_printf(invalid_descriptors_ommited,
					  level_spacers[level],
					  invalid_row_count - 1);
			}
			invalid_row_count = 0;

			/*
			 * Check if this is a table or a block. Tables are only
			 * allowed in levels other than 3, but DESC_PAGE has the
			 * same value as DESC_TABLE, so we need to check.
			 */
			if (((desc & DESC_MASK) == TABLE_DESC) &&
					(level < XLAT_TABLE_LEVEL_MAX)) {
				/*
				 * Do not print any PA for a table descriptor,
				 * as it doesn't directly map physical memory
				 * but instead points to the next translation
				 * table in the translation table walk.
				 */
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);

				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;

				xlat_tables_print_internal(table_idx_va,
					(uint64_t *)addr_inner,
					XLAT_TABLE_ENTRIES, level + 1,
					execute_never_mask);
			} else {
				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
					  level_spacers[level],
					  (void *)table_idx_va,
					  (unsigned long long)(desc & TABLE_ADDR_MASK),
					  level_size);
				xlat_desc_print(desc, execute_never_mask);
				tf_printf("\n");
			}
		}

		table_idx++;
		table_idx_va += level_size;
	}

	/* Flush a pending invalid-run summary at the end of the table. */
	if (invalid_row_count > 1) {
		tf_printf(invalid_descriptors_ommited,
			  level_spacers[level], invalid_row_count - 1);
	}
}
1037
1038#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
1039
1040void xlat_tables_print(xlat_ctx_t *ctx)
1041{
1042#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
Sandrine Bailleux0350bc62017-05-26 15:47:08 +01001043 VERBOSE("Translation tables state:\n");
1044 VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
1045 VERBOSE(" Max allowed VA: %p\n", (void *) ctx->va_max_address);
1046 VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
1047 VERBOSE(" Max mapped VA: %p\n", (void *) ctx->max_va);
1048
1049 VERBOSE(" Initial lookup level: %i\n", ctx->base_level);
1050 VERBOSE(" Entries @initial lookup level: %i\n",
1051 ctx->base_table_entries);
1052
1053 int used_page_tables;
1054#if PLAT_XLAT_TABLES_DYNAMIC
1055 used_page_tables = 0;
1056 for (int i = 0; i < ctx->tables_num; ++i) {
1057 if (ctx->tables_mapped_regions[i] != 0)
1058 ++used_page_tables;
1059 }
1060#else
1061 used_page_tables = ctx->next_table;
1062#endif
1063 VERBOSE(" Used %i sub-tables out of %i (spare: %i)\n",
1064 used_page_tables, ctx->tables_num,
1065 ctx->tables_num - used_page_tables);
1066
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +00001067 xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
Antonio Nino Diaza5640252017-04-27 13:30:22 +01001068 ctx->base_level, ctx->execute_never_mask);
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +00001069#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
1070}
1071
1072void init_xlation_table(xlat_ctx_t *ctx)
1073{
1074 mmap_region_t *mm = ctx->mmap;
1075
1076 /* All tables must be zeroed before mapping any region. */
1077
Varun Wadekar6311f632017-06-07 09:57:42 -07001078 for (unsigned int i = 0; i < ctx->base_table_entries; i++)
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +00001079 ctx->base_table[i] = INVALID_DESC;
1080
Varun Wadekar6311f632017-06-07 09:57:42 -07001081 for (unsigned int j = 0; j < ctx->tables_num; j++) {
Antonio Nino Diaz0b64f4e2017-02-27 17:23:54 +00001082#if PLAT_XLAT_TABLES_DYNAMIC
1083 ctx->tables_mapped_regions[j] = 0;
1084#endif
Varun Wadekar6311f632017-06-07 09:57:42 -07001085 for (unsigned int i = 0; i < XLAT_TABLE_ENTRIES; i++)
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +00001086 ctx->tables[j][i] = INVALID_DESC;
1087 }
1088
Antonio Nino Diaz0b64f4e2017-02-27 17:23:54 +00001089 while (mm->size) {
1090 uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +00001091 ctx->base_table_entries, ctx->base_level);
1092
Antonio Nino Diaz0b64f4e2017-02-27 17:23:54 +00001093 if (end_va != mm->base_va + mm->size - 1) {
1094 ERROR("Not enough memory to map region:\n"
1095 " VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
1096 (void *)mm->base_va, mm->base_pa, mm->size, mm->attr);
1097 panic();
1098 }
1099
1100 mm++;
1101 }
1102
Antonio Nino Diaz7bb01fb2017-03-08 14:40:23 +00001103 ctx->initialized = 1;
1104}