/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 * Derived from arch/powerpc/mm/40x_mmu.c:
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/memblock.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>

#include "mmu_decl.h"

#define IMMR_SIZE	(FIX_IMMR_SIZE << PAGE_SHIFT)

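/*
 * Set by the "noltlbs" kernel command line option: map everything with
 * normal pages instead of large TLB entries.
 */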
extern int __map_without_ltlbs;

/*
 * Return PA for this VA if it is in IMMR area, or 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (__map_without_ltlbs)
		return 0;
	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	return 0;
}

/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (__map_without_ltlbs)
		return 0;
	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	return 0;
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
	/* Nothing to do for the time being but keep it similar to other PPC */
}

#define LARGE_PAGE_SIZE_4M	(1<<22)
#define LARGE_PAGE_SIZE_8M	(1<<23)
#define LARGE_PAGE_SIZE_64M	(1<<26)

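/*
 * Map the IMMR area with normal pages, using cache-inhibited guarded
 * protection (PAGE_KERNEL_NCG), one page at a time.
 */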
static void mmu_mapin_immr(void)
{
	unsigned long p = PHYS_IMMR_BASE;
	unsigned long v = VIRT_IMMR_BASE;
	unsigned long f = pgprot_val(PAGE_KERNEL_NCG);
	int offset;

	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
		map_page(v + offset, p + offset, f);
}

/* Address of instructions to patch */
#ifndef CONFIG_PIN_TLB
extern unsigned int DTLBMiss_jmp;
#endif

unsigned long __init mmu_mapin_ram(unsigned long top)
{
	unsigned long v, s, mapped;
	phys_addr_t p;

	v = KERNELBASE;
	p = 0;
	s = top;

	if (__map_without_ltlbs) {
		mmu_mapin_immr();
#ifndef CONFIG_PIN_TLB
		/*
		 * The IMMR area is now covered by normal pages, so nop out
		 * the dedicated IMMR branch in the DTLB miss handler.
		 */
		patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
#endif
		return 0;
	}

#ifdef CONFIG_PPC_4K_PAGES
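	/*
	 * Map as much of lowmem as possible with 8 Mbytes pages. With 4k
	 * pages a PMD entry covers 4 Mbytes, so each 8 Mbytes page needs
	 * two consecutive entries, the second one pointing 4 Mbytes further.
	 */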
	while (s >= LARGE_PAGE_SIZE_8M) {
		pmd_t *pmdp;
		unsigned long val = p | MD_PS8MEG;

		pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
		*pmdp++ = __pmd(val);
		*pmdp++ = __pmd(val + LARGE_PAGE_SIZE_4M);

		v += LARGE_PAGE_SIZE_8M;
		p += LARGE_PAGE_SIZE_8M;
		s -= LARGE_PAGE_SIZE_8M;
	}
#else /* CONFIG_PPC_16K_PAGES */
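	/*
	 * With 16k pages a single PMD entry covers a 64 Mbytes block, so one
	 * entry is written per 64 Mbytes of mapped RAM.
	 */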
	while (s >= LARGE_PAGE_SIZE_64M) {
		pmd_t *pmdp;
		unsigned long val = p | MD_PS8MEG;

		pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
		*pmdp++ = __pmd(val);

		v += LARGE_PAGE_SIZE_64M;
		p += LARGE_PAGE_SIZE_64M;
		s -= LARGE_PAGE_SIZE_64M;
	}
#endif

	mapped = top - s;

	/* If the size of RAM is not an exact power of two, we may not
	 * have covered RAM in its entirety with 8 MiB
	 * pages. Consequently, restrict the top end of RAM currently
	 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
	 * coverage with normal-sized pages (or other reasons) do not
	 * attempt to allocate outside the allowed range.
	 */
	memblock_set_current_limit(mapped);

	return mapped;
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping
	 * physical address 0 on these processors.
	 */
	BUG_ON(first_memblock_base != 0);

#ifdef CONFIG_PIN_TLB
	/* 8xx can only access 24MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01800000));
#else
	/* 8xx can only access 8MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
#endif
}

/*
 * Set up to use a given MMU context.
 * id is context number, pgd is PGD pointer.
 *
 * We place the physical address of the new task's page directory into
 * the MMU base register, and set the ASID compare register with the
 * new "context".
 */
void set_context(unsigned long id, pgd_t *pgd)
{
	s16 offset = (s16)(__pa(swapper_pg_dir));

#ifdef CONFIG_BDI_SWITCH
	pgd_t **ptr = *(pgd_t ***)(KERNELBASE + 0xf0);

	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	*(ptr + 1) = pgd;
#endif

	/* Register M_TW will contain base address of level 1 table minus the
	 * lower part of the kernel PGDIR base address, so that all accesses to
	 * level 1 table are done relative to lower part of kernel PGDIR base
	 * address.
	 */
	mtspr(SPRN_M_TW, __pa(pgd) - offset);

	/* Update context */
	mtspr(SPRN_M_CASID, id);
	/* sync */
	mb();
}

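/*
 * Invalidate the entire instruction cache by writing IDC_INVALL to the
 * IC_CST control register.
 */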
void flush_instruction_cache(void)
{
	isync();
	mtspr(SPRN_IC_CST, IDC_INVALL);
	isync();
}