/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cputlb.h"

#ifdef CONFIG_MEMCHECK
#include "memcheck/memcheck_api.h"
#endif

/* statistics */
int tlb_flush_count;

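/* A TLB entry that can never match: -1 is not a valid page-aligned guest
   address, so lookups against this entry always miss. */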
static const CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUArchState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef CONFIG_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

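/* Invalidate a single TLB entry if its read, write or code address matches
   the (page-aligned) virtual address 'addr'. */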
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

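/* Flush, in every MMU mode, any TLB entry that maps the virtual page
   containing 'addr', and drop the cached TB lookups for that page. */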
void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the physical page 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in the physical page 'ram_addr' are no
   longer tested for self-modifying code */
void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

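/* If this entry maps RAM whose host address falls inside
   [start, start + length), mark it TLB_NOTDIRTY so that the next write is
   handled by the slow path and the dirty bitmap can be updated. */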
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                           uintptr_t start, uintptr_t length)
{
    uintptr_t addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK)
                                    | TLB_NOTDIRTY;
        }
    }
}

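/* Once the page has been marked dirty again, drop the TLB_NOTDIRTY flag so
   that subsequent writes take the fast path. */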
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non-SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUArchState *env, target_ulong vaddr,
                      hwaddr paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    ptrdiff_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    hwaddr iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (ptrdiff_t)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
            iotlb |= IO_MEM_NOTDIRTY;
        } else {
            iotlb |= IO_MEM_ROM;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint. */
            address |= TLB_MMIO;
        }
    }

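    /* Fill the TLB slot: the slot index comes from the low bits of the
       virtual page number, 'addend' turns a guest virtual address into a
       host pointer for RAM pages, and 'iotlb' records what the slow path
       needs for MMIO and not-dirty accesses. */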
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }

#ifdef CONFIG_MEMCHECK
    /*
     * If the memory checker is running, we must make sure that the page
     * cached into the TLB by this operation forces __ld/__stx_mmu to be
     * called for accesses to pages containing memory blocks that require
     * access-violation checks.
     *
     * We ask the memory checker whether this page must be invalidated
     * only if all of the following hold:
     * - Memchecking is enabled.
     * - The cached page belongs to user space.
     * - The request to cache this page didn't come from softmmu.  That
     *   case is already covered: after the page is cached here, it will
     *   be invalidated in the __ld/__stx_mmu wrapper.
     * - The cached page belongs to RAM, not an I/O area.
     * - The page is cached for read or write access.
     */
    if (memcheck_instrument_mmu && mmu_idx == 1 && !is_softmmu &&
        (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
        (prot & (PAGE_READ | PAGE_WRITE)) &&
        memcheck_is_checked(vaddr & TARGET_PAGE_MASK, TARGET_PAGE_SIZE)) {
        if (prot & PAGE_READ) {
            te->addr_read ^= TARGET_PAGE_MASK;
        }
        if (prot & PAGE_WRITE) {
            te->addr_write ^= TARGET_PAGE_MASK;
        }
    }
#endif  /* CONFIG_MEMCHECK */

    return ret;
}

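/* Convenience wrapper around tlb_set_page_exec(): readable pages are also
   mapped as executable. */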
int tlb_set_page(CPUArchState *env1, target_ulong vaddr,
                 hwaddr paddr, int prot,
                 int mmu_idx, int is_softmmu)
{
    if (prot & PAGE_READ) {
        prot |= PAGE_EXEC;
    }
    return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
}

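/* Instantiate the code-access ('_cmmu') slow-path load helpers from the
   softmmu template, once for each access size (1, 2, 4 and 8 bytes). */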
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

#undef env