/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cputlb.h"

#ifdef CONFIG_ANDROID_MEMCHECK
#include "android/qemu/memcheck/memcheck_api.h"
#endif

/* statistics */
int tlb_flush_count;

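/* A page-aligned guest address can never compare equal to the all-ones
 * pattern below, so an "empty" entry never hits in the fast path. */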
static const CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUArchState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

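/* Invalidate one TLB entry, but only if it maps 'addr' (which callers
 * pass page-aligned). Keeping TLB_INVALID_MASK in the comparison mask
 * ensures an already-invalidated entry can never match. */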
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self-modifying code */
void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

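/* True if this entry currently maps plain RAM whose writes go straight to
 * host memory, i.e. no MMIO/NOTDIRTY flags are set in the low bits. */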
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
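        /* One unsigned compare checks start <= addr < start + length. */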
        if ((addr - start) < length) {
            tlb_entry->addr_write &= TARGET_PAGE_MASK;
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
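
/* Worked example of the widening above (assuming 2 MB pages on a 32-bit
 * target): a first mapping at 0x00200000 records addr=0x00200000,
 * mask=0xffe00000.  A second one at 0x00600000 shifts the mask left until
 * both addresses agree, leaving addr=0x00000000, mask=0xff800000, i.e. a
 * single region covering both large pages. */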

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    ptrdiff_t addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    hwaddr iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (ptrdiff_t)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region. This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint. */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
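    /* Both translations are biased by -vaddr: a fast-path hit computes the
     * host address as guest_vaddr + te->addend, and the slow path computes
     * the I/O address as guest_vaddr + env->iotlb[mmu_idx][index]. */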
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }

#ifdef CONFIG_ANDROID_MEMCHECK
    /*
     * If the memchecker is running, we need to make sure that the page
     * cached into the TLB as the result of this operation complies with our
     * requirement that __ld/__stx_mmu be called for accesses to any page
     * containing memory blocks that require access violation checks.
     *
     * We need to ask the memory checker whether to invalidate this page
     * iff:
     * - Memchecking is enabled.
     * - The cached page belongs to the user space.
     * - The request to cache this page didn't come from softmmu. We're
     *   covered there, because after the page was cached here we will
     *   invalidate it in the __ld/__stx_mmu wrapper.
     * - The cached page belongs to RAM, not an I/O area.
     * - The page is cached for read or write access.
     */
#if 0
    if (memcheck_instrument_mmu && mmu_idx == 1 &&
        (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
        (prot & (PAGE_READ | PAGE_WRITE)) &&
        memcheck_is_checked(vaddr & TARGET_PAGE_MASK, TARGET_PAGE_SIZE)) {
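        /* XOR-ing TARGET_PAGE_MASK flips the tag bits so the fast-path
         * comparison always fails, forcing every access through the
         * __ld/__stx_mmu slow path where memcheck can intercept it. */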
        if (prot & PAGE_READ) {
            te->addr_read ^= TARGET_PAGE_MASK;
        }
        if (prot & PAGE_WRITE) {
            te->addr_write ^= TARGET_PAGE_MASK;
        }
    }
#endif
#endif // CONFIG_ANDROID_MEMCHECK
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
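        /* No code-TLB hit: a dummy byte load fills the entry as a side
         * effect, or raises the guest fault if the page is unmapped. */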
        cpu_ldub_code(env1, addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x"
                  TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

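/* Instantiate the code-access load helpers (the _cmmu variants of
 * ld{ub,uw,l,q}); SHIFT is the log2 of the access size in bytes. */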
#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

#undef env