/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cputlb.h"

#ifdef CONFIG_MEMCHECK
#include "memcheck/memcheck_api.h"
#endif

/* statistics */
int tlb_flush_count;

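/* An invalidated TLB entry: the all-ones addresses can never equal a real
 * page-aligned guest address in the softmmu fast-path comparison, so any
 * lookup that hits this entry falls back to the slow path and refills. */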
static const CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUArchState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    tlb_flush_count++;
}

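/* Invalidate a single TLB entry if any of its read, write or code addresses
 * falls in the page being flushed. Keeping TLB_INVALID_MASK in the mask means
 * an entry that is already the empty (-1) value never matches. */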
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write &= TARGET_PAGE_MASK;
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

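/* tlb_reset_dirty_range() above tags clean RAM mappings with TLB_NOTDIRTY so
 * that the next write takes the slow path and re-dirties the page;
 * tlb_set_dirty1() below drops that tag again once the page is dirty. */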
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUArchState *env, target_ulong vaddr,
                      hwaddr paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    ptrdiff_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    hwaddr iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, is_softmmu, pd);
#endif

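    /* 'address' starts out as the guest virtual address; TLB flag bits such
     * as TLB_MMIO and TLB_NOTDIRTY are OR-ed into its sub-page bits below to
     * force the softmmu slow path for the corresponding access type. */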
    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (ptrdiff_t)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region. This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint. */
            address |= TLB_MMIO;
        }
    }

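    /* The TLB slot is picked from the low bits of the virtual page number.
     * 'addend' is stored as host_ram_ptr - vaddr, so the fast path can turn
     * a guest virtual address into a host pointer with a single addition;
     * the iotlb entry is biased by -vaddr in the same way. */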
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
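    /* Write path: ROM and ROMD pages always go through the I/O callback
     * (TLB_MMIO); RAM that is not yet dirty goes through the NOTDIRTY slow
     * path so dirty tracking and self-modifying-code detection keep working;
     * already-dirty RAM is mapped for direct stores. */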
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }

#ifdef CONFIG_MEMCHECK
    /*
     * If the memory checker is running, we must make sure that the page just
     * cached into the TLB still causes __ld/__stx_mmu to be called for
     * accesses to pages that contain memory blocks requiring access-violation
     * checks.
     *
     * Ask the memory checker whether this page should be invalidated only if
     * all of the following hold:
     * - Memchecking is enabled.
     * - The cached page belongs to user space.
     * - The request to cache this page didn't come from softmmu. That case is
     *   already covered, because after the page is cached here it will be
     *   invalidated in the __ld/__stx_mmu wrapper.
     * - The cached page belongs to RAM, not an I/O area.
     * - The page is cached for read or write access.
     */
    if (memcheck_instrument_mmu && mmu_idx == 1 && !is_softmmu &&
        (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
        (prot & (PAGE_READ | PAGE_WRITE)) &&
        memcheck_is_checked(vaddr & TARGET_PAGE_MASK, TARGET_PAGE_SIZE)) {
        /* Flipping the page bits makes the fast-path TLB compare fail, so
         * accesses to this page are routed through the MMU helpers. */
        if (prot & PAGE_READ) {
            te->addr_read ^= TARGET_PAGE_MASK;
        }
        if (prot & PAGE_WRITE) {
            te->addr_write ^= TARGET_PAGE_MASK;
        }
    }
#endif  // CONFIG_MEMCHECK

    return ret;
}

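/* Wrapper around tlb_set_page_exec() that grants execute permission
 * whenever read permission is requested. */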
int tlb_set_page(CPUArchState *env1, target_ulong vaddr,
                 hwaddr paddr, int prot,
                 int mmu_idx, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
}

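/* Instantiate the code-access ("_cmmu") softmmu helpers by including the
 * template once per access size: SHIFT 0..3 correspond to 1-, 2-, 4- and
 * 8-byte accesses. 'env' is aliased to cpu_single_env here, presumably
 * because these helpers are reached from the translator rather than from
 * generated code that passes an explicit env pointer. */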
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

#undef env