/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cputlb.h"

#ifdef CONFIG_MEMCHECK
#include "memcheck/memcheck_api.h"
#endif

/* statistics */
int tlb_flush_count;

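/* An entry whose address fields are all ones can never match a lookup:
   lookup addresses are page aligned, and -1 also keeps TLB_INVALID_MASK
   set, so such an entry always behaves as a TLB miss. */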
static const CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUArchState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

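    /* The TB jump cache maps guest PCs to translated blocks; after a full
       TLB flush those cached lookups may no longer be valid, so clear it. */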
    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    tlb_flush_count++;
}

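/* Invalidate a single entry, but only if it actually maps 'addr' for
   read, write or code access. */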
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

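/* Flush the one TLB slot (per MMU mode) that could hold a mapping for
   'addr': the TLB is direct-mapped and indexed by virtual page number. */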
void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

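/* Mark write entries that point into the host address range
   [start, start + length) as TLB_NOTDIRTY, so that the next write to the
   page goes through the slow path and dirty tracking is updated. */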
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                           uintptr_t start, uintptr_t length)
{
    uintptr_t addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK)
                                    | TLB_NOTDIRTY;
        }
    }
}

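/* Clear TLB_NOTDIRTY for 'vaddr' once the page has been marked dirty, so
   later writes can use the fast path again. */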
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUArchState *env, target_ulong vaddr,
                      hwaddr paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    ptrdiff_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    hwaddr iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

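    /* 'address' is the virtual page address plus TLB_* flag bits; any flag
       bit set here forces accesses to this page through the slow path. */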
    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (ptrdiff_t)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

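    /* Fill the direct-mapped TLB slot for this page.  'addend' is stored so
       that host_addr = guest_vaddr + te->addend for fast-path RAM accesses,
       and the per-entry iotlb value is rebased the same way. */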
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }

#ifdef CONFIG_MEMCHECK
    /*
     * If the memory checker is running, the page we have just cached in the
     * TLB must still cause __ld/__stx_mmu to be called for accesses to pages
     * containing memory blocks that require access violation checks.
     *
     * We ask the memory checker whether this page must be invalidated only
     * if all of the following hold:
     * - Memchecking is enabled.
     * - The cached page belongs to user space.
     * - The request to cache this page didn't come from softmmu. That case
     *   is already covered, because after the page is cached here it will be
     *   invalidated in the __ld/__stx_mmu wrapper.
     * - The cached page belongs to RAM, not an I/O area.
     * - The page is cached for read or write access.
     */
    if (memcheck_instrument_mmu && mmu_idx == 1 && !is_softmmu &&
        (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
        (prot & (PAGE_READ | PAGE_WRITE)) &&
        memcheck_is_checked(vaddr & TARGET_PAGE_MASK, TARGET_PAGE_SIZE)) {
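        /* XOR-ing the page bits makes the stored tag mismatch any aligned
           lookup address, so accesses to this page always take the slow
           path where the memory checker can intervene. */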
        if (prot & PAGE_READ) {
            te->addr_read ^= TARGET_PAGE_MASK;
        }
        if (prot & PAGE_WRITE) {
            te->addr_write ^= TARGET_PAGE_MASK;
        }
    }
#endif  // CONFIG_MEMCHECK

    return ret;
}

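/* Compatibility wrapper: readable pages are also mapped executable before
   deferring to tlb_set_page_exec(). */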
int tlb_set_page(CPUArchState *env1, target_ulong vaddr,
                 hwaddr paddr, int prot,
                 int mmu_idx, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
}

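/* Instantiate the code-access (instruction fetch) softmmu helpers: the
   _cmmu suffix marks them as such, and SHIFT selects the access size
   (1 << SHIFT bytes, i.e. the 1, 2, 4 and 8 byte variants). */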
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

#undef env