/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cputlb.h"

/* statistics */
int tlb_flush_count;

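/* An invalidated entry: -1 in every address field keeps TLB_INVALID_MASK
   set, so the masked compare in the lookup fast path can never match a
   page-aligned guest address and the access falls back to the slow path. */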
static const CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case.  This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUArchState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

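    /* Forget any large-page region recorded by tlb_add_large_page():
       tlb_flush_addr == -1 means "none recorded". */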
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
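    /* Drop the entry only if it actually maps 'addr' (already page-aligned
       by the caller) for read, write, or code access. */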
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
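    /* The soft TLB is direct-mapped: a given page can only live at one
       index per MMU mode, so checking that single slot in each mode is
       enough. */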
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM;
}

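/* If the entry's backing host RAM falls inside [start, start + length),
   set TLB_NOTDIRTY so that subsequent guest writes take the slow path
   again and dirty-page tracking sees them. */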
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write &= TARGET_PAGE_MASK;
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
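    /* Widen the mask (i.e. grow the recorded region) until the previously
       recorded address and the new page agree on every bit the mask keeps. */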
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry.  At most one entry for a given virtual address
   is permitted.  Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    ptrdiff_t addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    hwaddr iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (ptrdiff_t)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
            iotlb |= IO_MEM_NOTDIRTY;
        } else {
            iotlb |= IO_MEM_ROM;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

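    /* Fill the direct-mapped slot for this page.  'addend' is stored as
       (host pointer - guest vaddr), so the fast path can form the host
       address as vaddr + te->addend; env->iotlb is biased by -vaddr the
       same way for the I/O slow path. */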
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
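        /* The code address is not in the TLB yet: do a dummy byte load,
           which fills the entry (or raises the guest fault) as a side
           effect. */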
        cpu_ldub_code(env1, addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x"
                  TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

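/* Instantiate the code-access soft-MMU helpers (the '_cmmu' variants used
   when the translator itself reads guest code).  SHIFT selects the access
   size, 1 << SHIFT bytes, i.e. 1, 2, 4 and 8 byte loads. */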
#define MMUSUFFIX _cmmu
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"