blob: ab23e02a7c84d17aeaeafd952214686b4dfa471e [file] [log] [blame]
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -08001/*
2 * Software MMU support
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
David 'Digit' Turner2910f182010-05-10 18:48:35 -070017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -080018 */
David Turner6a9ef172010-09-09 22:54:36 +020019#include "qemu-timer.h"
20
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -080021#define DATA_SIZE (1 << SHIFT)
22
23#if DATA_SIZE == 8
24#define SUFFIX q
25#define USUFFIX q
26#define DATA_TYPE uint64_t
27#elif DATA_SIZE == 4
28#define SUFFIX l
29#define USUFFIX l
30#define DATA_TYPE uint32_t
31#elif DATA_SIZE == 2
32#define SUFFIX w
33#define USUFFIX uw
34#define DATA_TYPE uint16_t
35#elif DATA_SIZE == 1
36#define SUFFIX b
37#define USUFFIX ub
38#define DATA_TYPE uint8_t
39#else
40#error unsupported data size
41#endif
42
43#ifdef SOFTMMU_CODE_ACCESS
44#define READ_ACCESS_TYPE 2
45#define ADDR_READ addr_code
46#else
47#define READ_ACCESS_TYPE 0
48#define ADDR_READ addr_read
49#endif
50
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -080051#if defined(CONFIG_MEMCHECK) && !defined(OUTSIDE_JIT) && !defined(SOFTMMU_CODE_ACCESS)
52/*
53 * Support for memory access checker.
54 * We need to instrument __ldx/__stx_mmu routines implemented in this file with
55 * callbacks to access validation routines implemented by the memory checker.
56 * Note that (at least for now) we don't do that instrumentation for memory
57 * addressing the code (SOFTMMU_CODE_ACCESS controls that). Also, we don't want
58 * to instrument code that is used by emulator itself (OUTSIDE_JIT controls
59 * that).
60 */
61#define CONFIG_MEMCHECK_MMU
62#include "memcheck/memcheck_api.h"
63#endif // CONFIG_MEMCHECK && !OUTSIDE_JIT && !SOFTMMU_CODE_ACCESS
64
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -080065static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
66 int mmu_idx,
67 void *retaddr);
/* Perform a DATA_SIZE-wide load from an I/O (device) region.
 *
 * physaddr: combined iotlb value; the low bits select the io_mem handler
 *           table entry, the TARGET_PAGE_MASK bits carry the physical page.
 * addr:     guest virtual address of the access (page offset is re-added
 *           to physaddr below).
 * retaddr:  host return address of the generated code performing the
 *           access, used for I/O-instruction recompilation.
 */
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    /* Record where the access came from BEFORE possibly recompiling, so
     * cpu_io_recompile() and the device callback can locate the TB. */
    env->mem_io_pc = (unsigned long)retaddr;
    /* Real device accesses (anything beyond NOTDIRTY/ROM handling) must
     * only run when I/O is allowed at this point of the TB; otherwise
     * force a recompile that isolates the I/O instruction. */
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    /* 1/2/4-byte access: dispatch directly to the matching width handler. */
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
    /* 8-byte access: device handlers are at most 32 bits wide, so split
     * into two 4-byte reads in guest byte order. */
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
96
97/* handle all cases except unaligned access which span two pages */
98DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
99 int mmu_idx)
100{
101 DATA_TYPE res;
102 int index;
103 target_ulong tlb_addr;
104 target_phys_addr_t addend;
105 void *retaddr;
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800106#ifdef CONFIG_MEMCHECK_MMU
107 int invalidate_cache = 0;
108#endif // CONFIG_MEMCHECK_MMU
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800109
110 /* test if there is match for unaligned or IO access */
111 /* XXX: could done more in memory macro in a non portable way */
112 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
113 redo:
114 tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
115 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
116 if (tlb_addr & ~TARGET_PAGE_MASK) {
117 /* IO access */
118 if ((addr & (DATA_SIZE - 1)) != 0)
119 goto do_unaligned_access;
120 retaddr = GETPC();
121 addend = env->iotlb[mmu_idx][index];
122 res = glue(io_read, SUFFIX)(addend, addr, retaddr);
123 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800124 /* This is not I/O access: do access verification. */
125#ifdef CONFIG_MEMCHECK_MMU
126 /* We only validate access to the guest's user space, for which
127 * mmu_idx is set to 1. */
128 if (memcheck_instrument_mmu && mmu_idx == 1 &&
129 memcheck_validate_ld(addr, DATA_SIZE, (target_ulong)GETPC())) {
130 /* Memory read breaks page boundary. So, if required, we
131 * must invalidate two caches in TLB. */
132 invalidate_cache = 2;
133 }
134#endif // CONFIG_MEMCHECK_MMU
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800135 /* slow unaligned access (it spans two pages or IO) */
136 do_unaligned_access:
137 retaddr = GETPC();
138#ifdef ALIGNED_ONLY
139 do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
140#endif
141 res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
142 mmu_idx, retaddr);
143 } else {
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800144#ifdef CONFIG_MEMCHECK_MMU
145 /* We only validate access to the guest's user space, for which
146 * mmu_idx is set to 1. */
147 if (memcheck_instrument_mmu && mmu_idx == 1) {
148 invalidate_cache = memcheck_validate_ld(addr, DATA_SIZE,
149 (target_ulong)GETPC());
150 }
151#endif // CONFIG_MEMCHECK_MMU
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800152 /* unaligned/aligned access in the same page */
153#ifdef ALIGNED_ONLY
154 if ((addr & (DATA_SIZE - 1)) != 0) {
155 retaddr = GETPC();
156 do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
157 }
158#endif
159 addend = env->tlb_table[mmu_idx][index].addend;
160 res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
161 }
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800162#ifdef CONFIG_MEMCHECK_MMU
163 if (invalidate_cache) {
164 /* Accessed memory is under memchecker control. We must invalidate
165 * containing page(s) in order to make sure that next access to them
166 * will invoke _ld/_st_mmu. */
167 env->tlb_table[mmu_idx][index].addr_read ^= TARGET_PAGE_MASK;
168 env->tlb_table[mmu_idx][index].addr_write ^= TARGET_PAGE_MASK;
169 if ((invalidate_cache == 2) && (index < CPU_TLB_SIZE)) {
170 // Read crossed page boundaris. Invalidate second cache too.
171 env->tlb_table[mmu_idx][index + 1].addr_read ^= TARGET_PAGE_MASK;
172 env->tlb_table[mmu_idx][index + 1].addr_write ^= TARGET_PAGE_MASK;
173 }
174 }
175#endif // CONFIG_MEMCHECK_MMU
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800176 } else {
177 /* the page is not in the TLB : fill it */
178 retaddr = GETPC();
179#ifdef ALIGNED_ONLY
180 if ((addr & (DATA_SIZE - 1)) != 0)
181 do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
182#endif
183 tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
184 goto redo;
185 }
186 return res;
187}
188
189/* handle all unaligned cases */
/* handle all unaligned cases */
/* Slow-path load helper: handles the cases the fast path cannot,
 * including an unaligned access that spans two pages.
 *
 * addr:    guest virtual address.
 * mmu_idx: MMU mode index.
 * retaddr: generated-code return address, forwarded to io_read and
 *          tlb_fill for correct fault reporting/recompilation.
 *
 * A page-spanning access is split into two aligned DATA_SIZE loads
 * (recursively through this same function) and the two halves are
 * shifted and merged according to guest endianness.
 */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            /* Align down to DATA_SIZE; the two aligned loads below cover
             * the bytes of the original access across both pages. */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            /* Byte offset of the unaligned address within the aligned
             * DATA_SIZE chunk, in bits. */
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            /* Truncate back to the access width. */
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
238
239#ifndef SOFTMMU_CODE_ACCESS
240
241static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
242 DATA_TYPE val,
243 int mmu_idx,
244 void *retaddr);
245
/* Perform a DATA_SIZE-wide store to an I/O (device) region.
 *
 * physaddr: combined iotlb value; low bits select the io_mem handler
 *           table entry, TARGET_PAGE_MASK bits carry the physical page.
 * val:      value to store.
 * addr:     guest virtual address (page offset re-added to physaddr).
 * retaddr:  host return address of the generated code, used for
 *           I/O-instruction recompilation.
 */
static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    /* Real device accesses must only run when I/O is allowed at this
     * point of the TB; otherwise force a recompile that isolates the
     * I/O instruction. */
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    /* 1/2/4-byte access: dispatch directly to the matching width handler. */
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
    /* 8-byte access: device handlers are at most 32 bits wide, so split
     * into two 4-byte writes in guest byte order. */
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}
273
274void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
275 DATA_TYPE val,
276 int mmu_idx)
277{
278 target_phys_addr_t addend;
279 target_ulong tlb_addr;
280 void *retaddr;
281 int index;
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800282#ifdef CONFIG_MEMCHECK_MMU
283 int invalidate_cache = 0;
284#endif // CONFIG_MEMCHECK_MMU
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800285
286 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
287 redo:
288 tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
289 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
290 if (tlb_addr & ~TARGET_PAGE_MASK) {
291 /* IO access */
292 if ((addr & (DATA_SIZE - 1)) != 0)
293 goto do_unaligned_access;
294 retaddr = GETPC();
295 addend = env->iotlb[mmu_idx][index];
296 glue(io_write, SUFFIX)(addend, val, addr, retaddr);
297 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800298 /* This is not I/O access: do access verification. */
299#ifdef CONFIG_MEMCHECK_MMU
300 /* We only validate access to the guest's user space, for which
301 * mmu_idx is set to 1. */
302 if (memcheck_instrument_mmu && mmu_idx == 1 &&
303 memcheck_validate_st(addr, DATA_SIZE, (uint64_t)val,
304 (target_ulong)GETPC())) {
305 /* Memory write breaks page boundary. So, if required, we
306 * must invalidate two caches in TLB. */
307 invalidate_cache = 2;
308 }
309#endif // CONFIG_MEMCHECK_MMU
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800310 do_unaligned_access:
311 retaddr = GETPC();
312#ifdef ALIGNED_ONLY
313 do_unaligned_access(addr, 1, mmu_idx, retaddr);
314#endif
315 glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
316 mmu_idx, retaddr);
317 } else {
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800318#ifdef CONFIG_MEMCHECK_MMU
319 /* We only validate access to the guest's user space, for which
320 * mmu_idx is set to 1. */
321 if (memcheck_instrument_mmu && mmu_idx == 1) {
322 invalidate_cache = memcheck_validate_st(addr, DATA_SIZE,
323 (uint64_t)val,
324 (target_ulong)GETPC());
325 }
326#endif // CONFIG_MEMCHECK_MMU
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800327 /* aligned/unaligned access in the same page */
328#ifdef ALIGNED_ONLY
329 if ((addr & (DATA_SIZE - 1)) != 0) {
330 retaddr = GETPC();
331 do_unaligned_access(addr, 1, mmu_idx, retaddr);
332 }
333#endif
334 addend = env->tlb_table[mmu_idx][index].addend;
335 glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
336 }
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800337#ifdef CONFIG_MEMCHECK_MMU
338 if (invalidate_cache) {
339 /* Accessed memory is under memchecker control. We must invalidate
340 * containing page(s) in order to make sure that next access to them
341 * will invoke _ld/_st_mmu. */
342 env->tlb_table[mmu_idx][index].addr_read ^= TARGET_PAGE_MASK;
343 env->tlb_table[mmu_idx][index].addr_write ^= TARGET_PAGE_MASK;
344 if ((invalidate_cache == 2) && (index < CPU_TLB_SIZE)) {
345 // Write crossed page boundaris. Invalidate second cache too.
346 env->tlb_table[mmu_idx][index + 1].addr_read ^= TARGET_PAGE_MASK;
347 env->tlb_table[mmu_idx][index + 1].addr_write ^= TARGET_PAGE_MASK;
348 }
349 }
350#endif // CONFIG_MEMCHECK_MMU
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800351 } else {
352 /* the page is not in the TLB : fill it */
353 retaddr = GETPC();
354#ifdef ALIGNED_ONLY
355 if ((addr & (DATA_SIZE - 1)) != 0)
356 do_unaligned_access(addr, 1, mmu_idx, retaddr);
357#endif
358 tlb_fill(addr, 1, mmu_idx, retaddr);
359 goto redo;
360 }
361}
362
363/* handles all unaligned cases */
/* handles all unaligned cases */
/* Slow-path store helper: handles the cases the fast path cannot,
 * including an unaligned access that spans two pages.
 *
 * addr:    guest virtual address.
 * val:     value to store.
 * mmu_idx: MMU mode index.
 * retaddr: generated-code return address, forwarded to io_write and
 *          tlb_fill for correct fault reporting/recompilation.
 *
 * A page-spanning store is decomposed into single-byte stores through
 * the byte-wide slow path, written in guest byte order.
 */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
            /* Store the highest-addressed byte first so that a fault on
             * the second page occurs before any byte has been committed
             * to the first page. */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
408
409#endif /* !defined(SOFTMMU_CODE_ACCESS) */
410
411#undef READ_ACCESS_TYPE
412#undef SHIFT
413#undef DATA_TYPE
414#undef SUFFIX
415#undef USUFFIX
416#undef DATA_SIZE
417#undef ADDR_READ