/*
 * Software MMU support
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
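
/*
 * This header is compiled as a template: the including source defines SHIFT
 * (0, 1, 2 or 3) before each inclusion, and the glue() macros expand the
 * helper names accordingly. A sketch of the usual convention (the exact
 * includer and MMUSUFFIX vary between QEMU versions):
 *
 *     #define SHIFT 2
 *     #include "softmmu_template.h"
 *
 * which, with MMUSUFFIX defined as _mmu, produces the 32-bit helpers
 * __ldl_mmu() and __stl_mmu().
 */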

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
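
/* READ_ACCESS_TYPE is the access type passed to tlb_fill() and
 * do_unaligned_access(): 0 for a data load, 1 for a data store (the store
 * helpers below pass the literal 1), and 2 for a code fetch. */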

#if defined(CONFIG_MEMCHECK) && !defined(OUTSIDE_JIT) && !defined(SOFTMMU_CODE_ACCESS)
/*
 * Support for the memory access checker.
 * We need to instrument the __ldx/__stx_mmu routines implemented in this file
 * with callbacks into the access validation routines implemented by the
 * memory checker. Note that (at least for now) we don't do that
 * instrumentation for accesses that fetch code (SOFTMMU_CODE_ACCESS controls
 * that). Also, we don't want to instrument code that is used by the emulator
 * itself (OUTSIDE_JIT controls that).
 */
#define CONFIG_MEMCHECK_MMU
#include "memcheck/memcheck_api.h"
#endif  // CONFIG_MEMCHECK && !OUTSIDE_JIT && !SOFTMMU_CODE_ACCESS

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
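    /* An io_mem_read handler is at most 32 bits wide, so a 64-bit access
     * (SHIFT == 3) is split into two 4-byte reads through slot [2] and
     * recombined according to the target's endianness. */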
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
#ifdef CONFIG_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;
#ifdef CONFIG_MEMCHECK_MMU
    int invalidate_cache = 0;
#endif // CONFIG_MEMCHECK_MMU

    /* test if there is a match for the unaligned or IO access */
    /* XXX: this could be done more efficiently in the memory macro, in a
     * non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
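    /* Fast-path hit check: the page-aligned virtual address must equal the
     * TLB tag. TLB_INVALID_MASK is folded into the tag compare so that an
     * invalidated entry can never match. */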
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* This is not an I/O access: do access verification. */
#ifdef CONFIG_MEMCHECK_MMU
            /* We only validate access to the guest's user space, for which
             * mmu_idx is set to 1. */
            if (memcheck_instrument_mmu && mmu_idx == 1 &&
                memcheck_validate_ld(addr, DATA_SIZE, (target_ulong)GETPC())) {
                /* The memory read crosses a page boundary, so if invalidation
                 * is required we must invalidate two TLB entries. */
                invalidate_cache = 2;
            }
#endif // CONFIG_MEMCHECK_MMU
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
#ifdef CONFIG_MEMCHECK_MMU
            /* We only validate access to the guest's user space, for which
             * mmu_idx is set to 1. */
            if (memcheck_instrument_mmu && mmu_idx == 1) {
                invalidate_cache = memcheck_validate_ld(addr, DATA_SIZE,
                                                        (target_ulong)GETPC());
            }
#endif // CONFIG_MEMCHECK_MMU
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
#ifdef CONFIG_MEMCHECK_MMU
        if (invalidate_cache) {
            /* The accessed memory is under memchecker control. We must
             * invalidate the containing TLB entries to make sure that the
             * next access to this memory invokes _ld/_st_mmu again. Flipping
             * the page-frame bits of the tags with XOR guarantees that the
             * fast-path compare above can never match. */
            env->tlb_table[mmu_idx][index].addr_read ^= TARGET_PAGE_MASK;
            env->tlb_table[mmu_idx][index].addr_write ^= TARGET_PAGE_MASK;
            if (invalidate_cache == 2) {
                /* The read crossed a page boundary, so invalidate the entry
                 * for the second page too. Note that the TLB index of the
                 * next page wraps modulo CPU_TLB_SIZE. */
                int index2 = (index + 1) & (CPU_TLB_SIZE - 1);
                env->tlb_table[mmu_idx][index2].addr_read ^= TARGET_PAGE_MASK;
                env->tlb_table[mmu_idx][index2].addr_write ^= TARGET_PAGE_MASK;
            }
        }
#endif // CONFIG_MEMCHECK_MMU
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
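            /* Recombine the two aligned reads. For example, a little-endian
             * 4-byte load at addr % 4 == 1 gives shift == 8: the low three
             * bytes come from res1 >> 8 and the top byte from res2 << 24. */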
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
#ifdef CONFIG_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;
#ifdef CONFIG_MEMCHECK_MMU
    int invalidate_cache = 0;
#endif // CONFIG_MEMCHECK_MMU

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* This is not an I/O access: do access verification. */
#ifdef CONFIG_MEMCHECK_MMU
            /* We only validate access to the guest's user space, for which
             * mmu_idx is set to 1. */
            if (memcheck_instrument_mmu && mmu_idx == 1 &&
                memcheck_validate_st(addr, DATA_SIZE, (uint64_t)val,
                                     (target_ulong)GETPC())) {
                /* The memory write crosses a page boundary, so if invalidation
                 * is required we must invalidate two TLB entries. */
                invalidate_cache = 2;
            }
#endif // CONFIG_MEMCHECK_MMU
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
#ifdef CONFIG_MEMCHECK_MMU
            /* We only validate access to the guest's user space, for which
             * mmu_idx is set to 1. */
            if (memcheck_instrument_mmu && mmu_idx == 1) {
                invalidate_cache = memcheck_validate_st(addr, DATA_SIZE,
                                                        (uint64_t)val,
                                                        (target_ulong)GETPC());
            }
#endif // CONFIG_MEMCHECK_MMU
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
#ifdef CONFIG_MEMCHECK_MMU
        if (invalidate_cache) {
            /* The accessed memory is under memchecker control. We must
             * invalidate the containing TLB entries to make sure that the
             * next access to this memory invokes _ld/_st_mmu again. */
            env->tlb_table[mmu_idx][index].addr_read ^= TARGET_PAGE_MASK;
            env->tlb_table[mmu_idx][index].addr_write ^= TARGET_PAGE_MASK;
            if (invalidate_cache == 2) {
                /* The write crossed a page boundary, so invalidate the entry
                 * for the second page too. Note that the TLB index of the
                 * next page wraps modulo CPU_TLB_SIZE. */
                int index2 = (index + 1) & (CPU_TLB_SIZE - 1);
                env->tlb_table[mmu_idx][index2].addr_read ^= TARGET_PAGE_MASK;
                env->tlb_table[mmu_idx][index2].addr_write ^= TARGET_PAGE_MASK;
            }
        }
#endif // CONFIG_MEMCHECK_MMU
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
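            /* Store one byte at a time through the byte helper so that each
             * byte is translated independently and can fall on either page. */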
            for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

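/* Undefine the per-instantiation macros so the header can be included again
 * with a different SHIFT. */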
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ