/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg.h"
#include "exec/cputlb.h"
#include "translate-all.h"
#include "qemu/timer.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* The bits remaining after N lower levels of page tables. */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
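/*
 * Illustrative note (not from the original source): l1_map is a radix tree
 * over page indexes.  Assuming, for example, L2_BITS == 10 and a 32-bit
 * address space with 4 KiB pages (TARGET_PAGE_BITS == 12), the 20-bit page
 * index splits as V_L1_BITS_REM = (32 - 12) % 10 = 0, hence V_L1_BITS = 10,
 * V_L1_SIZE = 1024 and V_L1_SHIFT = 10: the top 10 bits pick an l1_map slot
 * and the remaining 10 bits index a leaf array of 1024 PageDesc entries.
 * The actual widths depend on the configured target and host headers.
 */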

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc. */
static void *l1_map[V_L1_SIZE];
static void* l1_phys_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

#ifdef CONFIG_MEMCHECK
/*
 * Memchecker code in this module copies TB PC <-> Guest PC map to the TB
 * descriptor after guest code has been translated in cpu_gen_init routine.
 */
#include "memcheck/memcheck_api.h"

/* Array of (tb_pc, guest_pc) pairs, big enough for all translations. This
 * array is used to obtain guest PC address from a translated PC address.
 * tcg_gen_code_common will fill it up when memchecker is enabled. */
static void* gen_opc_tpc2gpc[OPC_BUF_SIZE * 2];
void** gen_opc_tpc2gpc_ptr = &gen_opc_tpc2gpc[0];
/* Number of (tb_pc, guest_pc) pairs stored in gen_opc_tpc2gpc array. */
unsigned int gen_opc_tpc2gpc_pairs;
#endif // CONFIG_MEMCHECK

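/*
 * Note added for exposition: the DEF/DEF2 macros below turn the #include of
 * "tcg-opc.h" into a compile-time loop over every TCG opcode definition,
 * leaving 'max' holding the largest per-opcode argument count (or
 * TCG_MAX_OP_SIZE if that is bigger).  Multiplying by OPC_MAX_SIZE then gives
 * a conservative upper bound used when sizing generated code.
 */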
/* XXX: suppress that */
unsigned long code_gen_max_block_size(void)
{
    static unsigned long max;

    if (max == 0) {
        max = TCG_MAX_OP_SIZE;
#define DEF(name, iarg, oarg, carg, flags) DEF2((iarg) + (oarg) + (carg))
#define DEF2(copy_size) max = (copy_size > max) ? copy_size : max;
#include "tcg-opc.h"
#undef DEF
#undef DEF2
        max *= OPC_MAX_SIZE;
    }

    return max;
}

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
    tcg_set_frame(&tcg_ctx, TCG_AREG0, offsetof(CPUOldState, temp_buf),
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
}

/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
    /* the following two entries are optional (only used for string ops) */
    /* XXX: not used ? */
    tb->tb_jmp_offset[2] = 0xffff;
    tb->tb_jmp_offset[3] = 0xffff;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef CONFIG_MEMCHECK
    /* Save translated PC -> guest PC map into TB. */
    if (memcheck_enabled && gen_opc_tpc2gpc_pairs && is_cpu_user(env)) {
        tb->tpc2gpc =
                g_malloc(gen_opc_tpc2gpc_pairs * 2 * sizeof(uintptr_t));
        if (tb->tpc2gpc != NULL) {
            memcpy(tb->tpc2gpc, gen_opc_tpc2gpc_ptr,
                   gen_opc_tpc2gpc_pairs * 2 * sizeof(uintptr_t));
            tb->tpc2gpc_pairs = gen_opc_tpc2gpc_pairs;
        }

    }
#endif // CONFIG_MEMCHECK

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}

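/*
 * Note added for exposition: cpu_gen_code() above does the forward
 * translation (guest code -> TCG ops -> host code).  The helper below goes
 * the other way: given a host PC inside a TB (e.g. the return address of a
 * faulting memory access), it re-runs the guest->TCG translation for that TB
 * with gen_intermediate_code_pc() and uses tcg_gen_code_search_pc() to find
 * which guest instruction the host PC belongs to, so the guest CPU state can
 * be rolled back to a consistent instruction boundary.
 */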
/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
                                     uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block. */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag. */
        env->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr)
        return -1;

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0)
        return -1;
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    env->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(tb, env, retaddr);
        return true;
    }
    return false;
}

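/*
 * Illustrative note (not part of the original code): map_exec() below makes
 * a range of host memory executable.  The POSIX variant widens the range to
 * host page boundaries first; for example, with 4 KiB pages, addr == 0x12345
 * and size == 0x100 become an mprotect() over [0x12000, 0x13000).
 */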
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

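/*
 * Note added for exposition: page_find_alloc() walks the l1_map radix tree
 * described above, optionally allocating missing intermediate tables and the
 * PageDesc leaf on the way down.  In user-only builds the allocations use raw
 * mmap() because, as the in-line comment notes, g_malloc() may recurse into a
 * locked mutex in that configuration.
 */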
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1. Always allocated. */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1. */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

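/*
 * Note added for exposition: l1_phys_map mirrors the structure of l1_map but
 * is indexed by guest-physical page number and stores PhysPageDesc leaves.
 * Newly allocated leaves are initialised to IO_MEM_UNASSIGNED so that pages
 * with no backing memory are treated as unassigned I/O.
 */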
PhysPageDesc *phys_page_find_alloc(hwaddr index, int alloc)
{
    void **lp;
    PhysPageDesc *pd;
    int i;

    /* Level 1. Always allocated. */
    lp = l1_phys_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1 */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_malloc0(sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

PhysPageDesc *phys_page_find(hwaddr index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change once a dedicated libc is used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here. */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live. */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb. */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop. */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

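/*
 * Illustrative note (not from the original source): size_code_gen_buffer()
 * only clamps the request.  For example, tb_size == 0 picks the default
 * (32 MiB, or ram_size/4 when a dynamically sized buffer is in use), a
 * request of 512 KiB is raised to MIN_CODE_GEN_BUFFER_SIZE (1 MiB), and a
 * request above MAX_CODE_GEN_BUFFER_SIZE is capped to that host-specific
 * limit.
 */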
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer. */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments. */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default. */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file. */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address. */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel. */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory. */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf) {
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable. */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
#ifdef CONFIG_MEMCHECK
    tb->tpc2gpc = NULL;
    tb->tpc2gpc_pairs = 0;
#endif // CONFIG_MEMCHECK
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
        tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
#ifdef CONFIG_MEMCHECK
        int tb_to_clean;
        for (tb_to_clean = 0; tb_to_clean < TB_JMP_CACHE_SIZE; tb_to_clean++) {
            if (env->tb_jmp_cache[tb_to_clean] != NULL &&
                env->tb_jmp_cache[tb_to_clean]->tpc2gpc != NULL) {
                g_free(env->tb_jmp_cache[tb_to_clean]->tpc2gpc);
                env->tb_jmp_cache[tb_to_clean]->tpc2gpc = NULL;
                env->tb_jmp_cache[tb_to_clean]->tpc2gpc_pairs = 0;
            }
        }
#endif // CONFIG_MEMCHECK
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
           CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

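/*
 * Note added for exposition: the page_next[] and jmp_first/jmp_next[] links
 * used below encode extra state in the low two bits of each pointer.  A value
 * of 0 or 1 records which of the TB's (at most two) guest pages the link
 * belongs to, and the value 2 marks the head of the circular jump list; this
 * is why the list-walking helpers mask the pointer with ~3 before
 * dereferencing it.
 */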
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

#ifdef CONFIG_MEMCHECK
    if (tb->tpc2gpc != NULL) {
        g_free(tb->tpc2gpc);
        tb->tpc2gpc = NULL;
        tb->tpc2gpc_pairs = 0;
    }
#endif // CONFIG_MEMCHECK

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

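/*
 * Illustrative note (not from the original source): set_bits() marks the bit
 * range [start, start + len) in a byte-addressed bitmap.  For example,
 * set_bits(tab, 5, 6) sets bits 5..7 of tab[0] (OR with 0xe0) and bits 0..2
 * of tab[1] (OR with 0x07).
 */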
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

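/*
 * Note added for exposition: tb_gen_code() is the top-level producer of
 * translation blocks.  It grabs a TB slot (flushing everything if the buffer
 * or TB array is exhausted), points the TB at the current code_gen_ptr,
 * calls cpu_gen_code() to emit host code, then bumps code_gen_ptr to the
 * next CODE_GEN_ALIGN boundary and links the TB into the physical hash and
 * per-page lists, covering a second guest page when the block straddles one.
 */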
1034TranslationBlock *tb_gen_code(CPUArchState *env,
1035 target_ulong pc, target_ulong cs_base,
1036 int flags, int cflags)
1037{
1038 TranslationBlock *tb;
1039 uint8_t *tc_ptr;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001040 tb_page_addr_t phys_pc, phys_page2;
1041 target_ulong virt_page2;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001042 int code_gen_size;
1043
1044 phys_pc = get_page_addr_code(env, pc);
1045 tb = tb_alloc(pc);
1046 if (!tb) {
1047 /* flush must be done */
1048 tb_flush(env);
1049 /* cannot fail at this point */
1050 tb = tb_alloc(pc);
1051 /* Don't forget to invalidate previous TB info. */
1052 tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
1053 }
David 'Digit' Turner975bba82014-02-17 23:33:29 +01001054 tc_ptr = tcg_ctx.code_gen_ptr;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001055 tb->tc_ptr = tc_ptr;
1056 tb->cs_base = cs_base;
1057 tb->flags = flags;
1058 tb->cflags = cflags;
1059 cpu_gen_code(env, tb, &code_gen_size);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001060 tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
1061 code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001062
1063 /* check next page if needed */
1064 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1065 phys_page2 = -1;
1066 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1067 phys_page2 = get_page_addr_code(env, virt_page2);
1068 }
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001069 tb_link_page(tb, phys_pc, phys_page2);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001070 return tb;
1071}
1072
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001073/*
1074 * Invalidate all TBs which intersect with the target physical address range
1075 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1076 * 'is_cpu_write_access' should be true if called from a real cpu write
1077 * access: the virtual CPU will exit the current TB if code is modified inside
1078 * this TB.
1079 */
1080void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1081 int is_cpu_write_access)
1082{
1083 while (start < end) {
1084 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1085 start &= TARGET_PAGE_MASK;
1086 start += TARGET_PAGE_SIZE;
1087 }
1088}
1089
1090/*
1091 * Invalidate all TBs which intersect with the target physical address range
1092 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1093 * 'is_cpu_write_access' should be true if called from a real cpu write
1094 * access: the virtual CPU will exit the current TB if code is modified inside
1095 * this TB.
1096 */
1097void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001098 int is_cpu_write_access)
1099{
1100 TranslationBlock *tb, *tb_next, *saved_tb;
1101 CPUArchState *env = cpu_single_env;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001102 tb_page_addr_t tb_start, tb_end;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001103 PageDesc *p;
1104 int n;
1105#ifdef TARGET_HAS_PRECISE_SMC
1106 int current_tb_not_found = is_cpu_write_access;
1107 TranslationBlock *current_tb = NULL;
1108 int current_tb_modified = 0;
1109 target_ulong current_pc = 0;
1110 target_ulong current_cs_base = 0;
1111 int current_flags = 0;
1112#endif /* TARGET_HAS_PRECISE_SMC */
1113
1114 p = page_find(start >> TARGET_PAGE_BITS);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001115 if (!p) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001116 return;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001117 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001118 if (!p->code_bitmap &&
1119 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1120 is_cpu_write_access) {
1121 /* build code bitmap */
1122 build_page_bitmap(p);
1123 }
1124
1125 /* we remove all the TBs in the range [start, end[ */
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001126 /* XXX: see if in some cases it could be faster to invalidate all
1127 the code */
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001128 tb = p->first_tb;
1129 while (tb != NULL) {
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001130 n = (uintptr_t)tb & 3;
1131 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001132 tb_next = tb->page_next[n];
1133 /* NOTE: this is subtle as a TB may span two physical pages */
1134 if (n == 0) {
1135 /* NOTE: tb_end may be after the end of the page, but
1136 it is not a problem */
1137 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1138 tb_end = tb_start + tb->size;
1139 } else {
1140 tb_start = tb->page_addr[1];
1141 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1142 }
1143 if (!(tb_end <= start || tb_start >= end)) {
1144#ifdef TARGET_HAS_PRECISE_SMC
1145 if (current_tb_not_found) {
1146 current_tb_not_found = 0;
1147 current_tb = NULL;
1148 if (env->mem_io_pc) {
1149 /* now we have a real cpu fault */
1150 current_tb = tb_find_pc(env->mem_io_pc);
1151 }
1152 }
1153 if (current_tb == tb &&
1154 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1155 /* If we are modifying the current TB, we must stop
1156 its execution. We could be more precise by checking
1157 that the modification is after the current PC, but it
1158 would require a specialized function to partially
1159 restore the CPU state */
1160
1161 current_tb_modified = 1;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001162 cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001163 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1164 &current_flags);
1165 }
1166#endif /* TARGET_HAS_PRECISE_SMC */
1167 /* we need to do that to handle the case where a signal
1168 occurs while doing tb_phys_invalidate() */
1169 saved_tb = NULL;
1170 if (env) {
1171 saved_tb = env->current_tb;
1172 env->current_tb = NULL;
1173 }
1174 tb_phys_invalidate(tb, -1);
1175 if (env) {
1176 env->current_tb = saved_tb;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001177 if (env->interrupt_request && env->current_tb) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001178 cpu_interrupt(env, env->interrupt_request);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001179 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001180 }
1181 }
1182 tb = tb_next;
1183 }
1184#if !defined(CONFIG_USER_ONLY)
1185 /* if no code remaining, no need to continue to use slow writes */
1186 if (!p->first_tb) {
1187 invalidate_page_bitmap(p);
1188 if (is_cpu_write_access) {
1189 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1190 }
1191 }
1192#endif
1193#ifdef TARGET_HAS_PRECISE_SMC
1194 if (current_tb_modified) {
1195 /* we generate a block containing just the instruction
1196 modifying the memory. It will ensure that it cannot modify
1197 itself */
1198 env->current_tb = NULL;
1199 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1200 cpu_resume_from_signal(env, NULL);
1201 }
1202#endif
1203}
1204
1205/* len must be <= 8 and start must be a multiple of len */
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001206void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001207{
1208 PageDesc *p;
1209 int offset, b;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001210
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001211#if 0
1212 if (1) {
1213 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1214 cpu_single_env->mem_io_vaddr, len,
1215 cpu_single_env->eip,
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001216 cpu_single_env->eip +
1217 (intptr_t)cpu_single_env->segs[R_CS].base);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001218 }
1219#endif
1220 p = page_find(start >> TARGET_PAGE_BITS);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001221 if (!p) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001222 return;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001223 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001224 if (p->code_bitmap) {
1225 offset = start & ~TARGET_PAGE_MASK;
1226 b = p->code_bitmap[offset >> 3] >> (offset & 7);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001227 if (b & ((1 << len) - 1)) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001228 goto do_invalidate;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001229 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001230 } else {
1231 do_invalidate:
1232 tb_invalidate_phys_page_range(start, start + len, 1);
1233 }
1234}
1235
1236void tb_invalidate_phys_page_fast0(hwaddr start, int len) {
1237 tb_invalidate_phys_page_fast(start, len);
1238}
1239
1240#if !defined(CONFIG_SOFTMMU)
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001241static void tb_invalidate_phys_page(tb_page_addr_t addr,
1242 uintptr_t pc, void *puc,
1243 bool locked)
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001244{
1245 TranslationBlock *tb;
1246 PageDesc *p;
1247 int n;
1248#ifdef TARGET_HAS_PRECISE_SMC
1249 TranslationBlock *current_tb = NULL;
1250 CPUArchState *env = cpu_single_env;
1251 int current_tb_modified = 0;
1252 target_ulong current_pc = 0;
1253 target_ulong current_cs_base = 0;
1254 int current_flags = 0;
1255#endif
1256
1257 addr &= TARGET_PAGE_MASK;
1258 p = page_find(addr >> TARGET_PAGE_BITS);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001259 if (!p) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001260 return;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001261 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001262 tb = p->first_tb;
1263#ifdef TARGET_HAS_PRECISE_SMC
1264 if (tb && pc != 0) {
1265 current_tb = tb_find_pc(pc);
1266 }
1267#endif
1268 while (tb != NULL) {
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001269 n = (uintptr_t)tb & 3;
1270 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001271#ifdef TARGET_HAS_PRECISE_SMC
1272 if (current_tb == tb &&
1273 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1274 /* If we are modifying the current TB, we must stop
1275 its execution. We could be more precise by checking
1276 that the modification is after the current PC, but it
1277 would require a specialized function to partially
1278 restore the CPU state */
1279
1280 current_tb_modified = 1;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001281 cpu_restore_state_from_tb(current_tb, env, pc);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001282 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1283 &current_flags);
1284 }
1285#endif /* TARGET_HAS_PRECISE_SMC */
1286 tb_phys_invalidate(tb, addr);
1287 tb = tb->page_next[n];
1288 }
1289 p->first_tb = NULL;
1290#ifdef TARGET_HAS_PRECISE_SMC
1291 if (current_tb_modified) {
1292 /* we generate a block containing just the instruction
1293 modifying the memory. It will ensure that it cannot modify
1294 itself */
1295 env->current_tb = NULL;
1296 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001297 if (locked) {
1298 mmap_unlock();
1299 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001300 cpu_resume_from_signal(env, puc);
1301 }
1302#endif
1303}
1304#endif
1305
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001306/* add the tb in the target page and protect it if necessary */
1307static inline void tb_alloc_page(TranslationBlock *tb,
1308 unsigned int n, tb_page_addr_t page_addr)
1309{
1310 PageDesc *p;
1311#ifndef CONFIG_USER_ONLY
1312 bool page_already_protected;
1313#endif
1314
1315 tb->page_addr[n] = page_addr;
1316 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1317 tb->page_next[n] = p->first_tb;
1318#ifndef CONFIG_USER_ONLY
1319 page_already_protected = p->first_tb != NULL;
1320#endif
1321 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1322 invalidate_page_bitmap(p);
1323
1324#if defined(TARGET_HAS_SMC) || 1
1325
1326#if defined(CONFIG_USER_ONLY)
1327 if (p->flags & PAGE_WRITE) {
1328 target_ulong addr;
1329 PageDesc *p2;
1330 int prot;
1331
1332 /* force the host page as non writable (writes will have a
1333 page fault + mprotect overhead) */
1334 page_addr &= qemu_host_page_mask;
1335 prot = 0;
1336 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1337 addr += TARGET_PAGE_SIZE) {
1338
1339 p2 = page_find(addr >> TARGET_PAGE_BITS);
1340 if (!p2) {
1341 continue;
1342 }
1343 prot |= p2->flags;
1344 p2->flags &= ~PAGE_WRITE;
1345 }
1346 mprotect(g2h(page_addr), qemu_host_page_size,
1347 (prot & PAGE_BITS) & ~PAGE_WRITE);
1348#ifdef DEBUG_TB_INVALIDATE
1349 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1350 page_addr);
1351#endif
1352 }
1353#else
1354 /* if some code is already present, then the pages are already
1355 protected. So we handle the case where only the first TB is
1356 allocated in a physical page */
1357 if (!page_already_protected) {
1358 tlb_protect_code(page_addr);
1359 }
1360#endif
1361
1362#endif /* TARGET_HAS_SMC */
1363}
1364
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001365/* add a new TB and link it to the physical page tables. phys_page2 is
1366 (-1) to indicate that only one page contains the TB. */
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001367static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1368 tb_page_addr_t phys_page2)
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001369{
1370 unsigned int h;
1371 TranslationBlock **ptb;
1372
1373 /* Grab the mmap lock to stop another thread invalidating this TB
1374 before we are done. */
1375 mmap_lock();
1376 /* add in the physical hash table */
1377 h = tb_phys_hash_func(phys_pc);
1378 ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
1379 tb->phys_hash_next = *ptb;
1380 *ptb = tb;
1381
1382 /* add in the page list */
1383 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001384 if (phys_page2 != -1) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001385 tb_alloc_page(tb, 1, phys_page2);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001386 } else {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001387 tb->page_addr[1] = -1;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001388 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001389
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001390 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001391 tb->jmp_next[0] = NULL;
1392 tb->jmp_next[1] = NULL;
1393
1394 /* init original jump addresses */
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001395 if (tb->tb_next_offset[0] != 0xffff) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001396 tb_reset_jump(tb, 0);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001397 }
1398 if (tb->tb_next_offset[1] != 0xffff) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001399 tb_reset_jump(tb, 1);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001400 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001401
1402#ifdef DEBUG_TB_CHECK
1403 tb_page_check();
1404#endif
1405 mmap_unlock();
1406}
1407
1408/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1409 tb[1].tc_ptr. Return NULL if not found */
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001410TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001411{
1412 int m_min, m_max, m;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001413 uintptr_t v;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001414 TranslationBlock *tb;
1415
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001416 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001417 return NULL;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001418 }
1419 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1420 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001421 return NULL;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001422 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001423 /* binary search (cf Knuth) */
1424 m_min = 0;
1425 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1426 while (m_min <= m_max) {
1427 m = (m_min + m_max) >> 1;
1428 tb = &tcg_ctx.tb_ctx.tbs[m];
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001429 v = (uintptr_t)tb->tc_ptr;
1430 if (v == tc_ptr) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001431 return tb;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001432 } else if (tc_ptr < v) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001433 m_max = m - 1;
1434 } else {
1435 m_min = m + 1;
1436 }
1437 }
1438 return &tcg_ctx.tb_ctx.tbs[m_max];
1439}
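/* Usage sketch: callers map a host return address (e.g. one captured with
 * GETPC() or taken from a signal frame) back to its TB before restoring
 * guest state, as tb_check_watchpoint() and cpu_io_recompile() below do:
 *
 *     TranslationBlock *tb = tb_find_pc(retaddr);
 *     if (tb) {
 *         cpu_restore_state_from_tb(tb, env, retaddr);
 *     }
 */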
1440
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001441#ifndef CONFIG_ANDROID
1442#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
1443void tb_invalidate_phys_addr(hwaddr addr)
1444{
1445 ram_addr_t ram_addr;
1446 MemoryRegion *mr;
1447 hwaddr l = 1;
1448
1449 mr = address_space_translate(&address_space_memory, addr, &addr, &l, false);
1450 if (!(memory_region_is_ram(mr)
1451 || memory_region_is_romd(mr))) {
1452 return;
1453 }
1454 ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
1455 + addr;
1456 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1457}
1458#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
1459
1460void tb_check_watchpoint(CPUArchState *env)
1461{
1462 TranslationBlock *tb;
1463
1464 tb = tb_find_pc(env->mem_io_pc);
1465 if (!tb) {
1466 cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
1467 (void *)env->mem_io_pc);
1468 }
1469 cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
1470 tb_phys_invalidate(tb, -1);
1471}
1472#endif // !CONFIG_ANDROID
1473
1474#ifndef CONFIG_USER_ONLY
1475/* mask must never be zero, except for A20 change call */
1476void cpu_interrupt(CPUArchState *cpu, int mask)
1477{
1478 CPUArchState *env = cpu;
1479 int old_mask;
1480
1481 old_mask = cpu->interrupt_request;
1482 cpu->interrupt_request |= mask;
1483
1484 /*
1485 * If called from iothread context, wake the target cpu in
1486 * case it's halted.
1487 */
1488 if (!qemu_cpu_self(cpu)) {
1489 qemu_cpu_kick(cpu);
1490 return;
1491 }
1492
1493 if (use_icount) {
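        /* Writing the high half makes icount_decr.u32 go negative, so the
           currently executing TB stops at its next icount check. */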
1494 env->icount_decr.u16.high = 0xffff;
1495 if (!can_do_io(env)
1496 && (mask & ~old_mask) != 0) {
1497 cpu_abort(env, "Raised interrupt while not in I/O function");
1498 }
1499 } else {
1500 // cpu->tcg_exit_req = 1;
1501 cpu_unlink_tb(env);
1502 }
1503}
1504
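/* Note on the list encoding walked below (a sketch, inferred from the code
 * here and in tb_link_page() above): jmp_first/jmp_next pointers carry the
 * jump slot index in their two low bits, with 2 tagging the list head:
 *
 *     entry = (TranslationBlock *)((uintptr_t)tb | n);    // n = 0 or 1
 *     head  = (TranslationBlock *)((uintptr_t)tb | 2);
 *
 * so walkers recover the slot with "& 3" and the pointer with "& ~3".
 */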
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001505static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1506{
1507 TranslationBlock *tb1, *tb_next, **ptb;
1508 unsigned int n1;
1509
1510 tb1 = tb->jmp_next[n];
1511 if (tb1 != NULL) {
1512 /* find head of list */
1513 for(;;) {
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001514 n1 = (uintptr_t)tb1 & 3;
1515 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001516 if (n1 == 2)
1517 break;
1518 tb1 = tb1->jmp_next[n1];
1519 }
1520 /* we are now sure that tb jumps to tb1 */
1521 tb_next = tb1;
1522
1523 /* remove tb from the jmp_first list */
1524 ptb = &tb_next->jmp_first;
1525 for(;;) {
1526 tb1 = *ptb;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001527 n1 = (uintptr_t)tb1 & 3;
1528 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001529 if (n1 == n && tb1 == tb)
1530 break;
1531 ptb = &tb1->jmp_next[n1];
1532 }
1533 *ptb = tb->jmp_next[n];
1534 tb->jmp_next[n] = NULL;
1535
1536 /* suppress the jump to next tb in generated code */
1537 tb_reset_jump(tb, n);
1538
1539 /* suppress jumps in the tb on which we could have jumped */
1540 tb_reset_jump_recursive(tb_next);
1541 }
1542}
1543
1544void tb_reset_jump_recursive(TranslationBlock *tb)
1545{
1546 tb_reset_jump_recursive2(tb, 0);
1547 tb_reset_jump_recursive2(tb, 1);
1548}
1549
1550/* in deterministic execution mode, instructions doing device I/Os
1551 must be at the end of the TB */
1552void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
1553{
1554 TranslationBlock *tb;
1555 uint32_t n, cflags;
1556 target_ulong pc, cs_base;
1557 uint64_t flags;
1558
1559 tb = tb_find_pc(retaddr);
1560 if (!tb) {
1561 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001562 (void *)retaddr);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001563 }
1564 n = env->icount_decr.u16.low + tb->icount;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001565 cpu_restore_state_from_tb(tb, env, retaddr);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001566 /* Calculate how many instructions had been executed before the fault
1567 occurred. */
1568 n = n - env->icount_decr.u16.low;
1569 /* Generate a new TB ending on the I/O insn. */
1570 n++;
1571 /* On MIPS and SH, delay slot instructions can only be restarted if
1572 they were already the first instruction in the TB. If this is not
1573 the first instruction in a TB then re-execute the preceding
1574 branch. */
1575#if defined(TARGET_MIPS)
1576 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1577 env->active_tc.PC -= 4;
1578 env->icount_decr.u16.low++;
1579 env->hflags &= ~MIPS_HFLAG_BMASK;
1580 }
1581#elif defined(TARGET_SH4)
1582 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1583 && n > 1) {
1584 env->pc -= 2;
1585 env->icount_decr.u16.low++;
1586 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1587 }
1588#endif
1589 /* This should never happen. */
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001590 if (n > CF_COUNT_MASK) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001591 cpu_abort(env, "TB too big during recompile");
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001592 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001593
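    /* The low CF_COUNT_MASK bits of cflags bound the number of guest insns
       in the regenerated TB, and CF_LAST_IO marks the last insn as the one
       allowed to perform I/O. */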
1594 cflags = n | CF_LAST_IO;
1595 pc = tb->pc;
1596 cs_base = tb->cs_base;
1597 flags = tb->flags;
1598 tb_phys_invalidate(tb, -1);
1599 /* FIXME: In theory this could raise an exception. In practice
1600 we have already translated the block once so it's probably ok. */
1601 tb_gen_code(env, pc, cs_base, flags, cflags);
1602 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1603 the first in the TB) then we end up generating a whole new TB and
1604 repeating the fault, which is horribly inefficient.
1605 Better would be to execute just this insn uncached, or generate a
1606 second new TB. */
1607 cpu_resume_from_signal(env, NULL);
1608}
1609
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001610void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
1611{
1612 unsigned int i;
1613
1614 /* Discard jump cache entries for any TB that might overlap the flushed
1615 page: a TB starting on the preceding page can extend into it, hence
 the two hash lookups below. */
1616 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1617 memset(&env->tb_jmp_cache[i], 0,
1618 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1619
1620 i = tb_jmp_cache_hash_page(addr);
1621 memset(&env->tb_jmp_cache[i], 0,
1622 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1623}
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001624
1625void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1626{
1627 int i, target_code_size, max_target_code_size;
1628 int direct_jmp_count, direct_jmp2_count, cross_page;
1629 TranslationBlock *tb;
1630
1631 target_code_size = 0;
1632 max_target_code_size = 0;
1633 cross_page = 0;
1634 direct_jmp_count = 0;
1635 direct_jmp2_count = 0;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001636 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001637 tb = &tcg_ctx.tb_ctx.tbs[i];
1638 target_code_size += tb->size;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001639 if (tb->size > max_target_code_size) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001640 max_target_code_size = tb->size;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001641 }
1642 if (tb->page_addr[1] != -1) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001643 cross_page++;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001644 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001645 if (tb->tb_next_offset[0] != 0xffff) {
1646 direct_jmp_count++;
1647 if (tb->tb_next_offset[1] != 0xffff) {
1648 direct_jmp2_count++;
1649 }
1650 }
1651 }
1652 /* XXX: avoid using doubles ? */
1653 cpu_fprintf(f, "Translation buffer state:\n");
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001654 cpu_fprintf(f, "gen code size %td/%zd\n",
David 'Digit' Turner975bba82014-02-17 23:33:29 +01001655 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001656 tcg_ctx.code_gen_buffer_max_size);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001657 cpu_fprintf(f, "TB count %d/%d\n",
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001658 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001659 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001660 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1661 tcg_ctx.tb_ctx.nb_tbs : 0,
1662 max_target_code_size);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001663 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001664 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1665 tcg_ctx.code_gen_buffer) /
1666 tcg_ctx.tb_ctx.nb_tbs : 0,
1667 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1668 tcg_ctx.code_gen_buffer) /
1669 target_code_size : 0);
1670 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1671 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1672 tcg_ctx.tb_ctx.nb_tbs : 0);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001673 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1674 direct_jmp_count,
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001675 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1676 tcg_ctx.tb_ctx.nb_tbs : 0,
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001677 direct_jmp2_count,
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001678 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1679 tcg_ctx.tb_ctx.nb_tbs : 0);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001680 cpu_fprintf(f, "\nStatistics:\n");
1681 cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001682 cpu_fprintf(f, "TB invalidate count %d\n",
1683 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001684 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1685 tcg_dump_info(f, cpu_fprintf);
1686}
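/* dump_exec_info() is normally reached from the monitor's "info jit"
 * command (an assumption carried over from upstream QEMU); a direct call
 * sketch for debugging:
 *
 *     dump_exec_info(stderr, fprintf);
 */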
1687
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001688#else /* CONFIG_USER_ONLY */
1689
1690void cpu_interrupt(CPUState *cpu, int mask)
1691{
1692 cpu->interrupt_request |= mask;
1693 cpu->tcg_exit_req = 1;
1694}
1695
1696/*
1697 * Walks guest process memory "regions" one by one
1698 * and calls callback function 'fn' for each region.
1699 */
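/* A callback has the walk_memory_regions_fn signature; dump_region() below
 * is the in-tree user. A minimal sketch (my_region_cb is hypothetical):
 *
 *     static int my_region_cb(void *priv, abi_ulong start,
 *                             abi_ulong end, unsigned long prot)
 *     {
 *         return 0;    // non-zero would abort the walk
 *     }
 *
 *     walk_memory_regions(NULL, my_region_cb);
 */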
1700struct walk_memory_regions_data {
1701 walk_memory_regions_fn fn;
1702 void *priv;
1703 uintptr_t start;
1704 int prot;
1705};
1706
1707static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1708 abi_ulong end, int new_prot)
1709{
1710 if (data->start != -1ul) {
1711 int rc = data->fn(data->priv, data->start, end, data->prot);
1712 if (rc != 0) {
1713 return rc;
1714 }
1715 }
1716
1717 data->start = (new_prot ? end : -1ul);
1718 data->prot = new_prot;
1719
1720 return 0;
1721}
1722
1723static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1724 abi_ulong base, int level, void **lp)
1725{
1726 abi_ulong pa;
1727 int i, rc;
1728
1729 if (*lp == NULL) {
1730 return walk_memory_regions_end(data, base, 0);
1731 }
1732
1733 if (level == 0) {
1734 PageDesc *pd = *lp;
1735
1736 for (i = 0; i < L2_SIZE; ++i) {
1737 int prot = pd[i].flags;
1738
1739 pa = base | (i << TARGET_PAGE_BITS);
1740 if (prot != data->prot) {
1741 rc = walk_memory_regions_end(data, pa, prot);
1742 if (rc != 0) {
1743 return rc;
1744 }
1745 }
1746 }
1747 } else {
1748 void **pp = *lp;
1749
1750 for (i = 0; i < L2_SIZE; ++i) {
1751 pa = base | ((abi_ulong)i <<
1752 (TARGET_PAGE_BITS + L2_BITS * level));
1753 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1754 if (rc != 0) {
1755 return rc;
1756 }
1757 }
1758 }
1759
1760 return 0;
1761}
1762
1763int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1764{
1765 struct walk_memory_regions_data data;
1766 uintptr_t i;
1767
1768 data.fn = fn;
1769 data.priv = priv;
1770 data.start = -1ul;
1771 data.prot = 0;
1772
1773 for (i = 0; i < V_L1_SIZE; i++) {
1774 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
1775 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
1776
1777 if (rc != 0) {
1778 return rc;
1779 }
1780 }
1781
1782 return walk_memory_regions_end(&data, 0, 0);
1783}
1784
1785static int dump_region(void *priv, abi_ulong start,
1786 abi_ulong end, unsigned long prot)
1787{
1788 FILE *f = (FILE *)priv;
1789
1790 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
1791 " "TARGET_ABI_FMT_lx" %c%c%c\n",
1792 start, end, end - start,
1793 ((prot & PAGE_READ) ? 'r' : '-'),
1794 ((prot & PAGE_WRITE) ? 'w' : '-'),
1795 ((prot & PAGE_EXEC) ? 'x' : '-'));
1796
1797 return 0;
1798}
1799
1800/* dump memory mappings */
1801void page_dump(FILE *f)
1802{
1803 const int length = sizeof(abi_ulong) * 2;
1804 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
1805 length, "start", length, "end", length, "size", "prot");
1806 walk_memory_regions(f, dump_region);
1807}
1808
1809int page_get_flags(target_ulong address)
1810{
1811 PageDesc *p;
1812
1813 p = page_find(address >> TARGET_PAGE_BITS);
1814 if (!p) {
1815 return 0;
1816 }
1817 return p->flags;
1818}
1819
1820/* Modify the flags of a page and invalidate the code if necessary.
1821 The flag PAGE_WRITE_ORG is positioned automatically depending
1822 on PAGE_WRITE. The mmap_lock should already be held. */
1823void page_set_flags(target_ulong start, target_ulong end, int flags)
1824{
1825 target_ulong addr, len;
1826
1827 /* This function should never be called with addresses outside the
1828 guest address space. If this assert fires, it probably indicates
1829 a missing call to h2g_valid. */
1830#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1831 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1832#endif
1833 assert(start < end);
1834
1835 start = start & TARGET_PAGE_MASK;
1836 end = TARGET_PAGE_ALIGN(end);
1837
1838 if (flags & PAGE_WRITE) {
1839 flags |= PAGE_WRITE_ORG;
1840 }
1841
1842 for (addr = start, len = end - start;
1843 len != 0;
1844 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1845 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1846
1847 /* If the write protection bit is set, then we invalidate
1848 the code inside. */
1849 if (!(p->flags & PAGE_WRITE) &&
1850 (flags & PAGE_WRITE) &&
1851 p->first_tb) {
1852 tb_invalidate_phys_page(addr, 0, NULL, false);
1853 }
1854 p->flags = flags;
1855 }
1856}
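/* Usage sketch (assuming the linux-user mmap path works as in upstream
 * QEMU): after a successful guest mmap() the new protection is recorded
 * for the whole range, e.g.
 *
 *     page_set_flags(start, start + len, prot | PAGE_VALID);
 */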
1857
1858int page_check_range(target_ulong start, target_ulong len, int flags)
1859{
1860 PageDesc *p;
1861 target_ulong end;
1862 target_ulong addr;
1863
1864 /* This function should never be called with addresses outside the
1865 guest address space. If this assert fires, it probably indicates
1866 a missing call to h2g_valid. */
1867#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1868 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001869#endif
1870
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001871 if (len == 0) {
1872 return 0;
1873 }
1874 if (start + len - 1 < start) {
1875 /* We've wrapped around. */
1876 return -1;
1877 }
David 'Digit' Turner3dc53fc2014-01-17 01:23:40 +01001878
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001879 /* must do this before we lose bits in the next step */
1880 end = TARGET_PAGE_ALIGN(start + len);
1881 start = start & TARGET_PAGE_MASK;
David 'Digit' Turner3dc53fc2014-01-17 01:23:40 +01001882
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001883 for (addr = start, len = end - start;
1884 len != 0;
1885 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1886 p = page_find(addr >> TARGET_PAGE_BITS);
1887 if (!p) {
1888 return -1;
1889 }
1890 if (!(p->flags & PAGE_VALID)) {
1891 return -1;
1892 }
1893
1894 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
1895 return -1;
1896 }
1897 if (flags & PAGE_WRITE) {
1898 if (!(p->flags & PAGE_WRITE_ORG)) {
1899 return -1;
1900 }
1901 /* unprotect the page if it was put read-only because it
1902 contains translated code */
1903 if (!(p->flags & PAGE_WRITE)) {
1904 if (!page_unprotect(addr, 0, NULL)) {
1905 return -1;
1906 }
1907 }
1908 return 0;
1909 }
1910 }
1911 return 0;
David 'Digit' Turner3dc53fc2014-01-17 01:23:40 +01001912}
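/* Usage sketch (assuming an access_ok()-style helper as in upstream
 * linux-user code): validate a guest buffer before touching it, e.g.
 *
 *     if (page_check_range(addr, size, PAGE_READ) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 */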
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001913
1914/* called from signal handler: invalidate the code and unprotect the
1915 page. Return TRUE if the fault was successfully handled. */
1916int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
1917{
1918 unsigned int prot;
1919 PageDesc *p;
1920 target_ulong host_start, host_end, addr;
1921
1922 /* Technically this isn't safe inside a signal handler. However we
1923 know this only ever happens in a synchronous SEGV handler, so in
1924 practice it seems to be ok. */
1925 mmap_lock();
1926
1927 p = page_find(address >> TARGET_PAGE_BITS);
1928 if (!p) {
1929 mmap_unlock();
1930 return 0;
1931 }
1932
1933 /* if the page was really writable, then we change its
1934 protection back to writable */
1935 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
1936 host_start = address & qemu_host_page_mask;
1937 host_end = host_start + qemu_host_page_size;
1938
1939 prot = 0;
1940 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
1941 p = page_find(addr >> TARGET_PAGE_BITS);
1942 p->flags |= PAGE_WRITE;
1943 prot |= p->flags;
1944
1945 /* and since the content will be modified, we must invalidate
1946 the corresponding translated code. */
1947 tb_invalidate_phys_page(addr, pc, puc, true);
1948#ifdef DEBUG_TB_CHECK
1949 tb_invalidate_check(addr);
1950#endif
1951 }
1952 mprotect((void *)g2h(host_start), qemu_host_page_size,
1953 prot & PAGE_BITS);
1954
1955 mmap_unlock();
1956 return 1;
1957 }
1958 mmap_unlock();
1959 return 0;
1960}
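/* Caller sketch (assuming a SEGV handler along the lines of upstream
 * user-exec.c): retry a guest write that faulted on a page made read-only
 * to protect translated code:
 *
 *     if (page_unprotect(h2g(address), pc, puc)) {
 *         return 1;    // fault handled, restart the faulting insn
 *     }
 */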
1961#endif /* CONFIG_USER_ONLY */