blob: 55a511a90bbffa6752c5252f205979d1157144a3 [file] [log] [blame]
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -08001/*
2 * Host code generation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
David 'Digit' Turner2910f182010-05-10 18:48:35 -070017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -080018 */
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +010019#ifdef _WIN32
20#include <windows.h>
21#else
22#include <sys/types.h>
23#include <sys/mman.h>
24#endif
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -080025#include <stdarg.h>
26#include <stdlib.h>
27#include <stdio.h>
28#include <string.h>
29#include <inttypes.h>
30
31#include "config.h"
32
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +010033#include "qemu-common.h"
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -080034#define NO_CPU_IO_DEFS
35#include "cpu.h"
David 'Digit' Turner852088c2013-12-14 23:04:12 +010036#include "exec/exec-all.h"
David 'Digit' Turnercc33b2d2013-12-15 00:09:42 +010037#include "disas/disas.h"
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -080038#include "tcg.h"
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +010039#include "exec/cputlb.h"
40#include "translate-all.h"
David 'Digit' Turner7a78db72013-12-14 11:46:01 +010041#include "qemu/timer.h"
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -080042
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +010043//#define DEBUG_TB_INVALIDATE
44//#define DEBUG_FLUSH
45/* make various TB consistency checks */
46//#define DEBUG_TB_CHECK
47
48#if !defined(CONFIG_USER_ONLY)
49/* TB consistency checks only implemented for usermode emulation. */
50#undef DEBUG_TB_CHECK
51#endif
52
David 'Digit' Turner975bba82014-02-17 23:33:29 +010053#define SMC_BITMAP_USE_THRESHOLD 10
54
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +010055typedef struct PageDesc {
56 /* list of TBs intersecting this ram page */
57 TranslationBlock *first_tb;
58 /* in order to optimize self modifying code, we count the number
59 of lookups we do to a given page to use a bitmap */
60 unsigned int code_write_count;
61 uint8_t *code_bitmap;
62#if defined(CONFIG_USER_ONLY)
63 unsigned long flags;
64#endif
65} PageDesc;
66
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +010067/* In system mode we want L1_MAP to be based on ram offsets,
68 while in user mode we want it to be based on virtual addresses. */
69#if !defined(CONFIG_USER_ONLY)
70#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
71# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +010072#else
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +010073# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
74#endif
75#else
76# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +010077#endif
78
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +010079/* The bits remaining after N lower levels of page tables. */
80#define V_L1_BITS_REM \
81 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
82
83#if V_L1_BITS_REM < 4
84#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
85#else
86#define V_L1_BITS V_L1_BITS_REM
87#endif
88
89#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
90
91#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +010092
93uintptr_t qemu_real_host_page_size;
94uintptr_t qemu_host_page_size;
95uintptr_t qemu_host_page_mask;
96
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +010097/* This is a multi-level map on the virtual address space.
98 The bottom level has pointers to PageDesc. */
99static void *l1_map[V_L1_SIZE];
100static void* l1_phys_map[V_L1_SIZE];
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100101
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800102/* code generation context */
103TCGContext tcg_ctx;
104
David 'Digit' Turner5bb450e2014-03-14 17:19:45 +0100105#ifdef CONFIG_ANDROID_MEMCHECK
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800106/*
107 * Memchecker code in this module copies TB PC <-> Guest PC map to the TB
108 * descriptor after guest code has been translated in cpu_gen_init routine.
109 */
David 'Digit' Turner96e493a2014-03-14 17:17:26 +0100110#include "android/qemu/memcheck/memcheck_api.h"
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800111
112/* Array of (tb_pc, guest_pc) pairs, big enough for all translations. This
113 * array is used to obtain guest PC address from a translated PC address.
114 * tcg_gen_code_common will fill it up when memchecker is enabled. */
David 'Digit' Turnerd9b6cb92010-10-20 19:07:28 +0200115static void* gen_opc_tpc2gpc[OPC_BUF_SIZE * 2];
116void** gen_opc_tpc2gpc_ptr = &gen_opc_tpc2gpc[0];
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800117/* Number of (tb_pc, guest_pc) pairs stored in gen_opc_tpc2gpc array. */
118unsigned int gen_opc_tpc2gpc_pairs;
David 'Digit' Turner5bb450e2014-03-14 17:19:45 +0100119#endif // CONFIG_ANDROID_MEMCHECK
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800120
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800121/* XXX: suppress that */
/* XXX: suppress that */
/* Upper bound (in bytes) on the host code emitted for one translation
   block.  Computed once by expanding every TCG opcode definition from
   tcg-opc.h through the temporary DEF/DEF2 macros to find the largest
   per-op size, then multiplying by the maximum op count.  The result
   is cached in a function-local static. */
unsigned long code_gen_max_block_size(void)
{
    static unsigned long max;

    if (max == 0) {
        max = TCG_MAX_OP_SIZE;
        /* DEF is the per-opcode entry macro used inside tcg-opc.h;
           DEF2 folds each opcode's operand count into 'max'. */
#define DEF(name, iarg, oarg, carg, flags) DEF2((iarg) + (oarg) + (carg))
#define DEF2(copy_size) max = (copy_size > max) ? copy_size : max;
#include "tcg-opc.h"
#undef DEF
#undef DEF2
        max *= OPC_MAX_SIZE;
    }

    return max;
}
138
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100139static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
140 tb_page_addr_t phys_page2);
141
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800142void cpu_gen_init(void)
143{
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800144 tcg_context_init(&tcg_ctx);
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800145}
146
147/* return non zero if the very first instruction is invalid so that
148 the virtual CPU can trigger an exception.
149
150 '*gen_code_size_ptr' contains the size of the generated code (host
151 code).
152*/
David 'Digit' Turner4d6613c2014-01-22 18:19:00 +0100153int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800154{
155 TCGContext *s = &tcg_ctx;
156 uint8_t *gen_code_buf;
157 int gen_code_size;
158#ifdef CONFIG_PROFILER
159 int64_t ti;
160#endif
161
162#ifdef CONFIG_PROFILER
163 s->tb_count1++; /* includes aborted translations because of
164 exceptions */
165 ti = profile_getclock();
166#endif
167 tcg_func_start(s);
168
169 gen_intermediate_code(env, tb);
170
171 /* generate machine code */
172 gen_code_buf = tb->tc_ptr;
173 tb->tb_next_offset[0] = 0xffff;
174 tb->tb_next_offset[1] = 0xffff;
175 s->tb_next_offset = tb->tb_next_offset;
176#ifdef USE_DIRECT_JUMP
177 s->tb_jmp_offset = tb->tb_jmp_offset;
178 s->tb_next = NULL;
179 /* the following two entries are optional (only used for string ops) */
180 /* XXX: not used ? */
181 tb->tb_jmp_offset[2] = 0xffff;
182 tb->tb_jmp_offset[3] = 0xffff;
183#else
184 s->tb_jmp_offset = NULL;
185 s->tb_next = tb->tb_next;
186#endif
187
188#ifdef CONFIG_PROFILER
189 s->tb_count++;
190 s->interm_time += profile_getclock() - ti;
191 s->code_time -= profile_getclock();
192#endif
David 'Digit' Turner5d8f37a2009-09-14 14:32:27 -0700193 gen_code_size = tcg_gen_code(s, gen_code_buf);
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800194 *gen_code_size_ptr = gen_code_size;
195#ifdef CONFIG_PROFILER
196 s->code_time += profile_getclock();
197 s->code_in_len += tb->size;
198 s->code_out_len += gen_code_size;
199#endif
200
David 'Digit' Turner5bb450e2014-03-14 17:19:45 +0100201#ifdef CONFIG_ANDROID_MEMCHECK
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800202 /* Save translated PC -> guest PC map into TB. */
203 if (memcheck_enabled && gen_opc_tpc2gpc_pairs && is_cpu_user(env)) {
204 tb->tpc2gpc =
David 'Digit' Turneraa8236d2014-01-10 17:02:29 +0100205 g_malloc(gen_opc_tpc2gpc_pairs * 2 * sizeof(uintptr_t));
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800206 if (tb->tpc2gpc != NULL) {
207 memcpy(tb->tpc2gpc, gen_opc_tpc2gpc_ptr,
Andrey Petrovc5111a02013-07-10 19:57:36 -0700208 gen_opc_tpc2gpc_pairs * 2 * sizeof(uintptr_t));
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800209 tb->tpc2gpc_pairs = gen_opc_tpc2gpc_pairs;
210 }
Andrey Petrovc5111a02013-07-10 19:57:36 -0700211
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800212 }
David 'Digit' Turner5bb450e2014-03-14 17:19:45 +0100213#endif // CONFIG_ANDROID_MEMCHECK
Vladimir Chtchetkine5389aa12010-02-16 10:38:35 -0800214
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800215#ifdef DEBUG_DISAS
David 'Digit' Turner5d8f37a2009-09-14 14:32:27 -0700216 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
217 qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
218 log_disas(tb->tc_ptr, *gen_code_size_ptr);
219 qemu_log("\n");
220 qemu_log_flush();
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800221 }
222#endif
223 return 0;
224}
225
226/* The cpu state corresponding to 'searched_pc' is restored.
227 */
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100228static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
229 uintptr_t searched_pc)
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800230{
231 TCGContext *s = &tcg_ctx;
232 int j;
David 'Digit' Turner85c62202014-02-16 20:53:40 +0100233 uintptr_t tc_ptr;
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800234#ifdef CONFIG_PROFILER
235 int64_t ti;
236#endif
237
238#ifdef CONFIG_PROFILER
239 ti = profile_getclock();
240#endif
241 tcg_func_start(s);
242
243 gen_intermediate_code_pc(env, tb);
244
245 if (use_icount) {
246 /* Reset the cycle counter to the start of the block. */
247 env->icount_decr.u16.low += tb->icount;
248 /* Clear the IO flag. */
249 env->can_do_io = 0;
250 }
251
252 /* find opc index corresponding to search_pc */
David 'Digit' Turner85c62202014-02-16 20:53:40 +0100253 tc_ptr = (uintptr_t)tb->tc_ptr;
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800254 if (searched_pc < tc_ptr)
255 return -1;
256
257 s->tb_next_offset = tb->tb_next_offset;
258#ifdef USE_DIRECT_JUMP
259 s->tb_jmp_offset = tb->tb_jmp_offset;
260 s->tb_next = NULL;
261#else
262 s->tb_jmp_offset = NULL;
263 s->tb_next = tb->tb_next;
264#endif
David 'Digit' Turner5d8f37a2009-09-14 14:32:27 -0700265 j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800266 if (j < 0)
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100267 return -1;
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800268 /* now find start of instruction before */
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100269 while (s->gen_opc_instr_start[j] == 0) {
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800270 j--;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100271 }
272 env->icount_decr.u16.low -= s->gen_opc_icount[j];
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800273
David 'Digit' Turnerd3d44682011-05-10 17:49:00 +0200274 restore_state_to_opc(env, tb, j);
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800275
276#ifdef CONFIG_PROFILER
277 s->restore_time += profile_getclock() - ti;
278 s->restore_count++;
279#endif
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100280 return 0;
281}
282
283bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
284{
285 TranslationBlock *tb;
286
287 tb = tb_find_pc(retaddr);
288 if (tb) {
289 cpu_restore_state_from_tb(tb, env, retaddr);
290 return true;
291 }
292 return false;
The Android Open Source Project8b23a6c2009-03-03 19:30:32 -0800293}
David 'Digit' Turner3dc53fc2014-01-17 01:23:40 +0100294
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100295#ifdef _WIN32
David 'Digit' Turner975bba82014-02-17 23:33:29 +0100296static inline void map_exec(void *addr, long size)
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100297{
298 DWORD old_protect;
299 VirtualProtect(addr, size,
300 PAGE_EXECUTE_READWRITE, &old_protect);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100301}
302#else
David 'Digit' Turner975bba82014-02-17 23:33:29 +0100303static inline void map_exec(void *addr, long size)
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100304{
305 unsigned long start, end, page_size;
306
307 page_size = getpagesize();
308 start = (unsigned long)addr;
309 start &= ~(page_size - 1);
310
311 end = (unsigned long)addr + size;
312 end += page_size - 1;
313 end &= ~(page_size - 1);
314
315 mprotect((void *)start, end - start,
316 PROT_READ | PROT_WRITE | PROT_EXEC);
317}
318#endif
319
/* Discover host and target page geometry, and (for BSD user-mode
   builds only) pre-mark every host memory region already mapped in
   this process as PAGE_RESERVED so guest mappings avoid them. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been preset; otherwise mirror the
       real host page size, clamped up to the target page size. */
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD: enumerate our mappings via kinfo_getvmmap(). */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        /* Region extends past guest address space:
                           reserve through the top of it. */
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Other BSDs: parse the Linux-compat maps file instead. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
407
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100408static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100409{
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100410 PageDesc *pd;
411 void **lp;
412 int i;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100413
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100414#if defined(CONFIG_USER_ONLY)
415 /* We can't use g_malloc because it may recurse into a locked mutex. */
416# define ALLOC(P, SIZE) \
417 do { \
418 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
419 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
420 } while (0)
421#else
422# define ALLOC(P, SIZE) \
423 do { P = g_malloc0(SIZE); } while (0)
424#endif
425
426 /* Level 1. Always allocated. */
427 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
428
429 /* Level 2..N-1. */
430 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
431 void **p = *lp;
432
433 if (p == NULL) {
434 if (!alloc) {
435 return NULL;
436 }
437 ALLOC(p, sizeof(void *) * L2_SIZE);
438 *lp = p;
439 }
440
441 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100442 }
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100443
444 pd = *lp;
445 if (pd == NULL) {
446 if (!alloc) {
447 return NULL;
448 }
449 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
450 *lp = pd;
451 }
452
453#undef ALLOC
454
455 return pd + (index & (L2_SIZE - 1));
456}
457
/* Look up the PageDesc for 'index' without allocating; NULL if absent. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
462
/* Walk the l1_phys_map radix tree (physical-address flavour of
   page_find_alloc) and return the PhysPageDesc for page 'index'.
   If 'alloc' is non-zero, missing levels are created; new leaf
   entries start out unassigned (IO_MEM_UNASSIGNED). */
PhysPageDesc *phys_page_find_alloc(hwaddr index, int alloc)
{
    void **lp;
    PhysPageDesc *pd;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1 */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_malloc0(sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        /* Leaf array is g_malloc'd (not zeroed) because every field is
           explicitly initialized in the loop below. */
        pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
501
/* Look up the PhysPageDesc for 'index' without allocating; NULL if absent. */
PhysPageDesc *phys_page_find(hwaddr index)
{
    return phys_page_find_alloc(index, 0);
}
506
507#if !defined(CONFIG_USER_ONLY)
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100508#define mmap_lock() do { } while (0)
509#define mmap_unlock() do { } while (0)
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100510#endif
511
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100512#if defined(CONFIG_USER_ONLY)
513/* Currently it is not recommended to allocate big chunks of data in
David 'Digit' Turner975bba82014-02-17 23:33:29 +0100514 user mode. It will change when a dedicated libc will be used. */
515/* ??? 64-bit hosts ought to have no problem mmaping data outside the
516 region in which the guest needs to run. Revisit this. */
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100517#define USE_STATIC_CODE_GEN_BUFFER
518#endif
519
David 'Digit' Turner975bba82014-02-17 23:33:29 +0100520/* ??? Should configure for this, not list operating systems here. */
521#if (defined(__linux__) \
522 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
523 || defined(__DragonFly__) || defined(__OpenBSD__) \
524 || defined(__NetBSD__))
525# define USE_MMAP
526#endif
527
528/* Minimum size of the code gen buffer. This number is randomly chosen,
529 but not so small that we can't have a fair number of TB's live. */
530#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
531
532/* Maximum size of the code gen buffer we'd like to use. Unless otherwise
533 indicated, this is constrained by the range of direct branches on the
534 host cpu, as used by the TCG implementation of goto_tb. */
535#if defined(__x86_64__)
536# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
537#elif defined(__sparc__)
538# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
539#elif defined(__aarch64__)
540# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
541#elif defined(__arm__)
542# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
543#elif defined(__s390x__)
544 /* We have a +- 4GB range on the branches; leave some slop. */
545# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
546#else
547# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
548#endif
549
550#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
551
552#define DEFAULT_CODE_GEN_BUFFER_SIZE \
553 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
554 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
555
556static inline size_t size_code_gen_buffer(size_t tb_size)
557{
558 /* Size the buffer. */
559 if (tb_size == 0) {
560#ifdef USE_STATIC_CODE_GEN_BUFFER
561 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
562#else
563 /* ??? Needs adjustments. */
564 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
565 static buffer, we could size this on RESERVED_VA, on the text
566 segment size of the executable, or continue to use the default. */
567 tb_size = (unsigned long)(ram_size / 4);
568#endif
569 }
570 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
571 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
572 }
573 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
574 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
575 }
576 tcg_ctx.code_gen_buffer_size = tb_size;
577 return tb_size;
578}
579
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100580#ifdef USE_STATIC_CODE_GEN_BUFFER
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100581static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
David 'Digit' Turner975bba82014-02-17 23:33:29 +0100582 __attribute__((aligned(CODE_GEN_ALIGN)));
583
584static inline void *alloc_code_gen_buffer(void)
585{
586 map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
587 return static_code_gen_buffer;
588}
589#elif defined(USE_MMAP)
590static inline void *alloc_code_gen_buffer(void)
591{
592 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
593 uintptr_t start = 0;
594 void *buf;
595
596 /* Constrain the position of the buffer based on the host cpu.
597 Note that these addresses are chosen in concert with the
598 addresses assigned in the relevant linker script file. */
599# if defined(__PIE__) || defined(__PIC__)
600 /* Don't bother setting a preferred location if we're building
601 a position-independent executable. We're more likely to get
602 an address near the main executable if we let the kernel
603 choose the address. */
604# elif defined(__x86_64__) && defined(MAP_32BIT)
605 /* Force the memory down into low memory with the executable.
606 Leave the choice of exact location with the kernel. */
607 flags |= MAP_32BIT;
608 /* Cannot expect to map more than 800MB in low memory. */
609 if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
610 tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
611 }
612# elif defined(__sparc__)
613 start = 0x40000000ul;
614# elif defined(__s390x__)
615 start = 0x90000000ul;
616# endif
617
618 buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
619 PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
620 return buf == MAP_FAILED ? NULL : buf;
621}
622#else
623static inline void *alloc_code_gen_buffer(void)
624{
625 void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);
626
627 if (buf) {
628 map_exec(buf, tcg_ctx.code_gen_buffer_size);
629 }
630 return buf;
631}
632#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100633
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100634static inline void code_gen_alloc(size_t tb_size)
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100635{
David 'Digit' Turner975bba82014-02-17 23:33:29 +0100636 tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
637 tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
638 if (tcg_ctx.code_gen_buffer == NULL) {
639 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
640 exit(1);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100641 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100642
David 'Digit' Turner975bba82014-02-17 23:33:29 +0100643 qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
644 QEMU_MADV_HUGEPAGE);
645
646 /* Steal room for the prologue at the end of the buffer. This ensures
647 (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
648 from TB's to the prologue are going to be in range. It also means
649 that we don't need to mark (additional) portions of the data segment
650 as executable. */
651 tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
652 tcg_ctx.code_gen_buffer_size - 1024;
653 tcg_ctx.code_gen_buffer_size -= 1024;
654
655 tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
656 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
657 tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
658 CODE_GEN_AVG_BLOCK_SIZE;
659 tcg_ctx.tb_ctx.tbs =
660 g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100661}
662
663/* Must be called before using the QEMU cpus. 'tb_size' is the size
664 (in bytes) allocated to the translation buffer. Zero means default
665 size. */
666void tcg_exec_init(unsigned long tb_size)
667{
668 cpu_gen_init();
669 code_gen_alloc(tb_size);
David 'Digit' Turner975bba82014-02-17 23:33:29 +0100670 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100671 page_init();
672#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
673 /* There's no guest base to take into account, so go ahead and
674 initialize the prologue now. */
675 tcg_prologue_init(&tcg_ctx);
676#endif
677}
678
/* True once tcg_exec_init()/code_gen_alloc() has installed a buffer. */
bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
683
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100684/* Allocate a new translation block. Flush the translation buffer if
685 too many translation blocks or too much generated code. */
686static TranslationBlock *tb_alloc(target_ulong pc)
687{
688 TranslationBlock *tb;
689
690 if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
691 (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
692 tcg_ctx.code_gen_buffer_max_size) {
693 return NULL;
694 }
695 tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
696 tb->pc = pc;
697 tb->cflags = 0;
David 'Digit' Turner5bb450e2014-03-14 17:19:45 +0100698#ifdef CONFIG_ANDROID_MEMCHECK
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100699 tb->tpc2gpc = NULL;
700 tb->tpc2gpc_pairs = 0;
David 'Digit' Turner5bb450e2014-03-14 17:19:45 +0100701#endif // CONFIG_ANDROID_MEMCHECK
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100702 return tb;
703}
704
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100705void tb_free(TranslationBlock *tb)
706{
707 /* In practice this is mostly used for single use temporary TB
708 Ignore the hard cases and just back up if this TB happens to
709 be the last one generated. */
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100710 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
711 tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
David 'Digit' Turner975bba82014-02-17 23:33:29 +0100712 tcg_ctx.code_gen_ptr = tb->tc_ptr;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100713 tcg_ctx.tb_ctx.nb_tbs--;
714 }
715}
716
717static inline void invalidate_page_bitmap(PageDesc *p)
718{
719 if (p->code_bitmap) {
720 g_free(p->code_bitmap);
721 p->code_bitmap = NULL;
722 }
723 p->code_write_count = 0;
724}
725
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100726/* Set to NULL all the 'first_tb' fields in all PageDescs. */
727static void page_flush_tb_1(int level, void **lp)
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100728{
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100729 int i;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100730
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100731 if (*lp == NULL) {
732 return;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100733 }
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100734 if (level == 0) {
735 PageDesc *pd = *lp;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100736
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100737 for (i = 0; i < L2_SIZE; ++i) {
738 pd[i].first_tb = NULL;
739 invalidate_page_bitmap(pd + i);
740 }
741 } else {
742 void **pp = *lp;
743
744 for (i = 0; i < L2_SIZE; ++i) {
745 page_flush_tb_1(level - 1, pp + i);
746 }
747 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100748}
749
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100750static void page_flush_tb(void)
751{
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100752 int i;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100753
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100754 for (i = 0; i < V_L1_SIZE; i++) {
755 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100756 }
757}
758
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    /* Sanity check: the code pointer must still be inside the buffer. */
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    /* Invalidate every CPU's jump cache (and, with memcheck, free the
       per-TB translated-PC maps referenced from it first). */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;
#ifdef CONFIG_ANDROID_MEMCHECK
        int tb_to_clean;
        for (tb_to_clean = 0; tb_to_clean < TB_JMP_CACHE_SIZE; tb_to_clean++) {
            if (env->tb_jmp_cache[tb_to_clean] != NULL &&
                env->tb_jmp_cache[tb_to_clean]->tpc2gpc != NULL) {
                g_free(env->tb_jmp_cache[tb_to_clean]->tpc2gpc);
                env->tb_jmp_cache[tb_to_clean]->tpc2gpc = NULL;
                env->tb_jmp_cache[tb_to_clean]->tpc2gpc_pairs = 0;
            }
        }
#endif // CONFIG_ANDROID_MEMCHECK
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* Empty the physical hash table and the per-page TB lists. */
    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
           CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
802
803#ifdef DEBUG_TB_CHECK
804
805static void tb_invalidate_check(target_ulong address)
806{
807 TranslationBlock *tb;
808 int i;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100809
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100810 address &= TARGET_PAGE_MASK;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100811 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
812 for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100813 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
814 address >= tb->pc + tb->size)) {
815 printf("ERROR invalidate: address=" TARGET_FMT_lx
816 " PC=%08lx size=%04x\n",
817 address, (long)tb->pc, tb->size);
818 }
819 }
820 }
821}
822
/* verify that all the pages have correct rights for code */
/* Debug helper (DEBUG_TB_CHECK builds only): walk every TB in the
   physical hash table and report any whose first or last guest page is
   still writable — translated code pages are expected to have been
   write-protected. */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
841
842#endif
843
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100844static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100845{
846 TranslationBlock *tb1;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100847
848 for (;;) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100849 tb1 = *ptb;
850 if (tb1 == tb) {
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100851 *ptb = tb1->phys_hash_next;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100852 break;
853 }
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +0100854 ptb = &tb1->phys_hash_next;
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +0100855 }
856}
857
/* Unlink 'tb' from a per-page TB list rooted at *ptb.  List pointers
   carry a tag in their low 2 bits (which of the pointed-to TB's two
   page slots continues the chain), so the tag must be masked off before
   dereferencing.  The caller guarantees 'tb' is on the list. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;   /* tag: page slot of tb1 */
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
874
/* Remove 'tb' from the circular list of TBs that jump to the target of
   its jump slot 'n'.  Entries are tagged pointers: low-bit values 0/1
   select which jmp_next slot of the pointed-to TB continues the chain,
   and tag 2 marks the list head (the target TB's jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                /* reached the head: continue through its jmp_first */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
903
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* re-point the patched jump at the TB's own code just after the
       jump instruction (offset recorded at translation time) */
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
910
/* invalidate one TB */
/* Unlink 'tb' from every structure that can reach it: the physical hash
   table, the per-page TB lists, each CPU's jump cache, and both jump
   chaining lists.  'page_addr' is the page the caller is already tearing
   down (or -1): unlinking from that page's list is skipped because the
   caller clears the whole list itself. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;
        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            /* tag 2 marks the list head: no more TBs jump here */
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

#ifdef CONFIG_ANDROID_MEMCHECK
    /* release the memchecker translated-PC to guest-PC mapping */
    if (tb->tpc2gpc != NULL) {
        g_free(tb->tpc2gpc);
        tb->tpc2gpc = NULL;
        tb->tpc2gpc_pairs = 0;
    }
#endif // CONFIG_ANDROID_MEMCHECK

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
977
/* Set bits [start, start + len) in the byte-addressed bitmap 'tab'.
   Bit i of the map lives in tab[i >> 3] at position (i & 7). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;
    int end = start + len;

    for (bit = start; bit < end; bit++) {
        tab[bit >> 3] |= (uint8_t)(1u << (bit & 7));
    }
}
1004
/* Build p->code_bitmap: one bit per byte of the target page, set where
   some TB's guest code lives.  tb_invalidate_phys_page_fast() uses it to
   skip writes that cannot hit translated code. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer select which of the TB's two
           page slots this page corresponds to */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            /* second page of a spanning TB: covers [0, end-of-TB) */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
1033
/* Translate one guest basic block starting at (pc, cs_base, flags) and
   register the resulting TB.  If the TB pool is exhausted, all TBs are
   flushed first (which is why callers must re-check cached TB pointers
   via tb_invalidated_flag).  Returns the freshly linked TB. */
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* bump the host-code write pointer, rounded up to CODE_GEN_ALIGN */
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills into a second page: record it too */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
1072
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001073/*
1074 * Invalidate all TBs which intersect with the target physical address range
1075 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1076 * 'is_cpu_write_access' should be true if called from a real cpu write
1077 * access: the virtual CPU will exit the current TB if code is modified inside
1078 * this TB.
1079 */
1080void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1081 int is_cpu_write_access)
1082{
1083 while (start < end) {
1084 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1085 start &= TARGET_PAGE_MASK;
1086 start += TARGET_PAGE_SIZE;
1087 }
1088}
1089
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        /* no translated code on this page: nothing to do */
        return;
    }
    /* after enough code-writes to this page, start maintaining a bitmap
       so future writes can be filtered cheaply */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits encode which page slot of the TB this list is for */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (cpu->interrupt_request && env->current_tb) {
                    /* re-deliver interrupts that arrived while
                       current_tb was cleared */
                    cpu_interrupt(env, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1205
/* len must be <= 8 and start must be a multiple of len */
/* Fast path for code writes: if the page has a code bitmap and none of
   the 'len' written bytes overlap translated code, return without doing
   any work; otherwise fall through to the full page-range invalidation. */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        /* test the 'len' bits starting at 'offset' in one shot */
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
        /* no bitmap yet: always take the slow path (the label sits in
           the else branch so both cases share the call below) */
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1236
1237void tb_invalidate_phys_page_fast0(hwaddr start, int len) {
1238 tb_invalidate_phys_page_fast(start, len);
1239}
1240
1241#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB registered on the target page containing 'addr'
   (user-mode only).  'pc' is the host PC of the faulting write (0 if
   unknown) and 'puc'/'locked' are forwarded to the signal-resume path
   when the currently executing TB had to be regenerated. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* locate the TB we were executing when the write happened */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the list pointer tag the TB's page slot */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(env, puc);
    }
#endif
}
1305#endif
1306
/* add the tb in the target page and protect it if necessary */
/* Register page slot 'n' (0 or 1) of 'tb' on the PageDesc for
   'page_addr', tagging the list pointer with 'n', and ensure the guest
   page is write-protected so self-modifying code is detected. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    /* prepend to the page list, tagging the pointer with the slot index */
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: accumulate their
           flags and clear PAGE_WRITE on each */
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1365
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* empty incoming-jump list: tag 2 on a self-pointer marks the head */
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    /* 0xffff in tb_next_offset[] means the slot has no patchable jump */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1408
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    /* reject host addresses outside the generated-code buffer */
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    /* relies on tbs[] being ordered by ascending tc_ptr, which holds
       because code is generated sequentially into the buffer */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    /* no exact hit: tbs[m_max] is the last TB starting at or below
       tc_ptr, i.e. the TB containing the address */
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
1441
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001442#ifndef CONFIG_ANDROID
1443#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
/* Invalidate the single byte at physical address 'addr' if it lies in
   RAM or a ROM-device region; used when planting breakpoints so stale
   translations of that address are regenerated. */
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory, addr, &addr, &l, false);
    /* only RAM/ROM-device regions can hold translated code */
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
1459#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
1460
/* After a watchpoint hit during a memory access: restore the CPU state
   to the faulting guest instruction and invalidate the TB it came from,
   so execution restarts with a fresh translation.  Aborts if the host
   PC does not map back to any TB. */
void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}
1473#endif // !CONFIG_ANDROID
1474
1475#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
/* Raise the interrupt bits in 'mask' on the CPU owning 'env', then make
   sure the CPU notices: kick it if we are on another thread, otherwise
   force the current translation to stop. */
void cpu_interrupt(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        /* force icount-driven execution to stop at the next check */
        env->icount_decr.u16.high = 0xffff;
        /* a newly raised interrupt outside an I/O helper breaks
           deterministic (icount) execution */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        // cpu->tcg_exit_req = 1;
        cpu_unlink_tb(env);
    }
}
1505
/* Unchain jump slot 'n' of 'tb': find the TB it jumps to by walking the
   tagged circular list to its head (tag 2), remove 'tb' from that list,
   repatch the jump, and recurse into the target so its own outgoing
   jumps are unchained as well. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1544
1545void tb_reset_jump_recursive(TranslationBlock *tb)
1546{
1547 tb_reset_jump_recursive2(tb, 0);
1548 tb_reset_jump_recursive2(tb, 1);
1549}
1550
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
/* Called when an I/O access happened mid-TB under icount: retranslate
   the block so it ends exactly on the I/O instruction, then restart
   execution.  'retaddr' is the host return address inside the old TB. */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    /* CF_LAST_IO tells the translator the last insn may do I/O */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
1610
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001611void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
1612{
1613 unsigned int i;
1614
1615 /* Discard jump cache entries for any tb which might potentially
1616 overlap the flushed page. */
1617 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1618 memset(&env->tb_jmp_cache[i], 0,
1619 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1620
1621 i = tb_jmp_cache_hash_page(addr);
1622 memset(&env->tb_jmp_cache[i], 0,
1623 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1624}
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001625
1626void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1627{
1628 int i, target_code_size, max_target_code_size;
1629 int direct_jmp_count, direct_jmp2_count, cross_page;
1630 TranslationBlock *tb;
1631
1632 target_code_size = 0;
1633 max_target_code_size = 0;
1634 cross_page = 0;
1635 direct_jmp_count = 0;
1636 direct_jmp2_count = 0;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001637 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001638 tb = &tcg_ctx.tb_ctx.tbs[i];
1639 target_code_size += tb->size;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001640 if (tb->size > max_target_code_size) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001641 max_target_code_size = tb->size;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001642 }
1643 if (tb->page_addr[1] != -1) {
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001644 cross_page++;
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001645 }
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001646 if (tb->tb_next_offset[0] != 0xffff) {
1647 direct_jmp_count++;
1648 if (tb->tb_next_offset[1] != 0xffff) {
1649 direct_jmp2_count++;
1650 }
1651 }
1652 }
1653 /* XXX: avoid using doubles ? */
1654 cpu_fprintf(f, "Translation buffer state:\n");
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001655 cpu_fprintf(f, "gen code size %td/%zd\n",
David 'Digit' Turner975bba82014-02-17 23:33:29 +01001656 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001657 tcg_ctx.code_gen_buffer_max_size);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001658 cpu_fprintf(f, "TB count %d/%d\n",
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001659 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001660 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001661 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1662 tcg_ctx.tb_ctx.nb_tbs : 0,
1663 max_target_code_size);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001664 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001665 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1666 tcg_ctx.code_gen_buffer) /
1667 tcg_ctx.tb_ctx.nb_tbs : 0,
1668 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1669 tcg_ctx.code_gen_buffer) /
1670 target_code_size : 0);
1671 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1672 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1673 tcg_ctx.tb_ctx.nb_tbs : 0);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001674 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1675 direct_jmp_count,
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001676 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1677 tcg_ctx.tb_ctx.nb_tbs : 0,
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001678 direct_jmp2_count,
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001679 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1680 tcg_ctx.tb_ctx.nb_tbs : 0);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001681 cpu_fprintf(f, "\nStatistics:\n");
1682 cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001683 cpu_fprintf(f, "TB invalidate count %d\n",
1684 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001685 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1686 tcg_dump_info(f, cpu_fprintf);
1687}
1688
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001689#else /* CONFIG_USER_ONLY */
1690
1691void cpu_interrupt(CPUState *cpu, int mask)
1692{
1693 cpu->interrupt_request |= mask;
1694 cpu->tcg_exit_req = 1;
1695}
1696
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;  /* callback invoked for each finished region */
    void *priv;                 /* opaque pointer passed through to 'fn' */
    uintptr_t start;            /* start of region being accumulated,
                                   -1ul when no region is open */
    int prot;                   /* protection flags of the open region */
};
1707
1708static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1709 abi_ulong end, int new_prot)
1710{
1711 if (data->start != -1ul) {
1712 int rc = data->fn(data->priv, data->start, end, data->prot);
1713 if (rc != 0) {
1714 return rc;
1715 }
1716 }
1717
1718 data->start = (new_prot ? end : -1ul);
1719 data->prot = new_prot;
1720
1721 return 0;
1722}
1723
1724static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1725 abi_ulong base, int level, void **lp)
1726{
1727 abi_ulong pa;
1728 int i, rc;
1729
1730 if (*lp == NULL) {
1731 return walk_memory_regions_end(data, base, 0);
1732 }
1733
1734 if (level == 0) {
1735 PageDesc *pd = *lp;
1736
1737 for (i = 0; i < L2_SIZE; ++i) {
1738 int prot = pd[i].flags;
1739
1740 pa = base | (i << TARGET_PAGE_BITS);
1741 if (prot != data->prot) {
1742 rc = walk_memory_regions_end(data, pa, prot);
1743 if (rc != 0) {
1744 return rc;
1745 }
1746 }
1747 }
1748 } else {
1749 void **pp = *lp;
1750
1751 for (i = 0; i < L2_SIZE; ++i) {
1752 pa = base | ((abi_ulong)i <<
1753 (TARGET_PAGE_BITS + L2_BITS * level));
1754 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1755 if (rc != 0) {
1756 return rc;
1757 }
1758 }
1759 }
1760
1761 return 0;
1762}
1763
1764int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1765{
1766 struct walk_memory_regions_data data;
1767 uintptr_t i;
1768
1769 data.fn = fn;
1770 data.priv = priv;
1771 data.start = -1ul;
1772 data.prot = 0;
1773
1774 for (i = 0; i < V_L1_SIZE; i++) {
1775 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
1776 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
1777
1778 if (rc != 0) {
1779 return rc;
1780 }
1781 }
1782
1783 return walk_memory_regions_end(&data, 0, 0);
1784}
1785
1786static int dump_region(void *priv, abi_ulong start,
1787 abi_ulong end, unsigned long prot)
1788{
1789 FILE *f = (FILE *)priv;
1790
1791 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
1792 " "TARGET_ABI_FMT_lx" %c%c%c\n",
1793 start, end, end - start,
1794 ((prot & PAGE_READ) ? 'r' : '-'),
1795 ((prot & PAGE_WRITE) ? 'w' : '-'),
1796 ((prot & PAGE_EXEC) ? 'x' : '-'));
1797
1798 return 0;
1799}
1800
1801/* dump memory mappings */
1802void page_dump(FILE *f)
1803{
1804 const int length = sizeof(abi_ulong) * 2;
1805 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
1806 length, "start", length, "end", length, "size", "prot");
1807 walk_memory_regions(f, dump_region);
1808}
1809
1810int page_get_flags(target_ulong address)
1811{
1812 PageDesc *p;
1813
1814 p = page_find(address >> TARGET_PAGE_BITS);
1815 if (!p) {
1816 return 0;
1817 }
1818 return p->flags;
1819}
1820
1821/* Modify the flags of a page and invalidate the code if necessary.
1822 The flag PAGE_WRITE_ORG is positioned automatically depending
1823 on PAGE_WRITE. The mmap_lock should already be held. */
1824void page_set_flags(target_ulong start, target_ulong end, int flags)
1825{
1826 target_ulong addr, len;
1827
1828 /* This function should never be called with addresses outside the
1829 guest address space. If this assert fires, it probably indicates
1830 a missing call to h2g_valid. */
1831#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1832 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1833#endif
1834 assert(start < end);
1835
1836 start = start & TARGET_PAGE_MASK;
1837 end = TARGET_PAGE_ALIGN(end);
1838
1839 if (flags & PAGE_WRITE) {
1840 flags |= PAGE_WRITE_ORG;
1841 }
1842
1843 for (addr = start, len = end - start;
1844 len != 0;
1845 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1846 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1847
1848 /* If the write protection bit is set, then we invalidate
1849 the code inside. */
1850 if (!(p->flags & PAGE_WRITE) &&
1851 (flags & PAGE_WRITE) &&
1852 p->first_tb) {
1853 tb_invalidate_phys_page(addr, 0, NULL, false);
1854 }
1855 p->flags = flags;
1856 }
1857}
1858
1859int page_check_range(target_ulong start, target_ulong len, int flags)
1860{
1861 PageDesc *p;
1862 target_ulong end;
1863 target_ulong addr;
1864
1865 /* This function should never be called with addresses outside the
1866 guest address space. If this assert fires, it probably indicates
1867 a missing call to h2g_valid. */
1868#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1869 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
David 'Digit' Turnerff9a2b82014-02-17 22:31:24 +01001870#endif
1871
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001872 if (len == 0) {
1873 return 0;
1874 }
1875 if (start + len - 1 < start) {
1876 /* We've wrapped around. */
1877 return -1;
1878 }
David 'Digit' Turner3dc53fc2014-01-17 01:23:40 +01001879
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001880 /* must do before we loose bits in the next step */
1881 end = TARGET_PAGE_ALIGN(start + len);
1882 start = start & TARGET_PAGE_MASK;
David 'Digit' Turner3dc53fc2014-01-17 01:23:40 +01001883
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001884 for (addr = start, len = end - start;
1885 len != 0;
1886 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1887 p = page_find(addr >> TARGET_PAGE_BITS);
1888 if (!p) {
1889 return -1;
1890 }
1891 if (!(p->flags & PAGE_VALID)) {
1892 return -1;
1893 }
1894
1895 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
1896 return -1;
1897 }
1898 if (flags & PAGE_WRITE) {
1899 if (!(p->flags & PAGE_WRITE_ORG)) {
1900 return -1;
1901 }
1902 /* unprotect the page if it was put read-only because it
1903 contains translated code */
1904 if (!(p->flags & PAGE_WRITE)) {
1905 if (!page_unprotect(addr, 0, NULL)) {
1906 return -1;
1907 }
1908 }
1909 return 0;
1910 }
1911 }
1912 return 0;
David 'Digit' Turner3dc53fc2014-01-17 01:23:40 +01001913}
David 'Digit' Turner3e0677d2014-03-07 15:01:06 +01001914
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        /* Unknown page: the fault was not caused by our write protection. */
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* A host page can cover several target pages; restore write
           access for the whole host page at once.  'prot' accumulates
           the union of their flags for the final mprotect(). */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        /* Apply the accumulated protection bits to the host page. */
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
1962#endif /* CONFIG_USER_ONLY */