Create TBContext inside of TCGContext

This matches upstream: the translation block globals (tbs, nb_tbs,
tb_phys_hash, tb_lock, tb_invalidated_flag and the flush/invalidate
statistics) move into a new TBContext struct embedded in TCGContext,
and all users now reach them through tcg_ctx.tb_ctx. To keep the
headers in line with upstream, get_phys_addr_code() is renamed to
get_page_addr_code(), cpu_set_debug_excp_handler() drops its unused
chaining return value, and the GETRA()/GETPC_ADJ helpers, the
phys_mem_set_alloc() declaration and the CONFIG_USER_ONLY
tlb_flush()/tlb_flush_page() stubs are brought over.
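
For reference, the net effect on callers (an illustrative sketch, not
part of the diff below):

    /* before: bare globals shared by exec.c and cpu-exec.c */
    spin_lock(&tb_lock);                 /* cpu-exec.c */
    tb = &tbs[nb_tbs++];                 /* exec.c, tb_alloc() */

    /* after: the same state hangs off the one global TCGContext */
    spin_lock(&tcg_ctx.tb_ctx.tb_lock);
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];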

Change-Id: I605dde69aab64dcec3a81875912a6e611ce0cf05
diff --git a/cpu-exec.c b/cpu-exec.c
index a3a13e9..0dafa44 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -136,11 +136,11 @@
-    tb_invalidated_flag = 0;
+    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
 
     /* find translated block using physical mappings */
-    phys_pc = get_phys_addr_code(env, pc);
+    phys_pc = get_page_addr_code(env, pc);
     phys_page1 = phys_pc & TARGET_PAGE_MASK;
     phys_page2 = -1;
     h = tb_phys_hash_func(phys_pc);
-    ptb1 = &tb_phys_hash[h];
+    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
     for(;;) {
         tb = *ptb1;
         if (!tb)
@@ -153,7 +153,7 @@
             if (tb->page_addr[1] != -1) {
                 virt_page2 = (pc & TARGET_PAGE_MASK) +
                     TARGET_PAGE_SIZE;
-                phys_page2 = get_phys_addr_code(env, virt_page2);
+                phys_page2 = get_page_addr_code(env, virt_page2);
                 if (tb->page_addr[1] == phys_page2)
                     goto found;
             } else {
@@ -170,8 +170,8 @@
     /* Move the last found TB to the head of the list */
     if (likely(*ptb1)) {
         *ptb1 = tb->phys_hash_next;
-        tb->phys_hash_next = tb_phys_hash[h];
-        tb_phys_hash[h] = tb;
+        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
+        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
     }
     /* we add the TB in the virtual pc hash table */
     env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
@@ -198,12 +198,9 @@
 
 static CPUDebugExcpHandler *debug_excp_handler;
 
-CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
+void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
 {
-    CPUDebugExcpHandler *old_handler = debug_excp_handler;
-
     debug_excp_handler = handler;
-    return old_handler;
 }
 
 static void cpu_handle_debug_exception(CPUOldState *env)
@@ -624,7 +621,7 @@
 #endif
                 }
 #endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
-                spin_lock(&tb_lock);
+                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                 tb = tb_find_fast();
                 /* Note: we do it here to avoid a gcc bug on Mac OS X when
                    doing it in tb_find_slow */
@@ -646,7 +643,7 @@
                 if (next_tb != 0 && tb->page_addr[1] == -1) {
                     tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                 }
-                spin_unlock(&tb_lock);
+                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
 
                 /* cpu_interrupt might be called while translating the
                    TB, but before it is linked into a potentially
@@ -745,7 +742,7 @@
        where NIP != read address on PowerPC */
 #if 0
     target_ulong phys_addr;
-    phys_addr = get_phys_addr_code(env, start);
+    phys_addr = get_page_addr_code(env, start);
     tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
 #endif
 }
diff --git a/exec.c b/exec.c
index d204134..e77490a 100644
--- a/exec.c
+++ b/exec.c
@@ -59,10 +59,7 @@
 
 #define SMC_BITMAP_USE_THRESHOLD 10
 
-static TranslationBlock *tbs;
 int code_gen_max_blocks;
-TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
-static int nb_tbs;
 /* any access to the tbs or the page table must use this lock */
 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
 
@@ -163,10 +160,6 @@
 int loglevel;
 static int log_append = 0;
 
-/* statistics */
-static int tb_flush_count;
-static int tb_phys_invalidate_count;
-
 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
 typedef struct subpage_t {
     hwaddr base;
@@ -468,7 +461,7 @@
     code_gen_buffer_max_size = code_gen_buffer_size -
         code_gen_max_block_size();
     code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
-    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
+    tcg_ctx.tb_ctx.tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
 }
 
 /* Must be called before using the QEMU cpus. 'tb_size' is the size
@@ -619,7 +612,7 @@
     if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
         cpu_abort(env1, "Internal error: code buffer overflow\n");
 
-    nb_tbs = 0;
+    tcg_ctx.tb_ctx.nb_tbs = 0;
 
     for(env = first_cpu; env != NULL; env = env->next_cpu) {
 #ifdef CONFIG_MEMCHECK
@@ -636,13 +629,13 @@
         memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
     }
 
-    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
+    memset (tcg_ctx.tb_ctx.tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
     page_flush_tb();
 
     code_gen_ptr = code_gen_buffer;
     /* XXX: flush processor icache at this point if cache flush is
        expensive */
-    tb_flush_count++;
+    tcg_ctx.tb_ctx.tb_flush_count++;
 }
 
 #ifdef DEBUG_TB_CHECK
@@ -762,7 +755,7 @@
     /* remove the TB from the hash list */
     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
     h = tb_phys_hash_func(phys_pc);
-    tb_remove(&tb_phys_hash[h], tb,
+    tb_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb,
               offsetof(TranslationBlock, phys_hash_next));
 
     /* remove the TB from the page list */
@@ -777,7 +770,7 @@
         invalidate_page_bitmap(p);
     }
 
-    tb_invalidated_flag = 1;
+    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
 
     /* remove the TB from the hash list */
     h = tb_jmp_cache_hash_func(tb->pc);
@@ -812,7 +805,7 @@
     }
 #endif  // CONFIG_MEMCHECK
 
-    tb_phys_invalidate_count++;
+    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
 }
 
 static inline void set_bits(uint8_t *tab, int start, int len)
@@ -879,7 +872,7 @@
     target_ulong phys_pc, phys_page2, virt_page2;
     int code_gen_size;
 
-    phys_pc = get_phys_addr_code(env, pc);
+    phys_pc = get_page_addr_code(env, pc);
     tb = tb_alloc(pc);
     if (!tb) {
         /* flush must be done */
@@ -887,7 +880,7 @@
         /* cannot fail at this point */
         tb = tb_alloc(pc);
         /* Don't forget to invalidate previous TB info.  */
-        tb_invalidated_flag = 1;
+        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
     }
     tc_ptr = code_gen_ptr;
     tb->tc_ptr = tc_ptr;
@@ -901,7 +894,7 @@
     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
     phys_page2 = -1;
     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
-        phys_page2 = get_phys_addr_code(env, virt_page2);
+        phys_page2 = get_page_addr_code(env, virt_page2);
     }
     tb_link_phys(tb, phys_pc, phys_page2);
     return tb;
@@ -1166,10 +1159,10 @@
 {
     TranslationBlock *tb;
 
-    if (nb_tbs >= code_gen_max_blocks ||
+    if (tcg_ctx.tb_ctx.nb_tbs >= code_gen_max_blocks ||
         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
         return NULL;
-    tb = &tbs[nb_tbs++];
+    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
     tb->pc = pc;
     tb->cflags = 0;
 #ifdef CONFIG_MEMCHECK
@@ -1184,9 +1177,9 @@
     /* In practice this is mostly used for single use temporary TB
        Ignore the hard cases and just back up if this TB happens to
        be the last one generated.  */
-    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
+    if (tcg_ctx.tb_ctx.nb_tbs > 0 && tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
         code_gen_ptr = tb->tc_ptr;
-        nb_tbs--;
+        tcg_ctx.tb_ctx.nb_tbs--;
     }
 }
 
@@ -1203,7 +1196,7 @@
     mmap_lock();
     /* add in the physical hash table */
     h = tb_phys_hash_func(phys_pc);
-    ptb = &tb_phys_hash[h];
+    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
     tb->phys_hash_next = *ptb;
     *ptb = tb;
 
@@ -1238,17 +1231,17 @@
     unsigned long v;
     TranslationBlock *tb;
 
-    if (nb_tbs <= 0)
+    if (tcg_ctx.tb_ctx.nb_tbs <= 0)
         return NULL;
     if (tc_ptr < (unsigned long)code_gen_buffer ||
         tc_ptr >= (unsigned long)code_gen_ptr)
         return NULL;
     /* binary search (cf Knuth) */
     m_min = 0;
-    m_max = nb_tbs - 1;
+    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
     while (m_min <= m_max) {
         m = (m_min + m_max) >> 1;
-        tb = &tbs[m];
+        tb = &tcg_ctx.tb_ctx.tbs[m];
         v = (unsigned long)tb->tc_ptr;
         if (v == tc_ptr)
             return tb;
@@ -1258,7 +1251,7 @@
             m_min = m + 1;
         }
     }
-    return &tbs[m_max];
+    return &tcg_ctx.tb_ctx.tbs[m_max];
 }
 
 static void tb_reset_jump_recursive(TranslationBlock *tb);
@@ -3857,8 +3850,8 @@
     cross_page = 0;
     direct_jmp_count = 0;
     direct_jmp2_count = 0;
-    for(i = 0; i < nb_tbs; i++) {
-        tb = &tbs[i];
+    for(i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
+        tb = &tcg_ctx.tb_ctx.tbs[i];
         target_code_size += tb->size;
         if (tb->size > max_target_code_size)
             max_target_code_size = tb->size;
@@ -3876,24 +3869,24 @@
     cpu_fprintf(f, "gen code size       %td/%ld\n",
                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
     cpu_fprintf(f, "TB count            %d/%d\n",
-                nb_tbs, code_gen_max_blocks);
+                tcg_ctx.tb_ctx.nb_tbs, code_gen_max_blocks);
     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
-                nb_tbs ? target_code_size / nb_tbs : 0,
+                tcg_ctx.tb_ctx.nb_tbs ? target_code_size / tcg_ctx.tb_ctx.nb_tbs : 0,
                 max_target_code_size);
     cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
-                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
+                tcg_ctx.tb_ctx.nb_tbs ? (code_gen_ptr - code_gen_buffer) / tcg_ctx.tb_ctx.nb_tbs : 0,
                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
             cross_page,
-            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
+            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) / tcg_ctx.tb_ctx.nb_tbs : 0);
     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                 direct_jmp_count,
-                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
+                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) / tcg_ctx.tb_ctx.nb_tbs : 0,
                 direct_jmp2_count,
-                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
+                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) / tcg_ctx.tb_ctx.nb_tbs : 0);
     cpu_fprintf(f, "\nStatistics:\n");
-    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
-    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
+    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
+    cpu_fprintf(f, "TB invalidate count %d\n", tcg_ctx.tb_ctx.tb_phys_invalidate_count);
     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
     tcg_dump_info(f, cpu_fprintf);
 }
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index c1a1839..674581e 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -92,6 +92,7 @@
 void tb_invalidate_phys_page_range(hwaddr start, hwaddr end,
                                    int is_cpu_write_access);
 void tb_invalidate_page_range(target_ulong start, target_ulong end);
+#if !defined(CONFIG_USER_ONLY)
 void tlb_flush_page(CPUArchState *env, target_ulong addr);
 void tlb_flush(CPUArchState *env, int flush_global);
 int tlb_set_page_exec(CPUArchState *env, target_ulong vaddr,
@@ -100,6 +101,15 @@
 int tlb_set_page(CPUArchState *env1, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, int is_softmmu);
+#else
+static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
+{
+}
+
+static inline void tlb_flush(CPUArchState *env, int flush_global)
+{
+}
+#endif
 
 typedef struct PhysPageDesc {
     /* offset in host memory of the page + io_index in the low bits */
@@ -181,6 +191,25 @@
 #endif  // CONFIG_MEMCHECK
 };
 
+#include "exec/spinlock.h"
+
+typedef struct TBContext TBContext;
+
+struct TBContext {
+
+    TranslationBlock *tbs;          /* array of all translated blocks */
+    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; /* by phys PC */
+    int nb_tbs;                     /* entries of 'tbs' currently in use */
+    /* any access to the tbs or the page table must use this lock */
+    spinlock_t tb_lock;
+
+    /* statistics */
+    int tb_flush_count;
+    int tb_phys_invalidate_count;
+
+    int tb_invalidated_flag;        /* set on TB invalidate/flush */
+};
+
 static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
 {
     target_ulong tmp;
@@ -262,7 +291,6 @@
                   target_ulong phys_pc, target_ulong phys_page2);
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
 
-extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
 extern uint8_t *code_gen_ptr;
 extern int code_gen_max_blocks;
 
@@ -353,20 +381,39 @@
     }
 }
 
+/* GETRA is the true target of the return instruction that we'll execute,
+   defined here for simplicity of defining the follow-up macros.  */
+#if defined(CONFIG_TCG_INTERPRETER)
+extern uintptr_t tci_tb_ptr;
+# define GETRA() tci_tb_ptr
+#else
+# define GETRA() \
+    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
+#endif
+
+/* The true return address will often point to a host insn that is part of
+   the next translated guest insn.  Adjust the address backward to point to
+   the middle of the call insn.  Subtracting one would do the job except for
+   several compressed mode architectures (arm, mips) which set the low bit
+   to indicate the compressed mode; subtracting two works around that.  It
+   is also the case that there are no host isas that contain a call insn
+   smaller than 4 bytes, so we don't worry about special-casing this.  */
+#if defined(CONFIG_TCG_INTERPRETER)
+# define GETPC_ADJ   0
+#else
+# define GETPC_ADJ   2
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+
+void phys_mem_set_alloc(void *(*alloc)(size_t));
+
 TranslationBlock *tb_find_pc(unsigned long pc_ptr);
 
 extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
 extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
 extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
 
-#include "exec/spinlock.h"
-
-extern spinlock_t tb_lock;
-
-extern int tb_invalidated_flag;
-
-#if !defined(CONFIG_USER_ONLY)
-
 void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
               void *retaddr);
 
@@ -395,7 +442,7 @@
 #endif
 
 #if defined(CONFIG_USER_ONLY)
-static inline target_ulong get_phys_addr_code(CPUArchState *env1, target_ulong addr)
+static inline target_ulong get_page_addr_code(CPUArchState *env1, target_ulong addr)
 {
     return addr;
 }
@@ -403,7 +450,7 @@
 /* NOTE: this function can trigger an exception */
 /* NOTE2: the returned address is not exactly the physical address: it
    is the offset relative to phys_ram_base */
-static inline target_ulong get_phys_addr_code(CPUArchState *env1, target_ulong addr)
+static inline target_ulong get_page_addr_code(CPUArchState *env1, target_ulong addr)
 {
     int mmu_idx, page_index, pd;
     void *p;
@@ -430,7 +477,7 @@
 
 typedef void (CPUDebugExcpHandler)(CPUArchState *env);
 
-CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
+void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
 
 /* vl.c */
 extern int singlestep;
diff --git a/target-arm/translate-android.h b/target-arm/translate-android.h
index 61e934d..5479920 100644
--- a/target-arm/translate-android.h
+++ b/target-arm/translate-android.h
@@ -135,9 +135,9 @@
     if ((0x90000000 <= addr && addr <= 0xBFFFFFFF)) {
         /* Address belongs to a module that always loads at this fixed address.
          * So, we can keep this address in the global array. */
-        ret = addrarray_add(&ret_addresses, get_phys_addr_code(env, addr));
+        ret = addrarray_add(&ret_addresses, get_page_addr_code(env, addr));
     } else {
-        ret = addrarray_add(&ret_addresses, get_phys_addr_code(env, addr));
+        ret = addrarray_add(&ret_addresses, get_page_addr_code(env, addr));
     }
     assert(ret != 0);
 
@@ -153,10 +153,10 @@
          * code contains it. This inconsistency will lead to an immanent
          * segmentation fault.*/
         TranslationBlock* tb;
-        const target_ulong phys_pc = get_phys_addr_code(env, addr);
+        const target_ulong phys_pc = get_page_addr_code(env, addr);
         const target_ulong phys_page1 = phys_pc & TARGET_PAGE_MASK;
 
-        for(tb = tb_phys_hash[tb_phys_hash_func(phys_pc)]; tb != NULL;
+        for(tb = tcg_ctx.tb_ctx.tb_phys_hash[tb_phys_hash_func(phys_pc)]; tb != NULL;
             tb = tb->phys_hash_next) {
             if (tb->pc == addr && tb->page_addr[0] == phys_page1) {
                 tb_phys_invalidate(tb, -1);
@@ -175,9 +175,9 @@
 is_ret_address(CPUARMState* env, target_ulong addr)
 {
     if ((0x90000000 <= addr && addr <= 0xBFFFFFFF)) {
-        return addrarray_check(&ret_addresses, get_phys_addr_code(env, addr));
+        return addrarray_check(&ret_addresses, get_page_addr_code(env, addr));
     } else {
-        return addrarray_check(&ret_addresses, get_phys_addr_code(env, addr));
+        return addrarray_check(&ret_addresses, get_page_addr_code(env, addr));
     }
 }
 
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 1b96133..f239099 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -1427,8 +1427,6 @@
     return hit_enabled;
 }
 
-static CPUDebugExcpHandler *prev_debug_excp_handler;
-
 void raise_exception(int exception_index);
 
 static void breakpoint_handler(CPUX86State *env)
@@ -1453,8 +1451,6 @@
                 break;
             }
     }
-    if (prev_debug_excp_handler)
-        prev_debug_excp_handler(env);
 }
 
 
@@ -1777,8 +1773,7 @@
         inited = 1;
         optimize_flags_init();
 #ifndef CONFIG_USER_ONLY
-        prev_debug_excp_handler =
-            cpu_set_debug_excp_handler(breakpoint_handler);
+        cpu_set_debug_excp_handler(breakpoint_handler);
 #endif
     }
     if (cpu_x86_register(env, cpu_model) < 0) {
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 3598e96..2a866a8 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -327,6 +327,8 @@
 #ifdef CONFIG_DEBUG_TCG
     int temps_in_use;
 #endif
+
+    TBContext tb_ctx;
 };
 
 extern TCGContext tcg_ctx;