Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/atomic_common.c.inc | 107
-rw-r--r--  accel/tcg/atomic_template.h   | 141
-rw-r--r--  accel/tcg/cpu-exec.c          | 207
-rw-r--r--  accel/tcg/cputlb.c            |  49
-rw-r--r--  accel/tcg/tcg-runtime.h       |  46
-rw-r--r--  accel/tcg/translate-all.c     |   7
-rw-r--r--  accel/tcg/translator.c        |  39
-rw-r--r--  accel/tcg/user-exec.c         |  41
8 files changed, 347 insertions(+), 290 deletions(-)
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
index 344525b0bb..6c0339f610 100644
--- a/accel/tcg/atomic_common.c.inc
+++ b/accel/tcg/atomic_common.c.inc
@@ -13,42 +13,125 @@
  * See the COPYING file in the top-level directory.
  */
 
-static inline
-void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr, uint16_t info)
+static uint16_t atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
+                                     TCGMemOpIdx oi)
 {
     CPUState *cpu = env_cpu(env);
+    uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
 
     trace_guest_mem_before_exec(cpu, addr, info);
     trace_guest_mem_before_exec(cpu, addr, info | TRACE_MEM_ST);
+
+    return info;
 }
 
-static inline void
-atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, uint16_t info)
+static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
+                                  uint16_t info)
 {
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST);
 }
 
-static inline
-void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr, uint16_t info)
+#if HAVE_ATOMIC128
+static uint16_t atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
+                                    TCGMemOpIdx oi)
 {
+    uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
+
     trace_guest_mem_before_exec(env_cpu(env), addr, info);
+
+    return info;
 }
 
-static inline
-void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, uint16_t info)
+static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
+                                 uint16_t info)
 {
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
 }
 
-static inline
-void atomic_trace_st_pre(CPUArchState *env, target_ulong addr, uint16_t info)
+static uint16_t atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
+                                    TCGMemOpIdx oi)
 {
+    uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), true);
+
     trace_guest_mem_before_exec(env_cpu(env), addr, info);
+
+    return info;
 }
 
-static inline
-void atomic_trace_st_post(CPUArchState *env, target_ulong addr, uint16_t info)
+static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
+                                 uint16_t info)
 {
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
 }
+#endif
+
+/*
+ * Atomic helpers callable from TCG.
+ * These have a common interface and all defer to cpu_atomic_*
+ * using the host return address from GETPC().
+ */
+
+#define CMPXCHG_HELPER(OP, TYPE)                                         \
+    TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr,       \
+                             TYPE oldv, TYPE newv, uint32_t oi)          \
+    { return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }
+
+CMPXCHG_HELPER(cmpxchgb, uint32_t)
+CMPXCHG_HELPER(cmpxchgw_be, uint32_t)
+CMPXCHG_HELPER(cmpxchgw_le, uint32_t)
+CMPXCHG_HELPER(cmpxchgl_be, uint32_t)
+CMPXCHG_HELPER(cmpxchgl_le, uint32_t)
+
+#ifdef CONFIG_ATOMIC64
+CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
+CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
+#endif
+
+#undef CMPXCHG_HELPER
+
+#define ATOMIC_HELPER(OP, TYPE)                                            \
+    TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr,    \
+                                  TYPE val, uint32_t oi)                   \
+    { return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }
+
+#ifdef CONFIG_ATOMIC64
+#define GEN_ATOMIC_HELPERS(OP)              \
+    ATOMIC_HELPER(glue(OP,b), uint32_t)     \
+    ATOMIC_HELPER(glue(OP,w_be), uint32_t)  \
+    ATOMIC_HELPER(glue(OP,w_le), uint32_t)  \
+    ATOMIC_HELPER(glue(OP,l_be), uint32_t)  \
+    ATOMIC_HELPER(glue(OP,l_le), uint32_t)  \
+    ATOMIC_HELPER(glue(OP,q_be), uint64_t)  \
+    ATOMIC_HELPER(glue(OP,q_le), uint64_t)
+#else
+#define GEN_ATOMIC_HELPERS(OP)              \
+    ATOMIC_HELPER(glue(OP,b), uint32_t)     \
+    ATOMIC_HELPER(glue(OP,w_be), uint32_t)  \
+    ATOMIC_HELPER(glue(OP,w_le), uint32_t)  \
+    ATOMIC_HELPER(glue(OP,l_be), uint32_t)  \
+    ATOMIC_HELPER(glue(OP,l_le), uint32_t)
+#endif
+
+GEN_ATOMIC_HELPERS(fetch_add)
+GEN_ATOMIC_HELPERS(fetch_and)
+GEN_ATOMIC_HELPERS(fetch_or)
+GEN_ATOMIC_HELPERS(fetch_xor)
+GEN_ATOMIC_HELPERS(fetch_smin)
+GEN_ATOMIC_HELPERS(fetch_umin)
+GEN_ATOMIC_HELPERS(fetch_smax)
+GEN_ATOMIC_HELPERS(fetch_umax)
+
+GEN_ATOMIC_HELPERS(add_fetch)
+GEN_ATOMIC_HELPERS(and_fetch)
+GEN_ATOMIC_HELPERS(or_fetch)
+GEN_ATOMIC_HELPERS(xor_fetch)
+GEN_ATOMIC_HELPERS(smin_fetch)
+GEN_ATOMIC_HELPERS(umin_fetch)
+GEN_ATOMIC_HELPERS(smax_fetch)
+GEN_ATOMIC_HELPERS(umax_fetch)
+
+GEN_ATOMIC_HELPERS(xchg)
+
+#undef ATOMIC_HELPER
+#undef GEN_ATOMIC_HELPERS
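(Commentary, not part of the patch: the TCG-facing entry points above are now pure trampolines. As a rough sketch, one CMPXCHG_HELPER expansion -- here cmpxchgl_le; the helper_ prefix comes from QEMU's HELPER() macro -- preprocesses to approximately:

    uint32_t helper_atomic_cmpxchgl_le(CPUArchState *env, target_ulong addr,
                                       uint32_t oldv, uint32_t newv,
                                       uint32_t oi)
    {
        /* GETPC() records the return address into generated code, so a
         * faulting access can unwind to the TCG call site. */
        return cpu_atomic_cmpxchgl_le_mmu(env, addr, oldv, newv, oi, GETPC());
    }

This is why the trace pre/post functions no longer take a precomputed info word: each cpu_atomic_*_mmu body derives it from the TCGMemOpIdx itself.)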
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index afa8a9daf3..d89af4cc1e 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -28,8 +28,8 @@
 # define SHIFT      4
 #elif DATA_SIZE == 8
 # define SUFFIX     q
-# define DATA_TYPE  uint64_t
-# define SDATA_TYPE int64_t
+# define DATA_TYPE  aligned_uint64_t
+# define SDATA_TYPE aligned_int64_t
 # define BSWAP      bswap64
 # define SHIFT      3
 #elif DATA_SIZE == 4
@@ -71,15 +71,14 @@
 #endif
 
 ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
-                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
+                              ABI_TYPE cmpv, ABI_TYPE newv,
+                              TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_READ | PAGE_WRITE, retaddr);
     DATA_TYPE ret;
-    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
-                                         ATOMIC_MMU_IDX);
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
 
-    atomic_trace_rmw_pre(env, addr, info);
 #if DATA_SIZE == 16
     ret = atomic16_cmpxchg(haddr, cmpv, newv);
 #else
@@ -92,45 +91,41 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 
 #if DATA_SIZE >= 16
 #if HAVE_ATOMIC128
-ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
+ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
+                         TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
-    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
-                                         ATOMIC_MMU_IDX);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_READ, retaddr);
+    DATA_TYPE val;
+    uint16_t info = atomic_trace_ld_pre(env, addr, oi);
 
-    atomic_trace_ld_pre(env, addr, info);
     val = atomic16_read(haddr);
     ATOMIC_MMU_CLEANUP;
     atomic_trace_ld_post(env, addr, info);
     return val;
 }
 
-void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
-                     ABI_TYPE val EXTRA_ARGS)
+void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+                     TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
-    uint16_t info = trace_mem_build_info(SHIFT, false, 0, true,
-                                         ATOMIC_MMU_IDX);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_WRITE, retaddr);
+    uint16_t info = atomic_trace_st_pre(env, addr, oi);
 
-    atomic_trace_st_pre(env, addr, info);
     atomic16_set(haddr, val);
     ATOMIC_MMU_CLEANUP;
     atomic_trace_st_post(env, addr, info);
 }
 #endif
 #else
-ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
-                           ABI_TYPE val EXTRA_ARGS)
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+                           TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_READ | PAGE_WRITE, retaddr);
     DATA_TYPE ret;
-    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
-                                         ATOMIC_MMU_IDX);
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
 
-    atomic_trace_rmw_pre(env, addr, info);
     ret = qatomic_xchg__nocheck(haddr, val);
     ATOMIC_MMU_CLEANUP;
     atomic_trace_rmw_post(env, addr, info);
@@ -139,14 +134,12 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
 
 #define GEN_ATOMIC_HELPER(X)                                        \
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
-                        ABI_TYPE val EXTRA_ARGS)                    \
+                        ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
 {                                                                   \
-    ATOMIC_MMU_DECLS;                                               \
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                        \
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,  \
+                                         PAGE_READ | PAGE_WRITE, retaddr); \
     DATA_TYPE ret;                                                  \
-    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,    \
-                                         ATOMIC_MMU_IDX);           \
-    atomic_trace_rmw_pre(env, addr, info);                          \
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);            \
     ret = qatomic_##X(haddr, val);                                  \
     ATOMIC_MMU_CLEANUP;                                             \
     atomic_trace_rmw_post(env, addr, info);                         \
@@ -164,7 +157,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
 
 #undef GEN_ATOMIC_HELPER
 
-/* These helpers are, as a whole, full barriers.  Within the helper,
+/*
+ * These helpers are, as a whole, full barriers.  Within the helper,
  * the leading barrier is explicit and the trailing barrier is within
  * cmpxchg primitive.
  *
@@ -173,14 +167,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
  */
 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
-                        ABI_TYPE xval EXTRA_ARGS)                   \
+                        ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
 {                                                                   \
-    ATOMIC_MMU_DECLS;                                               \
-    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                       \
+    XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+                                          PAGE_READ | PAGE_WRITE, retaddr); \
     XDATA_TYPE cmp, old, new, val = xval;                           \
-    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,    \
-                                         ATOMIC_MMU_IDX);           \
-    atomic_trace_rmw_pre(env, addr, info);                          \
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);            \
    smp_mb();                                                        \
     cmp = qatomic_read__nocheck(haddr);                             \
     do {                                                            \
@@ -218,15 +210,14 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
 #endif
 
 ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
-                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
+                              ABI_TYPE cmpv, ABI_TYPE newv,
+                              TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_READ | PAGE_WRITE, retaddr);
     DATA_TYPE ret;
-    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
-                                         ATOMIC_MMU_IDX);
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
 
-    atomic_trace_rmw_pre(env, addr, info);
 #if DATA_SIZE == 16
     ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
 #else
@@ -239,30 +230,28 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 
 #if DATA_SIZE >= 16
 #if HAVE_ATOMIC128
-ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
+ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
+                         TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
-    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
-                                         ATOMIC_MMU_IDX);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_READ, retaddr);
+    DATA_TYPE val;
+    uint16_t info = atomic_trace_ld_pre(env, addr, oi);
 
-    atomic_trace_ld_pre(env, addr, info);
     val = atomic16_read(haddr);
     ATOMIC_MMU_CLEANUP;
     atomic_trace_ld_post(env, addr, info);
     return BSWAP(val);
 }
 
-void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
-                     ABI_TYPE val EXTRA_ARGS)
+void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+                     TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
-    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true,
-                                         ATOMIC_MMU_IDX);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_WRITE, retaddr);
+    uint16_t info = atomic_trace_st_pre(env, addr, oi);
 
+    val = BSWAP(val);
-    atomic_trace_st_pre(env, addr, info);
-    val = BSWAP(val);
     atomic16_set(haddr, val);
     ATOMIC_MMU_CLEANUP;
@@ -270,16 +259,14 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
 }
 #endif
 #else
-ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
-                           ABI_TYPE val EXTRA_ARGS)
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+                           TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_READ | PAGE_WRITE, retaddr);
     ABI_TYPE ret;
-    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
-                                         ATOMIC_MMU_IDX);
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
 
-    atomic_trace_rmw_pre(env, addr, info);
     ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
     ATOMIC_MMU_CLEANUP;
     atomic_trace_rmw_post(env, addr, info);
@@ -288,14 +275,12 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
 
 #define GEN_ATOMIC_HELPER(X)                                        \
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
-                        ABI_TYPE val EXTRA_ARGS)                    \
+                        ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
 {                                                                   \
-    ATOMIC_MMU_DECLS;                                               \
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                        \
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,  \
+                                         PAGE_READ | PAGE_WRITE, retaddr); \
     DATA_TYPE ret;                                                  \
-    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,    \
-                                         false, ATOMIC_MMU_IDX);    \
-    atomic_trace_rmw_pre(env, addr, info);                          \
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);            \
     ret = qatomic_##X(haddr, BSWAP(val));                           \
     ATOMIC_MMU_CLEANUP;                                             \
     atomic_trace_rmw_post(env, addr, info);                         \
@@ -320,14 +305,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
  */
 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
-                        ABI_TYPE xval EXTRA_ARGS)                   \
+                        ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
 {                                                                   \
-    ATOMIC_MMU_DECLS;                                               \
-    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                       \
+    XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+                                          PAGE_READ | PAGE_WRITE, retaddr); \
     XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
-    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,    \
-                                         false, ATOMIC_MMU_IDX);    \
-    atomic_trace_rmw_pre(env, addr, info);                          \
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);            \
     smp_mb();                                                       \
     ldn = qatomic_read__nocheck(haddr);                             \
     do {                                                            \
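(Commentary, not part of the patch: atomic_template.h is included once per DATA_SIZE, so each definition above is a template. Assuming the usual expansion -- qatomic_cmpxchg__nocheck is the non-16-byte branch elided from the first hunk, and ATOMIC_MMU_CLEANUP is empty under softmmu -- the DATA_SIZE == 4 little-endian cmpxchg instantiates to approximately:

    uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                        uint32_t cmpv, uint32_t newv,
                                        TCGMemOpIdx oi, uintptr_t retaddr)
    {
        /* Check alignment/permissions and translate to a host pointer;
         * may longjmp out via the provided retaddr on a fault. */
        uint32_t *haddr = atomic_mmu_lookup(env, addr, oi, 4,
                                            PAGE_READ | PAGE_WRITE, retaddr);
        uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
        uint32_t ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);

        atomic_trace_rmw_post(env, addr, info);
        return ret;
    }
)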
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index e22bcb99f7..fc895cf51e 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -145,6 +145,28 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
 }
 #endif /* CONFIG USER ONLY */
 
+uint32_t curr_cflags(CPUState *cpu)
+{
+    uint32_t cflags = cpu->tcg_cflags;
+
+    /*
+     * Record gdb single-step.  We should be exiting the TB by raising
+     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
+     *
+     * For singlestep and -d nochain, suppress goto_tb so that
+     * we can log -d cpu,exec after every TB.
+     */
+    if (unlikely(cpu->singlestep_enabled)) {
+        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
+    } else if (singlestep) {
+        cflags |= CF_NO_GOTO_TB | 1;
+    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+        cflags |= CF_NO_GOTO_TB;
+    }
+
+    return cflags;
+}
+
 /* Might cause an exception, so have a longjmp destination ready */
 static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
                                           target_ulong cs_base,
@@ -205,6 +227,76 @@ static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
     }
 }
 
+static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
+                                  uint32_t *cflags)
+{
+    CPUBreakpoint *bp;
+    bool match_page = false;
+
+    if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
+        return false;
+    }
+
+    /*
+     * Singlestep overrides breakpoints.
+     * This requirement is visible in the record-replay tests, where
+     * we would fail to make forward progress in reverse-continue.
+     *
+     * TODO: gdb singlestep should only override gdb breakpoints,
+     * so that one could (gdb) singlestep into the guest kernel's
+     * architectural breakpoint handler.
+     */
+    if (cpu->singlestep_enabled) {
+        return false;
+    }
+
+    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
+        /*
+         * If we have an exact pc match, trigger the breakpoint.
+         * Otherwise, note matches within the page.
+         */
+        if (pc == bp->pc) {
+            bool match_bp = false;
+
+            if (bp->flags & BP_GDB) {
+                match_bp = true;
+            } else if (bp->flags & BP_CPU) {
+#ifdef CONFIG_USER_ONLY
+                g_assert_not_reached();
+#else
+                CPUClass *cc = CPU_GET_CLASS(cpu);
+                assert(cc->tcg_ops->debug_check_breakpoint);
+                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
+#endif
+            }
+
+            if (match_bp) {
+                cpu->exception_index = EXCP_DEBUG;
+                return true;
+            }
+        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
+            match_page = true;
+        }
+    }
+
+    /*
+     * Within the same page as a breakpoint, single-step,
+     * returning to helper_lookup_tb_ptr after each insn looking
+     * for the actual breakpoint.
+     *
+     * TODO: Perhaps better to record all of the TBs associated
+     * with a given virtual page that contains a breakpoint, and
+     * then invalidate them when a new overlapping breakpoint is
+     * set on the page.  Non-overlapping TBs would not be
+     * invalidated, nor would any TB need to be invalidated as
+     * breakpoints are removed.
+     */
+    if (match_page) {
+        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
+    }
+    return false;
+}
+
 /**
  * helper_lookup_tb_ptr: quick check for next tb
  * @env: current cpu state
@@ -218,11 +310,16 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
     CPUState *cpu = env_cpu(env);
     TranslationBlock *tb;
     target_ulong cs_base, pc;
-    uint32_t flags;
+    uint32_t flags, cflags;
 
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 
-    tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
+    cflags = curr_cflags(cpu);
+    if (check_for_breakpoints(cpu, pc, &cflags)) {
+        cpu_loop_exit(cpu);
+    }
+
+    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
     if (tb == NULL) {
         return tcg_code_gen_epilogue;
     }
@@ -313,8 +410,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
     TranslationBlock *tb;
     target_ulong cs_base, pc;
-    uint32_t flags;
-    uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
+    uint32_t flags, cflags;
     int tb_exit;
 
     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -324,8 +420,20 @@ void cpu_exec_step_atomic(CPUState *cpu)
         cpu->running = true;
 
         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
 
+        cflags = curr_cflags(cpu);
+        /* Execute in a serial context. */
+        cflags &= ~CF_PARALLEL;
+        /* After 1 insn, return and release the exclusive lock. */
+        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
+        /*
+         * No need to check_for_breakpoints here.
+         * We only arrive in cpu_exec_step_atomic after beginning execution
+         * of an insn that includes an atomic operation we can't handle.
+         * Any breakpoint for this insn will have been recognized earlier.
+         */
+
+        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
         if (tb == NULL) {
             mmap_lock();
             tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
@@ -478,41 +586,6 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
     return;
 }
 
-static inline TranslationBlock *tb_find(CPUState *cpu,
-                                        TranslationBlock *last_tb,
-                                        int tb_exit, uint32_t cflags)
-{
-    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
-    TranslationBlock *tb;
-    target_ulong cs_base, pc;
-    uint32_t flags;
-
-    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-
-    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
-    if (tb == NULL) {
-        mmap_lock();
-        tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
-        mmap_unlock();
-        /* We add the TB in the virtual pc hash table for the fast lookup */
-        qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
-    }
-#ifndef CONFIG_USER_ONLY
-    /* We don't take care of direct jumps when address mapping changes in
-     * system emulation. So it's not safe to make a direct jump to a TB
-     * spanning two pages because the mapping for the second page can change.
-     */
-    if (tb->page_addr[1] != -1) {
-        last_tb = NULL;
-    }
-#endif
-    /* See if we can patch the calling TB. */
-    if (last_tb) {
-        tb_add_jump(last_tb, tb_exit, tb);
-    }
-    return tb;
-}
-
 static inline bool cpu_handle_halt(CPUState *cpu)
 {
     if (cpu->halted) {
@@ -846,22 +919,60 @@ int cpu_exec(CPUState *cpu)
         int tb_exit = 0;
 
         while (!cpu_handle_interrupt(cpu, &last_tb)) {
-            uint32_t cflags = cpu->cflags_next_tb;
             TranslationBlock *tb;
-
-            /* When requested, use an exact setting for cflags for the next
-               execution.  This is used for icount, precise smc, and stop-
-               after-access watchpoints.  Since this request should never
-               have CF_INVALID set, -1 is a convenient invalid value that
-               does not require tcg headers for cpu_common_reset.  */
+            target_ulong cs_base, pc;
+            uint32_t flags, cflags;
+
+            cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+
+            /*
+             * When requested, use an exact setting for cflags for the next
+             * execution.  This is used for icount, precise smc, and stop-
+             * after-access watchpoints.  Since this request should never
+             * have CF_INVALID set, -1 is a convenient invalid value that
+             * does not require tcg headers for cpu_common_reset.
+             */
+            cflags = cpu->cflags_next_tb;
             if (cflags == -1) {
                 cflags = curr_cflags(cpu);
             } else {
                 cpu->cflags_next_tb = -1;
             }
 
-            tb = tb_find(cpu, last_tb, tb_exit, cflags);
+            if (check_for_breakpoints(cpu, pc, &cflags)) {
+                break;
+            }
+
+            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+            if (tb == NULL) {
+                mmap_lock();
+                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+                mmap_unlock();
+                /*
+                 * We add the TB in the virtual pc hash table
+                 * for the fast lookup
+                 */
+                qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
+            }
+
+#ifndef CONFIG_USER_ONLY
+            /*
+             * We don't take care of direct jumps when address mapping
+             * changes in system emulation.  So it's not safe to make a
+             * direct jump to a TB spanning two pages because the mapping
+             * for the second page can change.
+             */
+            if (tb->page_addr[1] != -1) {
+                last_tb = NULL;
+            }
+#endif
+            /* See if we can patch the calling TB. */
+            if (last_tb) {
+                tb_add_jump(last_tb, tb_exit, tb);
+            }
+
             cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
+
             /* Try to align the host and virtual clocks if the guest is in advance */
             align_clocks(&sc, cpu);
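(Commentary, not part of the patch: the low CF_COUNT_MASK bits of cflags carry the instruction budget for the next TB, which is how both check_for_breakpoints and cpu_exec_step_atomic force single-instruction TBs by or-ing in 1. A sketch of the decoding convention -- cflags_insn_budget is an invented name; the real logic lives in tb_gen_code, changed in translate-all.c below:

    static inline int cflags_insn_budget(uint32_t cflags)
    {
        int n = cflags & CF_COUNT_MASK;   /* 0 means "no explicit limit" */
        return n == 0 ? TCG_MAX_INSNS : n;
    }
)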
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index b4e15b6aad..b1e5471f94 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2686,19 +2686,14 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
     cpu_stq_le_data_ra(env, ptr, val, 0);
 }
 
-/* First set of helpers allows passing in of OI and RETADDR.  This makes
-   them callable from other helpers.  */
+/*
+ * First set of functions passes in OI and RETADDR.
+ * This makes them callable from other helpers.
+ */
 
-#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
 #define ATOMIC_NAME(X) \
-    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_DECLS
-#define ATOMIC_MMU_LOOKUP_RW \
-    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr)
-#define ATOMIC_MMU_LOOKUP_R \
-    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr)
-#define ATOMIC_MMU_LOOKUP_W \
-    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr)
+    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
+
 #define ATOMIC_MMU_CLEANUP
 #define ATOMIC_MMU_IDX   get_mmuidx(oi)
@@ -2723,38 +2718,6 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
 #include "atomic_template.h"
 #endif
 
-/* Second set of helpers are directly callable from TCG as helpers.  */
-
-#undef EXTRA_ARGS
-#undef ATOMIC_NAME
-#undef ATOMIC_MMU_LOOKUP_RW
-#undef ATOMIC_MMU_LOOKUP_R
-#undef ATOMIC_MMU_LOOKUP_W
-
-#define EXTRA_ARGS , TCGMemOpIdx oi
-#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define ATOMIC_MMU_LOOKUP_RW \
-    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, GETPC())
-#define ATOMIC_MMU_LOOKUP_R \
-    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, GETPC())
-#define ATOMIC_MMU_LOOKUP_W \
-    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, GETPC())
-
-#define DATA_SIZE 1
-#include "atomic_template.h"
-
-#define DATA_SIZE 2
-#include "atomic_template.h"
-
-#define DATA_SIZE 4
-#include "atomic_template.h"
-
-#ifdef CONFIG_ATOMIC64
-#define DATA_SIZE 8
-#include "atomic_template.h"
-#endif
-#undef ATOMIC_MMU_IDX
-
 /* Code access functions.  */
 
 static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
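(Commentary, not part of the patch: with the duplicate GETPC()-based instantiation gone, ATOMIC_NAME now yields an ordinary function symbol rather than a HELPER() name. A sketch of how a call site in another helper changes -- the surrounding variables are assumed:

    /* Before: call the TCG helper symbol generated by HELPER(). */
    ret = helper_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, ra);

    /* After: call the renamed cpu_atomic_* entry point directly; the
     * helper_atomic_* trampolines now live in atomic_common.c.inc. */
    ret = cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, ra);
)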
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
index 91a5b7e85f..37cbd722bf 100644
--- a/accel/tcg/tcg-runtime.h
+++ b/accel/tcg/tcg-runtime.h
@@ -39,8 +39,6 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
 DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr)
 #endif /* IN_HELPER_PROTO */
 
-#ifdef CONFIG_SOFTMMU
-
 DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
                    i32, env, tl, i32, i32, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
@@ -88,50 +86,6 @@ DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
                    TCG_CALL_NO_WG, i32, env, tl, i32, i32)
 #endif /* CONFIG_ATOMIC64 */
 
-#else
-
-DEF_HELPER_FLAGS_4(atomic_cmpxchgb, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
-DEF_HELPER_FLAGS_4(atomic_cmpxchgw_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
-DEF_HELPER_FLAGS_4(atomic_cmpxchgw_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
-DEF_HELPER_FLAGS_4(atomic_cmpxchgl_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
-DEF_HELPER_FLAGS_4(atomic_cmpxchgl_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
-#ifdef CONFIG_ATOMIC64
-DEF_HELPER_FLAGS_4(atomic_cmpxchgq_be, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
-DEF_HELPER_FLAGS_4(atomic_cmpxchgq_le, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
-#endif
-
-#ifdef CONFIG_ATOMIC64
-#define GEN_ATOMIC_HELPERS(NAME)                              \
-    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b),          \
-                       TCG_CALL_NO_WG, i32, env, tl, i32)     \
-    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le),       \
-                       TCG_CALL_NO_WG, i32, env, tl, i32)     \
-    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be),       \
-                       TCG_CALL_NO_WG, i32, env, tl, i32)     \
-    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le),       \
-                       TCG_CALL_NO_WG, i32, env, tl, i32)     \
-    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be),       \
-                       TCG_CALL_NO_WG, i32, env, tl, i32)     \
-    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_le),       \
-                       TCG_CALL_NO_WG, i64, env, tl, i64)     \
-    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_be),       \
-                       TCG_CALL_NO_WG, i64, env, tl, i64)
-#else
-#define GEN_ATOMIC_HELPERS(NAME)                              \
-    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b),          \
-                       TCG_CALL_NO_WG, i32, env, tl, i32)     \
-    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le),       \
-                       TCG_CALL_NO_WG, i32, env, tl, i32)     \
-    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be),       \
-                       TCG_CALL_NO_WG, i32, env, tl, i32)     \
-    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le),       \
-                       TCG_CALL_NO_WG, i32, env, tl, i32)     \
-    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be),       \
-                       TCG_CALL_NO_WG, i32, env, tl, i32)
-#endif /* CONFIG_ATOMIC64 */
-
-#endif /* CONFIG_SOFTMMU */
-
 GEN_ATOMIC_HELPERS(fetch_add)
 GEN_ATOMIC_HELPERS(fetch_and)
 GEN_ATOMIC_HELPERS(fetch_or)
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 4df26de858..bbfcfb698c 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1428,14 +1428,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
 
     max_insns = cflags & CF_COUNT_MASK;
     if (max_insns == 0) {
-        max_insns = CF_COUNT_MASK;
-    }
-    if (max_insns > TCG_MAX_INSNS) {
         max_insns = TCG_MAX_INSNS;
     }
-    if (cpu->singlestep_enabled || singlestep) {
-        max_insns = 1;
-    }
+    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
 
  buffer_overflow:
     tb = tcg_tb_alloc(tcg_ctx);
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index 59804af37b..c53a7f8e44 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -33,8 +33,8 @@ void translator_loop_temp_check(DisasContextBase *db)
 
 bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
 {
-    /* Suppress goto_tb in the case of single-steping.  */
-    if (db->singlestep_enabled || singlestep) {
+    /* Suppress goto_tb if requested. */
+    if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
         return false;
     }
 
@@ -45,7 +45,7 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
 void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
                      CPUState *cpu, TranslationBlock *tb, int max_insns)
 {
-    int bp_insn = 0;
+    uint32_t cflags = tb_cflags(tb);
     bool plugin_enabled;
 
     /* Initialize DisasContext */
@@ -55,7 +55,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
     db->is_jmp = DISAS_NEXT;
     db->num_insns = 0;
     db->max_insns = max_insns;
-    db->singlestep_enabled = cpu->singlestep_enabled;
+    db->singlestep_enabled = cflags & CF_SINGLE_STEP;
 
     ops->init_disas_context(db, cpu);
     tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@@ -68,8 +68,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
     ops->tb_start(db, cpu);
     tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
 
-    plugin_enabled = plugin_gen_tb_start(cpu, tb,
-                                         tb_cflags(db->tb) & CF_MEMI_ONLY);
+    plugin_enabled = plugin_gen_tb_start(cpu, tb, cflags & CF_MEMI_ONLY);
 
     while (true) {
         db->num_insns++;
@@ -80,39 +79,17 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
             plugin_gen_insn_start(cpu, db);
         }
 
-        /* Pass breakpoint hits to target for further processing */
-        if (!db->singlestep_enabled
-            && unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
-            CPUBreakpoint *bp;
-            QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
-                if (bp->pc == db->pc_next) {
-                    if (ops->breakpoint_check(db, cpu, bp)) {
-                        bp_insn = 1;
-                        break;
-                    }
-                }
-            }
-            /* The breakpoint_check hook may use DISAS_TOO_MANY to indicate
-               that only one more instruction is to be executed.  Otherwise
-               it should use DISAS_NORETURN when generating an exception,
-               but may use a DISAS_TARGET_* value for Something Else.  */
-            if (db->is_jmp > DISAS_TOO_MANY) {
-                break;
-            }
-        }
-
         /* Disassemble one instruction.  The translate_insn hook should
            update db->pc_next and db->is_jmp to indicate what should be
           done next -- either exiting this loop or locate the start of
           the next instruction.  */
-        if (db->num_insns == db->max_insns
-            && (tb_cflags(db->tb) & CF_LAST_IO)) {
+        if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
             /* Accept I/O on the last instruction.  */
             gen_io_start();
             ops->translate_insn(db, cpu);
         } else {
             /* we should only see CF_MEMI_ONLY for io_recompile */
-            tcg_debug_assert(!(tb_cflags(db->tb) & CF_MEMI_ONLY));
+            tcg_debug_assert(!(cflags & CF_MEMI_ONLY));
             ops->translate_insn(db, cpu);
         }
 
@@ -139,7 +116,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
 
     /* Emit code to exit the TB, as indicated by db->is_jmp.  */
     ops->tb_stop(db, cpu);
-    gen_tb_end(db->tb, db->num_insns - bp_insn);
+    gen_tb_end(db->tb, db->num_insns);
 
     if (plugin_enabled) {
         plugin_gen_tb_end(cpu);
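(Commentary, not part of the patch: after this change a target's goto_tb decision no longer needs to know *why* chaining is suppressed -- gdbstub single-step, -singlestep, and -d nochain all arrive as CF_NO_GOTO_TB. A hypothetical per-target sketch; the DisasContext layout and gen_set_pc are invented for illustration:

    static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
    {
        if (translator_use_goto_tb(&dc->base, dest)) {
            tcg_gen_goto_tb(n);
            gen_set_pc(dc, dest);              /* hypothetical helper */
            tcg_gen_exit_tb(dc->base.tb, n);   /* chain to the next TB */
        } else {
            gen_set_pc(dc, dest);
            tcg_gen_exit_tb(NULL, 0);          /* return to the main loop */
        }
    }
)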
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index ba09fd0413..90d1a2d327 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -1221,9 +1221,14 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
     return ret;
 }
 
-/* Do not allow unaligned operations to proceed.  Return the host address.  */
+/*
+ * Do not allow unaligned operations to proceed.  Return the host address.
+ *
+ * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
+ */
 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               int size, uintptr_t retaddr)
+                               TCGMemOpIdx oi, int size, int prot,
+                               uintptr_t retaddr)
 {
     /* Enforce qemu required alignment.  */
     if (unlikely(addr & (size - 1))) {
@@ -1234,18 +1239,17 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     return ret;
 }
 
-/* Macro to call the above, with local variables from the use context.  */
-#define ATOMIC_MMU_DECLS do {} while (0)
-#define ATOMIC_MMU_LOOKUP_RW  atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
-#define ATOMIC_MMU_LOOKUP_R   ATOMIC_MMU_LOOKUP_RW
-#define ATOMIC_MMU_LOOKUP_W   ATOMIC_MMU_LOOKUP_RW
-#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
-#define ATOMIC_MMU_IDX MMU_USER_IDX
+#include "atomic_common.c.inc"
 
-#define ATOMIC_NAME(X)   HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define EXTRA_ARGS
+/*
+ * First set of functions passes in OI and RETADDR.
+ * This makes them callable from other helpers.
+ */
 
-#include "atomic_common.c.inc"
+#define ATOMIC_NAME(X) \
+    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
+#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
+#define ATOMIC_MMU_IDX MMU_USER_IDX
 
 #define DATA_SIZE 1
 #include "atomic_template.h"
@@ -1261,20 +1265,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 #include "atomic_template.h"
 #endif
 
-/* The following is only callable from other helpers, and matches up
-   with the softmmu version.  */
-
 #if HAVE_ATOMIC128 || HAVE_CMPXCHG128
-
-#undef EXTRA_ARGS
-#undef ATOMIC_NAME
-#undef ATOMIC_MMU_LOOKUP_RW
-
-#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
-#define ATOMIC_NAME(X) \
-    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_LOOKUP_RW  atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
-
 #define DATA_SIZE 16
 #include "atomic_template.h"
 #endif
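(Commentary, not part of the patch: the point of giving user-only builds the same cpu_atomic_*_mmu entry points as softmmu is that target helpers can now be written once for both configurations. A hypothetical caller -- do_cmpxchg64 is invented; make_memop_idx and the MO_* constants are the existing oi construction API:

    static uint64_t do_cmpxchg64(CPUArchState *env, target_ulong addr,
                                 uint64_t cmp, uint64_t new, int mmu_idx,
                                 uintptr_t ra)
    {
        TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN, mmu_idx);
        return cpu_atomic_cmpxchgq_le_mmu(env, addr, cmp, new, oi, ra);
    }
)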