Diffstat (limited to 'accel/tcg/cpu-exec.c')
-rw-r--r--  accel/tcg/cpu-exec.c | 207
1 file changed, 159 insertions, 48 deletions
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index e22bcb99f7..fc895cf51e 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -145,6 +145,28 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
 }
 #endif /* CONFIG_USER_ONLY */
 
+uint32_t curr_cflags(CPUState *cpu)
+{
+    uint32_t cflags = cpu->tcg_cflags;
+
+    /*
+     * Record gdb single-step.  We should be exiting the TB by raising
+     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
+     *
+     * For singlestep and -d nochain, suppress goto_tb so that
+     * we can log -d cpu,exec after every TB.
+     */
+    if (unlikely(cpu->singlestep_enabled)) {
+        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
+    } else if (singlestep) {
+        cflags |= CF_NO_GOTO_TB | 1;
+    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+        cflags |= CF_NO_GOTO_TB;
+    }
+
+    return cflags;
+}
+
 /* Might cause an exception, so have a longjmp destination ready */
 static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
                                           target_ulong cs_base,
@@ -205,6 +227,76 @@ static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
     }
 }
 
+static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
+                                  uint32_t *cflags)
+{
+    CPUBreakpoint *bp;
+    bool match_page = false;
+
+    if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
+        return false;
+    }
+
+    /*
+     * Singlestep overrides breakpoints.
+     * This requirement is visible in the record-replay tests, where
+     * we would fail to make forward progress in reverse-continue.
+     *
+     * TODO: gdb singlestep should only override gdb breakpoints,
+     * so that one could (gdb) singlestep into the guest kernel's
+     * architectural breakpoint handler.
+     */
+    if (cpu->singlestep_enabled) {
+        return false;
+    }
+
+    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
+        /*
+         * If we have an exact pc match, trigger the breakpoint.
+         * Otherwise, note matches within the page.
+         */
+        if (pc == bp->pc) {
+            bool match_bp = false;
+
+            if (bp->flags & BP_GDB) {
+                match_bp = true;
+            } else if (bp->flags & BP_CPU) {
+#ifdef CONFIG_USER_ONLY
+                g_assert_not_reached();
+#else
+                CPUClass *cc = CPU_GET_CLASS(cpu);
+                assert(cc->tcg_ops->debug_check_breakpoint);
+                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
+#endif
+            }
+
+            if (match_bp) {
+                cpu->exception_index = EXCP_DEBUG;
+                return true;
+            }
+        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
+            match_page = true;
+        }
+    }
+
+    /*
+     * Within the same page as a breakpoint, single-step,
+     * returning to helper_lookup_tb_ptr after each insn looking
+     * for the actual breakpoint.
+     *
+     * TODO: Perhaps better to record all of the TBs associated
+     * with a given virtual page that contains a breakpoint, and
+     * then invalidate them when a new overlapping breakpoint is
+     * set on the page.  Non-overlapping TBs would not be
+     * invalidated, nor would any TB need to be invalidated as
+     * breakpoints are removed.
+     */
+    if (match_page) {
+        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
+    }
+    return false;
+}
+
 /**
  * helper_lookup_tb_ptr: quick check for next tb
  * @env: current cpu state
@@ -218,11 +310,16 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
     CPUState *cpu = env_cpu(env);
     TranslationBlock *tb;
     target_ulong cs_base, pc;
-    uint32_t flags;
+    uint32_t flags, cflags;
 
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 
-    tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
+    cflags = curr_cflags(cpu);
+    if (check_for_breakpoints(cpu, pc, &cflags)) {
+        cpu_loop_exit(cpu);
+    }
+
+    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
     if (tb == NULL) {
         return tcg_code_gen_epilogue;
     }
@@ -313,8 +410,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
     TranslationBlock *tb;
     target_ulong cs_base, pc;
-    uint32_t flags;
-    uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
+    uint32_t flags, cflags;
     int tb_exit;
 
     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -324,8 +420,20 @@ void cpu_exec_step_atomic(CPUState *cpu)
         cpu->running = true;
 
         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
 
+        cflags = curr_cflags(cpu);
+        /* Execute in a serial context. */
+        cflags &= ~CF_PARALLEL;
+        /* After 1 insn, return and release the exclusive lock. */
+        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
+        /*
+         * No need to check_for_breakpoints here.
+         * We only arrive in cpu_exec_step_atomic after beginning execution
+         * of an insn that includes an atomic operation we can't handle.
+         * Any breakpoint for this insn will have been recognized earlier.
+         */
+
+        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
         if (tb == NULL) {
             mmap_lock();
             tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
@@ -478,41 +586,6 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
     return;
 }
 
-static inline TranslationBlock *tb_find(CPUState *cpu,
-                                        TranslationBlock *last_tb,
-                                        int tb_exit, uint32_t cflags)
-{
-    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
-    TranslationBlock *tb;
-    target_ulong cs_base, pc;
-    uint32_t flags;
-
-    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-
-    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
-    if (tb == NULL) {
-        mmap_lock();
-        tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
-        mmap_unlock();
-        /* We add the TB in the virtual pc hash table for the fast lookup */
-        qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
-    }
-#ifndef CONFIG_USER_ONLY
-    /* We don't take care of direct jumps when address mapping changes in
-     * system emulation. So it's not safe to make a direct jump to a TB
-     * spanning two pages because the mapping for the second page can change.
-     */
-    if (tb->page_addr[1] != -1) {
-        last_tb = NULL;
-    }
-#endif
-    /* See if we can patch the calling TB. */
-    if (last_tb) {
-        tb_add_jump(last_tb, tb_exit, tb);
-    }
-    return tb;
-}
-
 static inline bool cpu_handle_halt(CPUState *cpu)
 {
     if (cpu->halted) {
@@ -846,22 +919,60 @@ int cpu_exec(CPUState *cpu)
         int tb_exit = 0;
 
         while (!cpu_handle_interrupt(cpu, &last_tb)) {
-            uint32_t cflags = cpu->cflags_next_tb;
             TranslationBlock *tb;
-
-            /* When requested, use an exact setting for cflags for the next
-               execution.  This is used for icount, precise smc, and stop-
-               after-access watchpoints.  Since this request should never
-               have CF_INVALID set, -1 is a convenient invalid value that
-               does not require tcg headers for cpu_common_reset.  */
+            target_ulong cs_base, pc;
+            uint32_t flags, cflags;
+
+            cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+
+            /*
+             * When requested, use an exact setting for cflags for the next
+             * execution.  This is used for icount, precise smc, and stop-
+             * after-access watchpoints.  Since this request should never
+             * have CF_INVALID set, -1 is a convenient invalid value that
+             * does not require tcg headers for cpu_common_reset.
+             */
+            cflags = cpu->cflags_next_tb;
             if (cflags == -1) {
                 cflags = curr_cflags(cpu);
            } else {
                 cpu->cflags_next_tb = -1;
            }
 
-            tb = tb_find(cpu, last_tb, tb_exit, cflags);
+            if (check_for_breakpoints(cpu, pc, &cflags)) {
+                break;
+            }
+
+            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+            if (tb == NULL) {
+                mmap_lock();
+                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+                mmap_unlock();
+                /*
+                 * We add the TB in the virtual pc hash table
+                 * for the fast lookup
+                 */
+                qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
+            }
+
+#ifndef CONFIG_USER_ONLY
+            /*
+             * We don't take care of direct jumps when address mapping
+             * changes in system emulation. So it's not safe to make a
+             * direct jump to a TB spanning two pages because the mapping
+             * for the second page can change.
+             */
+            if (tb->page_addr[1] != -1) {
+                last_tb = NULL;
+            }
+#endif
+            /* See if we can patch the calling TB. */
+            if (last_tb) {
+                tb_add_jump(last_tb, tb_exit, tb);
+            }
+
             cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
+
             /* Try to align the host and virtual clocks
                if the guest is in advance */
             align_clocks(&sc, cpu);
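
A note on the recurring "| 1" in these hunks: the low bits of cflags (CF_COUNT_MASK) hold the maximum number of guest instructions the next TB may contain, so clearing the count field and OR-ing in 1 requests a single-instruction TB. The sketch below is a standalone illustration of that packing, not QEMU code; the numeric mask and flag values here are invented for the example (the real definitions live in QEMU's exec headers):

/*
 * Minimal sketch of the cflags packing used by this patch.
 * The values of the macros are illustrative stand-ins only.
 */
#include <stdint.h>
#include <stdio.h>

#define CF_COUNT_MASK  0x000001ffu   /* low bits: max insns per TB (assumed width) */
#define CF_NO_GOTO_TB  0x00000200u   /* suppress direct TB chaining */

/* The same adjustment check_for_breakpoints() applies on a page match. */
static uint32_t single_insn_cflags(uint32_t cflags)
{
    /* Clear any existing insn count, then request exactly one insn. */
    return (cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
}

int main(void)
{
    uint32_t cflags = 128;           /* pretend count: 128 insns per TB */
    uint32_t stepped = single_insn_cflags(cflags);

    printf("count before: %u\n", cflags & CF_COUNT_MASK);   /* 128 */
    printf("count after:  %u\n", stepped & CF_COUNT_MASK);  /* 1 */
    printf("chaining off: %s\n", (stepped & CF_NO_GOTO_TB) ? "yes" : "no");
    return 0;
}

With CF_NO_GOTO_TB (and, for gdb single-step, CF_NO_GOTO_PTR) set, the generated code cannot chain to the next TB, so control returns to helper_lookup_tb_ptr or the main loop after every instruction, which is exactly the behavior the breakpoint page-match path relies on to catch the precise pc.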