Diffstat (limited to 'target')
77 files changed, 2666 insertions, 418 deletions
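Most of the churn below follows a handful of mechanical patterns: every target's gen_intermediate_code() now takes max_insns by pointer instead of by value; the compile-time TARGET_TB_PCREL macro is replaced by a runtime CF_PCREL cflag (sketched after the arm hunks below); probe_access_full() and probe_access_flags() grow an extra argument, passed as 0 at every call site in this series; and tcg_temp_local_new*() collapses into plain tcg_temp_new*() now that ordinary TCG temporaries stay live across branches. A minimal sketch of the first pattern, assuming the matching accel/tcg change gives translator_loop() the same pointer parameter, and with example_tr_ops standing in for a target's real TranslatorOps:

    /* Sketch only: mirrors the gen_intermediate_code() signature change
     * applied to every target below. Passing max_insns by pointer lets
     * the generic translator loop write back how many instructions it
     * actually translated. */
    void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb,
                               int *max_insns, target_ulong pc, void *host_pc)
    {
        DisasContext dc;

        translator_loop(cpu, tb, max_insns, pc, host_pc,
                        &example_tr_ops, &dc.base);
    }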
diff --git a/target/alpha/translate.c b/target/alpha/translate.c index f9bcdeb717..716b083f39 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -3043,7 +3043,7 @@ static const TranslatorOps alpha_tr_ops = { .disas_log = alpha_tr_disas_log, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/arm/monitor.c b/target/arm/arm-qmp-cmds.c index ecdd5ee817..c8fa524002 100644 --- a/target/arm/monitor.c +++ b/target/arm/arm-qmp-cmds.c @@ -227,3 +227,31 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, return expansion_info; } + +static void arm_cpu_add_definition(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + CpuDefinitionInfoList **cpu_list = user_data; + CpuDefinitionInfo *info; + const char *typename; + + typename = object_class_get_name(oc); + info = g_malloc0(sizeof(*info)); + info->name = g_strndup(typename, + strlen(typename) - strlen("-" TYPE_ARM_CPU)); + info->q_typename = g_strdup(typename); + + QAPI_LIST_PREPEND(*cpu_list, info); +} + +CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) +{ + CpuDefinitionInfoList *cpu_list = NULL; + GSList *list; + + list = object_class_get_list(TYPE_ARM_CPU, false); + g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); + g_slist_free(list); + + return cpu_list; +} diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h index 53cac9c89b..b7bde18986 100644 --- a/target/arm/cpu-param.h +++ b/target/arm/cpu-param.h @@ -31,8 +31,6 @@ # define TARGET_PAGE_BITS_VARY # define TARGET_PAGE_BITS_MIN 10 -# define TARGET_TB_PCREL 1 - /* * Cache the attrs and shareability fields from the page table entry. * diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 0b333a749f..5182ed0c91 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -78,17 +78,17 @@ static vaddr arm_cpu_get_pc(CPUState *cs) void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { - /* The program counter is always up to date with TARGET_TB_PCREL. */ - if (!TARGET_TB_PCREL) { + /* The program counter is always up to date with CF_PCREL. */ + if (!(tb_cflags(tb) & CF_PCREL)) { CPUARMState *env = cs->env_ptr; /* * It's OK to look at env for the current mode here, because it's * never possible for an AArch64 TB to chain to an AArch32 TB. 
*/ if (is_a64(env)) { - env->pc = tb_pc(tb); + env->pc = tb->pc; } else { - env->regs[15] = tb_pc(tb); + env->regs[15] = tb->pc; } } } @@ -100,7 +100,7 @@ void arm_restore_state_to_opc(CPUState *cs, CPUARMState *env = cs->env_ptr; if (is_a64(env)) { - if (TARGET_TB_PCREL) { + if (tb_cflags(tb) & CF_PCREL) { env->pc = (env->pc & TARGET_PAGE_MASK) | data[0]; } else { env->pc = data[0]; @@ -108,7 +108,7 @@ void arm_restore_state_to_opc(CPUState *cs, env->condexec_bits = 0; env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; } else { - if (TARGET_TB_PCREL) { + if (tb_cflags(tb) & CF_PCREL) { env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0]; } else { env->regs[15] = data[0]; @@ -1557,6 +1557,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) Error *local_err = NULL; bool no_aa32 = false; + /* Use pc-relative instructions in system-mode */ +#ifndef CONFIG_USER_ONLY + cs->tcg_cflags |= CF_PCREL; +#endif + /* If we needed to query the host kernel for the CPU features * then it's possible that might have failed in the initfn, but * this is the first point where we can report it. diff --git a/target/arm/helper.c b/target/arm/helper.c index 14af7ba095..82c546f11a 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -23,7 +23,6 @@ #include "sysemu/cpu-timers.h" #include "sysemu/kvm.h" #include "sysemu/tcg.h" -#include "qapi/qapi-commands-machine-target.h" #include "qapi/error.h" #include "qemu/guest-random.h" #ifdef CONFIG_TCG @@ -9188,34 +9187,6 @@ void arm_cpu_list(void) g_slist_free(list); } -static void arm_cpu_add_definition(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - CpuDefinitionInfoList **cpu_list = user_data; - CpuDefinitionInfo *info; - const char *typename; - - typename = object_class_get_name(oc); - info = g_malloc0(sizeof(*info)); - info->name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_ARM_CPU)); - info->q_typename = g_strdup(typename); - - QAPI_LIST_PREPEND(*cpu_list, info); -} - -CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) -{ - CpuDefinitionInfoList *cpu_list = NULL; - GSList *list; - - list = object_class_get_list(TYPE_ARM_CPU, false); - g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); - g_slist_free(list); - - return cpu_list; -} - /* * Private utility function for define_one_arm_cp_reg_with_opaque(): * add a single reginfo struct to the hash table. 
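The arm conversion above is the template for the CF_PCREL migration: TARGET_TB_PCREL was a build-time setting in cpu-param.h, whereas CF_PCREL is a per-TB cflag that system-mode CPUs opt into at realize time (cs->tcg_cflags |= CF_PCREL), and the tb_pc() accessor gives way to reading tb->pc directly, which is only meaningful when CF_PCREL is clear. A minimal sketch of the resulting runtime test, with env->pc standing in for whatever program-counter field the real target keeps:

    /* Hedged sketch of the pattern the arm hunks above apply: the old
     * compile-time "if (!TARGET_TB_PCREL)" test becomes a runtime check
     * of the TB's cflags. */
    static void example_synchronize_from_tb(CPUState *cs,
                                            const TranslationBlock *tb)
    {
        CPUArchState *env = cs->env_ptr;

        if (!(tb_cflags(tb) & CF_PCREL)) {
            /* tb->pc holds the absolute PC only for non-PCREL TBs. */
            env->pc = tb->pc;
        }
    }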
diff --git a/target/arm/meson.build b/target/arm/meson.build index a5191b57e1..6226098ad5 100644 --- a/target/arm/meson.build +++ b/target/arm/meson.build @@ -20,8 +20,8 @@ arm_softmmu_ss = ss.source_set() arm_softmmu_ss.add(files( 'arch_dump.c', 'arm-powerctl.c', + 'arm-qmp-cmds.c', 'machine.c', - 'monitor.c', 'ptw.c', )) diff --git a/target/arm/ptw.c b/target/arm/ptw.c index be0cc6bc15..8541ef56d6 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -259,7 +259,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, int flags; env->tlb_fi = fi; - flags = probe_access_full(env, addr, MMU_DATA_LOAD, + flags = probe_access_full(env, addr, 0, MMU_DATA_LOAD, arm_to_core_mmu_idx(s2_mmu_idx), true, &ptw->out_host, &full, 0); env->tlb_fi = NULL; @@ -411,7 +411,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, void *discard; env->tlb_fi = fi; - flags = probe_access_flags(env, ptw->out_virt, MMU_DATA_STORE, + flags = probe_access_flags(env, ptw->out_virt, 0, MMU_DATA_STORE, arm_to_core_mmu_idx(ptw->in_ptw_idx), true, &discard, 0); env->tlb_fi = NULL; diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c index 98bcf59c22..fee3c7eb96 100644 --- a/target/arm/tcg/mte_helper.c +++ b/target/arm/tcg/mte_helper.c @@ -118,7 +118,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, * valid. Indicate to probe_access_flags no-fault, then assert that * we received a valid page. */ - flags = probe_access_full(env, ptr, ptr_access, ptr_mmu_idx, + flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx, ra == 0, &host, &full, ra); assert(!(flags & TLB_INVALID_MASK)); @@ -154,7 +154,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, */ in_page = -(ptr | TARGET_PAGE_MASK); if (unlikely(ptr_size > in_page)) { - flags |= probe_access_full(env, ptr + in_page, ptr_access, + flags |= probe_access_full(env, ptr + in_page, 0, ptr_access, ptr_mmu_idx, ra == 0, &host, &full, ra); assert(!(flags & TLB_INVALID_MASK)); } diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 521fc9b969..9a8951afa4 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -5352,11 +5352,11 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env, addr = useronly_clean_ptr(addr); #ifdef CONFIG_USER_ONLY - flags = probe_access_flags(env, addr, access_type, mmu_idx, nofault, + flags = probe_access_flags(env, addr, 0, access_type, mmu_idx, nofault, &info->host, retaddr); #else CPUTLBEntryFull *full; - flags = probe_access_full(env, addr, access_type, mmu_idx, nofault, + flags = probe_access_full(env, addr, 0, access_type, mmu_idx, nofault, &info->host, &full, retaddr); #endif info->flags = flags; diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index da9f877476..f092aec801 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -143,7 +143,7 @@ static void reset_btype(DisasContext *s) static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff) { assert(s->pc_save != -1); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff); } else { tcg_gen_movi_i64(dest, s->pc_curr + diff); @@ -393,7 +393,7 @@ static void gen_goto_tb(DisasContext *s, int n, int64_t diff) * update to pc to the unlinked path. A long chain of links * can thus avoid many updates to the PC. 
*/ - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { gen_a64_update_pc(s, diff); tcg_gen_goto_tb(n); } else { @@ -436,12 +436,6 @@ TCGv_i64 new_tmp_a64(DisasContext *s) return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64(); } -TCGv_i64 new_tmp_a64_local(DisasContext *s) -{ - assert(s->tmp_a64_count < TMP_A64_MAX); - return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_local_new_i64(); -} - TCGv_i64 new_tmp_a64_zero(DisasContext *s) { TCGv_i64 t = new_tmp_a64(s); @@ -4297,7 +4291,7 @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn) if (page) { /* ADRP (page based) */ offset <<= 12; - /* The page offset is ok for TARGET_TB_PCREL. */ + /* The page offset is ok for CF_PCREL. */ offset -= s->pc_curr & 0xfff; } @@ -14651,7 +14645,7 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s) * that the TLB entry must be present and valid, and thus this * access will never raise an exception. */ - flags = probe_access_full(env, addr, MMU_INST_FETCH, mmu_idx, + flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx, false, &host, &full, 0); assert(!(flags & TLB_INVALID_MASK)); @@ -14809,7 +14803,7 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) DisasContext *dc = container_of(dcbase, DisasContext, base); target_ulong pc_arg = dc->base.pc_next; - if (TARGET_TB_PCREL) { + if (tb_cflags(dcbase->tb) & CF_PCREL) { pc_arg &= ~TARGET_PAGE_MASK; } tcg_gen_insn_start(pc_arg, 0, 0); diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h index ad3762d1ac..ca24c39dbe 100644 --- a/target/arm/tcg/translate-a64.h +++ b/target/arm/tcg/translate-a64.h @@ -19,7 +19,6 @@ #define TARGET_ARM_TRANSLATE_A64_H TCGv_i64 new_tmp_a64(DisasContext *s); -TCGv_i64 new_tmp_a64_local(DisasContext *s); TCGv_i64 new_tmp_a64_zero(DisasContext *s); TCGv_i64 cpu_reg(DisasContext *s, int reg); TCGv_i64 cpu_reg_sp(DisasContext *s, int reg); diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 621a2abb22..718a5bce1b 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -2694,7 +2694,7 @@ static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before) return true; } - last = tcg_temp_local_new_i32(); + last = tcg_temp_new_i32(); over = gen_new_label(); find_last_active(s, last, esz, a->pg); @@ -4342,18 +4342,7 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, tcg_temp_free_i64(t0); } else { TCGLabel *loop = gen_new_label(); - TCGv_ptr tp, i = tcg_const_local_ptr(0); - - /* Copy the clean address into a local temp, live across the loop. */ - t0 = clean_addr; - clean_addr = new_tmp_a64_local(s); - tcg_gen_mov_i64(clean_addr, t0); - - if (base != cpu_env) { - TCGv_ptr b = tcg_temp_local_new_ptr(); - tcg_gen_mov_ptr(b, base); - base = b; - } + TCGv_ptr tp, i = tcg_const_ptr(0); gen_set_label(loop); @@ -4370,11 +4359,6 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); tcg_temp_free_ptr(i); - - if (base != cpu_env) { - tcg_temp_free_ptr(base); - assert(len_remain == 0); - } } /* @@ -4443,18 +4427,7 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, tcg_temp_free_i64(t0); } else { TCGLabel *loop = gen_new_label(); - TCGv_ptr tp, i = tcg_const_local_ptr(0); - - /* Copy the clean address into a local temp, live across the loop. 
*/ - t0 = clean_addr; - clean_addr = new_tmp_a64_local(s); - tcg_gen_mov_i64(clean_addr, t0); - - if (base != cpu_env) { - TCGv_ptr b = tcg_temp_local_new_ptr(); - tcg_gen_mov_ptr(b, base); - base = b; - } + TCGv_ptr tp, i = tcg_const_ptr(0); gen_set_label(loop); @@ -4471,11 +4444,6 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); tcg_temp_free_ptr(i); - - if (base != cpu_env) { - tcg_temp_free_ptr(base); - assert(len_remain == 0); - } } /* Predicate register stores can be any multiple of 2. */ diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c index c23a3462bf..f042069dc2 100644 --- a/target/arm/tcg/translate.c +++ b/target/arm/tcg/translate.c @@ -269,7 +269,7 @@ static target_long jmp_diff(DisasContext *s, target_long diff) static void gen_pc_plus_diff(DisasContext *s, TCGv_i32 var, target_long diff) { assert(s->pc_save != -1); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { tcg_gen_addi_i32(var, cpu_R[15], (s->pc_curr - s->pc_save) + diff); } else { tcg_gen_movi_i32(var, s->pc_curr + diff); @@ -2620,7 +2620,7 @@ static void gen_goto_tb(DisasContext *s, int n, target_long diff) * update to pc to the unlinked path. A long chain of links * can thus avoid many updates to the PC. */ - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { gen_update_pc(s, diff); tcg_gen_goto_tb(n); } else { @@ -7136,7 +7136,7 @@ static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel) tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); } - addr = tcg_temp_local_new_i32(); + addr = tcg_temp_new_i32(); load_reg_var(s, addr, a->rn); tcg_gen_addi_i32(addr, addr, a->imm); @@ -7289,7 +7289,7 @@ static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq) return true; } - addr = tcg_temp_local_new_i32(); + addr = tcg_temp_new_i32(); load_reg_var(s, addr, a->rn); tcg_gen_addi_i32(addr, addr, a->imm); @@ -8696,7 +8696,7 @@ static bool trans_LE(DisasContext *s, arg_LE *a) * Decrement by 1 << (4 - LTPSIZE). We need to use a TCG local * so that decr stays live after the brcondi. */ - TCGv_i32 decr = tcg_temp_local_new_i32(); + TCGv_i32 decr = tcg_temp_new_i32(); TCGv_i32 ltpsize = load_cpu_field(v7m.ltpsize); tcg_gen_sub_i32(decr, tcg_constant_i32(4), ltpsize); tcg_gen_shl_i32(decr, tcg_constant_i32(1), decr); @@ -9542,7 +9542,7 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) uint32_t condexec_bits; target_ulong pc_arg = dc->base.pc_next; - if (TARGET_TB_PCREL) { + if (tb_cflags(dcbase->tb) & CF_PCREL) { pc_arg &= ~TARGET_PAGE_MASK; } if (dc->eci) { @@ -9970,7 +9970,7 @@ static const TranslatorOps thumb_translator_ops = { }; /* generate intermediate code for basic block 'tb'. */ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc = { }; diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index 3717824b75..4001372acd 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -23,7 +23,7 @@ typedef struct DisasContext { /* The address of the current instruction being translated. */ target_ulong pc_curr; /* - * For TARGET_TB_PCREL, the full value of cpu_pc is not known + * For CF_PCREL, the full value of cpu_pc is not known * (although the page offset is known). 
For convenience, the * translation loop uses the full virtual address that triggered * the translation, from base.pc_start through pc_curr. diff --git a/target/avr/cpu.c b/target/avr/cpu.c index d0139804b9..a24c23c247 100644 --- a/target/avr/cpu.c +++ b/target/avr/cpu.c @@ -54,7 +54,8 @@ static void avr_cpu_synchronize_from_tb(CPUState *cs, AVRCPU *cpu = AVR_CPU(cs); CPUAVRState *env = &cpu->env; - env->pc_w = tb_pc(tb) / 2; /* internally PC points to words */ + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + env->pc_w = tb->pc / 2; /* internally PC points to words */ } static void avr_restore_state_to_opc(CPUState *cs, diff --git a/target/avr/translate.c b/target/avr/translate.c index 2bed56f135..e40d8e9681 100644 --- a/target/avr/translate.c +++ b/target/avr/translate.c @@ -3049,7 +3049,7 @@ static const TranslatorOps avr_tr_ops = { .disas_log = avr_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc = { }; diff --git a/target/cris/translate.c b/target/cris/translate.c index fbc3fd5865..a959b27373 100644 --- a/target/cris/translate.c +++ b/target/cris/translate.c @@ -1621,7 +1621,7 @@ static int dec_bound_r(CPUCRISState *env, DisasContext *dc) LOG_DIS("bound.%c $r%u, $r%u\n", memsize_char(size), dc->op1, dc->op2); cris_cc_mask(dc, CC_MASK_NZ); - l0 = tcg_temp_local_new(); + l0 = tcg_temp_new(); dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0); cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4); tcg_temp_free(l0); @@ -2404,8 +2404,8 @@ static int dec_bound_m(CPUCRISState *env, DisasContext *dc) dc->op1, dc->postinc ? "+]" : "]", dc->op2); - l[0] = tcg_temp_local_new(); - l[1] = tcg_temp_local_new(); + l[0] = tcg_temp_new(); + l[1] = tcg_temp_new(); insn_len = dec_prep_alu_m(env, dc, 0, memsize, l[0], l[1]); cris_cc_mask(dc, CC_MASK_NZ); cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4); @@ -3286,7 +3286,7 @@ static const TranslatorOps cris_tr_ops = { .disas_log = cris_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc index f500e93447..9660f28584 100644 --- a/target/cris/translate_v10.c.inc +++ b/target/cris/translate_v10.c.inc @@ -68,9 +68,9 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val, unsigned int size, int mem_index) { TCGLabel *l1 = gen_new_label(); - TCGv taddr = tcg_temp_local_new(); - TCGv tval = tcg_temp_local_new(); - TCGv t1 = tcg_temp_local_new(); + TCGv taddr = tcg_temp_new(); + TCGv tval = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); dc->postinc = 0; cris_evaluate_flags(dc); @@ -434,7 +434,7 @@ static void dec10_reg_bound(DisasContext *dc, int size) { TCGv t; - t = tcg_temp_local_new(); + t = tcg_temp_new(); t_gen_zext(t, cpu_R[dc->src], size); cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[dc->dst], t, 4); tcg_temp_free(t); @@ -935,7 +935,7 @@ static int dec10_ind_bound(CPUCRISState *env, DisasContext *dc, int rd = dc->dst; TCGv t; - t = tcg_temp_local_new(); + t = tcg_temp_new(); insn_len += dec10_prep_move_m(env, dc, 0, size, t); cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[rd], t, 4); if (dc->dst == 15) { diff --git a/target/hexagon/README b/target/hexagon/README index 
6cb5affddb..2e32639fb7 100644 --- a/target/hexagon/README +++ b/target/hexagon/README @@ -81,7 +81,7 @@ tcg_funcs_generated.c.inc Insn *insn, Packet *pkt) { - TCGv RdV = tcg_temp_local_new(); + TCGv RdV = tcg_temp_new(); const int RdN = insn->regno[0]; TCGv RsV = hex_gpr[insn->regno[1]]; TCGv RtV = hex_gpr[insn->regno[2]]; @@ -146,16 +146,16 @@ istruction. const int VdN = insn->regno[0]; const intptr_t VdV_off = ctx_future_vreg_off(ctx, VdN, 1, true); - TCGv_ptr VdV = tcg_temp_local_new_ptr(); + TCGv_ptr VdV = tcg_temp_new_ptr(); tcg_gen_addi_ptr(VdV, cpu_env, VdV_off); const int VuN = insn->regno[1]; const intptr_t VuV_off = vreg_src_off(ctx, VuN); - TCGv_ptr VuV = tcg_temp_local_new_ptr(); + TCGv_ptr VuV = tcg_temp_new_ptr(); const int VvN = insn->regno[2]; const intptr_t VvV_off = vreg_src_off(ctx, VvN); - TCGv_ptr VvV = tcg_temp_local_new_ptr(); + TCGv_ptr VvV = tcg_temp_new_ptr(); tcg_gen_addi_ptr(VuV, cpu_env, VuV_off); tcg_gen_addi_ptr(VvV, cpu_env, VvV_off); TCGv slot = tcg_constant_tl(insn->slot); diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index 807037c586..ab40cfc283 100644 --- a/target/hexagon/cpu.c +++ b/target/hexagon/cpu.c @@ -23,6 +23,7 @@ #include "qapi/error.h" #include "hw/qdev-properties.h" #include "fpu/softfloat-helpers.h" +#include "tcg/tcg.h" static void hexagon_v67_cpu_init(Object *obj) { @@ -263,7 +264,8 @@ static void hexagon_cpu_synchronize_from_tb(CPUState *cs, { HexagonCPU *cpu = HEXAGON_CPU(cs); CPUHexagonState *env = &cpu->env; - env->gpr[HEX_REG_PC] = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + env->gpr[HEX_REG_PC] = tb->pc; } static bool hexagon_cpu_has_work(CPUState *cs) diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h index 19697b42a5..a219a7f5dd 100644 --- a/target/hexagon/gen_tcg.h +++ b/target/hexagon/gen_tcg.h @@ -337,7 +337,7 @@ */ #define fGEN_TCG_PRED_LOAD(GET_EA, PRED, SIZE, SIGN) \ do { \ - TCGv LSB = tcg_temp_local_new(); \ + TCGv LSB = tcg_temp_new(); \ TCGLabel *label = gen_new_label(); \ tcg_gen_movi_tl(EA, 0); \ PRED; \ @@ -397,7 +397,7 @@ /* Predicated loads into a register pair */ #define fGEN_TCG_PRED_LOAD_PAIR(GET_EA, PRED) \ do { \ - TCGv LSB = tcg_temp_local_new(); \ + TCGv LSB = tcg_temp_new(); \ TCGLabel *label = gen_new_label(); \ tcg_gen_movi_tl(EA, 0); \ PRED; \ diff --git a/target/hexagon/gen_tcg_funcs.py b/target/hexagon/gen_tcg_funcs.py index 7e8ba17ca2..dfc90712fb 100755 --- a/target/hexagon/gen_tcg_funcs.py +++ b/target/hexagon/gen_tcg_funcs.py @@ -26,18 +26,14 @@ import hex_common ## Helpers for gen_tcg_func ## def gen_decl_ea_tcg(f, tag): - if ('A_CONDEXEC' in hex_common.attribdict[tag] or - 'A_LOAD' in hex_common.attribdict[tag]): - f.write(" TCGv EA = tcg_temp_local_new();\n") - else: - f.write(" TCGv EA = tcg_temp_new();\n") + f.write(" TCGv EA = tcg_temp_new();\n") def gen_free_ea_tcg(f): f.write(" tcg_temp_free(EA);\n") def genptr_decl_pair_writable(f, tag, regtype, regid, regno): regN="%s%sN" % (regtype,regid) - f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \ + f.write(" TCGv_i64 %s%sV = tcg_temp_new_i64();\n" % \ (regtype, regid)) if (regtype == "C"): f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \ @@ -56,7 +52,7 @@ def genptr_decl_pair_writable(f, tag, regtype, regid, regno): def genptr_decl_writable(f, tag, regtype, regid, regno): regN="%s%sN" % (regtype,regid) - f.write(" TCGv %s%sV = tcg_temp_local_new();\n" % \ + f.write(" TCGv %s%sV = tcg_temp_new();\n" % \ (regtype, regid)) if (regtype == "C"): f.write(" const int %s = insn->regno[%d] 
+ HEX_REG_SA0;\n" % \ @@ -73,7 +69,7 @@ def genptr_decl(f, tag, regtype, regid, regno): regN="%s%sN" % (regtype,regid) if (regtype == "R"): if (regid in {"ss", "tt"}): - f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \ + f.write(" TCGv_i64 %s%sV = tcg_temp_new_i64();\n" % \ (regtype, regid)) f.write(" const int %s = insn->regno[%d];\n" % \ (regN, regno)) @@ -96,14 +92,14 @@ def genptr_decl(f, tag, regtype, regid, regno): print("Bad register parse: ", regtype, regid) elif (regtype == "C"): if (regid == "ss"): - f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \ + f.write(" TCGv_i64 %s%sV = tcg_temp_new_i64();\n" % \ (regtype, regid)) f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \ (regN, regno)) elif (regid == "dd"): genptr_decl_pair_writable(f, tag, regtype, regid, regno) elif (regid == "s"): - f.write(" TCGv %s%sV = tcg_temp_local_new();\n" % \ + f.write(" TCGv %s%sV = tcg_temp_new();\n" % \ (regtype, regid)) f.write(" const int %s%sN = insn->regno[%d] + HEX_REG_SA0;\n" % \ (regtype, regid, regno)) @@ -575,7 +571,7 @@ def genptr_dst_write_opn(f,regtype, regid, tag): ## We produce: ## static void generate_A2_add(DisasContext *ctx) ## { -## TCGv RdV = tcg_temp_local_new(); +## TCGv RdV = tcg_temp_new(); ## const int RdN = insn->regno[0]; ## TCGv RsV = hex_gpr[insn->regno[1]]; ## TCGv RtV = hex_gpr[insn->regno[2]]; diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c index 90db99024f..591461b043 100644 --- a/target/hexagon/genptr.c +++ b/target/hexagon/genptr.c @@ -706,7 +706,7 @@ static void gen_cond_call(DisasContext *ctx, TCGv pred, TCGCond cond, int pc_off) { TCGv next_PC; - TCGv lsb = tcg_temp_local_new(); + TCGv lsb = tcg_temp_new(); TCGLabel *skip = gen_new_label(); tcg_gen_andi_tl(lsb, pred, 1); gen_write_new_pc_pcrel(ctx, pc_off, cond, lsb); @@ -720,7 +720,7 @@ static void gen_cond_call(DisasContext *ctx, TCGv pred, static void gen_endloop0(DisasContext *ctx) { - TCGv lpcfg = tcg_temp_local_new(); + TCGv lpcfg = tcg_temp_new(); GET_USR_FIELD(USR_LPCFG, lpcfg); @@ -852,7 +852,7 @@ static void gen_sar(TCGv dst, TCGv src, TCGv shift_amt) /* Bidirectional shift right with saturation */ static void gen_asr_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV) { - TCGv shift_amt = tcg_temp_local_new(); + TCGv shift_amt = tcg_temp_new(); TCGLabel *positive = gen_new_label(); TCGLabel *done = gen_new_label(); @@ -876,7 +876,7 @@ static void gen_asr_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV) /* Bidirectional shift left with saturation */ static void gen_asl_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV) { - TCGv shift_amt = tcg_temp_local_new(); + TCGv shift_amt = tcg_temp_new(); TCGLabel *positive = gen_new_label(); TCGLabel *done = gen_new_label(); @@ -918,7 +918,7 @@ static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num, intptr_t dstoff; if (is_predicated) { - TCGv cancelled = tcg_temp_local_new(); + TCGv cancelled = tcg_temp_new(); label_end = gen_new_label(); /* Don't do anything if the slot was cancelled */ @@ -959,7 +959,7 @@ static void gen_log_qreg_write(intptr_t srcoff, int num, int vnew, intptr_t dstoff; if (is_predicated) { - TCGv cancelled = tcg_temp_local_new(); + TCGv cancelled = tcg_temp_new(); label_end = gen_new_label(); /* Don't do anything if the slot was cancelled */ @@ -1164,10 +1164,10 @@ void gen_satu_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width) /* Implements the fADDSAT64 macro in TCG */ void gen_add_sat_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) { - TCGv_i64 sum = tcg_temp_local_new_i64(); + TCGv_i64 sum = 
tcg_temp_new_i64(); TCGv_i64 xor = tcg_temp_new_i64(); TCGv_i64 cond1 = tcg_temp_new_i64(); - TCGv_i64 cond2 = tcg_temp_local_new_i64(); + TCGv_i64 cond2 = tcg_temp_new_i64(); TCGv_i64 cond3 = tcg_temp_new_i64(); TCGv_i64 mask = tcg_constant_i64(0x8000000000000000ULL); TCGv_i64 max_pos = tcg_constant_i64(0x7FFFFFFFFFFFFFFFLL); diff --git a/target/hexagon/idef-parser/README.rst b/target/hexagon/idef-parser/README.rst index ff6d14150a..c230fec124 100644 --- a/target/hexagon/idef-parser/README.rst +++ b/target/hexagon/idef-parser/README.rst @@ -294,9 +294,9 @@ generators the previous declarations are mapped to :: - int var1; -> TCGv_i32 var1 = tcg_temp_local_new_i32(); + int var1; -> TCGv_i32 var1 = tcg_temp_new_i32(); - int var2 = 0; -> TCGv_i32 var1 = tcg_temp_local_new_i32(); + int var2 = 0; -> TCGv_i32 var1 = tcg_temp_new_i32(); tcg_gen_movi_i32(j, ((int64_t) 0ULL)); which are later automatically freed at the end of the function they're declared diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c index 8110686c51..3025040640 100644 --- a/target/hexagon/idef-parser/parser-helpers.c +++ b/target/hexagon/idef-parser/parser-helpers.c @@ -307,26 +307,6 @@ HexValue gen_tmp(Context *c, return rvalue; } -HexValue gen_tmp_local(Context *c, - YYLTYPE *locp, - unsigned bit_width, - HexSignedness signedness) -{ - HexValue rvalue; - assert(bit_width == 32 || bit_width == 64); - memset(&rvalue, 0, sizeof(HexValue)); - rvalue.type = TEMP; - rvalue.bit_width = bit_width; - rvalue.signedness = signedness; - rvalue.is_dotnew = false; - rvalue.is_manual = false; - rvalue.tmp.index = c->inst.tmp_count; - OUT(c, locp, "TCGv_i", &bit_width, " tmp_", &c->inst.tmp_count, - " = tcg_temp_local_new_i", &bit_width, "();\n"); - c->inst.tmp_count++; - return rvalue; -} - HexValue gen_tmp_value(Context *c, YYLTYPE *locp, const char *value, @@ -554,7 +534,7 @@ void gen_varid_allocate(Context *c, new_var.signedness = signedness; EMIT_HEAD(c, "TCGv_%s %s", bit_suffix, varid->var.name->str); - EMIT_HEAD(c, " = tcg_temp_local_new_%s();\n", bit_suffix); + EMIT_HEAD(c, " = tcg_temp_new_%s();\n", bit_suffix); g_array_append_val(c->inst.allocated, new_var); } @@ -2161,8 +2141,8 @@ HexValue gen_rvalue_sat(Context *c, YYLTYPE *locp, HexSat *sat, assert_signedness(c, locp, sat->signedness); unsigned_str = (sat->signedness == UNSIGNED) ? 
"u" : ""; - res = gen_tmp_local(c, locp, value->bit_width, sat->signedness); - ovfl = gen_tmp_local(c, locp, 32, sat->signedness); + res = gen_tmp(c, locp, value->bit_width, sat->signedness); + ovfl = gen_tmp(c, locp, 32, sat->signedness); OUT(c, locp, "gen_sat", unsigned_str, "_", bit_suffix, "_ovfl("); OUT(c, locp, &ovfl, ", ", &res, ", ", value, ", ", &width->imm.value, ");\n"); diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c index 75f28e08ad..381fdaa3a8 100644 --- a/target/hexagon/translate.c +++ b/target/hexagon/translate.c @@ -539,7 +539,7 @@ void process_store(DisasContext *ctx, int slot_num) tcg_temp_free(cancelled); } { - TCGv address = tcg_temp_local_new(); + TCGv address = tcg_temp_new(); tcg_gen_mov_tl(address, hex_store_addr[slot_num]); /* @@ -962,7 +962,7 @@ static const TranslatorOps hexagon_tr_ops = { .disas_log = hexagon_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c index 55c190280e..11022f9c99 100644 --- a/target/hppa/cpu.c +++ b/target/hppa/cpu.c @@ -26,7 +26,7 @@ #include "qemu/module.h" #include "exec/exec-all.h" #include "fpu/softfloat.h" - +#include "tcg/tcg.h" static void hppa_cpu_set_pc(CPUState *cs, vaddr value) { @@ -48,8 +48,10 @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs, { HPPACPU *cpu = HPPA_CPU(cs); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + #ifdef CONFIG_USER_ONLY - cpu->env.iaoq_f = tb_pc(tb); + cpu->env.iaoq_f = tb->pc; cpu->env.iaoq_b = tb->cs_base; #else /* Recover the IAOQ values from the GVA + PRIV. */ @@ -59,7 +61,7 @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs, int32_t diff = cs_base; cpu->env.iasq_f = iasq_f; - cpu->env.iaoq_f = (tb_pc(tb) & ~iasq_f) + priv; + cpu->env.iaoq_f = (tb->pc & ~iasq_f) + priv; if (diff) { cpu->env.iaoq_b = cpu->env.iaoq_f + diff; } diff --git a/target/hppa/translate.c b/target/hppa/translate.c index 981f8ee03d..cee960949f 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -35,7 +35,6 @@ #undef TCGv #undef tcg_temp_new #undef tcg_global_mem_new -#undef tcg_temp_local_new #undef tcg_temp_free #if TARGET_LONG_BITS == 64 @@ -59,7 +58,6 @@ #define tcg_temp_new tcg_temp_new_i64 #define tcg_global_mem_new tcg_global_mem_new_i64 -#define tcg_temp_local_new tcg_temp_local_new_i64 #define tcg_temp_free tcg_temp_free_i64 #define tcg_gen_movi_reg tcg_gen_movi_i64 @@ -155,7 +153,6 @@ #define TCGv_reg TCGv_i32 #define tcg_temp_new tcg_temp_new_i32 #define tcg_global_mem_new tcg_global_mem_new_i32 -#define tcg_temp_local_new tcg_temp_local_new_i32 #define tcg_temp_free tcg_temp_free_i32 #define tcg_gen_movi_reg tcg_gen_movi_i32 @@ -4359,7 +4356,7 @@ static const TranslatorOps hppa_tr_ops = { .disas_log = hppa_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; diff --git a/target/i386/cpu-param.h b/target/i386/cpu-param.h index f579b16bd2..abad52af20 100644 --- a/target/i386/cpu-param.h +++ b/target/i386/cpu-param.h @@ -25,8 +25,4 @@ #define TARGET_PAGE_BITS 12 #define NB_MMU_MODES 5 -#ifndef CONFIG_USER_ONLY -# define TARGET_TB_PCREL 1 -#endif - #endif diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 4bad3d41d3..cab1e2a957 100644 --- a/target/i386/cpu.c 
+++ b/target/i386/cpu.c @@ -31,11 +31,11 @@ #include "qapi/error.h" #include "qapi/qapi-visit-machine.h" #include "qapi/qmp/qerror.h" -#include "qapi/qapi-commands-machine-target.h" #include "standard-headers/asm-x86/kvm_para.h" #include "hw/qdev-properties.h" #include "hw/i386/topology.h" #ifndef CONFIG_USER_ONLY +#include "qapi/qapi-commands-machine-target.h" #include "exec/address-spaces.h" #include "hw/boards.h" #include "hw/i386/sgx-epc.h" @@ -4843,40 +4843,6 @@ static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, visit_type_strList(v, "unavailable-features", &result, errp); } -/* Check for missing features that may prevent the CPU class from - * running using the current machine and accelerator. - */ -static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, - strList **list) -{ - strList **tail = list; - X86CPU *xc; - Error *err = NULL; - - if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { - QAPI_LIST_APPEND(tail, g_strdup("kvm")); - return; - } - - xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); - - x86_cpu_expand_features(xc, &err); - if (err) { - /* Errors at x86_cpu_expand_features should never happen, - * but in case it does, just report the model as not - * runnable at all using the "type" property. - */ - QAPI_LIST_APPEND(tail, g_strdup("type")); - error_free(err); - } - - x86_cpu_filter_features(xc, false); - - x86_cpu_list_feature_names(xc->filtered_features, tail); - - object_unref(OBJECT(xc)); -} - /* Print all cpuid feature names in featureset */ static void listflags(GList *features) @@ -5005,6 +4971,42 @@ void x86_cpu_list(void) g_list_free(names); } +#ifndef CONFIG_USER_ONLY + +/* Check for missing features that may prevent the CPU class from + * running using the current machine and accelerator. + */ +static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, + strList **list) +{ + strList **tail = list; + X86CPU *xc; + Error *err = NULL; + + if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { + QAPI_LIST_APPEND(tail, g_strdup("kvm")); + return; + } + + xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); + + x86_cpu_expand_features(xc, &err); + if (err) { + /* Errors at x86_cpu_expand_features should never happen, + * but in case it does, just report the model as not + * runnable at all using the "type" property. + */ + QAPI_LIST_APPEND(tail, g_strdup("type")); + error_free(err); + } + + x86_cpu_filter_features(xc, false); + + x86_cpu_list_feature_names(xc->filtered_features, tail); + + object_unref(OBJECT(xc)); +} + static void x86_cpu_definition_entry(gpointer data, gpointer user_data) { ObjectClass *oc = data; @@ -5045,6 +5047,8 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) return cpu_list; } +#endif /* !CONFIG_USER_ONLY */ + uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, bool migratable_only) { @@ -6534,6 +6538,11 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) static bool ht_warned; unsigned requested_lbr_fmt; + /* Use pc-relative instructions in system-mode */ +#ifndef CONFIG_USER_ONLY + cs->tcg_cflags |= CF_PCREL; +#endif + if (cpu->apic_id == UNASSIGNED_APIC_ID) { error_setg(errp, "apic-id property was not initialized properly"); return; @@ -7200,6 +7209,7 @@ static Property x86_cpu_properties[] = { * own cache information (see x86_cpu_load_def()). 
*/ DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), + DEFINE_PROP_BOOL("xen-vapic", X86CPU, xen_vapic, false), /* * From "Requirements for Implementing the Microsoft diff --git a/target/i386/cpu.h b/target/i386/cpu.h index ea650e68a3..d243e290d3 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -26,6 +26,9 @@ #include "exec/cpu-defs.h" #include "qapi/qapi-types-common.h" #include "qemu/cpu-float.h" +#include "qemu/timer.h" + +#define XEN_NR_VIRQS 24 /* The x86 has a strong memory model with some store-after-load re-ordering */ #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) @@ -1799,6 +1802,20 @@ typedef struct CPUArchState { #endif #if defined(CONFIG_KVM) struct kvm_nested_state *nested_state; + MemoryRegion *xen_vcpu_info_mr; + void *xen_vcpu_info_hva; + uint64_t xen_vcpu_info_gpa; + uint64_t xen_vcpu_info_default_gpa; + uint64_t xen_vcpu_time_info_gpa; + uint64_t xen_vcpu_runstate_gpa; + uint8_t xen_vcpu_callback_vector; + bool xen_callback_asserted; + uint16_t xen_virq[XEN_NR_VIRQS]; + uint64_t xen_singleshot_timer_ns; + QEMUTimer *xen_singleshot_timer; + uint64_t xen_periodic_timer_period; + QEMUTimer *xen_periodic_timer; + QemuMutex xen_timers_lock; #endif #if defined(CONFIG_HVF) HVFX86LazyFlags hvf_lflags; @@ -1975,6 +1992,8 @@ struct ArchCPU { int32_t thread_id; int32_t hv_max_vps; + + bool xen_vapic; }; diff --git a/target/i386/helper.c b/target/i386/helper.c index 0ac2da066d..8857444819 100644 --- a/target/i386/helper.c +++ b/target/i386/helper.c @@ -520,7 +520,7 @@ static inline target_ulong get_memio_eip(CPUX86State *env) } /* Per x86_restore_state_to_opc. */ - if (TARGET_TB_PCREL) { + if (cs->tcg_cflags & CF_PCREL) { return (env->eip & TARGET_PAGE_MASK) | data[0]; } else { return data[0] - env->segs[R_CS].base; diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index d18bd2f3e8..1aef54f87e 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -22,6 +22,7 @@ #include <linux/kvm.h> #include "standard-headers/asm-x86/kvm_para.h" +#include "hw/xen/interface/arch-x86/cpuid.h" #include "cpu.h" #include "host-cpu.h" @@ -31,6 +32,7 @@ #include "sysemu/runstate.h" #include "kvm_i386.h" #include "sev.h" +#include "xen-emu.h" #include "hyperv.h" #include "hyperv-proto.h" @@ -42,6 +44,8 @@ #include "qemu/error-report.h" #include "qemu/memalign.h" #include "hw/i386/x86.h" +#include "hw/i386/kvm/xen_evtchn.h" +#include "hw/i386/pc.h" #include "hw/i386/apic.h" #include "hw/i386/apic_internal.h" #include "hw/i386/apic-msidef.h" @@ -49,6 +53,8 @@ #include "hw/i386/x86-iommu.h" #include "hw/i386/e820_memory_layout.h" +#include "hw/xen/xen.h" + #include "hw/pci/pci.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" @@ -1815,7 +1821,82 @@ int kvm_arch_init_vcpu(CPUState *cs) has_msr_hv_hypercall = true; } - if (cpu->expose_kvm) { + if (cs->kvm_state->xen_version) { +#ifdef CONFIG_XEN_EMU + struct kvm_cpuid_entry2 *xen_max_leaf; + + memcpy(signature, "XenVMMXenVMM", 12); + + xen_max_leaf = c = &cpuid_data.entries[cpuid_i++]; + c->function = kvm_base + XEN_CPUID_SIGNATURE; + c->eax = kvm_base + XEN_CPUID_TIME; + c->ebx = signature[0]; + c->ecx = signature[1]; + c->edx = signature[2]; + + c = &cpuid_data.entries[cpuid_i++]; + c->function = kvm_base + XEN_CPUID_VENDOR; + c->eax = cs->kvm_state->xen_version; + c->ebx = 0; + c->ecx = 0; + c->edx = 0; + + c = &cpuid_data.entries[cpuid_i++]; + c->function = kvm_base + XEN_CPUID_HVM_MSR; + /* Number of hypercall-transfer pages */ + c->eax = 1; + /* Hypercall MSR base address */ + if (hyperv_enabled(cpu)) 
{ + c->ebx = XEN_HYPERCALL_MSR_HYPERV; + kvm_xen_init(cs->kvm_state, c->ebx); + } else { + c->ebx = XEN_HYPERCALL_MSR; + } + c->ecx = 0; + c->edx = 0; + + c = &cpuid_data.entries[cpuid_i++]; + c->function = kvm_base + XEN_CPUID_TIME; + c->eax = ((!!tsc_is_stable_and_known(env) << 1) | + (!!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP) << 2)); + /* default=0 (emulate if necessary) */ + c->ebx = 0; + /* guest tsc frequency */ + c->ecx = env->user_tsc_khz; + /* guest tsc incarnation (migration count) */ + c->edx = 0; + + c = &cpuid_data.entries[cpuid_i++]; + c->function = kvm_base + XEN_CPUID_HVM; + xen_max_leaf->eax = kvm_base + XEN_CPUID_HVM; + if (cs->kvm_state->xen_version >= XEN_VERSION(4, 5)) { + c->function = kvm_base + XEN_CPUID_HVM; + + if (cpu->xen_vapic) { + c->eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT; + c->eax |= XEN_HVM_CPUID_X2APIC_VIRT; + } + + c->eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS; + + if (cs->kvm_state->xen_version >= XEN_VERSION(4, 6)) { + c->eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT; + c->ebx = cs->cpu_index; + } + } + + r = kvm_xen_init_vcpu(cs); + if (r) { + return r; + } + + kvm_base += 0x100; +#else /* CONFIG_XEN_EMU */ + /* This should never happen as kvm_arch_init() would have died first. */ + fprintf(stderr, "Cannot enable Xen CPUID without Xen support\n"); + abort(); +#endif + } else if (cpu->expose_kvm) { memcpy(signature, "KVMKVMKVM\0\0\0", 12); c = &cpuid_data.entries[cpuid_i++]; c->function = KVM_CPUID_SIGNATURE | kvm_base; @@ -2529,6 +2610,24 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } + if (s->xen_version) { +#ifdef CONFIG_XEN_EMU + if (!object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE)) { + error_report("kvm: Xen support only available in PC machine"); + return -ENOTSUP; + } + /* hyperv_enabled() doesn't work yet. */ + uint32_t msr = XEN_HYPERCALL_MSR; + ret = kvm_xen_init(s, msr); + if (ret < 0) { + return ret; + } +#else + error_report("kvm: Xen support not enabled in qemu"); + return -ENOTSUP; +#endif + } + ret = kvm_get_supported_msrs(s); if (ret < 0) { return ret; @@ -4652,6 +4751,15 @@ int kvm_arch_put_registers(CPUState *cpu, int level) kvm_arch_set_tsc_khz(cpu); } +#ifdef CONFIG_XEN_EMU + if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) { + ret = kvm_put_xen_state(cpu); + if (ret < 0) { + return ret; + } + } +#endif + ret = kvm_getput_regs(x86_cpu, 1); if (ret < 0) { return ret; @@ -4751,6 +4859,14 @@ int kvm_arch_get_registers(CPUState *cs) if (ret < 0) { goto out; } +#ifdef CONFIG_XEN_EMU + if (xen_mode == XEN_EMULATE) { + ret = kvm_get_xen_state(cs); + if (ret < 0) { + goto out; + } + } +#endif ret = 0; out: cpu_sync_bndcs_hflags(&cpu->env); @@ -4875,6 +4991,17 @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) kvm_rate_limit_on_bus_lock(); } + /* + * If the callback is asserted as a GSI (or PCI INTx) then check if + * vcpu_info->evtchn_upcall_pending has been cleared, and deassert + * the callback IRQ if so. Ideally we could hook into the PIC/IOAPIC + * EOI and only resample then, exactly how the VFIO eventfd pairs + * are designed to work for level triggered interrupts. + */ + if (x86_cpu->env.xen_callback_asserted) { + kvm_xen_maybe_deassert_callback(cpu); + } + /* We need to protect the apic state against concurrent accesses from * different threads in case the userspace irqchip is used. 
*/ if (!kvm_irqchip_in_kernel()) { @@ -5395,6 +5522,11 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER); ret = kvm_handle_wrmsr(cpu, run); break; +#ifdef CONFIG_XEN_EMU + case KVM_EXIT_XEN: + ret = kvm_xen_handle_exit(cpu, &run->xen); + break; +#endif default: fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); ret = -1; @@ -5523,6 +5655,20 @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, } } +#ifdef CONFIG_XEN_EMU + if (xen_mode == XEN_EMULATE) { + int handled = xen_evtchn_translate_pirq_msi(route, address, data); + + /* + * If it was a PIRQ and successfully routed (handled == 0) or it was + * an error (handled < 0), return. If it wasn't a PIRQ, keep going. + */ + if (handled <= 0) { + return handled; + } + } +#endif + address = kvm_swizzle_msi_ext_dest_id(address); route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT; route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK; @@ -5542,8 +5688,8 @@ struct MSIRouteEntry { static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \ QLIST_HEAD_INITIALIZER(msi_route_list); -static void kvm_update_msi_routes_all(void *private, bool global, - uint32_t index, uint32_t mask) +void kvm_update_msi_routes_all(void *private, bool global, + uint32_t index, uint32_t mask) { int cnt = 0, vector; MSIRouteEntry *entry; @@ -5719,6 +5865,90 @@ static void kvm_arch_set_notify_window(Object *obj, Visitor *v, s->notify_window = value; } +static void kvm_arch_get_xen_version(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + uint32_t value = s->xen_version; + + visit_type_uint32(v, name, &value, errp); +} + +static void kvm_arch_set_xen_version(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + Error *error = NULL; + uint32_t value; + + visit_type_uint32(v, name, &value, &error); + if (error) { + error_propagate(errp, error); + return; + } + + s->xen_version = value; + if (value && xen_mode == XEN_DISABLED) { + xen_mode = XEN_EMULATE; + } +} + +static void kvm_arch_get_xen_gnttab_max_frames(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + uint16_t value = s->xen_gnttab_max_frames; + + visit_type_uint16(v, name, &value, errp); +} + +static void kvm_arch_set_xen_gnttab_max_frames(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + Error *error = NULL; + uint16_t value; + + visit_type_uint16(v, name, &value, &error); + if (error) { + error_propagate(errp, error); + return; + } + + s->xen_gnttab_max_frames = value; +} + +static void kvm_arch_get_xen_evtchn_max_pirq(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + uint16_t value = s->xen_evtchn_max_pirq; + + visit_type_uint16(v, name, &value, errp); +} + +static void kvm_arch_set_xen_evtchn_max_pirq(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + Error *error = NULL; + uint16_t value; + + visit_type_uint16(v, name, &value, &error); + if (error) { + error_propagate(errp, error); + return; + } + + s->xen_evtchn_max_pirq = value; +} + void kvm_arch_accel_class_init(ObjectClass *oc) { object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption", @@ -5735,6 +5965,29 @@ void kvm_arch_accel_class_init(ObjectClass *oc) 
object_class_property_set_description(oc, "notify-window", "Clock cycles without an event window " "after which a notification VM exit occurs"); + + object_class_property_add(oc, "xen-version", "uint32", + kvm_arch_get_xen_version, + kvm_arch_set_xen_version, + NULL, NULL); + object_class_property_set_description(oc, "xen-version", + "Xen version to be emulated " + "(in XENVER_version form " + "e.g. 0x4000a for 4.10)"); + + object_class_property_add(oc, "xen-gnttab-max-frames", "uint16", + kvm_arch_get_xen_gnttab_max_frames, + kvm_arch_set_xen_gnttab_max_frames, + NULL, NULL); + object_class_property_set_description(oc, "xen-gnttab-max-frames", + "Maximum number of grant table frames"); + + object_class_property_add(oc, "xen-evtchn-max-pirq", "uint16", + kvm_arch_get_xen_evtchn_max_pirq, + kvm_arch_set_xen_evtchn_max_pirq, + NULL, NULL); + object_class_property_set_description(oc, "xen-evtchn-max-pirq", + "Maximum number of Xen PIRQs"); } void kvm_set_max_apic_id(uint32_t max_apic_id) diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h index 6a5c24e3dc..e24753abfe 100644 --- a/target/i386/kvm/kvm_i386.h +++ b/target/i386/kvm/kvm_i386.h @@ -51,6 +51,8 @@ bool kvm_hv_vpindex_settable(void); bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp); uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address); +void kvm_update_msi_routes_all(void *private, bool global, + uint32_t index, uint32_t mask); bool kvm_enable_sgx_provisioning(KVMState *s); void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask); diff --git a/target/i386/kvm/meson.build b/target/i386/kvm/meson.build index 736df8b72e..322272091b 100644 --- a/target/i386/kvm/meson.build +++ b/target/i386/kvm/meson.build @@ -7,6 +7,8 @@ i386_softmmu_kvm_ss.add(files( 'kvm-cpu.c', )) +i386_softmmu_kvm_ss.add(when: 'CONFIG_XEN_EMU', if_true: files('xen-emu.c')) + i386_softmmu_kvm_ss.add(when: 'CONFIG_SEV', if_false: files('sev-stub.c')) i386_softmmu_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'), if_false: files('hyperv-stub.c')) diff --git a/target/i386/kvm/trace-events b/target/i386/kvm/trace-events index 7c369db1e1..b365a8e8e2 100644 --- a/target/i386/kvm/trace-events +++ b/target/i386/kvm/trace-events @@ -5,3 +5,10 @@ kvm_x86_fixup_msi_error(uint32_t gsi) "VT-d failed to remap interrupt for GSI %" kvm_x86_add_msi_route(int virq) "Adding route entry for virq %d" kvm_x86_remove_msi_route(int virq) "Removing route entry for virq %d" kvm_x86_update_msi_routes(int num) "Updated %d MSI routes" + +# xen-emu.c +kvm_xen_hypercall(int cpu, uint8_t cpl, uint64_t input, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t ret) "xen_hypercall: cpu %d cpl %d input %" PRIu64 " a0 0x%" PRIx64 " a1 0x%" PRIx64 " a2 0x%" PRIx64" ret 0x%" PRIx64 +kvm_xen_soft_reset(void) "" +kvm_xen_set_shared_info(uint64_t gfn) "shared info at gfn 0x%" PRIx64 +kvm_xen_set_vcpu_attr(int cpu, int type, uint64_t gpa) "vcpu attr cpu %d type %d gpa 0x%" PRIx64 +kvm_xen_set_vcpu_callback(int cpu, int vector) "callback vcpu %d vector %d" diff --git a/target/i386/kvm/xen-compat.h b/target/i386/kvm/xen-compat.h new file mode 100644 index 0000000000..7f30180cc2 --- /dev/null +++ b/target/i386/kvm/xen-compat.h @@ -0,0 +1,70 @@ +/* + * Xen HVM emulation support in KVM + * + * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#ifndef QEMU_I386_KVM_XEN_COMPAT_H +#define QEMU_I386_KVM_XEN_COMPAT_H + +#include "hw/xen/interface/memory.h" + +typedef uint32_t compat_pfn_t; +typedef uint32_t compat_ulong_t; +typedef uint32_t compat_ptr_t; + +#define __DEFINE_COMPAT_HANDLE(name, type) \ + typedef struct { \ + compat_ptr_t c; \ + type *_[0] __attribute__((packed)); \ + } __compat_handle_ ## name; \ + +#define DEFINE_COMPAT_HANDLE(name) __DEFINE_COMPAT_HANDLE(name, name) +#define COMPAT_HANDLE(name) __compat_handle_ ## name + +DEFINE_COMPAT_HANDLE(compat_pfn_t); +DEFINE_COMPAT_HANDLE(compat_ulong_t); +DEFINE_COMPAT_HANDLE(int); + +struct compat_xen_add_to_physmap { + domid_t domid; + uint16_t size; + unsigned int space; + compat_ulong_t idx; + compat_pfn_t gpfn; +}; + +struct compat_xen_add_to_physmap_batch { + domid_t domid; + uint16_t space; + uint16_t size; + uint16_t extra; + COMPAT_HANDLE(compat_ulong_t) idxs; + COMPAT_HANDLE(compat_pfn_t) gpfns; + COMPAT_HANDLE(int) errs; +}; + +struct compat_physdev_map_pirq { + domid_t domid; + uint16_t pad; + /* IN */ + int type; + /* IN (ignored for ..._MULTI_MSI) */ + int index; + /* IN or OUT */ + int pirq; + /* IN - high 16 bits hold segment for ..._MSI_SEG and ..._MULTI_MSI */ + int bus; + /* IN */ + int devfn; + /* IN (also OUT for ..._MULTI_MSI) */ + int entry_nr; + /* IN */ + uint64_t table_base; +} __attribute__((packed)); + +#endif /* QEMU_I386_XEN_COMPAT_H */ diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c new file mode 100644 index 0000000000..bad3131d08 --- /dev/null +++ b/target/i386/kvm/xen-emu.c @@ -0,0 +1,1897 @@ +/* + * Xen HVM emulation support in KVM + * + * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. + * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/main-loop.h" +#include "hw/xen/xen.h" +#include "sysemu/kvm_int.h" +#include "sysemu/kvm_xen.h" +#include "kvm/kvm_i386.h" +#include "exec/address-spaces.h" +#include "xen-emu.h" +#include "trace.h" +#include "sysemu/runstate.h" + +#include "hw/pci/msi.h" +#include "hw/i386/apic-msidef.h" +#include "hw/i386/e820_memory_layout.h" +#include "hw/i386/kvm/xen_overlay.h" +#include "hw/i386/kvm/xen_evtchn.h" +#include "hw/i386/kvm/xen_gnttab.h" +#include "hw/i386/kvm/xen_xenstore.h" + +#include "hw/xen/interface/version.h" +#include "hw/xen/interface/sched.h" +#include "hw/xen/interface/memory.h" +#include "hw/xen/interface/hvm/hvm_op.h" +#include "hw/xen/interface/hvm/params.h" +#include "hw/xen/interface/vcpu.h" +#include "hw/xen/interface/event_channel.h" +#include "hw/xen/interface/grant_table.h" + +#include "xen-compat.h" + +static void xen_vcpu_singleshot_timer_event(void *opaque); +static void xen_vcpu_periodic_timer_event(void *opaque); + +#ifdef TARGET_X86_64 +#define hypercall_compat32(longmode) (!(longmode)) +#else +#define hypercall_compat32(longmode) (false) +#endif + +static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa, + size_t *len, bool is_write) +{ + struct kvm_translation tr = { + .linear_address = gva, + }; + + if (len) { + *len = TARGET_PAGE_SIZE - (gva & ~TARGET_PAGE_MASK); + } + + if (kvm_vcpu_ioctl(cs, KVM_TRANSLATE, &tr) || !tr.valid || + (is_write && !tr.writeable)) { + return false; + } + *gpa = tr.physical_address; + return true; +} + +static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz, + bool is_write) +{ + uint8_t *buf = (uint8_t *)_buf; + uint64_t gpa; + size_t len; + + while (sz) { + if (!kvm_gva_to_gpa(cs, gva, &gpa, &len, is_write)) { + return -EFAULT; + } + if (len > sz) { + len = sz; + } + + cpu_physical_memory_rw(gpa, buf, len, is_write); + + buf += len; + sz -= len; + gva += len; + } + + return 0; +} + +static inline int kvm_copy_from_gva(CPUState *cs, uint64_t gva, void *buf, + size_t sz) +{ + return kvm_gva_rw(cs, gva, buf, sz, false); +} + +static inline int kvm_copy_to_gva(CPUState *cs, uint64_t gva, void *buf, + size_t sz) +{ + return kvm_gva_rw(cs, gva, buf, sz, true); +} + +int kvm_xen_init(KVMState *s, uint32_t hypercall_msr) +{ + const int required_caps = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | + KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | KVM_XEN_HVM_CONFIG_SHARED_INFO; + struct kvm_xen_hvm_config cfg = { + .msr = hypercall_msr, + .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL, + }; + int xen_caps, ret; + + xen_caps = kvm_check_extension(s, KVM_CAP_XEN_HVM); + if (required_caps & ~xen_caps) { + error_report("kvm: Xen HVM guest support not present or insufficient"); + return -ENOSYS; + } + + if (xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND) { + struct kvm_xen_hvm_attr ha = { + .type = KVM_XEN_ATTR_TYPE_XEN_VERSION, + .u.xen_version = s->xen_version, + }; + (void)kvm_vm_ioctl(s, KVM_XEN_HVM_SET_ATTR, &ha); + + cfg.flags |= KVM_XEN_HVM_CONFIG_EVTCHN_SEND; + } + + ret = kvm_vm_ioctl(s, KVM_XEN_HVM_CONFIG, &cfg); + if (ret < 0) { + error_report("kvm: Failed to enable Xen HVM support: %s", + strerror(-ret)); + return ret; + } + + /* If called a second time, don't repeat the rest of the setup. */ + if (s->xen_caps) { + return 0; + } + + /* + * Event channel delivery via GSI/PCI_INTX needs to poll the vcpu_info + * of vCPU0 to deassert the IRQ when ->evtchn_upcall_pending is cleared. 
+ * + * In the kernel, there's a notifier hook on the PIC/IOAPIC which allows + * such things to be polled at precisely the right time. We *could* do + * it nicely in the kernel: check vcpu_info[0]->evtchn_upcall_pending at + * the moment the IRQ is acked, and see if it should be reasserted. + * + * But the in-kernel irqchip is deprecated, so we're unlikely to add + * that support in the kernel. Insist on using the split irqchip mode + * instead. + * + * This leaves us polling for the level going low in QEMU, which lacks + * the appropriate hooks in its PIC/IOAPIC code. Even VFIO is sending a + * spurious 'ack' to an INTX IRQ every time there's any MMIO access to + * the device (for which it has to unmap the device and trap access, for + * some period after an IRQ!!). In the Xen case, we do it on exit from + * KVM_RUN, if the flag is set to say that the GSI is currently asserted. + * Which is kind of icky, but less so than the VFIO one. I may fix them + * both later... + */ + if (!kvm_kernel_irqchip_split()) { + error_report("kvm: Xen support requires kernel-irqchip=split"); + return -EINVAL; + } + + s->xen_caps = xen_caps; + + /* Tell fw_cfg to notify the BIOS to reserve the range. */ + ret = e820_add_entry(XEN_SPECIAL_AREA_ADDR, XEN_SPECIAL_AREA_SIZE, + E820_RESERVED); + if (ret < 0) { + fprintf(stderr, "e820_add_entry() table is full\n"); + return ret; + } + + /* The page couldn't be overlaid until KVM was initialized */ + xen_xenstore_reset(); + + return 0; +} + +int kvm_xen_init_vcpu(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + int err; + + /* + * The kernel needs to know the Xen/ACPI vCPU ID because that's + * what the guest uses in hypercalls such as timers. It doesn't + * match the APIC ID which is generally used for talking to the + * kernel about vCPUs. And if vCPU threads race with creating + * their KVM vCPUs out of order, it doesn't necessarily match + * with the kernel's internal vCPU indices either. 
+ */ + if (kvm_xen_has_cap(EVTCHN_SEND)) { + struct kvm_xen_vcpu_attr va = { + .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID, + .u.vcpu_id = cs->cpu_index, + }; + err = kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &va); + if (err) { + error_report("kvm: Failed to set Xen vCPU ID attribute: %s", + strerror(-err)); + return err; + } + } + + env->xen_vcpu_info_gpa = INVALID_GPA; + env->xen_vcpu_info_default_gpa = INVALID_GPA; + env->xen_vcpu_time_info_gpa = INVALID_GPA; + env->xen_vcpu_runstate_gpa = INVALID_GPA; + + qemu_mutex_init(&env->xen_timers_lock); + env->xen_singleshot_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + xen_vcpu_singleshot_timer_event, + cpu); + if (!env->xen_singleshot_timer) { + return -ENOMEM; + } + env->xen_singleshot_timer->opaque = cs; + + env->xen_periodic_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + xen_vcpu_periodic_timer_event, + cpu); + if (!env->xen_periodic_timer) { + return -ENOMEM; + } + env->xen_periodic_timer->opaque = cs; + + return 0; +} + +uint32_t kvm_xen_get_caps(void) +{ + return kvm_state->xen_caps; +} + +static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu, + int cmd, uint64_t arg) +{ + int err = 0; + + switch (cmd) { + case XENVER_get_features: { + struct xen_feature_info fi; + + /* No need for 32/64 compat handling */ + qemu_build_assert(sizeof(fi) == 8); + + err = kvm_copy_from_gva(CPU(cpu), arg, &fi, sizeof(fi)); + if (err) { + break; + } + + fi.submap = 0; + if (fi.submap_idx == 0) { + fi.submap |= 1 << XENFEAT_writable_page_tables | + 1 << XENFEAT_writable_descriptor_tables | + 1 << XENFEAT_auto_translated_physmap | + 1 << XENFEAT_supervisor_mode_kernel | + 1 << XENFEAT_hvm_callback_vector | + 1 << XENFEAT_hvm_safe_pvclock | + 1 << XENFEAT_hvm_pirqs; + } + + err = kvm_copy_to_gva(CPU(cpu), arg, &fi, sizeof(fi)); + break; + } + + default: + return false; + } + + exit->u.hcall.result = err; + return true; +} + +static int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa) +{ + struct kvm_xen_vcpu_attr xhsi; + + xhsi.type = type; + xhsi.u.gpa = gpa; + + trace_kvm_xen_set_vcpu_attr(cs->cpu_index, type, gpa); + + return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi); +} + +static int kvm_xen_set_vcpu_callback_vector(CPUState *cs) +{ + uint8_t vector = X86_CPU(cs)->env.xen_vcpu_callback_vector; + struct kvm_xen_vcpu_attr xva; + + xva.type = KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR; + xva.u.vector = vector; + + trace_kvm_xen_set_vcpu_callback(cs->cpu_index, vector); + + return kvm_vcpu_ioctl(cs, KVM_XEN_HVM_SET_ATTR, &xva); +} + +static void do_set_vcpu_callback_vector(CPUState *cs, run_on_cpu_data data) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + env->xen_vcpu_callback_vector = data.host_int; + + if (kvm_xen_has_cap(EVTCHN_SEND)) { + kvm_xen_set_vcpu_callback_vector(cs); + } +} + +static int set_vcpu_info(CPUState *cs, uint64_t gpa) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + MemoryRegionSection mrs = { .mr = NULL }; + void *vcpu_info_hva = NULL; + int ret; + + ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa); + if (ret || gpa == INVALID_GPA) { + goto out; + } + + mrs = memory_region_find(get_system_memory(), gpa, + sizeof(struct vcpu_info)); + if (mrs.mr && mrs.mr->ram_block && + !int128_lt(mrs.size, int128_make64(sizeof(struct vcpu_info)))) { + vcpu_info_hva = qemu_map_ram_ptr(mrs.mr->ram_block, + mrs.offset_within_region); + } + if (!vcpu_info_hva) { + if (mrs.mr) { + memory_region_unref(mrs.mr); + mrs.mr = NULL; + } + ret = -EINVAL; + } + + out: + if 
(env->xen_vcpu_info_mr) { + memory_region_unref(env->xen_vcpu_info_mr); + } + env->xen_vcpu_info_hva = vcpu_info_hva; + env->xen_vcpu_info_mr = mrs.mr; + return ret; +} + +static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + env->xen_vcpu_info_default_gpa = data.host_ulong; + + /* Changing the default does nothing if a vcpu_info was explicitly set. */ + if (env->xen_vcpu_info_gpa == INVALID_GPA) { + set_vcpu_info(cs, env->xen_vcpu_info_default_gpa); + } +} + +static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + env->xen_vcpu_info_gpa = data.host_ulong; + + set_vcpu_info(cs, env->xen_vcpu_info_gpa); +} + +void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id) +{ + CPUState *cs = qemu_get_cpu(vcpu_id); + if (!cs) { + return NULL; + } + + return X86_CPU(cs)->env.xen_vcpu_info_hva; +} + +void kvm_xen_maybe_deassert_callback(CPUState *cs) +{ + CPUX86State *env = &X86_CPU(cs)->env; + struct vcpu_info *vi = env->xen_vcpu_info_hva; + if (!vi) { + return; + } + + /* If the evtchn_upcall_pending flag is cleared, turn the GSI off. */ + if (!vi->evtchn_upcall_pending) { + qemu_mutex_lock_iothread(); + /* + * Check again now we have the lock, because it may have been + * asserted in the interim. And we don't want to take the lock + * every time because this is a fast path. + */ + if (!vi->evtchn_upcall_pending) { + X86_CPU(cs)->env.xen_callback_asserted = false; + xen_evtchn_set_callback_level(0); + } + qemu_mutex_unlock_iothread(); + } +} + +void kvm_xen_set_callback_asserted(void) +{ + CPUState *cs = qemu_get_cpu(0); + + if (cs) { + X86_CPU(cs)->env.xen_callback_asserted = true; + } +} + +void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type) +{ + CPUState *cs = qemu_get_cpu(vcpu_id); + uint8_t vector; + + if (!cs) { + return; + } + + vector = X86_CPU(cs)->env.xen_vcpu_callback_vector; + if (vector) { + /* + * The per-vCPU callback vector injected via lapic. Just + * deliver it as an MSI. + */ + MSIMessage msg = { + .address = APIC_DEFAULT_ADDRESS | X86_CPU(cs)->apic_id, + .data = vector | (1UL << MSI_DATA_LEVEL_SHIFT), + }; + kvm_irqchip_send_msi(kvm_state, msg); + return; + } + + switch (type) { + case HVM_PARAM_CALLBACK_TYPE_VECTOR: + /* + * If the evtchn_upcall_pending field in the vcpu_info is set, then + * KVM will automatically deliver the vector on entering the vCPU + * so all we have to do is kick it out. + */ + qemu_cpu_kick(cs); + break; + + case HVM_PARAM_CALLBACK_TYPE_GSI: + case HVM_PARAM_CALLBACK_TYPE_PCI_INTX: + if (vcpu_id == 0) { + xen_evtchn_set_callback_level(1); + } + break; + } +} + +static int kvm_xen_set_vcpu_timer(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + struct kvm_xen_vcpu_attr va = { + .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER, + .u.timer.port = env->xen_virq[VIRQ_TIMER], + .u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL, + .u.timer.expires_ns = env->xen_singleshot_timer_ns, + }; + + return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &va); +} + +static void do_set_vcpu_timer_virq(CPUState *cs, run_on_cpu_data data) +{ + kvm_xen_set_vcpu_timer(cs); +} + +int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port) +{ + CPUState *cs = qemu_get_cpu(vcpu_id); + + if (!cs) { + return -ENOENT; + } + + /* cpu.h doesn't include the actual Xen header. 
*/
+    qemu_build_assert(NR_VIRQS == XEN_NR_VIRQS);
+
+    if (virq >= NR_VIRQS) {
+        return -EINVAL;
+    }
+
+    if (port && X86_CPU(cs)->env.xen_virq[virq]) {
+        return -EEXIST;
+    }
+
+    X86_CPU(cs)->env.xen_virq[virq] = port;
+    if (virq == VIRQ_TIMER && kvm_xen_has_cap(EVTCHN_SEND)) {
+        async_run_on_cpu(cs, do_set_vcpu_timer_virq,
+                         RUN_ON_CPU_HOST_INT(port));
+    }
+    return 0;
+}
+
+static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_time_info_gpa = data.host_ulong;
+
+    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
+                          env->xen_vcpu_time_info_gpa);
+}
+
+static void do_set_vcpu_runstate_gpa(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_runstate_gpa = data.host_ulong;
+
+    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
+                          env->xen_vcpu_runstate_gpa);
+}
+
+static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_info_gpa = INVALID_GPA;
+    env->xen_vcpu_info_default_gpa = INVALID_GPA;
+    env->xen_vcpu_time_info_gpa = INVALID_GPA;
+    env->xen_vcpu_runstate_gpa = INVALID_GPA;
+    env->xen_vcpu_callback_vector = 0;
+    env->xen_singleshot_timer_ns = 0;
+    memset(env->xen_virq, 0, sizeof(env->xen_virq));
+
+    set_vcpu_info(cs, INVALID_GPA);
+    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
+                          INVALID_GPA);
+    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
+                          INVALID_GPA);
+    if (kvm_xen_has_cap(EVTCHN_SEND)) {
+        kvm_xen_set_vcpu_callback_vector(cs);
+        kvm_xen_set_vcpu_timer(cs);
+    }
+}
+
+static int xen_set_shared_info(uint64_t gfn)
+{
+    uint64_t gpa = gfn << TARGET_PAGE_BITS;
+    int i, err;
+
+    QEMU_IOTHREAD_LOCK_GUARD();
+
+    /*
+     * The xen_overlay device tells KVM about it too, since it had to
+     * do that on migration load anyway (unless we're going to jump
+     * through lots of hoops to maintain the fiction that this isn't
+     * KVM-specific).
+ */ + err = xen_overlay_map_shinfo_page(gpa); + if (err) { + return err; + } + + trace_kvm_xen_set_shared_info(gfn); + + for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) { + CPUState *cpu = qemu_get_cpu(i); + if (cpu) { + async_run_on_cpu(cpu, do_set_vcpu_info_default_gpa, + RUN_ON_CPU_HOST_ULONG(gpa)); + } + gpa += sizeof(vcpu_info_t); + } + + return err; +} + +static int add_to_physmap_one(uint32_t space, uint64_t idx, uint64_t gfn) +{ + switch (space) { + case XENMAPSPACE_shared_info: + if (idx > 0) { + return -EINVAL; + } + return xen_set_shared_info(gfn); + + case XENMAPSPACE_grant_table: + return xen_gnttab_map_page(idx, gfn); + + case XENMAPSPACE_gmfn: + case XENMAPSPACE_gmfn_range: + return -ENOTSUP; + + case XENMAPSPACE_gmfn_foreign: + case XENMAPSPACE_dev_mmio: + return -EPERM; + + default: + return -EINVAL; + } +} + +static int do_add_to_physmap(struct kvm_xen_exit *exit, X86CPU *cpu, + uint64_t arg) +{ + struct xen_add_to_physmap xatp; + CPUState *cs = CPU(cpu); + + if (hypercall_compat32(exit->u.hcall.longmode)) { + struct compat_xen_add_to_physmap xatp32; + + qemu_build_assert(sizeof(struct compat_xen_add_to_physmap) == 16); + if (kvm_copy_from_gva(cs, arg, &xatp32, sizeof(xatp32))) { + return -EFAULT; + } + xatp.domid = xatp32.domid; + xatp.size = xatp32.size; + xatp.space = xatp32.space; + xatp.idx = xatp32.idx; + xatp.gpfn = xatp32.gpfn; + } else { + if (kvm_copy_from_gva(cs, arg, &xatp, sizeof(xatp))) { + return -EFAULT; + } + } + + if (xatp.domid != DOMID_SELF && xatp.domid != xen_domid) { + return -ESRCH; + } + + return add_to_physmap_one(xatp.space, xatp.idx, xatp.gpfn); +} + +static int do_add_to_physmap_batch(struct kvm_xen_exit *exit, X86CPU *cpu, + uint64_t arg) +{ + struct xen_add_to_physmap_batch xatpb; + unsigned long idxs_gva, gpfns_gva, errs_gva; + CPUState *cs = CPU(cpu); + size_t op_sz; + + if (hypercall_compat32(exit->u.hcall.longmode)) { + struct compat_xen_add_to_physmap_batch xatpb32; + + qemu_build_assert(sizeof(struct compat_xen_add_to_physmap_batch) == 20); + if (kvm_copy_from_gva(cs, arg, &xatpb32, sizeof(xatpb32))) { + return -EFAULT; + } + xatpb.domid = xatpb32.domid; + xatpb.space = xatpb32.space; + xatpb.size = xatpb32.size; + + idxs_gva = xatpb32.idxs.c; + gpfns_gva = xatpb32.gpfns.c; + errs_gva = xatpb32.errs.c; + op_sz = sizeof(uint32_t); + } else { + if (kvm_copy_from_gva(cs, arg, &xatpb, sizeof(xatpb))) { + return -EFAULT; + } + op_sz = sizeof(unsigned long); + idxs_gva = (unsigned long)xatpb.idxs.p; + gpfns_gva = (unsigned long)xatpb.gpfns.p; + errs_gva = (unsigned long)xatpb.errs.p; + } + + if (xatpb.domid != DOMID_SELF && xatpb.domid != xen_domid) { + return -ESRCH; + } + + /* Explicitly invalid for the batch op. Not that we implement it anyway. 
*/
+    if (xatpb.space == XENMAPSPACE_gmfn_range) {
+        return -EINVAL;
+    }
+
+    while (xatpb.size--) {
+        unsigned long idx = 0;
+        unsigned long gpfn = 0;
+        int err;
+
+        /* For 32-bit compat this only copies the low 32 bits of each */
+        if (kvm_copy_from_gva(cs, idxs_gva, &idx, op_sz) ||
+            kvm_copy_from_gva(cs, gpfns_gva, &gpfn, op_sz)) {
+            return -EFAULT;
+        }
+        idxs_gva += op_sz;
+        gpfns_gva += op_sz;
+
+        err = add_to_physmap_one(xatpb.space, idx, gpfn);
+
+        if (kvm_copy_to_gva(cs, errs_gva, &err, sizeof(err))) {
+            return -EFAULT;
+        }
+        errs_gva += sizeof(err);
+    }
+    return 0;
+}
+
+static bool kvm_xen_hcall_memory_op(struct kvm_xen_exit *exit, X86CPU *cpu,
+                                    int cmd, uint64_t arg)
+{
+    int err;
+
+    switch (cmd) {
+    case XENMEM_add_to_physmap:
+        err = do_add_to_physmap(exit, cpu, arg);
+        break;
+
+    case XENMEM_add_to_physmap_batch:
+        err = do_add_to_physmap_batch(exit, cpu, arg);
+        break;
+
+    default:
+        return false;
+    }
+
+    exit->u.hcall.result = err;
+    return true;
+}
+
+static bool handle_set_param(struct kvm_xen_exit *exit, X86CPU *cpu,
+                             uint64_t arg)
+{
+    CPUState *cs = CPU(cpu);
+    struct xen_hvm_param hp;
+    int err = 0;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(hp) == 16);
+
+    if (kvm_copy_from_gva(cs, arg, &hp, sizeof(hp))) {
+        err = -EFAULT;
+        goto out;
+    }
+
+    if (hp.domid != DOMID_SELF && hp.domid != xen_domid) {
+        err = -ESRCH;
+        goto out;
+    }
+
+    switch (hp.index) {
+    case HVM_PARAM_CALLBACK_IRQ:
+        qemu_mutex_lock_iothread();
+        err = xen_evtchn_set_callback_param(hp.value);
+        qemu_mutex_unlock_iothread();
+        xen_set_long_mode(exit->u.hcall.longmode);
+        break;
+    default:
+        return false;
+    }
+
+out:
+    exit->u.hcall.result = err;
+    return true;
+}
+
+static bool handle_get_param(struct kvm_xen_exit *exit, X86CPU *cpu,
+                             uint64_t arg)
+{
+    CPUState *cs = CPU(cpu);
+    struct xen_hvm_param hp;
+    int err = 0;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(hp) == 16);
+
+    if (kvm_copy_from_gva(cs, arg, &hp, sizeof(hp))) {
+        err = -EFAULT;
+        goto out;
+    }
+
+    if (hp.domid != DOMID_SELF && hp.domid != xen_domid) {
+        err = -ESRCH;
+        goto out;
+    }
+
+    switch (hp.index) {
+    case HVM_PARAM_STORE_PFN:
+        hp.value = XEN_SPECIAL_PFN(XENSTORE);
+        break;
+    case HVM_PARAM_STORE_EVTCHN:
+        hp.value = xen_xenstore_get_port();
+        break;
+    default:
+        return false;
+    }
+
+    if (kvm_copy_to_gva(cs, arg, &hp, sizeof(hp))) {
+        err = -EFAULT;
+    }
+out:
+    exit->u.hcall.result = err;
+    return true;
+}
+
+static int kvm_xen_hcall_evtchn_upcall_vector(struct kvm_xen_exit *exit,
+                                              X86CPU *cpu, uint64_t arg)
+{
+    struct xen_hvm_evtchn_upcall_vector up;
+    CPUState *target_cs;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(up) == 8);
+
+    if (kvm_copy_from_gva(CPU(cpu), arg, &up, sizeof(up))) {
+        return -EFAULT;
+    }
+
+    if (up.vector < 0x10) {
+        return -EINVAL;
+    }
+
+    target_cs = qemu_get_cpu(up.vcpu);
+    if (!target_cs) {
+        return -EINVAL;
+    }
+
+    async_run_on_cpu(target_cs, do_set_vcpu_callback_vector,
+                     RUN_ON_CPU_HOST_INT(up.vector));
+    return 0;
+}
+
+static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit, X86CPU *cpu,
+                                 int cmd, uint64_t arg)
+{
+    int ret = -ENOSYS;
+    switch (cmd) {
+    case HVMOP_set_evtchn_upcall_vector:
+        ret = kvm_xen_hcall_evtchn_upcall_vector(exit, cpu,
+                                                 exit->u.hcall.params[0]);
+        break;
+
+    case HVMOP_pagetable_dying:
+        ret = -ENOSYS;
+        break;
+
+    case HVMOP_set_param:
+        return handle_set_param(exit, cpu, arg);
+
+    case HVMOP_get_param:
+        return handle_get_param(exit, cpu, arg);
+
+    default:
+        return false;
+    }
+
+    exit->u.hcall.result = ret;
+    return true;
+}
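VCPUOP_register_vcpu_info, defined just below, turns the guest-supplied { mfn, offset } pair into a guest-physical address after first checking that the 64-byte vcpu_info cannot straddle a page boundary. A minimal standalone sketch of that arithmetic, using made-up example values (PAGE_BITS stands in for x86's TARGET_PAGE_BITS):

#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS 12                      /* x86 TARGET_PAGE_BITS */
#define PAGE_SIZE (1ULL << PAGE_BITS)
#define VCPU_INFO_SIZE 64                 /* sizeof(struct vcpu_info) */

int main(void)
{
    uint64_t mfn = 0x1234, offset = 192;  /* hypothetical guest values */

    /* Reject an offset that would let the struct cross a page boundary */
    if (offset > PAGE_SIZE - VCPU_INFO_SIZE) {
        fprintf(stderr, "rejected: -EINVAL\n");
        return 1;
    }

    uint64_t gpa = (mfn << PAGE_BITS) + offset;
    printf("vcpu_info gpa = 0x%llx\n", (unsigned long long)gpa);
    return 0;
}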
+
+static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
+                                     uint64_t arg)
+{
+    struct vcpu_register_vcpu_info rvi;
+    uint64_t gpa;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(rvi) == 16);
+    qemu_build_assert(sizeof(struct vcpu_info) == 64);
+
+    if (!target) {
+        return -ENOENT;
+    }
+
+    if (kvm_copy_from_gva(cs, arg, &rvi, sizeof(rvi))) {
+        return -EFAULT;
+    }
+
+    if (rvi.offset > TARGET_PAGE_SIZE - sizeof(struct vcpu_info)) {
+        return -EINVAL;
+    }
+
+    gpa = ((rvi.mfn << TARGET_PAGE_BITS) + rvi.offset);
+    async_run_on_cpu(target, do_set_vcpu_info_gpa, RUN_ON_CPU_HOST_ULONG(gpa));
+    return 0;
+}
+
+static int vcpuop_register_vcpu_time_info(CPUState *cs, CPUState *target,
+                                          uint64_t arg)
+{
+    struct vcpu_register_time_memory_area tma;
+    uint64_t gpa;
+    size_t len;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(tma) == 8);
+    qemu_build_assert(sizeof(struct vcpu_time_info) == 32);
+
+    if (!target) {
+        return -ENOENT;
+    }
+
+    if (kvm_copy_from_gva(cs, arg, &tma, sizeof(tma))) {
+        return -EFAULT;
+    }
+
+    /*
+     * Xen actually uses the GVA and does the translation through the guest
+     * page tables each time. But Linux/KVM uses the GPA, on the assumption
+     * that guests only ever use *global* addresses (kernel virtual addresses)
+     * for it. If Linux is changed to redo the GVA→GPA translation each time,
+     * it will offer a new vCPU attribute for that, and we'll use it instead.
+     */
+    if (!kvm_gva_to_gpa(cs, tma.addr.p, &gpa, &len, false) ||
+        len < sizeof(struct vcpu_time_info)) {
+        return -EFAULT;
+    }
+
+    async_run_on_cpu(target, do_set_vcpu_time_info_gpa,
+                     RUN_ON_CPU_HOST_ULONG(gpa));
+    return 0;
+}
+
+static int vcpuop_register_runstate_info(CPUState *cs, CPUState *target,
+                                         uint64_t arg)
+{
+    struct vcpu_register_runstate_memory_area rma;
+    uint64_t gpa;
+    size_t len;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(rma) == 8);
+    /* The runstate area actually does change size, but Linux copes. */
+
+    if (!target) {
+        return -ENOENT;
+    }
+
+    if (kvm_copy_from_gva(cs, arg, &rma, sizeof(rma))) {
+        return -EFAULT;
+    }
+
+    /* As with vcpu_time_info, Xen actually uses the GVA but KVM doesn't. */
+    if (!kvm_gva_to_gpa(cs, rma.addr.p, &gpa, &len, false)) {
+        return -EFAULT;
+    }
+
+    async_run_on_cpu(target, do_set_vcpu_runstate_gpa,
+                     RUN_ON_CPU_HOST_ULONG(gpa));
+    return 0;
+}
+
+static uint64_t kvm_get_current_ns(void)
+{
+    struct kvm_clock_data data;
+    int ret;
+
+    ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
+    if (ret < 0) {
+        fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(ret));
+        abort();
+    }
+
+    return data.clock;
+}
+
+static void xen_vcpu_singleshot_timer_event(void *opaque)
+{
+    CPUState *cpu = opaque;
+    CPUX86State *env = &X86_CPU(cpu)->env;
+    uint16_t port = env->xen_virq[VIRQ_TIMER];
+
+    if (likely(port)) {
+        xen_evtchn_set_port(port);
+    }
+
+    qemu_mutex_lock(&env->xen_timers_lock);
+    env->xen_singleshot_timer_ns = 0;
+    qemu_mutex_unlock(&env->xen_timers_lock);
+}
+
+static void xen_vcpu_periodic_timer_event(void *opaque)
+{
+    CPUState *cpu = opaque;
+    CPUX86State *env = &X86_CPU(cpu)->env;
+    uint16_t port = env->xen_virq[VIRQ_TIMER];
+    int64_t qemu_now;
+
+    if (likely(port)) {
+        xen_evtchn_set_port(port);
+    }
+
+    qemu_mutex_lock(&env->xen_timers_lock);
+
+    qemu_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    timer_mod_ns(env->xen_periodic_timer,
+                 qemu_now + env->xen_periodic_timer_period);
+
+    qemu_mutex_unlock(&env->xen_timers_lock);
+}
+
+static int do_set_periodic_timer(CPUState *target, uint64_t period_ns)
+{
+    CPUX86State *tenv = &X86_CPU(target)->env;
+    int64_t qemu_now;
+
+    timer_del(tenv->xen_periodic_timer);
+
+    qemu_mutex_lock(&tenv->xen_timers_lock);
+
+    qemu_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    timer_mod_ns(tenv->xen_periodic_timer, qemu_now + period_ns);
+    tenv->xen_periodic_timer_period = period_ns;
+
+    qemu_mutex_unlock(&tenv->xen_timers_lock);
+    return 0;
+}
+
+#define MILLISECS(_ms) ((int64_t)((_ms) * 1000000ULL))
+#define MICROSECS(_us) ((int64_t)((_us) * 1000ULL))
+#define STIME_MAX ((time_t)((int64_t)~0ull >> 1))
+/* Chosen so (NOW() + delta) won't overflow without an uptime of 200 years */
+#define STIME_DELTA_MAX ((int64_t)((uint64_t)~0ull >> 2))
+
+static int vcpuop_set_periodic_timer(CPUState *cs, CPUState *target,
+                                     uint64_t arg)
+{
+    struct vcpu_set_periodic_timer spt;
+
+    qemu_build_assert(sizeof(spt) == 8);
+    if (kvm_copy_from_gva(cs, arg, &spt, sizeof(spt))) {
+        return -EFAULT;
+    }
+
+    if (spt.period_ns < MILLISECS(1) || spt.period_ns > STIME_DELTA_MAX) {
+        return -EINVAL;
+    }
+
+    return do_set_periodic_timer(target, spt.period_ns);
+}
+
+static int vcpuop_stop_periodic_timer(CPUState *target)
+{
+    CPUX86State *tenv = &X86_CPU(target)->env;
+
+    qemu_mutex_lock(&tenv->xen_timers_lock);
+
+    timer_del(tenv->xen_periodic_timer);
+    tenv->xen_periodic_timer_period = 0;
+
+    qemu_mutex_unlock(&tenv->xen_timers_lock);
+    return 0;
+}
+
+static int do_set_singleshot_timer(CPUState *cs, uint64_t timeout_abs,
+                                   bool future, bool linux_wa)
+{
+    CPUX86State *env = &X86_CPU(cs)->env;
+    int64_t now = kvm_get_current_ns();
+    int64_t qemu_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    int64_t delta = timeout_abs - now;
+
+    if (future && timeout_abs < now) {
+        return -ETIME;
+    }
+
+    if (linux_wa && unlikely((int64_t)timeout_abs < 0 ||
+                             (delta > 0 && (uint32_t)(delta >> 50) != 0))) {
+        /*
+         * Xen has a 'Linux workaround' in do_set_timer_op() which checks
+         * for negative absolute timeout values (caused by integer
+         * overflow), and for values about 13 days in the future (2^50ns)
+         * which would be caused by jiffies overflow. For those cases, it
+         * sets the timeout 100ms in the future (not *too* soon, since if
+         * a guest really did set a long timeout on purpose we don't want
+         * to keep churning CPU time by waking it up).
+         */
+        delta = (100 * SCALE_MS);
+        timeout_abs = now + delta;
+    }
+
+    qemu_mutex_lock(&env->xen_timers_lock);
+
+    timer_mod_ns(env->xen_singleshot_timer, qemu_now + delta);
+    env->xen_singleshot_timer_ns = now + delta;
+
+    qemu_mutex_unlock(&env->xen_timers_lock);
+    return 0;
+}
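The 2^50 ns cutoff in the workaround above is easy to sanity-check: 2^50 ns is about 1.13e15 ns, a little over 13 days. A standalone sketch of the clamp arithmetic, not part of the patch (SCALE_MS is redefined locally to QEMU's nanoseconds-per-millisecond value):

#include <stdint.h>
#include <stdio.h>

#define SCALE_MS 1000000LL                /* ns per millisecond, as in QEMU */

int main(void)
{
    /* The cutoff used by the 'Linux workaround': 2^50 ns ~= 13 days */
    int64_t cutoff_ns = 1LL << 50;
    printf("cutoff = %.2f days\n", cutoff_ns / 1e9 / 86400.0);

    /* A delta past the cutoff (or a negative timeout) gets clamped */
    int64_t now = 0;
    int64_t timeout_abs = cutoff_ns + 1;  /* hypothetical guest timeout */
    int64_t delta = timeout_abs - now;

    if ((int64_t)timeout_abs < 0 ||
        (delta > 0 && (uint32_t)(delta >> 50) != 0)) {
        delta = 100 * SCALE_MS;           /* 100ms in the future */
        timeout_abs = now + delta;
    }
    printf("clamped timeout = %lld ns\n", (long long)timeout_abs);
    return 0;
}

The 100ms value is a deliberate compromise: prompt enough for a guest whose jiffies counter wrapped, but not so short that a guest which really wanted a two-week timer gets woken constantly.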
+
+static int vcpuop_set_singleshot_timer(CPUState *cs, uint64_t arg)
+{
+    struct vcpu_set_singleshot_timer sst = { 0 };
+
+    /*
+     * The struct is a uint64_t followed by a uint32_t. On 32-bit that
+     * makes it 12 bytes. On 64-bit it gets padded to 16. The parts
+     * that get used are identical, and there's four bytes of padding
+     * unused at the end. For true Xen compatibility we should attempt
+     * to copy the full 16 bytes from 64-bit guests, and return -EFAULT
+     * if we can't get the padding too. But that's daft. Just copy what
+     * we need.
+     */
+    qemu_build_assert(offsetof(struct vcpu_set_singleshot_timer, flags) == 8);
+    qemu_build_assert(sizeof(sst) >= 12);
+
+    if (kvm_copy_from_gva(cs, arg, &sst, 12)) {
+        return -EFAULT;
+    }
+
+    return do_set_singleshot_timer(cs, sst.timeout_abs_ns,
+                                   !!(sst.flags & VCPU_SSHOTTMR_future),
+                                   false);
+}
+
+static int vcpuop_stop_singleshot_timer(CPUState *cs)
+{
+    CPUX86State *env = &X86_CPU(cs)->env;
+
+    qemu_mutex_lock(&env->xen_timers_lock);
+
+    timer_del(env->xen_singleshot_timer);
+    env->xen_singleshot_timer_ns = 0;
+
+    qemu_mutex_unlock(&env->xen_timers_lock);
+    return 0;
+}
+
+static bool kvm_xen_hcall_set_timer_op(struct kvm_xen_exit *exit, X86CPU *cpu,
+                                       uint64_t timeout)
+{
+    int err;
+
+    if (unlikely(timeout == 0)) {
+        err = vcpuop_stop_singleshot_timer(CPU(cpu));
+    } else {
+        err = do_set_singleshot_timer(CPU(cpu), timeout, false, true);
+    }
+    exit->u.hcall.result = err;
+    return true;
+}
+
+static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
+                                  int cmd, int vcpu_id, uint64_t arg)
+{
+    CPUState *cs = CPU(cpu);
+    CPUState *dest = cs->cpu_index == vcpu_id ?
cs : qemu_get_cpu(vcpu_id); + int err; + + if (!dest) { + err = -ENOENT; + goto out; + } + + switch (cmd) { + case VCPUOP_register_runstate_memory_area: + err = vcpuop_register_runstate_info(cs, dest, arg); + break; + case VCPUOP_register_vcpu_time_memory_area: + err = vcpuop_register_vcpu_time_info(cs, dest, arg); + break; + case VCPUOP_register_vcpu_info: + err = vcpuop_register_vcpu_info(cs, dest, arg); + break; + case VCPUOP_set_singleshot_timer: { + if (cs->cpu_index == vcpu_id) { + err = vcpuop_set_singleshot_timer(dest, arg); + } else { + err = -EINVAL; + } + break; + } + case VCPUOP_stop_singleshot_timer: + if (cs->cpu_index == vcpu_id) { + err = vcpuop_stop_singleshot_timer(dest); + } else { + err = -EINVAL; + } + break; + case VCPUOP_set_periodic_timer: { + err = vcpuop_set_periodic_timer(cs, dest, arg); + break; + } + case VCPUOP_stop_periodic_timer: + err = vcpuop_stop_periodic_timer(dest); + break; + + default: + return false; + } + + out: + exit->u.hcall.result = err; + return true; +} + +static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu, + int cmd, uint64_t arg) +{ + CPUState *cs = CPU(cpu); + int err = -ENOSYS; + + switch (cmd) { + case EVTCHNOP_init_control: + case EVTCHNOP_expand_array: + case EVTCHNOP_set_priority: + /* We do not support FIFO channels at this point */ + err = -ENOSYS; + break; + + case EVTCHNOP_status: { + struct evtchn_status status; + + qemu_build_assert(sizeof(status) == 24); + if (kvm_copy_from_gva(cs, arg, &status, sizeof(status))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_status_op(&status); + if (!err && kvm_copy_to_gva(cs, arg, &status, sizeof(status))) { + err = -EFAULT; + } + break; + } + case EVTCHNOP_close: { + struct evtchn_close close; + + qemu_build_assert(sizeof(close) == 4); + if (kvm_copy_from_gva(cs, arg, &close, sizeof(close))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_close_op(&close); + break; + } + case EVTCHNOP_unmask: { + struct evtchn_unmask unmask; + + qemu_build_assert(sizeof(unmask) == 4); + if (kvm_copy_from_gva(cs, arg, &unmask, sizeof(unmask))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_unmask_op(&unmask); + break; + } + case EVTCHNOP_bind_virq: { + struct evtchn_bind_virq virq; + + qemu_build_assert(sizeof(virq) == 12); + if (kvm_copy_from_gva(cs, arg, &virq, sizeof(virq))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_bind_virq_op(&virq); + if (!err && kvm_copy_to_gva(cs, arg, &virq, sizeof(virq))) { + err = -EFAULT; + } + break; + } + case EVTCHNOP_bind_pirq: { + struct evtchn_bind_pirq pirq; + + qemu_build_assert(sizeof(pirq) == 12); + if (kvm_copy_from_gva(cs, arg, &pirq, sizeof(pirq))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_bind_pirq_op(&pirq); + if (!err && kvm_copy_to_gva(cs, arg, &pirq, sizeof(pirq))) { + err = -EFAULT; + } + break; + } + case EVTCHNOP_bind_ipi: { + struct evtchn_bind_ipi ipi; + + qemu_build_assert(sizeof(ipi) == 8); + if (kvm_copy_from_gva(cs, arg, &ipi, sizeof(ipi))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_bind_ipi_op(&ipi); + if (!err && kvm_copy_to_gva(cs, arg, &ipi, sizeof(ipi))) { + err = -EFAULT; + } + break; + } + case EVTCHNOP_send: { + struct evtchn_send send; + + qemu_build_assert(sizeof(send) == 4); + if (kvm_copy_from_gva(cs, arg, &send, sizeof(send))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_send_op(&send); + break; + } + case EVTCHNOP_alloc_unbound: { + struct evtchn_alloc_unbound alloc; + + qemu_build_assert(sizeof(alloc) == 8); + if (kvm_copy_from_gva(cs, arg, &alloc, 
sizeof(alloc))) {
+            err = -EFAULT;
+            break;
+        }
+
+        err = xen_evtchn_alloc_unbound_op(&alloc);
+        if (!err && kvm_copy_to_gva(cs, arg, &alloc, sizeof(alloc))) {
+            err = -EFAULT;
+        }
+        break;
+    }
+    case EVTCHNOP_bind_interdomain: {
+        struct evtchn_bind_interdomain interdomain;
+
+        qemu_build_assert(sizeof(interdomain) == 12);
+        if (kvm_copy_from_gva(cs, arg, &interdomain, sizeof(interdomain))) {
+            err = -EFAULT;
+            break;
+        }
+
+        err = xen_evtchn_bind_interdomain_op(&interdomain);
+        if (!err &&
+            kvm_copy_to_gva(cs, arg, &interdomain, sizeof(interdomain))) {
+            err = -EFAULT;
+        }
+        break;
+    }
+    case EVTCHNOP_bind_vcpu: {
+        struct evtchn_bind_vcpu vcpu;
+
+        qemu_build_assert(sizeof(vcpu) == 8);
+        if (kvm_copy_from_gva(cs, arg, &vcpu, sizeof(vcpu))) {
+            err = -EFAULT;
+            break;
+        }
+
+        err = xen_evtchn_bind_vcpu_op(&vcpu);
+        break;
+    }
+    case EVTCHNOP_reset: {
+        struct evtchn_reset reset;
+
+        qemu_build_assert(sizeof(reset) == 2);
+        if (kvm_copy_from_gva(cs, arg, &reset, sizeof(reset))) {
+            err = -EFAULT;
+            break;
+        }
+
+        err = xen_evtchn_reset_op(&reset);
+        break;
+    }
+    default:
+        return false;
+    }
+
+    exit->u.hcall.result = err;
+    return true;
+}
+
+int kvm_xen_soft_reset(void)
+{
+    CPUState *cpu;
+    int err;
+
+    assert(qemu_mutex_iothread_locked());
+
+    trace_kvm_xen_soft_reset();
+
+    err = xen_evtchn_soft_reset();
+    if (err) {
+        return err;
+    }
+
+    /*
+     * Zero is the reset/startup state for HVM_PARAM_CALLBACK_IRQ. Strictly,
+     * it maps to HVM_PARAM_CALLBACK_TYPE_GSI with GSI#0, but Xen refuses
+     * to deliver to the timer interrupt and treats that as 'disabled'.
+     */
+    err = xen_evtchn_set_callback_param(0);
+    if (err) {
+        return err;
+    }
+
+    CPU_FOREACH(cpu) {
+        async_run_on_cpu(cpu, do_vcpu_soft_reset, RUN_ON_CPU_NULL);
+    }
+
+    err = xen_overlay_map_shinfo_page(INVALID_GFN);
+    if (err) {
+        return err;
+    }
+
+    err = xen_xenstore_reset();
+    if (err) {
+        return err;
+    }
+
+    return 0;
+}
+
+static int schedop_shutdown(CPUState *cs, uint64_t arg)
+{
+    struct sched_shutdown shutdown;
+    int ret = 0;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(shutdown) == 4);
+
+    if (kvm_copy_from_gva(cs, arg, &shutdown, sizeof(shutdown))) {
+        return -EFAULT;
+    }
+
+    switch (shutdown.reason) {
+    case SHUTDOWN_crash:
+        cpu_dump_state(cs, stderr, CPU_DUMP_CODE);
+        qemu_system_guest_panicked(NULL);
+        break;
+
+    case SHUTDOWN_reboot:
+        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+        break;
+
+    case SHUTDOWN_poweroff:
+        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+        break;
+
+    case SHUTDOWN_soft_reset:
+        qemu_mutex_lock_iothread();
+        ret = kvm_xen_soft_reset();
+        qemu_mutex_unlock_iothread();
+        break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
+static bool kvm_xen_hcall_sched_op(struct kvm_xen_exit *exit, X86CPU *cpu,
+                                   int cmd, uint64_t arg)
+{
+    CPUState *cs = CPU(cpu);
+    int err = -ENOSYS;
+
+    switch (cmd) {
+    case SCHEDOP_shutdown:
+        err = schedop_shutdown(cs, arg);
+        break;
+
+    case SCHEDOP_poll:
+        /*
+         * Linux will panic if this doesn't work. Just yield; it's not
+         * worth overthinking it because with event channel handling
+         * in KVM, the kernel will intercept this and it will never
+         * reach QEMU anyway. The semantics of the hypercall explicitly
+         * permit spurious wakeups.
+         */
+    case SCHEDOP_yield:
+        sched_yield();
+        err = 0;
+        break;
+
+    default:
+        return false;
+    }
+
+    exit->u.hcall.result = err;
+    return true;
+}
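Every hypercall handler in this file follows the same guest-memory marshalling pattern: pin the guest ABI struct size with a build-time assert, copy the argument in from the guest virtual address, perform the operation, and copy any output back, converting a faulting guest pointer into -EFAULT. A condensed sketch of that pattern — struct evtchn_foo and xen_evtchn_foo_op() are hypothetical stand-ins, while kvm_copy_from_gva()/kvm_copy_to_gva() are the real helpers used throughout:

static bool kvm_xen_hcall_foo(struct kvm_xen_exit *exit, X86CPU *cpu,
                              uint64_t arg)
{
    struct evtchn_foo foo;                   /* hypothetical ABI struct */
    int err;

    qemu_build_assert(sizeof(foo) == 8);     /* pin the guest ABI size */
    if (kvm_copy_from_gva(CPU(cpu), arg, &foo, sizeof(foo))) {
        err = -EFAULT;                       /* bad guest pointer */
    } else {
        err = xen_evtchn_foo_op(&foo);       /* do the actual work */
        if (!err && kvm_copy_to_gva(CPU(cpu), arg, &foo, sizeof(foo))) {
            err = -EFAULT;                   /* couldn't write results back */
        }
    }

    exit->u.hcall.result = err;              /* returned to the guest */
    return true;                             /* handled, even on failure */
}

Returning false instead means "not handled here", which the dispatcher at the bottom of the file turns into -ENOSYS for the guest.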
+
+static bool kvm_xen_hcall_gnttab_op(struct kvm_xen_exit *exit, X86CPU *cpu,
+                                    int cmd, uint64_t arg, int count)
+{
+    CPUState *cs = CPU(cpu);
+    int err;
+
+    switch (cmd) {
+    case GNTTABOP_set_version: {
+        struct gnttab_set_version set;
+
+        qemu_build_assert(sizeof(set) == 4);
+        if (kvm_copy_from_gva(cs, arg, &set, sizeof(set))) {
+            err = -EFAULT;
+            break;
+        }
+
+        err = xen_gnttab_set_version_op(&set);
+        if (!err && kvm_copy_to_gva(cs, arg, &set, sizeof(set))) {
+            err = -EFAULT;
+        }
+        break;
+    }
+    case GNTTABOP_get_version: {
+        struct gnttab_get_version get;
+
+        qemu_build_assert(sizeof(get) == 8);
+        if (kvm_copy_from_gva(cs, arg, &get, sizeof(get))) {
+            err = -EFAULT;
+            break;
+        }
+
+        err = xen_gnttab_get_version_op(&get);
+        if (!err && kvm_copy_to_gva(cs, arg, &get, sizeof(get))) {
+            err = -EFAULT;
+        }
+        break;
+    }
+    case GNTTABOP_query_size: {
+        struct gnttab_query_size size;
+
+        qemu_build_assert(sizeof(size) == 16);
+        if (kvm_copy_from_gva(cs, arg, &size, sizeof(size))) {
+            err = -EFAULT;
+            break;
+        }
+
+        err = xen_gnttab_query_size_op(&size);
+        if (!err && kvm_copy_to_gva(cs, arg, &size, sizeof(size))) {
+            err = -EFAULT;
+        }
+        break;
+    }
+    case GNTTABOP_setup_table:
+    case GNTTABOP_copy:
+    case GNTTABOP_map_grant_ref:
+    case GNTTABOP_unmap_grant_ref:
+    case GNTTABOP_swap_grant_ref:
+        return false;
+
+    default:
+        /* Xen explicitly returns -ENOSYS to HVM guests for all others */
+        err = -ENOSYS;
+        break;
+    }
+
+    exit->u.hcall.result = err;
+    return true;
+}
+
+static bool kvm_xen_hcall_physdev_op(struct kvm_xen_exit *exit, X86CPU *cpu,
+                                     int cmd, uint64_t arg)
+{
+    CPUState *cs = CPU(cpu);
+    int err;
+
+    switch (cmd) {
+    case PHYSDEVOP_map_pirq: {
+        struct physdev_map_pirq map;
+
+        if (hypercall_compat32(exit->u.hcall.longmode)) {
+            struct compat_physdev_map_pirq *map32 = (void *)&map;
+
+            if (kvm_copy_from_gva(cs, arg, map32, sizeof(*map32))) {
+                return -EFAULT;
+            }
+
+            /*
+             * The only thing that's different is the alignment of the
+             * uint64_t table_base at the end, which gets padding to make
+             * it 64-bit aligned in the 64-bit version.
+             */
+            qemu_build_assert(sizeof(*map32) == 36);
+            qemu_build_assert(offsetof(struct physdev_map_pirq, entry_nr) ==
+                              offsetof(struct compat_physdev_map_pirq, entry_nr));
+            memmove(&map.table_base, &map32->table_base, sizeof(map.table_base));
+        } else {
+            if (kvm_copy_from_gva(cs, arg, &map, sizeof(map))) {
+                err = -EFAULT;
+                break;
+            }
+        }
+        err = xen_physdev_map_pirq(&map);
+        /*
+         * Since table_base is an IN parameter and won't be changed, just
+         * copy the size of the compat structure back to the guest.
+ */ + if (!err && kvm_copy_to_gva(cs, arg, &map, + sizeof(struct compat_physdev_map_pirq))) { + err = -EFAULT; + } + break; + } + case PHYSDEVOP_unmap_pirq: { + struct physdev_unmap_pirq unmap; + + qemu_build_assert(sizeof(unmap) == 8); + if (kvm_copy_from_gva(cs, arg, &unmap, sizeof(unmap))) { + err = -EFAULT; + break; + } + + err = xen_physdev_unmap_pirq(&unmap); + if (!err && kvm_copy_to_gva(cs, arg, &unmap, sizeof(unmap))) { + err = -EFAULT; + } + break; + } + case PHYSDEVOP_eoi: { + struct physdev_eoi eoi; + + qemu_build_assert(sizeof(eoi) == 4); + if (kvm_copy_from_gva(cs, arg, &eoi, sizeof(eoi))) { + err = -EFAULT; + break; + } + + err = xen_physdev_eoi_pirq(&eoi); + if (!err && kvm_copy_to_gva(cs, arg, &eoi, sizeof(eoi))) { + err = -EFAULT; + } + break; + } + case PHYSDEVOP_irq_status_query: { + struct physdev_irq_status_query query; + + qemu_build_assert(sizeof(query) == 8); + if (kvm_copy_from_gva(cs, arg, &query, sizeof(query))) { + err = -EFAULT; + break; + } + + err = xen_physdev_query_pirq(&query); + if (!err && kvm_copy_to_gva(cs, arg, &query, sizeof(query))) { + err = -EFAULT; + } + break; + } + case PHYSDEVOP_get_free_pirq: { + struct physdev_get_free_pirq get; + + qemu_build_assert(sizeof(get) == 8); + if (kvm_copy_from_gva(cs, arg, &get, sizeof(get))) { + err = -EFAULT; + break; + } + + err = xen_physdev_get_free_pirq(&get); + if (!err && kvm_copy_to_gva(cs, arg, &get, sizeof(get))) { + err = -EFAULT; + } + break; + } + case PHYSDEVOP_pirq_eoi_gmfn_v2: /* FreeBSD 13 makes this hypercall */ + err = -ENOSYS; + break; + + default: + return false; + } + + exit->u.hcall.result = err; + return true; +} + +static bool do_kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit) +{ + uint16_t code = exit->u.hcall.input; + + if (exit->u.hcall.cpl > 0) { + exit->u.hcall.result = -EPERM; + return true; + } + + switch (code) { + case __HYPERVISOR_set_timer_op: + if (exit->u.hcall.longmode) { + return kvm_xen_hcall_set_timer_op(exit, cpu, + exit->u.hcall.params[0]); + } else { + /* In 32-bit mode, the 64-bit timer value is in two args. 
*/ + uint64_t val = ((uint64_t)exit->u.hcall.params[1]) << 32 | + (uint32_t)exit->u.hcall.params[0]; + return kvm_xen_hcall_set_timer_op(exit, cpu, val); + } + case __HYPERVISOR_grant_table_op: + return kvm_xen_hcall_gnttab_op(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1], + exit->u.hcall.params[2]); + case __HYPERVISOR_sched_op: + return kvm_xen_hcall_sched_op(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1]); + case __HYPERVISOR_event_channel_op: + return kvm_xen_hcall_evtchn_op(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1]); + case __HYPERVISOR_vcpu_op: + return kvm_xen_hcall_vcpu_op(exit, cpu, + exit->u.hcall.params[0], + exit->u.hcall.params[1], + exit->u.hcall.params[2]); + case __HYPERVISOR_hvm_op: + return kvm_xen_hcall_hvm_op(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1]); + case __HYPERVISOR_memory_op: + return kvm_xen_hcall_memory_op(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1]); + case __HYPERVISOR_physdev_op: + return kvm_xen_hcall_physdev_op(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1]); + case __HYPERVISOR_xen_version: + return kvm_xen_hcall_xen_version(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1]); + default: + return false; + } +} + +int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit) +{ + if (exit->type != KVM_EXIT_XEN_HCALL) { + return -1; + } + + /* + * The kernel latches the guest 32/64 mode when the MSR is used to fill + * the hypercall page. So if we see a hypercall in a mode that doesn't + * match our own idea of the guest mode, fetch the kernel's idea of the + * "long mode" to remain in sync. + */ + if (exit->u.hcall.longmode != xen_is_long_mode()) { + xen_sync_long_mode(); + } + + if (!do_kvm_xen_handle_exit(cpu, exit)) { + /* + * Some hypercalls will be deliberately "implemented" by returning + * -ENOSYS. This case is for hypercalls which are unexpected. 
+ */ + exit->u.hcall.result = -ENOSYS; + qemu_log_mask(LOG_UNIMP, "Unimplemented Xen hypercall %" + PRId64 " (0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 ")\n", + (uint64_t)exit->u.hcall.input, + (uint64_t)exit->u.hcall.params[0], + (uint64_t)exit->u.hcall.params[1], + (uint64_t)exit->u.hcall.params[2]); + } + + trace_kvm_xen_hypercall(CPU(cpu)->cpu_index, exit->u.hcall.cpl, + exit->u.hcall.input, exit->u.hcall.params[0], + exit->u.hcall.params[1], exit->u.hcall.params[2], + exit->u.hcall.result); + return 0; +} + +uint16_t kvm_xen_get_gnttab_max_frames(void) +{ + KVMState *s = KVM_STATE(current_accel()); + return s->xen_gnttab_max_frames; +} + +uint16_t kvm_xen_get_evtchn_max_pirq(void) +{ + KVMState *s = KVM_STATE(current_accel()); + return s->xen_evtchn_max_pirq; +} + +int kvm_put_xen_state(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + uint64_t gpa; + int ret; + + gpa = env->xen_vcpu_info_gpa; + if (gpa == INVALID_GPA) { + gpa = env->xen_vcpu_info_default_gpa; + } + + if (gpa != INVALID_GPA) { + ret = set_vcpu_info(cs, gpa); + if (ret < 0) { + return ret; + } + } + + gpa = env->xen_vcpu_time_info_gpa; + if (gpa != INVALID_GPA) { + ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, + gpa); + if (ret < 0) { + return ret; + } + } + + gpa = env->xen_vcpu_runstate_gpa; + if (gpa != INVALID_GPA) { + ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR, + gpa); + if (ret < 0) { + return ret; + } + } + + if (env->xen_periodic_timer_period) { + ret = do_set_periodic_timer(cs, env->xen_periodic_timer_period); + if (ret < 0) { + return ret; + } + } + + if (!kvm_xen_has_cap(EVTCHN_SEND)) { + /* + * If the kernel has EVTCHN_SEND support then it handles timers too, + * so the timer will be restored by kvm_xen_set_vcpu_timer() below. + */ + if (env->xen_singleshot_timer_ns) { + ret = do_set_singleshot_timer(cs, env->xen_singleshot_timer_ns, + false, false); + if (ret < 0) { + return ret; + } + } + return 0; + } + + if (env->xen_vcpu_callback_vector) { + ret = kvm_xen_set_vcpu_callback_vector(cs); + if (ret < 0) { + return ret; + } + } + + if (env->xen_virq[VIRQ_TIMER]) { + ret = kvm_xen_set_vcpu_timer(cs); + if (ret < 0) { + return ret; + } + } + return 0; +} + +int kvm_get_xen_state(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + uint64_t gpa; + int ret; + + /* + * The kernel does not mark vcpu_info as dirty when it delivers interrupts + * to it. It's up to userspace to *assume* that any page shared thus is + * always considered dirty. The shared_info page is different since it's + * an overlay and migrated separately anyway. + */ + gpa = env->xen_vcpu_info_gpa; + if (gpa == INVALID_GPA) { + gpa = env->xen_vcpu_info_default_gpa; + } + if (gpa != INVALID_GPA) { + MemoryRegionSection mrs = memory_region_find(get_system_memory(), + gpa, + sizeof(struct vcpu_info)); + if (mrs.mr && + !int128_lt(mrs.size, int128_make64(sizeof(struct vcpu_info)))) { + memory_region_set_dirty(mrs.mr, mrs.offset_within_region, + sizeof(struct vcpu_info)); + } + } + + if (!kvm_xen_has_cap(EVTCHN_SEND)) { + return 0; + } + + /* + * If the kernel is accelerating timers, read out the current value of the + * singleshot timer deadline. 
+ */ + if (env->xen_virq[VIRQ_TIMER]) { + struct kvm_xen_vcpu_attr va = { + .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER, + }; + ret = kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_GET_ATTR, &va); + if (ret < 0) { + return ret; + } + env->xen_singleshot_timer_ns = va.u.timer.expires_ns; + } + + return 0; +} diff --git a/target/i386/kvm/xen-emu.h b/target/i386/kvm/xen-emu.h new file mode 100644 index 0000000000..fe85e0b195 --- /dev/null +++ b/target/i386/kvm/xen-emu.h @@ -0,0 +1,33 @@ +/* + * Xen HVM emulation support in KVM + * + * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. + * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_I386_KVM_XEN_EMU_H +#define QEMU_I386_KVM_XEN_EMU_H + +#define XEN_HYPERCALL_MSR 0x40000000 +#define XEN_HYPERCALL_MSR_HYPERV 0x40000200 + +#define XEN_CPUID_SIGNATURE 0 +#define XEN_CPUID_VENDOR 1 +#define XEN_CPUID_HVM_MSR 2 +#define XEN_CPUID_TIME 3 +#define XEN_CPUID_HVM 4 + +#define XEN_VERSION(maj, min) ((maj) << 16 | (min)) + +int kvm_xen_init(KVMState *s, uint32_t hypercall_msr); +int kvm_xen_init_vcpu(CPUState *cs); +int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit); +int kvm_put_xen_state(CPUState *cs); +int kvm_get_xen_state(CPUState *cs); +void kvm_xen_maybe_deassert_callback(CPUState *cs); + +#endif /* QEMU_I386_KVM_XEN_EMU_H */ diff --git a/target/i386/machine.c b/target/i386/machine.c index 310b125235..c7ac8084b2 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -6,8 +6,10 @@ #include "kvm/hyperv.h" #include "hw/i386/x86.h" #include "kvm/kvm_i386.h" +#include "hw/xen/xen.h" #include "sysemu/kvm.h" +#include "sysemu/kvm_xen.h" #include "sysemu/tcg.h" #include "qemu/error-report.h" @@ -1257,6 +1259,28 @@ static const VMStateDescription vmstate_nested_state = { } }; +static bool xen_vcpu_needed(void *opaque) +{ + return (xen_mode == XEN_EMULATE); +} + +static const VMStateDescription vmstate_xen_vcpu = { + .name = "cpu/xen_vcpu", + .version_id = 1, + .minimum_version_id = 1, + .needed = xen_vcpu_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU), + VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU), + VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU), + VMSTATE_UINT64(env.xen_vcpu_runstate_gpa, X86CPU), + VMSTATE_UINT8(env.xen_vcpu_callback_vector, X86CPU), + VMSTATE_UINT16_ARRAY(env.xen_virq, X86CPU, XEN_NR_VIRQS), + VMSTATE_UINT64(env.xen_singleshot_timer_ns, X86CPU), + VMSTATE_UINT64(env.xen_periodic_timer_period, X86CPU), + VMSTATE_END_OF_LIST() + } +}; #endif static bool mcg_ext_ctl_needed(void *opaque) @@ -1716,6 +1740,7 @@ const VMStateDescription vmstate_x86_cpu = { #endif #ifdef CONFIG_KVM &vmstate_nested_state, + &vmstate_xen_vcpu, #endif &vmstate_msr_tsx_ctrl, &vmstate_msr_intel_sgx, diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c index 55bd1194d3..e87f90dbe3 100644 --- a/target/i386/tcg/sysemu/excp_helper.c +++ b/target/i386/tcg/sysemu/excp_helper.c @@ -64,7 +64,7 @@ static bool ptw_translate(PTETranslate *inout, hwaddr addr) int flags; inout->gaddr = addr; - flags = probe_access_full(inout->env, addr, MMU_DATA_STORE, + flags = probe_access_full(inout->env, addr, 0, MMU_DATA_STORE, inout->ptw_idx, true, &inout->haddr, &full, 0); if (unlikely(flags & TLB_INVALID_MASK)) { @@ -428,7 +428,7 @@ do_check_protect_pse36: CPUTLBEntryFull *full; int flags, 
nested_page_size; - flags = probe_access_full(env, paddr, access_type, + flags = probe_access_full(env, paddr, 0, access_type, MMU_NESTED_IDX, true, &pte_trans.haddr, &full, 0); if (unlikely(flags & TLB_INVALID_MASK)) { diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c index 79ac5908f7..b942c306d6 100644 --- a/target/i386/tcg/tcg-cpu.c +++ b/target/i386/tcg/tcg-cpu.c @@ -49,10 +49,10 @@ static void x86_cpu_exec_exit(CPUState *cs) static void x86_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { - /* The instruction pointer is always up to date with TARGET_TB_PCREL. */ - if (!TARGET_TB_PCREL) { + /* The instruction pointer is always up to date with CF_PCREL. */ + if (!(tb_cflags(tb) & CF_PCREL)) { CPUX86State *env = cs->env_ptr; - env->eip = tb_pc(tb) - tb->cs_base; + env->eip = tb->pc - tb->cs_base; } } @@ -64,7 +64,7 @@ static void x86_restore_state_to_opc(CPUState *cs, CPUX86State *env = &cpu->env; int cc_op = data[1]; - if (TARGET_TB_PCREL) { + if (tb_cflags(tb) & CF_PCREL) { env->eip = (env->eip & TARGET_PAGE_MASK) | data[0]; } else { env->eip = data[0] - tb->cs_base; diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index 9d9392b009..defbc43deb 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -545,7 +545,7 @@ static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) static void gen_update_eip_cur(DisasContext *s) { assert(s->pc_save != -1); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save); } else { tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base); @@ -556,7 +556,7 @@ static void gen_update_eip_cur(DisasContext *s) static void gen_update_eip_next(DisasContext *s) { assert(s->pc_save != -1); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save); } else { tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base); @@ -588,7 +588,7 @@ static TCGv_i32 eip_next_i32(DisasContext *s) if (CODE64(s)) { return tcg_constant_i32(-1); } - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { TCGv_i32 ret = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(ret, cpu_eip); tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save); @@ -601,7 +601,7 @@ static TCGv_i32 eip_next_i32(DisasContext *s) static TCGv eip_next_tl(DisasContext *s) { assert(s->pc_save != -1); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { TCGv ret = tcg_temp_new(); tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save); return ret; @@ -613,7 +613,7 @@ static TCGv eip_next_tl(DisasContext *s) static TCGv eip_cur_tl(DisasContext *s) { assert(s->pc_save != -1); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { TCGv ret = tcg_temp_new(); tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save); return ret; @@ -1830,7 +1830,7 @@ static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); - /* The CC_OP value is no longer predictable. */ + /* The CC_OP value is no longer predictable. 
*/ set_cc_op(s, CC_OP_DYNAMIC); } @@ -1923,7 +1923,7 @@ static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1, gen_op_ld_v(s, ot, s->T0, s->A0); else gen_op_mov_v_reg(s, ot, s->T0, op1); - + if (is_right) { switch (ot) { case MO_8: @@ -2319,7 +2319,7 @@ static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib) ea = cpu_regs[a.base]; } if (!ea) { - if (TARGET_TB_PCREL && a.base == -2) { + if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) { /* With cpu_eip ~= pc_save, the expression is pc-relative. */ tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save); } else { @@ -2867,7 +2867,7 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num) if (!CODE64(s)) { if (ot == MO_16) { mask = 0xffff; - if (TARGET_TB_PCREL && CODE32(s)) { + if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) { use_goto_tb = false; } } else { @@ -2879,7 +2879,7 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num) gen_update_cc_op(s); set_cc_op(s, CC_OP_DYNAMIC); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save); /* * If we can prove the branch does not leave the page and we have @@ -2896,13 +2896,13 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num) translator_use_goto_tb(&s->base, new_eip + s->cs_base)) { /* jump to same page: we can use a direct jump */ tcg_gen_goto_tb(tb_num); - if (!TARGET_TB_PCREL) { + if (!(tb_cflags(s->base.tb) & CF_PCREL)) { tcg_gen_movi_tl(cpu_eip, new_eip); } tcg_gen_exit_tb(s->base.tb, tb_num); s->base.is_jmp = DISAS_NORETURN; } else { - if (!TARGET_TB_PCREL) { + if (!(tb_cflags(s->base.tb) & CF_PCREL)) { tcg_gen_movi_tl(cpu_eip, new_eip); } if (s->jmp_opt) { @@ -3426,13 +3426,10 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) if (mod == 3) { goto illegal_op; } - a0 = tcg_temp_local_new(); - t0 = tcg_temp_local_new(); + a0 = s->A0; + t0 = s->T0; label1 = gen_new_label(); - tcg_gen_mov_tl(a0, s->A0); - tcg_gen_mov_tl(t0, s->T0); - gen_set_label(label1); t1 = tcg_temp_new(); t2 = tcg_temp_new(); @@ -3444,9 +3441,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1); tcg_temp_free(t2); - tcg_temp_free(a0); tcg_gen_neg_tl(s->T0, t0); - tcg_temp_free(t0); } else { tcg_gen_neg_tl(s->T0, s->T0); if (mod != 3) { @@ -6248,13 +6243,13 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) #endif { TCGLabel *label1; - TCGv t0, t1, t2, a0; + TCGv t0, t1, t2; if (!PE(s) || VM86(s)) goto illegal_op; - t0 = tcg_temp_local_new(); - t1 = tcg_temp_local_new(); - t2 = tcg_temp_local_new(); + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + t2 = tcg_temp_new(); ot = MO_16; modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; @@ -6263,11 +6258,8 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, t0, s->A0); - a0 = tcg_temp_local_new(); - tcg_gen_mov_tl(a0, s->A0); } else { gen_op_mov_v_reg(s, ot, t0, rm); - a0 = NULL; } gen_op_mov_v_reg(s, ot, t1, reg); tcg_gen_andi_tl(s->tmp0, t0, 3); @@ -6280,8 +6272,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_movi_tl(t2, CC_Z); gen_set_label(label1); if (mod != 3) { - gen_op_st_v(s, ot, t0, a0); - tcg_temp_free(a0); + gen_op_st_v(s, ot, t0, s->A0); } else { gen_op_mov_reg_v(s, ot, rm, t0); } @@ -6304,7 +6295,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | REX_R(s); gen_ldst_modrm(env, s, 
modrm, MO_16, OR_TMP0, 0); - t0 = tcg_temp_local_new(); + t0 = tcg_temp_new(); gen_update_cc_op(s); if (b == 0x102) { gen_helper_lar(t0, cpu_env, s->T0); @@ -7052,7 +7043,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->tmp2_i32 = tcg_temp_new_i32(); dc->tmp3_i32 = tcg_temp_new_i32(); dc->tmp4 = tcg_temp_new(); - dc->cc_srcT = tcg_temp_local_new(); + dc->cc_srcT = tcg_temp_new(); } static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu) @@ -7065,7 +7056,7 @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) target_ulong pc_arg = dc->base.pc_next; dc->prev_insn_end = tcg_last_op(); - if (TARGET_TB_PCREL) { + if (tb_cflags(dcbase->tb) & CF_PCREL) { pc_arg -= dc->cs_base; pc_arg &= ~TARGET_PAGE_MASK; } @@ -7158,7 +7149,7 @@ static const TranslatorOps i386_tr_ops = { }; /* generate intermediate code for basic block 'tb'. */ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index 290ab4d526..d6513f2d9d 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -12,12 +12,12 @@ #include "qemu/module.h" #include "sysemu/qtest.h" #include "exec/exec-all.h" -#include "qapi/qapi-commands-machine-target.h" #include "cpu.h" #include "internals.h" #include "fpu/softfloat-helpers.h" #include "cpu-csr.h" #include "sysemu/reset.h" +#include "tcg/tcg.h" const char * const regnames[32] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", @@ -321,7 +321,8 @@ static void loongarch_cpu_synchronize_from_tb(CPUState *cs, LoongArchCPU *cpu = LOONGARCH_CPU(cs); CPULoongArchState *env = &cpu->env; - env->pc = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + env->pc = tb->pc; } static void loongarch_restore_state_to_opc(CPUState *cs, @@ -599,7 +600,7 @@ static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model) oc = object_class_by_name(cpu_model); if (!oc) { - g_autofree char *typename + g_autofree char *typename = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); if (!oc) { @@ -748,29 +749,3 @@ static const TypeInfo loongarch_cpu_type_infos[] = { }; DEFINE_TYPES(loongarch_cpu_type_infos) - -static void loongarch_cpu_add_definition(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - CpuDefinitionInfoList **cpu_list = user_data; - CpuDefinitionInfo *info = g_new0(CpuDefinitionInfo, 1); - const char *typename = object_class_get_name(oc); - - info->name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_LOONGARCH_CPU)); - info->q_typename = g_strdup(typename); - - QAPI_LIST_PREPEND(*cpu_list, info); -} - -CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) -{ - CpuDefinitionInfoList *cpu_list = NULL; - GSList *list; - - list = object_class_get_list(TYPE_LOONGARCH_CPU, false); - g_slist_foreach(list, loongarch_cpu_add_definition, &cpu_list); - g_slist_free(list); - - return cpu_list; -} diff --git a/target/loongarch/loongarch-qmp-cmds.c b/target/loongarch/loongarch-qmp-cmds.c new file mode 100644 index 0000000000..6c25957881 --- /dev/null +++ b/target/loongarch/loongarch-qmp-cmds.c @@ -0,0 +1,37 @@ +/* + * QEMU LoongArch CPU (monitor definitions) + * + * SPDX-FileCopyrightText: 2021 Loongson Technology Corporation Limited + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include 
"qapi/qapi-commands-machine-target.h" +#include "cpu.h" + +static void loongarch_cpu_add_definition(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + CpuDefinitionInfoList **cpu_list = user_data; + CpuDefinitionInfo *info = g_new0(CpuDefinitionInfo, 1); + const char *typename = object_class_get_name(oc); + + info->name = g_strndup(typename, + strlen(typename) - strlen("-" TYPE_LOONGARCH_CPU)); + info->q_typename = g_strdup(typename); + + QAPI_LIST_PREPEND(*cpu_list, info); +} + +CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) +{ + CpuDefinitionInfoList *cpu_list = NULL; + GSList *list; + + list = object_class_get_list(TYPE_LOONGARCH_CPU, false); + g_slist_foreach(list, loongarch_cpu_add_definition, &cpu_list); + g_slist_free(list); + + return cpu_list; +} diff --git a/target/loongarch/meson.build b/target/loongarch/meson.build index 690633969f..9293a8ab78 100644 --- a/target/loongarch/meson.build +++ b/target/loongarch/meson.build @@ -16,6 +16,7 @@ loongarch_tcg_ss.add(zlib) loongarch_softmmu_ss = ss.source_set() loongarch_softmmu_ss.add(files( + 'loongarch-qmp-cmds.c', 'machine.c', 'tlb_helper.c', 'constant_timer.c', diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c index 72a6275665..2a43ab0201 100644 --- a/target/loongarch/translate.c +++ b/target/loongarch/translate.c @@ -245,7 +245,7 @@ static const TranslatorOps loongarch_tr_ops = { .disas_log = loongarch_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; diff --git a/target/m68k/translate.c b/target/m68k/translate.c index 31178c3b1d..157c2cbb8f 100644 --- a/target/m68k/translate.c +++ b/target/m68k/translate.c @@ -6393,7 +6393,7 @@ static const TranslatorOps m68k_tr_ops = { .disas_log = m68k_tr_disas_log, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c index a2d2f5c340..03c2c4db1f 100644 --- a/target/microblaze/cpu.c +++ b/target/microblaze/cpu.c @@ -30,6 +30,7 @@ #include "exec/exec-all.h" #include "exec/gdbstub.h" #include "fpu/softfloat-helpers.h" +#include "tcg/tcg.h" static const struct { const char *name; @@ -97,7 +98,8 @@ static void mb_cpu_synchronize_from_tb(CPUState *cs, { MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); - cpu->env.pc = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + cpu->env.pc = tb->pc; cpu->env.iflags = tb->flags & IFLAGS_TB_MASK; } diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c index 974f21eb31..037a652cb9 100644 --- a/target/microblaze/translate.c +++ b/target/microblaze/translate.c @@ -1849,7 +1849,7 @@ static const TranslatorOps mb_tr_ops = { .disas_log = mb_tr_disas_log, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c index 96e61170e6..da49a93912 100644 --- a/target/mips/tcg/exception.c +++ b/target/mips/tcg/exception.c @@ -82,7 +82,8 @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; - 
env->active_tc.PC = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + env->active_tc.PC = tb->pc; env->hflags &= ~MIPS_HFLAG_BMASK; env->hflags |= tb->flags & MIPS_HFLAG_BMASK; } diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc index 812c111e3c..faf6d679bd 100644 --- a/target/mips/tcg/nanomips_translate.c.inc +++ b/target/mips/tcg/nanomips_translate.c.inc @@ -1017,8 +1017,8 @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset, static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset, uint32_t reg1, uint32_t reg2, bool eva) { - TCGv taddr = tcg_temp_local_new(); - TCGv lladdr = tcg_temp_local_new(); + TCGv taddr = tcg_temp_new(); + TCGv lladdr = tcg_temp_new(); TCGv_i64 tval = tcg_temp_new_i64(); TCGv_i64 llval = tcg_temp_new_i64(); TCGv_i64 val = tcg_temp_new_i64(); diff --git a/target/mips/tcg/sysemu/special_helper.c b/target/mips/tcg/sysemu/special_helper.c index 3c5f35c759..93276f789d 100644 --- a/target/mips/tcg/sysemu/special_helper.c +++ b/target/mips/tcg/sysemu/special_helper.c @@ -94,7 +94,7 @@ bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb) CPUMIPSState *env = &cpu->env; if ((env->hflags & MIPS_HFLAG_BMASK) != 0 - && env->active_tc.PC != tb_pc(tb)) { + && !(cs->tcg_cflags & CF_PCREL) && env->active_tc.PC != tb->pc) { env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); env->hflags &= ~MIPS_HFLAG_BMASK; return true; diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c index aa12bb708a..8cad3d15a0 100644 --- a/target/mips/tcg/translate.c +++ b/target/mips/tcg/translate.c @@ -2400,7 +2400,7 @@ static void gen_arith_imm(DisasContext *ctx, uint32_t opc, switch (opc) { case OPC_ADDI: { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); @@ -2434,7 +2434,7 @@ static void gen_arith_imm(DisasContext *ctx, uint32_t opc, #if defined(TARGET_MIPS64) case OPC_DADDI: { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); @@ -2630,7 +2630,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, switch (opc) { case OPC_ADD: { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); @@ -2666,7 +2666,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, break; case OPC_SUB: { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); @@ -2707,7 +2707,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, #if defined(TARGET_MIPS64) case OPC_DADD: { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); @@ -2741,7 +2741,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, break; case OPC_DSUB: { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); @@ -3759,26 +3759,8 @@ static void gen_loongson_integer(DisasContext *ctx, uint32_t opc, return; } - switch (opc) { - case OPC_MULT_G_2E: - case OPC_MULT_G_2F: - case OPC_MULTU_G_2E: - case OPC_MULTU_G_2F: -#if defined(TARGET_MIPS64) - case OPC_DMULT_G_2E: - case OPC_DMULT_G_2F: - case OPC_DMULTU_G_2E: - case OPC_DMULTU_G_2F: -#endif - t0 = 
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index aa12bb708a..8cad3d15a0 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -2400,7 +2400,7 @@ static void gen_arith_imm(DisasContext *ctx, uint32_t opc,
     switch (opc) {
     case OPC_ADDI:
         {
-            TCGv t0 = tcg_temp_local_new();
+            TCGv t0 = tcg_temp_new();
             TCGv t1 = tcg_temp_new();
             TCGv t2 = tcg_temp_new();
             TCGLabel *l1 = gen_new_label();
@@ -2434,7 +2434,7 @@ static void gen_arith_imm(DisasContext *ctx, uint32_t opc,
 #if defined(TARGET_MIPS64)
     case OPC_DADDI:
         {
-            TCGv t0 = tcg_temp_local_new();
+            TCGv t0 = tcg_temp_new();
             TCGv t1 = tcg_temp_new();
             TCGv t2 = tcg_temp_new();
             TCGLabel *l1 = gen_new_label();
@@ -2630,7 +2630,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc,
     switch (opc) {
     case OPC_ADD:
         {
-            TCGv t0 = tcg_temp_local_new();
+            TCGv t0 = tcg_temp_new();
             TCGv t1 = tcg_temp_new();
             TCGv t2 = tcg_temp_new();
             TCGLabel *l1 = gen_new_label();
@@ -2666,7 +2666,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc,
         break;
     case OPC_SUB:
         {
-            TCGv t0 = tcg_temp_local_new();
+            TCGv t0 = tcg_temp_new();
             TCGv t1 = tcg_temp_new();
             TCGv t2 = tcg_temp_new();
             TCGLabel *l1 = gen_new_label();
@@ -2707,7 +2707,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc,
 #if defined(TARGET_MIPS64)
     case OPC_DADD:
         {
-            TCGv t0 = tcg_temp_local_new();
+            TCGv t0 = tcg_temp_new();
             TCGv t1 = tcg_temp_new();
             TCGv t2 = tcg_temp_new();
             TCGLabel *l1 = gen_new_label();
@@ -2741,7 +2741,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc,
         break;
     case OPC_DSUB:
         {
-            TCGv t0 = tcg_temp_local_new();
+            TCGv t0 = tcg_temp_new();
             TCGv t1 = tcg_temp_new();
             TCGv t2 = tcg_temp_new();
             TCGLabel *l1 = gen_new_label();
@@ -3759,26 +3759,8 @@ static void gen_loongson_integer(DisasContext *ctx, uint32_t opc,
         return;
     }

-    switch (opc) {
-    case OPC_MULT_G_2E:
-    case OPC_MULT_G_2F:
-    case OPC_MULTU_G_2E:
-    case OPC_MULTU_G_2F:
-#if defined(TARGET_MIPS64)
-    case OPC_DMULT_G_2E:
-    case OPC_DMULT_G_2F:
-    case OPC_DMULTU_G_2E:
-    case OPC_DMULTU_G_2F:
-#endif
-        t0 = tcg_temp_new();
-        t1 = tcg_temp_new();
-        break;
-    default:
-        t0 = tcg_temp_local_new();
-        t1 = tcg_temp_local_new();
-        break;
-    }
-
+    t0 = tcg_temp_new();
+    t1 = tcg_temp_new();
     gen_load_gpr(t0, rs);
     gen_load_gpr(t1, rt);

@@ -3955,21 +3937,10 @@ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt)
     TCGCond cond;

     opc = MASK_LMMI(ctx->opcode);
-    switch (opc) {
-    case OPC_ADD_CP2:
-    case OPC_SUB_CP2:
-    case OPC_DADD_CP2:
-    case OPC_DSUB_CP2:
-        t0 = tcg_temp_local_new_i64();
-        t1 = tcg_temp_local_new_i64();
-        break;
-    default:
-        t0 = tcg_temp_new_i64();
-        t1 = tcg_temp_new_i64();
-        break;
-    }
-
     check_cp1_enabled(ctx);
+
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
     gen_load_fpr64(ctx, t0, rs);
     gen_load_fpr64(ctx, t1, rt);
@@ -8650,7 +8621,7 @@ static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd,
                      int u, int sel, int h)
 {
     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-    TCGv t0 = tcg_temp_local_new();
+    TCGv t0 = tcg_temp_new();

     if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 &&
         ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) !=
@@ -8878,7 +8849,7 @@ static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt,
                      int u, int sel, int h)
 {
     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-    TCGv t0 = tcg_temp_local_new();
+    TCGv t0 = tcg_temp_new();

     gen_load_gpr(t0, rt);
     if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 &&
@@ -11409,7 +11380,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc,
     case OPC_ALNV_PS:
         check_ps(ctx);
         {
-            TCGv t0 = tcg_temp_local_new();
+            TCGv t0 = tcg_temp_new();
             TCGv_i32 fp = tcg_temp_new_i32();
             TCGv_i32 fph = tcg_temp_new_i32();
             TCGLabel *l1 = gen_new_label();
@@ -16159,7 +16130,7 @@ static const TranslatorOps mips_tr_ops = {
     .disas_log = mips_tr_disas_log,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                            target_ulong pc, void *host_pc)
 {
     DisasContext ctx;
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index 7aee65a089..140bc31017 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -1037,7 +1037,7 @@ static const TranslatorOps nios2_tr_ops = {
     .disas_log = nios2_tr_disas_log,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                            target_ulong pc, void *host_pc)
 {
     DisasContext dc;
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index 4c11a1f7ad..0ce4f796fa 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -22,6 +22,7 @@
 #include "qemu/qemu-print.h"
 #include "cpu.h"
 #include "exec/exec-all.h"
+#include "tcg/tcg.h"

 static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
 {
@@ -43,7 +44,8 @@ static void openrisc_cpu_synchronize_from_tb(CPUState *cs,
 {
     OpenRISCCPU *cpu = OPENRISC_CPU(cs);

-    cpu->env.pc = tb_pc(tb);
+    tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
+    cpu->env.pc = tb->pc;
 }

 static void openrisc_restore_state_to_opc(CPUState *cs,
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 2f3d7c5fd1..b8cd8e0964 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -1705,7 +1705,7 @@ static const TranslatorOps openrisc_tr_ops = {
     .disas_log = openrisc_tr_disas_log,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                            target_ulong pc, void *host_pc)
 {
     DisasContext ctx;
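All of these gen_intermediate_code() conversions from int max_insns to int *max_insns are mechanical. A hedged sketch of the resulting shape (the assumption, drawn from the uniformity of the hunks, is that the pointer is forwarded to the generic translator loop so the instruction limit can be both read and updated in place):

/* Sketch, not part of the patch: the target entry point forwards the
 * pointer; translator_loop() is assumed to take int *max_insns in
 * this series and may shrink the count it is handed, e.g. when it
 * stops translation early. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc,
                    &openrisc_tr_ops, &ctx.base);
}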
diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h
index 0fbd8b7246..9666f54f65 100644
--- a/target/ppc/cpu-qom.h
+++ b/target/ppc/cpu-qom.h
@@ -31,6 +31,8 @@

 OBJECT_DECLARE_CPU_TYPE(PowerPCCPU, PowerPCCPUClass, POWERPC_CPU)

+ObjectClass *ppc_cpu_class_by_name(const char *name);
+
 typedef struct CPUArchState CPUPPCState;
 typedef struct ppc_tb_t ppc_tb_t;
 typedef struct ppc_dcr_t ppc_dcr_t;
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index abee71d407..d62ffe8a6f 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -40,7 +40,6 @@
 #include "qemu/cutils.h"
 #include "disas/capstone.h"
 #include "fpu/softfloat.h"
-#include "qapi/qapi-commands-machine-target.h"
 #include "helper_regs.h"
 #include "internal.h"

@@ -6841,7 +6840,7 @@ static const char *ppc_cpu_lookup_alias(const char *alias)
     return NULL;
 }

-static ObjectClass *ppc_cpu_class_by_name(const char *name)
+ObjectClass *ppc_cpu_class_by_name(const char *name)
 {
     char *cpu_model, *typename;
     ObjectClass *oc;
@@ -6981,51 +6980,6 @@ void ppc_cpu_list(void)
 #endif
 }

-static void ppc_cpu_defs_entry(gpointer data, gpointer user_data)
-{
-    ObjectClass *oc = data;
-    CpuDefinitionInfoList **first = user_data;
-    const char *typename;
-    CpuDefinitionInfo *info;
-
-    typename = object_class_get_name(oc);
-    info = g_malloc0(sizeof(*info));
-    info->name = g_strndup(typename,
-                           strlen(typename) - strlen(POWERPC_CPU_TYPE_SUFFIX));
-
-    QAPI_LIST_PREPEND(*first, info);
-}
-
-CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
-{
-    CpuDefinitionInfoList *cpu_list = NULL;
-    GSList *list;
-    int i;
-
-    list = object_class_get_list(TYPE_POWERPC_CPU, false);
-    g_slist_foreach(list, ppc_cpu_defs_entry, &cpu_list);
-    g_slist_free(list);
-
-    for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
-        PowerPCCPUAlias *alias = &ppc_cpu_aliases[i];
-        ObjectClass *oc;
-        CpuDefinitionInfo *info;
-
-        oc = ppc_cpu_class_by_name(alias->model);
-        if (oc == NULL) {
-            continue;
-        }
-
-        info = g_malloc0(sizeof(*info));
-        info->name = g_strdup(alias->alias);
-        info->q_typename = g_strdup(object_class_get_name(oc));
-
-        QAPI_LIST_PREPEND(cpu_list, info);
-    }
-
-    return cpu_list;
-}
-
 static void ppc_cpu_set_pc(CPUState *cs, vaddr value)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
diff --git a/target/ppc/meson.build b/target/ppc/meson.build
index 79beaff147..7929de8360 100644
--- a/target/ppc/meson.build
+++ b/target/ppc/meson.build
@@ -39,7 +39,7 @@ ppc_softmmu_ss.add(files(
   'machine.c',
   'mmu-hash32.c',
   'mmu_common.c',
-  'monitor.c',
+  'ppc-qmp-cmds.c',
 ))
 ppc_softmmu_ss.add(when: 'CONFIG_TCG', if_true: files(
   'mmu_helper.c',
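ppc_cpu_class_by_name() loses its static qualifier and gains the declaration in cpu-qom.h because the QMP code, relocated to ppc-qmp-cmds.c below, still needs it to resolve CPU aliases. A small usage sketch ("7400" is an assumed example model string):

/* Sketch: resolve a model or alias name to its QOM class from outside
 * cpu_init.c. "7400" is an assumed example. */
ObjectClass *oc = ppc_cpu_class_by_name("7400");
if (oc != NULL) {
    const char *typename = object_class_get_name(oc);
    /* typename can now be reported via QMP or used to instantiate */
}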
diff --git a/target/ppc/monitor.c b/target/ppc/ppc-qmp-cmds.c
index 8250b1304e..36e5b5eff8 100644
--- a/target/ppc/monitor.c
+++ b/target/ppc/ppc-qmp-cmds.c
@@ -1,5 +1,5 @@
 /*
- * QEMU monitor
+ * QEMU PPC (monitor definitions)
  *
  * Copyright (c) 2003-2004 Fabrice Bellard
  *
@@ -28,6 +28,9 @@
 #include "qemu/ctype.h"
 #include "monitor/hmp-target.h"
 #include "monitor/hmp.h"
+#include "qapi/qapi-commands-machine-target.h"
+#include "cpu-models.h"
+#include "cpu-qom.h"

 static target_long monitor_get_ccr(Monitor *mon, const struct MonitorDef *md,
                                    int val)
@@ -172,3 +175,48 @@ int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval)

     return -EINVAL;
 }
+
+static void ppc_cpu_defs_entry(gpointer data, gpointer user_data)
+{
+    ObjectClass *oc = data;
+    CpuDefinitionInfoList **first = user_data;
+    const char *typename;
+    CpuDefinitionInfo *info;
+
+    typename = object_class_get_name(oc);
+    info = g_malloc0(sizeof(*info));
+    info->name = g_strndup(typename,
+                           strlen(typename) - strlen(POWERPC_CPU_TYPE_SUFFIX));
+
+    QAPI_LIST_PREPEND(*first, info);
+}
+
+CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
+{
+    CpuDefinitionInfoList *cpu_list = NULL;
+    GSList *list;
+    int i;
+
+    list = object_class_get_list(TYPE_POWERPC_CPU, false);
+    g_slist_foreach(list, ppc_cpu_defs_entry, &cpu_list);
+    g_slist_free(list);
+
+    for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
+        PowerPCCPUAlias *alias = &ppc_cpu_aliases[i];
+        ObjectClass *oc;
+        CpuDefinitionInfo *info;
+
+        oc = ppc_cpu_class_by_name(alias->model);
+        if (oc == NULL) {
+            continue;
+        }
+
+        info = g_malloc0(sizeof(*info));
+        info->name = g_strdup(alias->alias);
+        info->q_typename = g_strdup(object_class_get_name(oc));
+
+        QAPI_LIST_PREPEND(cpu_list, info);
+    }
+
+    return cpu_list;
+}
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 1c17d5a558..2956021e89 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -4415,7 +4415,7 @@ static void gen_bcond(DisasContext *ctx, int type)
     TCGv target;

     if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
-        target = tcg_temp_local_new();
+        target = tcg_temp_new();
         if (type == BCOND_CTR) {
             tcg_gen_mov_tl(target, cpu_ctr);
         } else if (type == BCOND_TAR) {
@@ -5594,8 +5594,8 @@ static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
 {
     TCGv t0, t1;

-    t0 = tcg_temp_local_new();
-    t1 = tcg_temp_local_new();
+    t0 = tcg_temp_new();
+    t1 = tcg_temp_new();

     switch (opc3 & 0x0D) {
     case 0x05:
@@ -7707,7 +7707,7 @@ static const TranslatorOps ppc_tr_ops = {
     .disas_log = ppc_tr_disas_log,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                            target_ulong pc, void *host_pc)
 {
     DisasContext ctx;
diff --git a/target/ppc/translate/spe-impl.c.inc b/target/ppc/translate/spe-impl.c.inc
index 2e6e799a25..bd8963db2b 100644
--- a/target/ppc/translate/spe-impl.c.inc
+++ b/target/ppc/translate/spe-impl.c.inc
@@ -168,7 +168,7 @@ static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
     TCGLabel *l1 = gen_new_label();
     TCGLabel *l2 = gen_new_label();
-    TCGv_i32 t0 = tcg_temp_local_new_i32();
+    TCGv_i32 t0 = tcg_temp_new_i32();

     /* No error here: 6 bits are used */
     tcg_gen_andi_i32(t0, arg2, 0x3F);
@@ -185,7 +185,7 @@ static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
     TCGLabel *l1 = gen_new_label();
     TCGLabel *l2 = gen_new_label();
-    TCGv_i32 t0 = tcg_temp_local_new_i32();
+    TCGv_i32 t0 = tcg_temp_new_i32();

     /* No error here: 6 bits are used */
     tcg_gen_andi_i32(t0, arg2, 0x3F);
@@ -202,7 +202,7 @@ static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
     TCGLabel *l1 = gen_new_label();
     TCGLabel *l2 = gen_new_label();
-    TCGv_i32 t0 = tcg_temp_local_new_i32();
+    TCGv_i32 t0 = tcg_temp_new_i32();

     /* No error here: 6 bits are used */
     tcg_gen_andi_i32(t0, arg2, 0x3F);
@@ -378,7 +378,7 @@ static inline void gen_evsel(DisasContext *ctx)
     TCGLabel *l2 = gen_new_label();
     TCGLabel *l3 = gen_new_label();
     TCGLabel *l4 = gen_new_label();
-    TCGv_i32 t0 = tcg_temp_local_new_i32();
+    TCGv_i32 t0 = tcg_temp_new_i32();

     tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 3);
     tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
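The wholesale tcg_temp_local_new*() to tcg_temp_new*() conversion running through these translators leans on a TCG core change elsewhere in this series (the working assumption: ordinary temps now keep their values across branch and label boundaries, which was previously the sole reason to pay for a "local" temp). The SPE shift helpers just patched are the classic case:

/* Sketch of the idiom: t0 is written before a conditional branch and
 * read after the label, which used to require tcg_temp_local_new_i32().
 * With plain temps now live across brcond/label, this is safe. */
TCGv_i32 t0 = tcg_temp_new_i32();
TCGLabel *l1 = gen_new_label();

tcg_gen_andi_i32(t0, arg2, 0x3F);                /* shift amount */
tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);    /* skip for >= 32 */
tcg_gen_shr_i32(ret, arg1, t0);
gen_set_label(l1);
/* t0 still holds the shift amount here. */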
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index 7741f2eb49..2dd17ab106 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -1508,8 +1508,8 @@ static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign)
     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
     REQUIRE_VECTOR(ctx);

-    vra = tcg_temp_local_new_i64();
-    vrb = tcg_temp_local_new_i64();
+    vra = tcg_temp_new_i64();
+    vrb = tcg_temp_new_i64();
     gt = gen_new_label();
     lt = gen_new_label();
     done = gen_new_label();
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index cac9f1dc7e..5bc0005cc7 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -34,6 +34,7 @@
 #include "fpu/softfloat-helpers.h"
 #include "sysemu/kvm.h"
 #include "kvm_riscv.h"
+#include "tcg/tcg.h"

 /* RISC-V CPU definitions */

@@ -538,10 +539,12 @@ static void riscv_cpu_synchronize_from_tb(CPUState *cs,
     CPURISCVState *env = &cpu->env;
     RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

+    tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
+
     if (xl == MXL_RV32) {
-        env->pc = (int32_t)tb_pc(tb);
+        env->pc = (int32_t) tb->pc;
     } else {
-        env->pc = tb_pc(tb);
+        env->pc = tb->pc;
     }
 }
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 4a957a50b5..a8d516ca3e 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -1310,7 +1310,7 @@ static const TranslatorOps riscv_tr_ops = {
     .disas_log = riscv_tr_disas_log,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                            target_ulong pc, void *host_pc)
 {
     DisasContext ctx;
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index 219ef28e46..67452e310c 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -44,7 +44,8 @@ static void rx_cpu_synchronize_from_tb(CPUState *cs,
 {
     RXCPU *cpu = RX_CPU(cs);

-    cpu->env.pc = tb_pc(tb);
+    tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
+    cpu->env.pc = tb->pc;
 }

 static void rx_restore_state_to_opc(CPUState *cs,
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 87a3f54adb..af23876cb3 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -2363,7 +2363,7 @@ static const TranslatorOps rx_tr_ops = {
     .disas_log = rx_tr_disas_log,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                            target_ulong pc, void *host_pc)
 {
     DisasContext dc;
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index e51a0db0fe..6835c26dda 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -145,7 +145,7 @@ static inline int s390_probe_access(CPUArchState *env, target_ulong addr,
                                     int mmu_idx, bool nonfault,
                                     void **phost, uintptr_t ra)
 {
-    int flags = probe_access_flags(env, addr, access_type, mmu_idx,
+    int flags = probe_access_flags(env, addr, 0, access_type, mmu_idx,
                                    nonfault, phost, ra);

     if (unlikely(flags & TLB_INVALID_MASK)) {
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index faa6f737ba..811049ea28 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -6619,7 +6619,7 @@ static const TranslatorOps s390x_tr_ops = {
     .disas_log = s390x_tr_disas_log,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                            target_ulong pc, void *host_pc)
 {
     DisasContext dc;
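The s390x hunk adapts to a changed probe_access_flags() prototype; from this call site the assumption is that a size argument was inserted after addr, with 0 preserving the old probe-within-the-page behaviour. Sketch of the updated call shape:

/* Sketch: nonfault probe of a guest address. The third argument
 * (access size) is new in this series; 0 is assumed to keep the
 * previous semantics. env, addr, mmu_idx and ra are as in the hunk. */
void *host;
int flags = probe_access_flags(env, addr, 0, MMU_DATA_LOAD,
                               mmu_idx, true, &host, ra);
if (flags & TLB_INVALID_MASK) {
    /* page not accessible: report or defer the fault */
}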
"migration/vmstate.h" #include "exec/exec-all.h" #include "fpu/softfloat-helpers.h" +#include "tcg/tcg.h" static void superh_cpu_set_pc(CPUState *cs, vaddr value) { @@ -46,7 +47,8 @@ static void superh_cpu_synchronize_from_tb(CPUState *cs, { SuperHCPU *cpu = SUPERH_CPU(cs); - cpu->env.pc = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + cpu->env.pc = tb->pc; cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK; } @@ -73,7 +75,7 @@ static bool superh_io_recompile_replay_branch(CPUState *cs, CPUSH4State *env = &cpu->env; if ((env->flags & (TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND)) - && env->pc != tb_pc(tb)) { + && !(cs->tcg_cflags & CF_PCREL) && env->pc != tb->pc) { env->pc -= 2; env->flags &= ~(TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND); return true; diff --git a/target/sh4/translate.c b/target/sh4/translate.c index 7db3468b01..23563024e0 100644 --- a/target/sh4/translate.c +++ b/target/sh4/translate.c @@ -2374,7 +2374,7 @@ static const TranslatorOps sh4_tr_ops = { .disas_log = sh4_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c index 1734ef8dc6..e329a7aece 100644 --- a/target/sparc/cpu.c +++ b/target/sparc/cpu.c @@ -25,6 +25,7 @@ #include "exec/exec-all.h" #include "hw/qdev-properties.h" #include "qapi/visitor.h" +#include "tcg/tcg.h" //#define DEBUG_FEATURES @@ -707,7 +708,8 @@ static void sparc_cpu_synchronize_from_tb(CPUState *cs, { SPARCCPU *cpu = SPARC_CPU(cs); - cpu->env.pc = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + cpu->env.pc = tb->pc; cpu->env.npc = tb->cs_base; } diff --git a/target/sparc/translate.c b/target/sparc/translate.c index 150aeecd14..3b0044aa66 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -5904,7 +5904,7 @@ static const TranslatorOps sparc_tr_ops = { .disas_log = sparc_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc = {}; diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c index 594cd1efd5..d0a9272961 100644 --- a/target/tricore/cpu.c +++ b/target/tricore/cpu.c @@ -55,7 +55,8 @@ static void tricore_cpu_synchronize_from_tb(CPUState *cs, TriCoreCPU *cpu = TRICORE_CPU(cs); CPUTriCoreState *env = &cpu->env; - env->PC = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + env->PC = tb->pc; } static void tricore_restore_state_to_opc(CPUState *cs, diff --git a/target/tricore/translate.c b/target/tricore/translate.c index 7ac34efd76..176ea96b2b 100644 --- a/target/tricore/translate.c +++ b/target/tricore/translate.c @@ -8881,7 +8881,7 @@ static const TranslatorOps tricore_tr_ops = { }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c index 77bcd71030..4af0650deb 100644 --- a/target/xtensa/translate.c +++ b/target/xtensa/translate.c @@ -307,7 +307,7 @@ static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa) static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa) { if (!dc->sar_m32_allocated) { - dc->sar_m32 = tcg_temp_local_new_i32(); + dc->sar_m32 = 
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 77bcd71030..4af0650deb 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -307,7 +307,7 @@ static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
 static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
 {
     if (!dc->sar_m32_allocated) {
-        dc->sar_m32 = tcg_temp_local_new_i32();
+        dc->sar_m32 = tcg_temp_new_i32();
         dc->sar_m32_allocated = true;
     }
     tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
@@ -1074,10 +1074,10 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
         if (i == 0 || arg_copy[i].resource != resource) {
             resource = arg_copy[i].resource;
             if (arg_copy[i].arg->num_bits <= 32) {
-                temp = tcg_temp_local_new_i32();
+                temp = tcg_temp_new_i32();
                 tcg_gen_mov_i32(temp, arg_copy[i].arg->in);
             } else if (arg_copy[i].arg->num_bits <= 64) {
-                temp = tcg_temp_local_new_i64();
+                temp = tcg_temp_new_i64();
                 tcg_gen_mov_i64(temp, arg_copy[i].arg->in);
             } else {
                 g_assert_not_reached();
@@ -1187,7 +1187,7 @@ static void xtensa_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
     DisasContext *dc = container_of(dcbase, DisasContext, base);

     if (dc->icount) {
-        dc->next_icount = tcg_temp_local_new_i32();
+        dc->next_icount = tcg_temp_new_i32();
     }
 }

@@ -1279,7 +1279,7 @@ static const TranslatorOps xtensa_translator_ops = {
     .disas_log = xtensa_tr_disas_log,
 };

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                            target_ulong pc, void *host_pc)
 {
     DisasContext dc = {};
@@ -2273,8 +2273,8 @@ static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr)
 static void translate_s32c1i(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
 {
-    TCGv_i32 tmp = tcg_temp_local_new_i32();
-    TCGv_i32 addr = tcg_temp_local_new_i32();
+    TCGv_i32 tmp = tcg_temp_new_i32();
+    TCGv_i32 addr = tcg_temp_new_i32();
     MemOp mop;

     tcg_gen_mov_i32(tmp, arg[0].in);
@@ -2303,8 +2303,8 @@ static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
 {
     TCGv_i32 prev = tcg_temp_new_i32();
-    TCGv_i32 addr = tcg_temp_local_new_i32();
-    TCGv_i32 res = tcg_temp_local_new_i32();
+    TCGv_i32 addr = tcg_temp_new_i32();
+    TCGv_i32 res = tcg_temp_new_i32();
     TCGLabel *label = gen_new_label();
     MemOp mop;
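A closing note on the last two hunks: translate_s32c1i() implements Xtensa's conditional store (S32C1I). Its body lies outside this diff; a hedged sketch of what those temps are assumed to feed, using the generic atomic compare-and-swap op (the SCOMPARE1 operand and the memop are assumptions):

/* Sketch only. S32C1I writes arg[0] to [addr] iff the memory word
 * equals the SCOMPARE1 special register, returning the old value,
 * which maps naturally onto tcg_gen_atomic_cmpxchg_i32(). */
tcg_gen_mov_i32(tmp, arg[0].in);
tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
gen_check_atomctl(dc, addr);
tcg_gen_atomic_cmpxchg_i32(arg[0].out, addr, cpu_SR[SCOMPARE1],
                           tmp, dc->cring, MO_TEUL | MO_ALIGN);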