Diffstat (limited to 'target')
241 files changed, 14157 insertions, 18044 deletions
diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 0dd623e590..4eb0d2f85c 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -36,7 +36,6 @@ #include "hw/loader.h" #include "hw/boards.h" #endif -#include "sysemu/sysemu.h" #include "sysemu/tcg.h" #include "sysemu/hw_accel.h" #include "kvm_arm.h" diff --git a/target/arm/helper.c b/target/arm/helper.c index 9b1b98705f..3b365a78cb 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -4742,7 +4742,7 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t pageaddr = sextract64(value << 12, 0, 56); bool secure = arm_is_secure_below_el3(env); int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2; - int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_E2 : ARMMMUIdx_SE2, + int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2, pageaddr); tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); diff --git a/target/arm/meson.build b/target/arm/meson.build index 15b936c101..5bfaf43b50 100644 --- a/target/arm/meson.build +++ b/target/arm/meson.build @@ -1,11 +1,11 @@ gen = [ decodetree.process('sve.decode', extra_args: '--decode=disas_sve'), - decodetree.process('neon-shared.decode', extra_args: '--static-decode=disas_neon_shared'), - decodetree.process('neon-dp.decode', extra_args: '--static-decode=disas_neon_dp'), - decodetree.process('neon-ls.decode', extra_args: '--static-decode=disas_neon_ls'), - decodetree.process('vfp.decode', extra_args: '--static-decode=disas_vfp'), - decodetree.process('vfp-uncond.decode', extra_args: '--static-decode=disas_vfp_uncond'), - decodetree.process('m-nocp.decode', extra_args: '--static-decode=disas_m_nocp'), + decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'), + decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'), + decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'), + decodetree.process('vfp.decode', extra_args: '--decode=disas_vfp'), + decodetree.process('vfp-uncond.decode', extra_args: '--decode=disas_vfp_uncond'), + decodetree.process('m-nocp.decode', extra_args: '--decode=disas_m_nocp'), decodetree.process('a32.decode', extra_args: '--static-decode=disas_a32'), decodetree.process('a32-uncond.decode', extra_args: '--static-decode=disas_a32_uncond'), decodetree.process('t32.decode', extra_args: '--static-decode=disas_t32'), @@ -26,6 +26,9 @@ arm_ss.add(files( 'op_helper.c', 'tlb_helper.c', 'translate.c', + 'translate-m-nocp.c', + 'translate-neon.c', + 'translate-vfp.c', 'vec_helper.c', 'vfp_helper.c', 'cpu_tcg.c', diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c index 65cb37d088..efcb600992 100644 --- a/target/arm/op_helper.c +++ b/target/arm/op_helper.c @@ -17,7 +17,6 @@ * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" -#include "qemu/log.h" #include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" @@ -229,6 +228,7 @@ void HELPER(setend)(CPUARMState *env) arm_rebuild_hflags(env); } +#ifndef CONFIG_USER_ONLY /* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped. * The function returns the target EL (1-3) if the instruction is to be trapped; * otherwise it returns 0 indicating it is not trapped. 
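The comment above spells out the check_wfx_trap() contract: it returns the target EL (1-3) when the WFx instruction should be trapped, and 0 when it should not. Below is a minimal stand-alone model of how a caller consumes that contract; the names and the single toy rule (EL0 WFI gated by SCTLR_EL1.nTWI) are illustrative assumptions, not the real QEMU helper.

#include <stdio.h>

/*
 * Toy model of the check_wfx_trap() contract: return the target EL
 * (1-3) if the WFx instruction should be trapped, 0 otherwise.
 * Only one rule is modelled: EL0 WFI traps to EL1 unless
 * SCTLR_EL1.nTWI is set.  Names and structure are illustrative.
 */
static int check_wfx_trap_model(int current_el, int sctlr_ntwi)
{
    if (current_el == 0 && !sctlr_ntwi) {
        return 1;               /* trap to EL1 */
    }
    return 0;                   /* not trapped */
}

int main(void)
{
    int target_el = check_wfx_trap_model(0, 0);

    if (target_el) {
        printf("WFI trapped, route exception to EL%d\n", target_el);
    } else {
        printf("WFI not trapped, halt the CPU (EXCP_HLT)\n");
    }
    return 0;
}

HELPER(wfi) in the next hunk uses the real helper the same way: a non-zero result means trap to that EL, a zero result ends with EXCP_HLT and cs->halted = 1 (or, in the new user-only path, a plain NOP).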
@@ -283,9 +283,21 @@ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe) return 0; } +#endif void HELPER(wfi)(CPUARMState *env, uint32_t insn_len) { +#ifdef CONFIG_USER_ONLY + /* + * WFI in the user-mode emulator is technically permitted but not + * something any real-world code would do. AArch64 Linux kernels + * trap it via SCTRL_EL1.nTWI and make it an (expensive) NOP; + * AArch32 kernels don't trap it so it will delay a bit. + * For QEMU, make it NOP here, because trying to raise EXCP_HLT + * would trigger an abort. + */ + return; +#else CPUState *cs = env_cpu(env); int target_el = check_wfx_trap(env, false); @@ -310,6 +322,7 @@ void HELPER(wfi)(CPUARMState *env, uint32_t insn_len) cs->exception_index = EXCP_HLT; cs->halted = 1; cpu_loop_exit(cs); +#endif } void HELPER(wfe)(CPUARMState *env) diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h new file mode 100644 index 0000000000..c997f4e321 --- /dev/null +++ b/target/arm/translate-a32.h @@ -0,0 +1,144 @@ +/* + * AArch32 translation, common definitions. + * + * Copyright (c) 2021 Linaro, Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef TARGET_ARM_TRANSLATE_A64_H +#define TARGET_ARM_TRANSLATE_A64_H + +/* Prototypes for autogenerated disassembler functions */ +bool disas_m_nocp(DisasContext *dc, uint32_t insn); +bool disas_vfp(DisasContext *s, uint32_t insn); +bool disas_vfp_uncond(DisasContext *s, uint32_t insn); +bool disas_neon_dp(DisasContext *s, uint32_t insn); +bool disas_neon_ls(DisasContext *s, uint32_t insn); +bool disas_neon_shared(DisasContext *s, uint32_t insn); + +void load_reg_var(DisasContext *s, TCGv_i32 var, int reg); +void arm_gen_condlabel(DisasContext *s); +bool vfp_access_check(DisasContext *s); +void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop); +void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop); +void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop); +void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop); +TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs); +void gen_set_cpsr(TCGv_i32 var, uint32_t mask); +void gen_set_condexec(DisasContext *s); +void gen_set_pc_im(DisasContext *s, target_ulong val); +void gen_lookup_tb(DisasContext *s); +long vfp_reg_offset(bool dp, unsigned reg); +long neon_full_reg_offset(unsigned reg); +long neon_element_offset(int reg, int element, MemOp memop); +void gen_rev16(TCGv_i32 dest, TCGv_i32 var); + +static inline TCGv_i32 load_cpu_offset(int offset) +{ + TCGv_i32 tmp = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp, cpu_env, offset); + return tmp; +} + +#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name)) + +static inline void store_cpu_offset(TCGv_i32 var, int offset) +{ + tcg_gen_st_i32(var, cpu_env, offset); + tcg_temp_free_i32(var); +} + +#define store_cpu_field(var, name) \ + store_cpu_offset(var, 
offsetof(CPUARMState, name)) + +/* Create a new temporary and set it to the value of a CPU register. */ +static inline TCGv_i32 load_reg(DisasContext *s, int reg) +{ + TCGv_i32 tmp = tcg_temp_new_i32(); + load_reg_var(s, tmp, reg); + return tmp; +} + +void store_reg(DisasContext *s, int reg, TCGv_i32 var); + +void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val, + TCGv_i32 a32, int index, MemOp opc); +void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val, + TCGv_i32 a32, int index, MemOp opc); +void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val, + TCGv_i32 a32, int index, MemOp opc); +void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val, + TCGv_i32 a32, int index, MemOp opc); +void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, + int index, MemOp opc); +void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, + int index, MemOp opc); +void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, + int index, MemOp opc); +void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, + int index, MemOp opc); + +#define DO_GEN_LD(SUFF, OPC) \ + static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \ + TCGv_i32 a32, int index) \ + { \ + gen_aa32_ld_i32(s, val, a32, index, OPC); \ + } + +#define DO_GEN_ST(SUFF, OPC) \ + static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \ + TCGv_i32 a32, int index) \ + { \ + gen_aa32_st_i32(s, val, a32, index, OPC); \ + } + +static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, + TCGv_i32 a32, int index) +{ + gen_aa32_ld_i64(s, val, a32, index, MO_Q); +} + +static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val, + TCGv_i32 a32, int index) +{ + gen_aa32_st_i64(s, val, a32, index, MO_Q); +} + +DO_GEN_LD(8u, MO_UB) +DO_GEN_LD(16u, MO_UW) +DO_GEN_LD(32u, MO_UL) +DO_GEN_ST(8, MO_UB) +DO_GEN_ST(16, MO_UW) +DO_GEN_ST(32, MO_UL) + +#undef DO_GEN_LD +#undef DO_GEN_ST + +#if defined(CONFIG_USER_ONLY) +#define IS_USER(s) 1 +#else +#define IS_USER(s) (s->user) +#endif + +/* Set NZCV flags from the high 4 bits of var. */ +#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV) + +/* Swap low and high halfwords. 
*/ +static inline void gen_swap_half(TCGv_i32 dest, TCGv_i32 var) +{ + tcg_gen_rotri_i32(dest, var, 16); +} + +#endif diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 95897e63af..0c80d0b505 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -359,14 +359,6 @@ static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp) s->base.is_jmp = DISAS_NORETURN; } -static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, - uint32_t syndrome, uint32_t target_el) -{ - gen_a64_set_pc_im(pc); - gen_exception(excp, syndrome, target_el); - s->base.is_jmp = DISAS_NORETURN; -} - static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome) { TCGv_i32 tcg_syn; @@ -437,13 +429,6 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest) } } -void unallocated_encoding(DisasContext *s) -{ - /* Unallocated and reserved encodings are uncategorized */ - gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), - default_exception_el(s)); -} - static void init_tmp_a64_array(DisasContext *s) { #ifdef CONFIG_DEBUG_TCG diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h index 868d355048..89437276e7 100644 --- a/target/arm/translate-a64.h +++ b/target/arm/translate-a64.h @@ -18,8 +18,6 @@ #ifndef TARGET_ARM_TRANSLATE_A64_H #define TARGET_ARM_TRANSLATE_A64_H -void unallocated_encoding(DisasContext *s); - #define unsupported_encoding(s, insn) \ do { \ qemu_log_mask(LOG_UNIMP, \ diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c new file mode 100644 index 0000000000..d47eb8e153 --- /dev/null +++ b/target/arm/translate-m-nocp.c @@ -0,0 +1,221 @@ +/* + * ARM translation: M-profile NOCP special-case instructions + * + * Copyright (c) 2020 Linaro, Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "tcg/tcg-op.h" +#include "translate.h" +#include "translate-a32.h" + +#include "decode-m-nocp.c.inc" + +/* + * Decode VLLDM and VLSTM are nonstandard because: + * * if there is no FPU then these insns must NOP in + * Secure state and UNDEF in Nonsecure state + * * if there is an FPU then these insns do not have + * the usual behaviour that vfp_access_check() provides of + * being controlled by CPACR/NSACR enable bits or the + * lazy-stacking logic. + */ +static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a) +{ + TCGv_i32 fptr; + + if (!arm_dc_feature(s, ARM_FEATURE_M) || + !arm_dc_feature(s, ARM_FEATURE_V8)) { + return false; + } + + if (a->op) { + /* + * T2 encoding ({D0-D31} reglist): v8.1M and up. We choose not + * to take the IMPDEF option to make memory accesses to the stack + * slots that correspond to the D16-D31 registers (discarding + * read data and writing UNKNOWN values), so for us the T2 + * encoding behaves identically to the T1 encoding. 
+ */ + if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) { + return false; + } + } else { + /* + * T1 encoding ({D0-D15} reglist); undef if we have 32 Dregs. + * This is currently architecturally impossible, but we add the + * check to stay in line with the pseudocode. Note that we must + * emit code for the UNDEF so it takes precedence over the NOCP. + */ + if (dc_isar_feature(aa32_simd_r32, s)) { + unallocated_encoding(s); + return true; + } + } + + /* + * If not secure, UNDEF. We must emit code for this + * rather than returning false so that this takes + * precedence over the m-nocp.decode NOCP fallback. + */ + if (!s->v8m_secure) { + unallocated_encoding(s); + return true; + } + /* If no fpu, NOP. */ + if (!dc_isar_feature(aa32_vfp, s)) { + return true; + } + + fptr = load_reg(s, a->rn); + if (a->l) { + gen_helper_v7m_vlldm(cpu_env, fptr); + } else { + gen_helper_v7m_vlstm(cpu_env, fptr); + } + tcg_temp_free_i32(fptr); + + /* End the TB, because we have updated FP control bits */ + s->base.is_jmp = DISAS_UPDATE_EXIT; + return true; +} + +static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a) +{ + int btmreg, topreg; + TCGv_i64 zero; + TCGv_i32 aspen, sfpa; + + if (!dc_isar_feature(aa32_m_sec_state, s)) { + /* Before v8.1M, fall through in decode to NOCP check */ + return false; + } + + /* Explicitly UNDEF because this takes precedence over NOCP */ + if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) { + unallocated_encoding(s); + return true; + } + + if (!dc_isar_feature(aa32_vfp_simd, s)) { + /* NOP if we have neither FP nor MVE */ + return true; + } + + /* + * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no + * active floating point context so we must NOP (without doing + * any lazy state preservation or the NOCP check). + */ + aspen = load_cpu_field(v7m.fpccr[M_REG_S]); + sfpa = load_cpu_field(v7m.control[M_REG_S]); + tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK); + tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK); + tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK); + tcg_gen_or_i32(sfpa, sfpa, aspen); + arm_gen_condlabel(s); + tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel); + + if (s->fp_excp_el != 0) { + gen_exception_insn(s, s->pc_curr, EXCP_NOCP, + syn_uncategorized(), s->fp_excp_el); + return true; + } + + topreg = a->vd + a->imm - 1; + btmreg = a->vd; + + /* Convert to Sreg numbers if the insn specified in Dregs */ + if (a->size == 3) { + topreg = topreg * 2 + 1; + btmreg *= 2; + } + + if (topreg > 63 || (topreg > 31 && !(topreg & 1))) { + /* UNPREDICTABLE: we choose to undef */ + unallocated_encoding(s); + return true; + } + + /* Silently ignore requests to clear D16-D31 if they don't exist */ + if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) { + topreg = 31; + } + + if (!vfp_access_check(s)) { + return true; + } + + /* Zero the Sregs from btmreg to topreg inclusive. */ + zero = tcg_const_i64(0); + if (btmreg & 1) { + write_neon_element64(zero, btmreg >> 1, 1, MO_32); + btmreg++; + } + for (; btmreg + 1 <= topreg; btmreg += 2) { + write_neon_element64(zero, btmreg >> 1, 0, MO_64); + } + if (btmreg == topreg) { + write_neon_element64(zero, btmreg >> 1, 0, MO_32); + btmreg++; + } + assert(btmreg == topreg + 1); + /* TODO: when MVE is implemented, zero VPR here */ + return true; +} + +static bool trans_NOCP(DisasContext *s, arg_nocp *a) +{ + /* + * Handle M-profile early check for disabled coprocessor: + * all we need to do here is emit the NOCP exception if + * the coprocessor is disabled. 
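Stepping back to trans_VSCCLRM() above: the register-range arithmetic (converting a D-register list to S-register numbers, then zeroing an odd leading element, the aligned pairs, and an odd trailing element) is the trickiest part of the function. Here is a stand-alone sketch of that loop with printf standing in for write_neon_element64(); the example range (vd = 3, imm = 4, single-precision list) is chosen purely for illustration.

#include <assert.h>
#include <stdio.h>

/*
 * Model of the S-register zeroing loop in trans_VSCCLRM():
 * zero S[btmreg]..S[topreg] inclusive, using 64-bit writes for
 * aligned pairs and 32-bit writes for odd ends.
 */
int main(void)
{
    int vd = 3, imm = 4;
    int btmreg = vd;
    int topreg = vd + imm - 1;

    if (btmreg & 1) {
        printf("zero S%d (high half of D%d)\n", btmreg, btmreg >> 1);
        btmreg++;
    }
    for (; btmreg + 1 <= topreg; btmreg += 2) {
        printf("zero D%d (S%d and S%d)\n", btmreg >> 1, btmreg, btmreg + 1);
    }
    if (btmreg == topreg) {
        printf("zero S%d (low half of D%d)\n", btmreg, btmreg >> 1);
        btmreg++;
    }
    assert(btmreg == topreg + 1);
    return 0;
}

With vd = 3 and imm = 4 this prints S3, then D2 (covering S4 and S5), then S6, ending with btmreg == topreg + 1 exactly as the assert in the real function requires.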
Otherwise we return false + * and the real VFP/etc decode will handle the insn. + */ + assert(arm_dc_feature(s, ARM_FEATURE_M)); + + if (a->cp == 11) { + a->cp = 10; + } + if (arm_dc_feature(s, ARM_FEATURE_V8_1M) && + (a->cp == 8 || a->cp == 9 || a->cp == 14 || a->cp == 15)) { + /* in v8.1M cp 8, 9, 14, 15 also are governed by the cp10 enable */ + a->cp = 10; + } + + if (a->cp != 10) { + gen_exception_insn(s, s->pc_curr, EXCP_NOCP, + syn_uncategorized(), default_exception_el(s)); + return true; + } + + if (s->fp_excp_el != 0) { + gen_exception_insn(s, s->pc_curr, EXCP_NOCP, + syn_uncategorized(), s->fp_excp_el); + return true; + } + + return false; +} + +static bool trans_NOCP_8_1(DisasContext *s, arg_nocp *a) +{ + /* This range needs a coprocessor check for v8.1M and later only */ + if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) { + return false; + } + return trans_NOCP(s, a); +} diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c index a02b8369a1..658bd275da 100644 --- a/target/arm/translate-neon.c.inc +++ b/target/arm/translate-neon.c @@ -20,11 +20,13 @@ * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ -/* - * This file is intended to be included from translate.c; it uses - * some macros and definitions provided by that file. - * It might be possible to convert it to a standalone .c file eventually. - */ +#include "qemu/osdep.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" +#include "exec/exec-all.h" +#include "exec/gen-icount.h" +#include "translate.h" +#include "translate-a32.h" static inline int plus1(DisasContext *s, int x) { @@ -60,6 +62,13 @@ static inline int neon_3same_fp_size(DisasContext *s, int x) #include "decode-neon-ls.c.inc" #include "decode-neon-shared.c.inc" +static TCGv_ptr vfp_reg_ptr(bool dp, int reg) +{ + TCGv_ptr ret = tcg_temp_new_ptr(); + tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg)); + return ret; +} + static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop) { long offset = neon_element_offset(reg, ele, mop & MO_SIZE); diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c index e20d9c7ba6..3da84f30a0 100644 --- a/target/arm/translate-vfp.c.inc +++ b/target/arm/translate-vfp.c @@ -20,16 +20,38 @@ * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ -/* - * This file is intended to be included from translate.c; it uses - * some macros and definitions provided by that file. - * It might be possible to convert it to a standalone .c file eventually. 
- */ +#include "qemu/osdep.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" +#include "exec/exec-all.h" +#include "exec/gen-icount.h" +#include "translate.h" +#include "translate-a32.h" /* Include the generated VFP decoder */ #include "decode-vfp.c.inc" #include "decode-vfp-uncond.c.inc" +static inline void vfp_load_reg64(TCGv_i64 var, int reg) +{ + tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg)); +} + +static inline void vfp_store_reg64(TCGv_i64 var, int reg) +{ + tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg)); +} + +static inline void vfp_load_reg32(TCGv_i32 var, int reg) +{ + tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg)); +} + +static inline void vfp_store_reg32(TCGv_i32 var, int reg) +{ + tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg)); +} + /* * The imm8 encodes the sign bit, enough bits to represent an exponent in * the range 01....1xx to 10....0xx, and the most significant 4 bits of @@ -191,7 +213,7 @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled) * The most usual kind of VFP access check, for everything except * FMXR/FMRX to the always-available special registers. */ -static bool vfp_access_check(DisasContext *s) +bool vfp_access_check(DisasContext *s) { return full_vfp_access_check(s, false); } @@ -3800,202 +3822,6 @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a) return true; } -/* - * Decode VLLDM and VLSTM are nonstandard because: - * * if there is no FPU then these insns must NOP in - * Secure state and UNDEF in Nonsecure state - * * if there is an FPU then these insns do not have - * the usual behaviour that vfp_access_check() provides of - * being controlled by CPACR/NSACR enable bits or the - * lazy-stacking logic. - */ -static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a) -{ - TCGv_i32 fptr; - - if (!arm_dc_feature(s, ARM_FEATURE_M) || - !arm_dc_feature(s, ARM_FEATURE_V8)) { - return false; - } - - if (a->op) { - /* - * T2 encoding ({D0-D31} reglist): v8.1M and up. We choose not - * to take the IMPDEF option to make memory accesses to the stack - * slots that correspond to the D16-D31 registers (discarding - * read data and writing UNKNOWN values), so for us the T2 - * encoding behaves identically to the T1 encoding. - */ - if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) { - return false; - } - } else { - /* - * T1 encoding ({D0-D15} reglist); undef if we have 32 Dregs. - * This is currently architecturally impossible, but we add the - * check to stay in line with the pseudocode. Note that we must - * emit code for the UNDEF so it takes precedence over the NOCP. - */ - if (dc_isar_feature(aa32_simd_r32, s)) { - unallocated_encoding(s); - return true; - } - } - - /* - * If not secure, UNDEF. We must emit code for this - * rather than returning false so that this takes - * precedence over the m-nocp.decode NOCP fallback. - */ - if (!s->v8m_secure) { - unallocated_encoding(s); - return true; - } - /* If no fpu, NOP. 
*/ - if (!dc_isar_feature(aa32_vfp, s)) { - return true; - } - - fptr = load_reg(s, a->rn); - if (a->l) { - gen_helper_v7m_vlldm(cpu_env, fptr); - } else { - gen_helper_v7m_vlstm(cpu_env, fptr); - } - tcg_temp_free_i32(fptr); - - /* End the TB, because we have updated FP control bits */ - s->base.is_jmp = DISAS_UPDATE_EXIT; - return true; -} - -static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a) -{ - int btmreg, topreg; - TCGv_i64 zero; - TCGv_i32 aspen, sfpa; - - if (!dc_isar_feature(aa32_m_sec_state, s)) { - /* Before v8.1M, fall through in decode to NOCP check */ - return false; - } - - /* Explicitly UNDEF because this takes precedence over NOCP */ - if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) { - unallocated_encoding(s); - return true; - } - - if (!dc_isar_feature(aa32_vfp_simd, s)) { - /* NOP if we have neither FP nor MVE */ - return true; - } - - /* - * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no - * active floating point context so we must NOP (without doing - * any lazy state preservation or the NOCP check). - */ - aspen = load_cpu_field(v7m.fpccr[M_REG_S]); - sfpa = load_cpu_field(v7m.control[M_REG_S]); - tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK); - tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK); - tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK); - tcg_gen_or_i32(sfpa, sfpa, aspen); - arm_gen_condlabel(s); - tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel); - - if (s->fp_excp_el != 0) { - gen_exception_insn(s, s->pc_curr, EXCP_NOCP, - syn_uncategorized(), s->fp_excp_el); - return true; - } - - topreg = a->vd + a->imm - 1; - btmreg = a->vd; - - /* Convert to Sreg numbers if the insn specified in Dregs */ - if (a->size == 3) { - topreg = topreg * 2 + 1; - btmreg *= 2; - } - - if (topreg > 63 || (topreg > 31 && !(topreg & 1))) { - /* UNPREDICTABLE: we choose to undef */ - unallocated_encoding(s); - return true; - } - - /* Silently ignore requests to clear D16-D31 if they don't exist */ - if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) { - topreg = 31; - } - - if (!vfp_access_check(s)) { - return true; - } - - /* Zero the Sregs from btmreg to topreg inclusive. */ - zero = tcg_const_i64(0); - if (btmreg & 1) { - write_neon_element64(zero, btmreg >> 1, 1, MO_32); - btmreg++; - } - for (; btmreg + 1 <= topreg; btmreg += 2) { - write_neon_element64(zero, btmreg >> 1, 0, MO_64); - } - if (btmreg == topreg) { - write_neon_element64(zero, btmreg >> 1, 0, MO_32); - btmreg++; - } - assert(btmreg == topreg + 1); - /* TODO: when MVE is implemented, zero VPR here */ - return true; -} - -static bool trans_NOCP(DisasContext *s, arg_nocp *a) -{ - /* - * Handle M-profile early check for disabled coprocessor: - * all we need to do here is emit the NOCP exception if - * the coprocessor is disabled. Otherwise we return false - * and the real VFP/etc decode will handle the insn. 
- */ - assert(arm_dc_feature(s, ARM_FEATURE_M)); - - if (a->cp == 11) { - a->cp = 10; - } - if (arm_dc_feature(s, ARM_FEATURE_V8_1M) && - (a->cp == 8 || a->cp == 9 || a->cp == 14 || a->cp == 15)) { - /* in v8.1M cp 8, 9, 14, 15 also are governed by the cp10 enable */ - a->cp = 10; - } - - if (a->cp != 10) { - gen_exception_insn(s, s->pc_curr, EXCP_NOCP, - syn_uncategorized(), default_exception_el(s)); - return true; - } - - if (s->fp_excp_el != 0) { - gen_exception_insn(s, s->pc_curr, EXCP_NOCP, - syn_uncategorized(), s->fp_excp_el); - return true; - } - - return false; -} - -static bool trans_NOCP_8_1(DisasContext *s, arg_nocp *a) -{ - /* This range needs a coprocessor check for v8.1M and later only */ - if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) { - return false; - } - return trans_NOCP(s, a); -} - static bool trans_VINS(DisasContext *s, arg_VINS *a) { TCGv_i32 rd, rm; diff --git a/target/arm/translate.c b/target/arm/translate.c index 43ff0d4b8a..8e0e55c1e0 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -50,12 +50,7 @@ #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8) #include "translate.h" - -#if defined(CONFIG_USER_ONLY) -#define IS_USER(s) 1 -#else -#define IS_USER(s) (s->user) -#endif +#include "translate-a32.h" /* These are TCG temporaries used only by the legacy iwMMXt decoder */ static TCGv_i64 cpu_V0, cpu_V1, cpu_M0; @@ -71,11 +66,6 @@ static const char * const regnames[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" }; -/* Function prototypes for gen_ functions calling Neon helpers. */ -typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32, - TCGv_i32, TCGv_i32); -/* Function prototypes for gen_ functions for fix point conversions */ -typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); /* initialize TCG globals. */ void arm_translate_init(void) @@ -101,7 +91,7 @@ void arm_translate_init(void) } /* Generate a label used for skipping this instruction */ -static void arm_gen_condlabel(DisasContext *s) +void arm_gen_condlabel(DisasContext *s) { if (!s->condjmp) { s->condlabel = gen_new_label(); @@ -109,30 +99,6 @@ static void arm_gen_condlabel(DisasContext *s) } } -/* - * Constant expanders for the decoders. - */ - -static int negate(DisasContext *s, int x) -{ - return -x; -} - -static int plus_2(DisasContext *s, int x) -{ - return x + 2; -} - -static int times_2(DisasContext *s, int x) -{ - return x * 2; -} - -static int times_4(DisasContext *s, int x) -{ - return x * 4; -} - /* Flags for the disas_set_da_iss info argument: * lower bits hold the Rt register number, higher bits are flags. */ @@ -211,24 +177,6 @@ static inline int get_a32_user_mem_index(DisasContext *s) } } -static inline TCGv_i32 load_cpu_offset(int offset) -{ - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_ld_i32(tmp, cpu_env, offset); - return tmp; -} - -#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name)) - -static inline void store_cpu_offset(TCGv_i32 var, int offset) -{ - tcg_gen_st_i32(var, cpu_env, offset); - tcg_temp_free_i32(var); -} - -#define store_cpu_field(var, name) \ - store_cpu_offset(var, offsetof(CPUARMState, name)) - /* The architectural value of PC. */ static uint32_t read_pc(DisasContext *s) { @@ -236,7 +184,7 @@ static uint32_t read_pc(DisasContext *s) } /* Set a variable to the value of a CPU register. 
*/ -static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg) +void load_reg_var(DisasContext *s, TCGv_i32 var, int reg) { if (reg == 15) { tcg_gen_movi_i32(var, read_pc(s)); @@ -245,20 +193,12 @@ static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg) } } -/* Create a new temporary and set it to the value of a CPU register. */ -static inline TCGv_i32 load_reg(DisasContext *s, int reg) -{ - TCGv_i32 tmp = tcg_temp_new_i32(); - load_reg_var(s, tmp, reg); - return tmp; -} - /* * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4). * This is used for load/store for which use of PC implies (literal), * or ADD that implies ADR. */ -static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs) +TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -272,7 +212,7 @@ static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs) /* Set a CPU register. The source must be a temporary and will be marked as dead. */ -static void store_reg(DisasContext *s, int reg, TCGv_i32 var) +void store_reg(DisasContext *s, int reg, TCGv_i32 var) { if (reg == 15) { /* In Thumb mode, we must ignore bit 0. @@ -313,15 +253,12 @@ static void store_sp_checked(DisasContext *s, TCGv_i32 var) #define gen_sxtb16(var) gen_helper_sxtb16(var, var) #define gen_uxtb16(var) gen_helper_uxtb16(var, var) - -static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask) +void gen_set_cpsr(TCGv_i32 var, uint32_t mask) { TCGv_i32 tmp_mask = tcg_const_i32(mask); gen_helper_cpsr_write(cpu_env, var, tmp_mask); tcg_temp_free_i32(tmp_mask); } -/* Set NZCV flags from the high 4 bits of var. */ -#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV) static void gen_exception_internal(int excp) { @@ -388,7 +325,7 @@ static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b) } /* Byteswap each halfword. */ -static void gen_rev16(TCGv_i32 dest, TCGv_i32 var) +void gen_rev16(TCGv_i32 dest, TCGv_i32 var) { TCGv_i32 tmp = tcg_temp_new_i32(); TCGv_i32 mask = tcg_const_i32(0x00ff00ff); @@ -409,12 +346,6 @@ static void gen_revsh(TCGv_i32 dest, TCGv_i32 var) tcg_gen_ext16s_i32(dest, var); } -/* Swap low and high halfwords. */ -static void gen_swap_half(TCGv_i32 dest, TCGv_i32 var) -{ - tcg_gen_rotri_i32(dest, var, 16); -} - /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead. tmp = (t0 ^ t1) & 0x8000; t0 &= ~0x8000; @@ -746,7 +677,7 @@ void arm_gen_test_cc(int cc, TCGLabel *label) arm_free_cc(&cmp); } -static inline void gen_set_condexec(DisasContext *s) +void gen_set_condexec(DisasContext *s) { if (s->condexec_mask) { uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1); @@ -756,7 +687,7 @@ static inline void gen_set_condexec(DisasContext *s) } } -static inline void gen_set_pc_im(DisasContext *s, target_ulong val) +void gen_set_pc_im(DisasContext *s, target_ulong val) { tcg_gen_movi_i32(cpu_R[15], val); } @@ -948,24 +879,24 @@ static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op) * Internal routines are used for NEON cases where the endianness * and/or alignment has already been taken into account and manipulated. 
*/ -static void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val, - TCGv_i32 a32, int index, MemOp opc) +void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val, + TCGv_i32 a32, int index, MemOp opc) { TCGv addr = gen_aa32_addr(s, a32, opc); tcg_gen_qemu_ld_i32(val, addr, index, opc); tcg_temp_free(addr); } -static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val, - TCGv_i32 a32, int index, MemOp opc) +void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val, + TCGv_i32 a32, int index, MemOp opc) { TCGv addr = gen_aa32_addr(s, a32, opc); tcg_gen_qemu_st_i32(val, addr, index, opc); tcg_temp_free(addr); } -static void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val, - TCGv_i32 a32, int index, MemOp opc) +void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val, + TCGv_i32 a32, int index, MemOp opc) { TCGv addr = gen_aa32_addr(s, a32, opc); @@ -978,8 +909,8 @@ static void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val, tcg_temp_free(addr); } -static void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val, - TCGv_i32 a32, int index, MemOp opc) +void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val, + TCGv_i32 a32, int index, MemOp opc) { TCGv addr = gen_aa32_addr(s, a32, opc); @@ -995,26 +926,26 @@ static void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val, tcg_temp_free(addr); } -static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, - int index, MemOp opc) +void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, + int index, MemOp opc) { gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc)); } -static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, - int index, MemOp opc) +void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, + int index, MemOp opc) { gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc)); } -static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, - int index, MemOp opc) +void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, + int index, MemOp opc) { gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc)); } -static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, - int index, MemOp opc) +void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, + int index, MemOp opc) { gen_aa32_st_internal_i64(s, val, a32, index, finalize_memop(s, opc)); } @@ -1033,25 +964,6 @@ static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, gen_aa32_st_i32(s, val, a32, index, OPC); \ } -static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, - TCGv_i32 a32, int index) -{ - gen_aa32_ld_i64(s, val, a32, index, MO_Q); -} - -static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val, - TCGv_i32 a32, int index) -{ - gen_aa32_st_i64(s, val, a32, index, MO_Q); -} - -DO_GEN_LD(8u, MO_UB) -DO_GEN_LD(16u, MO_UW) -DO_GEN_LD(32u, MO_UL) -DO_GEN_ST(8, MO_UB) -DO_GEN_ST(16, MO_UW) -DO_GEN_ST(32, MO_UL) - static inline void gen_hvc(DisasContext *s, int imm16) { /* The pre HVC helper handles cases when HVC gets trapped @@ -1093,11 +1005,15 @@ static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp) s->base.is_jmp = DISAS_NORETURN; } -static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp, - int syn, uint32_t target_el) +void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, + uint32_t syn, uint32_t target_el) { - gen_set_condexec(s); - gen_set_pc_im(s, pc); + if (s->aarch64) { + gen_a64_set_pc_im(pc); + } else { + 
gen_set_condexec(s); + gen_set_pc_im(s, pc); + } gen_exception(excp, syn, target_el); s->base.is_jmp = DISAS_NORETURN; } @@ -1114,7 +1030,7 @@ static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn) s->base.is_jmp = DISAS_NORETURN; } -static void unallocated_encoding(DisasContext *s) +void unallocated_encoding(DisasContext *s) { /* Unallocated and reserved encodings are uncategorized */ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), @@ -1138,7 +1054,7 @@ static void gen_exception_el(DisasContext *s, int excp, uint32_t syn, } /* Force a TB lookup after an instruction that changes the CPU state. */ -static inline void gen_lookup_tb(DisasContext *s) +void gen_lookup_tb(DisasContext *s) { tcg_gen_movi_i32(cpu_R[15], s->base.pc_next); s->base.is_jmp = DISAS_EXIT; @@ -1173,7 +1089,7 @@ static inline void gen_hlt(DisasContext *s, int imm) /* * Return the offset of a "full" NEON Dreg. */ -static long neon_full_reg_offset(unsigned reg) +long neon_full_reg_offset(unsigned reg) { return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]); } @@ -1182,7 +1098,7 @@ static long neon_full_reg_offset(unsigned reg) * Return the offset of a 2**SIZE piece of a NEON register, at index ELE, * where 0 is the least significant end of the register. */ -static long neon_element_offset(int reg, int element, MemOp memop) +long neon_element_offset(int reg, int element, MemOp memop) { int element_size = 1 << (memop & MO_SIZE); int ofs = element * element_size; @@ -1199,7 +1115,7 @@ static long neon_element_offset(int reg, int element, MemOp memop) } /* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */ -static long vfp_reg_offset(bool dp, unsigned reg) +long vfp_reg_offset(bool dp, unsigned reg) { if (dp) { return neon_element_offset(reg, 0, MO_64); @@ -1208,27 +1124,7 @@ static long vfp_reg_offset(bool dp, unsigned reg) } } -static inline void vfp_load_reg64(TCGv_i64 var, int reg) -{ - tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg)); -} - -static inline void vfp_store_reg64(TCGv_i64 var, int reg) -{ - tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg)); -} - -static inline void vfp_load_reg32(TCGv_i32 var, int reg) -{ - tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg)); -} - -static inline void vfp_store_reg32(TCGv_i32 var, int reg) -{ - tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg)); -} - -static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop) +void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop) { long off = neon_element_offset(reg, ele, memop); @@ -1254,7 +1150,7 @@ static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop) } } -static void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop) +void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop) { long off = neon_element_offset(reg, ele, memop); @@ -1273,7 +1169,7 @@ static void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop) } } -static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop) +void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop) { long off = neon_element_offset(reg, ele, memop); @@ -1292,7 +1188,7 @@ static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop) } } -static void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop) +void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop) { long off = neon_element_offset(reg, ele, memop); @@ -1308,20 +1204,8 @@ static void 
write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop) } } -static TCGv_ptr vfp_reg_ptr(bool dp, int reg) -{ - TCGv_ptr ret = tcg_temp_new_ptr(); - tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg)); - return ret; -} - #define ARM_CP_RW_BIT (1 << 20) -/* Include the VFP and Neon decoders */ -#include "decode-m-nocp.c.inc" -#include "translate-vfp.c.inc" -#include "translate-neon.c.inc" - static inline void iwmmxt_load_reg(TCGv_i64 var, int reg) { tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg])); @@ -9097,6 +8981,7 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) unsigned int insn; if (arm_pre_translate_insn(dc)) { + dc->base.pc_next += 4; return; } @@ -9166,6 +9051,7 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) bool is_16bit; if (arm_pre_translate_insn(dc)) { + dc->base.pc_next += 2; return; } diff --git a/target/arm/translate.h b/target/arm/translate.h index ccf60c96d8..12c28b0d32 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -118,6 +118,30 @@ extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF; extern TCGv_i64 cpu_exclusive_addr; extern TCGv_i64 cpu_exclusive_val; +/* + * Constant expanders for the decoders. + */ + +static inline int negate(DisasContext *s, int x) +{ + return -x; +} + +static inline int plus_2(DisasContext *s, int x) +{ + return x + 2; +} + +static inline int times_2(DisasContext *s, int x) +{ + return x * 2; +} + +static inline int times_4(DisasContext *s, int x) +{ + return x * 4; +} + static inline int arm_dc_feature(DisasContext *dc, int feature) { return (dc->features & (1ULL << feature)) != 0; @@ -205,6 +229,9 @@ void arm_free_cc(DisasCompare *cmp); void arm_jump_cc(DisasCompare *cmp, TCGLabel *label); void arm_gen_test_cc(int cc, TCGLabel *label); MemOp pow2_align(unsigned i); +void unallocated_encoding(DisasContext *s); +void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, + uint32_t syn, uint32_t target_el); /* Return state of Alternate Half-precision flag, caller frees result */ static inline TCGv_i32 get_ahp_flag(void) @@ -382,6 +409,8 @@ typedef void NeonGenOneOpFn(TCGv_i32, TCGv_i32); typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32); typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32); typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32); +typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32, + TCGv_i32, TCGv_i32); typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64); typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64); typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64); diff --git a/target/avr/helper.c b/target/avr/helper.c index 35e1019594..981c29da45 100644 --- a/target/avr/helper.c +++ b/target/avr/helper.c @@ -188,11 +188,7 @@ void helper_break(CPUAVRState *env) void helper_wdr(CPUAVRState *env) { - CPUState *cs = env_cpu(env); - - /* WD is not implemented yet, placeholder */ - cs->exception_index = EXCP_DEBUG; - cpu_loop_exit(cs); + qemu_log_mask(LOG_UNIMP, "WDG reset (not implemented)\n"); } /* diff --git a/target/hexagon/arch.c b/target/hexagon/arch.c index 09de124818..68a55b3bd4 100644 --- a/target/hexagon/arch.c +++ b/target/hexagon/arch.c @@ -27,6 +27,97 @@ #define SF_MANTBITS 23 #define float32_nan make_float32(0xffffffff) +/* + * These three tables are used by the cabacdecbin instruction + */ +const uint8_t rLPS_table_64x4[64][4] = { + {128, 176, 208, 240}, + {128, 167, 197, 227}, + {128, 158, 187, 216}, + {123, 150, 178, 205}, + {116, 142, 169, 
195}, + {111, 135, 160, 185}, + {105, 128, 152, 175}, + {100, 122, 144, 166}, + {95, 116, 137, 158}, + {90, 110, 130, 150}, + {85, 104, 123, 142}, + {81, 99, 117, 135}, + {77, 94, 111, 128}, + {73, 89, 105, 122}, + {69, 85, 100, 116}, + {66, 80, 95, 110}, + {62, 76, 90, 104}, + {59, 72, 86, 99}, + {56, 69, 81, 94}, + {53, 65, 77, 89}, + {51, 62, 73, 85}, + {48, 59, 69, 80}, + {46, 56, 66, 76}, + {43, 53, 63, 72}, + {41, 50, 59, 69}, + {39, 48, 56, 65}, + {37, 45, 54, 62}, + {35, 43, 51, 59}, + {33, 41, 48, 56}, + {32, 39, 46, 53}, + {30, 37, 43, 50}, + {29, 35, 41, 48}, + {27, 33, 39, 45}, + {26, 31, 37, 43}, + {24, 30, 35, 41}, + {23, 28, 33, 39}, + {22, 27, 32, 37}, + {21, 26, 30, 35}, + {20, 24, 29, 33}, + {19, 23, 27, 31}, + {18, 22, 26, 30}, + {17, 21, 25, 28}, + {16, 20, 23, 27}, + {15, 19, 22, 25}, + {14, 18, 21, 24}, + {14, 17, 20, 23}, + {13, 16, 19, 22}, + {12, 15, 18, 21}, + {12, 14, 17, 20}, + {11, 14, 16, 19}, + {11, 13, 15, 18}, + {10, 12, 15, 17}, + {10, 12, 14, 16}, + {9, 11, 13, 15}, + {9, 11, 12, 14}, + {8, 10, 12, 14}, + {8, 9, 11, 13}, + {7, 9, 11, 12}, + {7, 9, 10, 12}, + {7, 8, 10, 11}, + {6, 8, 9, 11}, + {6, 7, 9, 10}, + {6, 7, 8, 9}, + {2, 2, 2, 2} +}; + +const uint8_t AC_next_state_MPS_64[64] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 62, 63 +}; + + +const uint8_t AC_next_state_LPS_64[64] = { + 0, 0, 1, 2, 2, 4, 4, 5, 6, 7, + 8, 9, 9, 11, 11, 12, 13, 13, 15, 15, + 16, 16, 18, 18, 19, 19, 21, 21, 22, 22, + 23, 24, 24, 25, 26, 26, 27, 27, 28, 29, + 29, 30, 30, 30, 31, 32, 32, 33, 33, 33, + 34, 34, 35, 35, 35, 36, 36, 36, 37, 37, + 37, 38, 38, 63 +}; + #define BITS_MASK_8 0x5555555555555555ULL #define PAIR_MASK_8 0x3333333333333333ULL #define NYBL_MASK_8 0x0f0f0f0f0f0f0f0fULL @@ -76,19 +167,6 @@ uint64_t deinterleave(uint64_t src) return myeven | (myodd << 32); } -uint32_t carry_from_add64(uint64_t a, uint64_t b, uint32_t c) -{ - uint64_t tmpa, tmpb, tmpc; - tmpa = fGETUWORD(0, a); - tmpb = fGETUWORD(0, b); - tmpc = tmpa + tmpb + c; - tmpa = fGETUWORD(1, a); - tmpb = fGETUWORD(1, b); - tmpc = tmpa + tmpb + fGETUWORD(1, tmpc); - tmpc = fGETUWORD(1, tmpc); - return tmpc; -} - int32_t conv_round(int32_t a, int n) { int64_t val; @@ -108,7 +186,7 @@ int32_t conv_round(int32_t a, int n) /* Floating Point Stuff */ -static const int softfloat_roundingmodes[] = { +static const FloatRoundMode softfloat_roundingmodes[] = { float_round_nearest_even, float_round_to_zero, float_round_down, @@ -156,12 +234,6 @@ void arch_fpop_end(CPUHexagonState *env) } } -static float32 float32_mul_pow2(float32 a, uint32_t p, float_status *fp_status) -{ - float32 b = make_float32((SF_BIAS + p) << SF_MANTBITS); - return float32_mul(a, b, fp_status); -} - int arch_sf_recip_common(float32 *Rs, float32 *Rt, float32 *Rd, int *adjust, float_status *fp_status) { @@ -200,12 +272,13 @@ int arch_sf_recip_common(float32 *Rs, float32 *Rt, float32 *Rd, int *adjust, /* or put Inf in num fixup? 
*/ uint8_t RsV_sign = float32_is_neg(RsV); uint8_t RtV_sign = float32_is_neg(RtV); + /* Check that RsV is NOT infinite before we overwrite it */ + if (!float32_is_infinity(RsV)) { + float_raise(float_flag_divbyzero, fp_status); + } RsV = infinite_float32(RsV_sign ^ RtV_sign); RtV = float32_one; RdV = float32_one; - if (float32_is_infinity(RsV)) { - float_raise(float_flag_divbyzero, fp_status); - } } else if (float32_is_infinity(RtV)) { RsV = make_float32(0x80000000 & (RsV ^ RtV)); RtV = float32_one; @@ -230,22 +303,22 @@ int arch_sf_recip_common(float32 *Rs, float32 *Rt, float32 *Rd, int *adjust, if ((n_exp - d_exp + SF_BIAS) <= SF_MANTBITS) { /* Near quotient underflow / inexact Q */ PeV = 0x80; - RtV = float32_mul_pow2(RtV, -64, fp_status); - RsV = float32_mul_pow2(RsV, 64, fp_status); + RtV = float32_scalbn(RtV, -64, fp_status); + RsV = float32_scalbn(RsV, 64, fp_status); } else if ((n_exp - d_exp + SF_BIAS) > (SF_MAXEXP - 24)) { /* Near quotient overflow */ PeV = 0x40; - RtV = float32_mul_pow2(RtV, 32, fp_status); - RsV = float32_mul_pow2(RsV, -32, fp_status); + RtV = float32_scalbn(RtV, 32, fp_status); + RsV = float32_scalbn(RsV, -32, fp_status); } else if (n_exp <= SF_MANTBITS + 2) { - RtV = float32_mul_pow2(RtV, 64, fp_status); - RsV = float32_mul_pow2(RsV, 64, fp_status); + RtV = float32_scalbn(RtV, 64, fp_status); + RsV = float32_scalbn(RsV, 64, fp_status); } else if (d_exp <= 1) { - RtV = float32_mul_pow2(RtV, 32, fp_status); - RsV = float32_mul_pow2(RsV, 32, fp_status); + RtV = float32_scalbn(RtV, 32, fp_status); + RsV = float32_scalbn(RsV, 32, fp_status); } else if (d_exp > 252) { - RtV = float32_mul_pow2(RtV, -32, fp_status); - RsV = float32_mul_pow2(RsV, -32, fp_status); + RtV = float32_scalbn(RtV, -32, fp_status); + RsV = float32_scalbn(RsV, -32, fp_status); } RdV = 0; ret = 1; @@ -265,7 +338,7 @@ int arch_sf_invsqrt_common(float32 *Rs, float32 *Rd, int *adjust, int r_exp; int ret = 0; RsV = *Rs; - if (float32_is_infinity(RsV)) { + if (float32_is_any_nan(RsV)) { if (extract32(RsV, 22, 1) == 0) { float_raise(float_flag_invalid, fp_status); } @@ -287,7 +360,7 @@ int arch_sf_invsqrt_common(float32 *Rs, float32 *Rd, int *adjust, /* Basic checks passed */ r_exp = float32_getexp(RsV); if (r_exp <= 24) { - RsV = float32_mul_pow2(RsV, 64, fp_status); + RsV = float32_scalbn(RsV, 64, fp_status); PeV = 0xe0; } RdV = 0; @@ -298,3 +371,41 @@ int arch_sf_invsqrt_common(float32 *Rs, float32 *Rd, int *adjust, *adjust = PeV; return ret; } + +const uint8_t recip_lookup_table[128] = { + 0x0fe, 0x0fa, 0x0f6, 0x0f2, 0x0ef, 0x0eb, 0x0e7, 0x0e4, + 0x0e0, 0x0dd, 0x0d9, 0x0d6, 0x0d2, 0x0cf, 0x0cc, 0x0c9, + 0x0c6, 0x0c2, 0x0bf, 0x0bc, 0x0b9, 0x0b6, 0x0b3, 0x0b1, + 0x0ae, 0x0ab, 0x0a8, 0x0a5, 0x0a3, 0x0a0, 0x09d, 0x09b, + 0x098, 0x096, 0x093, 0x091, 0x08e, 0x08c, 0x08a, 0x087, + 0x085, 0x083, 0x080, 0x07e, 0x07c, 0x07a, 0x078, 0x075, + 0x073, 0x071, 0x06f, 0x06d, 0x06b, 0x069, 0x067, 0x065, + 0x063, 0x061, 0x05f, 0x05e, 0x05c, 0x05a, 0x058, 0x056, + 0x054, 0x053, 0x051, 0x04f, 0x04e, 0x04c, 0x04a, 0x049, + 0x047, 0x045, 0x044, 0x042, 0x040, 0x03f, 0x03d, 0x03c, + 0x03a, 0x039, 0x037, 0x036, 0x034, 0x033, 0x032, 0x030, + 0x02f, 0x02d, 0x02c, 0x02b, 0x029, 0x028, 0x027, 0x025, + 0x024, 0x023, 0x021, 0x020, 0x01f, 0x01e, 0x01c, 0x01b, + 0x01a, 0x019, 0x017, 0x016, 0x015, 0x014, 0x013, 0x012, + 0x011, 0x00f, 0x00e, 0x00d, 0x00c, 0x00b, 0x00a, 0x009, + 0x008, 0x007, 0x006, 0x005, 0x004, 0x003, 0x002, 0x000, +}; + +const uint8_t invsqrt_lookup_table[128] = { + 0x069, 0x066, 0x063, 0x061, 0x05e, 0x05b, 
0x059, 0x057, + 0x054, 0x052, 0x050, 0x04d, 0x04b, 0x049, 0x047, 0x045, + 0x043, 0x041, 0x03f, 0x03d, 0x03b, 0x039, 0x037, 0x036, + 0x034, 0x032, 0x030, 0x02f, 0x02d, 0x02c, 0x02a, 0x028, + 0x027, 0x025, 0x024, 0x022, 0x021, 0x01f, 0x01e, 0x01d, + 0x01b, 0x01a, 0x019, 0x017, 0x016, 0x015, 0x014, 0x012, + 0x011, 0x010, 0x00f, 0x00d, 0x00c, 0x00b, 0x00a, 0x009, + 0x008, 0x007, 0x006, 0x005, 0x004, 0x003, 0x002, 0x001, + 0x0fe, 0x0fa, 0x0f6, 0x0f3, 0x0ef, 0x0eb, 0x0e8, 0x0e4, + 0x0e1, 0x0de, 0x0db, 0x0d7, 0x0d4, 0x0d1, 0x0ce, 0x0cb, + 0x0c9, 0x0c6, 0x0c3, 0x0c0, 0x0be, 0x0bb, 0x0b8, 0x0b6, + 0x0b3, 0x0b1, 0x0af, 0x0ac, 0x0aa, 0x0a8, 0x0a5, 0x0a3, + 0x0a1, 0x09f, 0x09d, 0x09b, 0x099, 0x097, 0x095, 0x093, + 0x091, 0x08f, 0x08d, 0x08b, 0x089, 0x087, 0x086, 0x084, + 0x082, 0x080, 0x07f, 0x07d, 0x07b, 0x07a, 0x078, 0x077, + 0x075, 0x074, 0x072, 0x071, 0x06f, 0x06e, 0x06c, 0x06b, +}; diff --git a/target/hexagon/arch.h b/target/hexagon/arch.h index 1f7f03693a..70918065d3 100644 --- a/target/hexagon/arch.h +++ b/target/hexagon/arch.h @@ -20,9 +20,12 @@ #include "qemu/int128.h" +extern const uint8_t rLPS_table_64x4[64][4]; +extern const uint8_t AC_next_state_MPS_64[64]; +extern const uint8_t AC_next_state_LPS_64[64]; + uint64_t interleave(uint32_t odd, uint32_t even); uint64_t deinterleave(uint64_t src); -uint32_t carry_from_add64(uint64_t a, uint64_t b, uint32_t c); int32_t conv_round(int32_t a, int n); void arch_fpop_start(CPUHexagonState *env); void arch_fpop_end(CPUHexagonState *env); @@ -31,4 +34,8 @@ int arch_sf_recip_common(float32 *Rs, float32 *Rt, float32 *Rd, int arch_sf_invsqrt_common(float32 *Rs, float32 *Rd, int *adjust, float_status *fp_status); +extern const uint8_t recip_lookup_table[128]; + +extern const uint8_t invsqrt_lookup_table[128]; + #endif diff --git a/target/hexagon/conv_emu.c b/target/hexagon/conv_emu.c deleted file mode 100644 index 3985b1032a..0000000000 --- a/target/hexagon/conv_emu.c +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
- */ - -#include "qemu/osdep.h" -#include "qemu/host-utils.h" -#include "fpu/softfloat.h" -#include "macros.h" -#include "conv_emu.h" - -#define LL_MAX_POS 0x7fffffffffffffffULL -#define MAX_POS 0x7fffffffU - -static uint64_t conv_f64_to_8u_n(float64 in, int will_negate, - float_status *fp_status) -{ - uint8_t sign = float64_is_neg(in); - if (float64_is_infinity(in)) { - float_raise(float_flag_invalid, fp_status); - if (float64_is_neg(in)) { - return 0ULL; - } else { - return ~0ULL; - } - } - if (float64_is_any_nan(in)) { - float_raise(float_flag_invalid, fp_status); - return ~0ULL; - } - if (float64_is_zero(in)) { - return 0; - } - if (sign) { - float_raise(float_flag_invalid, fp_status); - return 0; - } - if (float64_lt(in, float64_half, fp_status)) { - /* Near zero, captures large fracshifts, denorms, etc */ - float_raise(float_flag_inexact, fp_status); - switch (get_float_rounding_mode(fp_status)) { - case float_round_down: - if (will_negate) { - return 1; - } else { - return 0; - } - case float_round_up: - if (!will_negate) { - return 1; - } else { - return 0; - } - default: - return 0; /* nearest or towards zero */ - } - } - return float64_to_uint64(in, fp_status); -} - -static void clr_float_exception_flags(uint8_t flag, float_status *fp_status) -{ - uint8_t flags = fp_status->float_exception_flags; - flags &= ~flag; - set_float_exception_flags(flags, fp_status); -} - -static uint32_t conv_df_to_4u_n(float64 fp64, int will_negate, - float_status *fp_status) -{ - uint64_t tmp; - tmp = conv_f64_to_8u_n(fp64, will_negate, fp_status); - if (tmp > 0x00000000ffffffffULL) { - clr_float_exception_flags(float_flag_inexact, fp_status); - float_raise(float_flag_invalid, fp_status); - return ~0U; - } - return (uint32_t)tmp; -} - -uint64_t conv_df_to_8u(float64 in, float_status *fp_status) -{ - return conv_f64_to_8u_n(in, 0, fp_status); -} - -uint32_t conv_df_to_4u(float64 in, float_status *fp_status) -{ - return conv_df_to_4u_n(in, 0, fp_status); -} - -int64_t conv_df_to_8s(float64 in, float_status *fp_status) -{ - uint8_t sign = float64_is_neg(in); - uint64_t tmp; - if (float64_is_any_nan(in)) { - float_raise(float_flag_invalid, fp_status); - return -1; - } - if (sign) { - float64 minus_fp64 = float64_abs(in); - tmp = conv_f64_to_8u_n(minus_fp64, 1, fp_status); - } else { - tmp = conv_f64_to_8u_n(in, 0, fp_status); - } - if (tmp > (LL_MAX_POS + sign)) { - clr_float_exception_flags(float_flag_inexact, fp_status); - float_raise(float_flag_invalid, fp_status); - tmp = (LL_MAX_POS + sign); - } - if (sign) { - return -tmp; - } else { - return tmp; - } -} - -int32_t conv_df_to_4s(float64 in, float_status *fp_status) -{ - uint8_t sign = float64_is_neg(in); - uint64_t tmp; - if (float64_is_any_nan(in)) { - float_raise(float_flag_invalid, fp_status); - return -1; - } - if (sign) { - float64 minus_fp64 = float64_abs(in); - tmp = conv_f64_to_8u_n(minus_fp64, 1, fp_status); - } else { - tmp = conv_f64_to_8u_n(in, 0, fp_status); - } - if (tmp > (MAX_POS + sign)) { - clr_float_exception_flags(float_flag_inexact, fp_status); - float_raise(float_flag_invalid, fp_status); - tmp = (MAX_POS + sign); - } - if (sign) { - return -tmp; - } else { - return tmp; - } -} - -uint64_t conv_sf_to_8u(float32 in, float_status *fp_status) -{ - float64 fp64 = float32_to_float64(in, fp_status); - return conv_df_to_8u(fp64, fp_status); -} - -uint32_t conv_sf_to_4u(float32 in, float_status *fp_status) -{ - float64 fp64 = float32_to_float64(in, fp_status); - return conv_df_to_4u(fp64, fp_status); -} - -int64_t 
conv_sf_to_8s(float32 in, float_status *fp_status) -{ - float64 fp64 = float32_to_float64(in, fp_status); - return conv_df_to_8s(fp64, fp_status); -} - -int32_t conv_sf_to_4s(float32 in, float_status *fp_status) -{ - float64 fp64 = float32_to_float64(in, fp_status); - return conv_df_to_4s(fp64, fp_status); -} diff --git a/target/hexagon/conv_emu.h b/target/hexagon/conv_emu.h deleted file mode 100644 index cade9de91f..0000000000 --- a/target/hexagon/conv_emu.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef HEXAGON_CONV_EMU_H -#define HEXAGON_CONV_EMU_H - -uint64_t conv_sf_to_8u(float32 in, float_status *fp_status); -uint32_t conv_sf_to_4u(float32 in, float_status *fp_status); -int64_t conv_sf_to_8s(float32 in, float_status *fp_status); -int32_t conv_sf_to_4s(float32 in, float_status *fp_status); - -uint64_t conv_df_to_8u(float64 in, float_status *fp_status); -uint32_t conv_df_to_4u(float64 in, float_status *fp_status); -int64_t conv_df_to_8s(float64 in, float_status *fp_status); -int32_t conv_df_to_4s(float64 in, float_status *fp_status); - -#endif diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index b0b3040dd1..ebe60a6e15 100644 --- a/target/hexagon/cpu.c +++ b/target/hexagon/cpu.c @@ -16,13 +16,13 @@ */ #include "qemu/osdep.h" -#include "qemu/log.h" #include "qemu/qemu-print.h" #include "cpu.h" #include "internal.h" #include "exec/exec-all.h" #include "qapi/error.h" #include "hw/qdev-properties.h" +#include "fpu/softfloat-helpers.h" static void hexagon_v67_cpu_init(Object *obj) { @@ -69,10 +69,9 @@ const char * const hexagon_regnames[TOTAL_PER_THREAD_REGS] = { * stacks at different locations. This is used to compensate so the diff is * cleaner. 
*/ -static inline target_ulong adjust_stack_ptrs(CPUHexagonState *env, - target_ulong addr) +static target_ulong adjust_stack_ptrs(CPUHexagonState *env, target_ulong addr) { - HexagonCPU *cpu = container_of(env, HexagonCPU, env); + HexagonCPU *cpu = env_archcpu(env); target_ulong stack_adjust = cpu->lldb_stack_adjust; target_ulong stack_start = env->stack_start; target_ulong stack_size = 0x10000; @@ -88,7 +87,7 @@ static inline target_ulong adjust_stack_ptrs(CPUHexagonState *env, } /* HEX_REG_P3_0 (aka C4) is an alias for the predicate registers */ -static inline target_ulong read_p3_0(CPUHexagonState *env) +static target_ulong read_p3_0(CPUHexagonState *env) { int32_t control_reg = 0; int i; @@ -116,7 +115,7 @@ static void print_reg(FILE *f, CPUHexagonState *env, int regnum) static void hexagon_dump(CPUHexagonState *env, FILE *f) { - HexagonCPU *cpu = container_of(env, HexagonCPU, env); + HexagonCPU *cpu = env_archcpu(env); if (cpu->lldb_compat) { /* @@ -206,8 +205,12 @@ static void hexagon_cpu_reset(DeviceState *dev) CPUState *cs = CPU(dev); HexagonCPU *cpu = HEXAGON_CPU(cs); HexagonCPUClass *mcc = HEXAGON_CPU_GET_CLASS(cpu); + CPUHexagonState *env = &cpu->env; mcc->parent_reset(dev); + + set_default_nan_mode(1, &env->fp_status); + set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status); } static void hexagon_cpu_disas_set_info(CPUState *s, disassemble_info *info) diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h index e04eac591c..2855dd3881 100644 --- a/target/hexagon/cpu.h +++ b/target/hexagon/cpu.h @@ -127,11 +127,6 @@ typedef struct HexagonCPU { target_ulong lldb_stack_adjust; } HexagonCPU; -static inline HexagonCPU *hexagon_env_get_cpu(CPUHexagonState *env) -{ - return container_of(env, HexagonCPU, env); -} - #include "cpu_bits.h" #define cpu_signal_handler cpu_hexagon_signal_handler diff --git a/target/hexagon/cpu_bits.h b/target/hexagon/cpu_bits.h index 96af834d0e..96fef71729 100644 --- a/target/hexagon/cpu_bits.h +++ b/target/hexagon/cpu_bits.h @@ -47,7 +47,7 @@ static inline uint32_t iclass_bits(uint32_t encoding) return iclass; } -static inline int is_packet_end(uint32_t endocing) +static inline bool is_packet_end(uint32_t endocing) { uint32_t bits = parse_bits(endocing); return ((bits == 0x3) || (bits == 0x0)); diff --git a/target/hexagon/decode.c b/target/hexagon/decode.c index c9bacaa1ee..d424245598 100644 --- a/target/hexagon/decode.c +++ b/target/hexagon/decode.c @@ -16,7 +16,6 @@ */ #include "qemu/osdep.h" -#include "qemu/log.h" #include "iclass.h" #include "attribs.h" #include "genptr.h" @@ -48,8 +47,8 @@ enum { DEF_REGMAP(R_16, 16, 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23) DEF_REGMAP(R__8, 8, 0, 2, 4, 6, 16, 18, 20, 22) -#define DECODE_MAPPED_REG(REGNO, NAME) \ - insn->regno[REGNO] = DECODE_REGISTER_##NAME[insn->regno[REGNO]]; +#define DECODE_MAPPED_REG(OPNUM, NAME) \ + insn->regno[OPNUM] = DECODE_REGISTER_##NAME[insn->regno[OPNUM]]; typedef struct { const struct DectreeTable *table_link; @@ -340,8 +339,8 @@ static void decode_split_cmpjump(Packet *pkt) if (GET_ATTRIB(pkt->insn[i].opcode, A_NEWCMPJUMP)) { last = pkt->num_insns; pkt->insn[last] = pkt->insn[i]; /* copy the instruction */ - pkt->insn[last].part1 = 1; /* last instruction does the CMP */ - pkt->insn[i].part1 = 0; /* existing instruction does the JUMP */ + pkt->insn[last].part1 = true; /* last insn does the CMP */ + pkt->insn[i].part1 = false; /* existing insn does the JUMP */ pkt->num_insns++; } } @@ -354,7 +353,7 @@ static void decode_split_cmpjump(Packet *pkt) } } 
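The decoder hunks above hinge on the per-word parse field: is_packet_end() treats parse bits 0x3 (and 0x0, used by duplexes) as end-of-packet, and decode_parsebits_is_loopend() further down keys on 0x2. The following standalone C sketch (not part of the patch) only illustrates that classification; the assumption that the parse field occupies bits [15:14] of each instruction word, and the *_sketch names, are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout: the 2-bit parse field lives in bits [15:14] of each word. */
static uint32_t parse_bits_sketch(uint32_t encoding)
{
    return (encoding >> 14) & 0x3;
}

/* Mirrors is_packet_end(): 0x3 ends a packet, 0x0 marks a duplex (also last). */
static bool is_packet_end_sketch(uint32_t encoding)
{
    uint32_t bits = parse_bits_sketch(encoding);
    return (bits == 0x3) || (bits == 0x0);
}

/* Mirrors decode_parsebits_is_loopend(): 0x2 flags a hardware-loop end word. */
static bool is_loopend_sketch(uint32_t encoding)
{
    return parse_bits_sketch(encoding) == 0x2;
}

int main(void)
{
    /* Hypothetical words; only the parse field matters for this sketch. */
    uint32_t words[] = { 0x00004000, 0x00008000, 0x0000c000 };
    for (unsigned i = 0; i < 3; i++) {
        printf("word %u: end=%d loopend=%d\n", i,
               is_packet_end_sketch(words[i]), is_loopend_sketch(words[i]));
    }
    return 0;
}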
-static inline int decode_opcode_can_jump(int opcode) +static bool decode_opcode_can_jump(int opcode) { if ((GET_ATTRIB(opcode, A_JUMP)) || (GET_ATTRIB(opcode, A_CALL)) || @@ -362,15 +361,15 @@ static inline int decode_opcode_can_jump(int opcode) (opcode == J2_pause)) { /* Exception to A_JUMP attribute */ if (opcode == J4_hintjumpr) { - return 0; + return false; } - return 1; + return true; } - return 0; + return false; } -static inline int decode_opcode_ends_loop(int opcode) +static bool decode_opcode_ends_loop(int opcode) { return GET_ATTRIB(opcode, A_HWLOOP0_END) || GET_ATTRIB(opcode, A_HWLOOP1_END); @@ -383,9 +382,9 @@ static void decode_set_insn_attr_fields(Packet *pkt) int numinsns = pkt->num_insns; uint16_t opcode; - pkt->pkt_has_cof = 0; - pkt->pkt_has_endloop = 0; - pkt->pkt_has_dczeroa = 0; + pkt->pkt_has_cof = false; + pkt->pkt_has_endloop = false; + pkt->pkt_has_dczeroa = false; for (i = 0; i < numinsns; i++) { opcode = pkt->insn[i].opcode; @@ -394,14 +393,14 @@ static void decode_set_insn_attr_fields(Packet *pkt) } if (GET_ATTRIB(opcode, A_DCZEROA)) { - pkt->pkt_has_dczeroa = 1; + pkt->pkt_has_dczeroa = true; } if (GET_ATTRIB(opcode, A_STORE)) { if (pkt->insn[i].slot == 0) { - pkt->pkt_has_store_s0 = 1; + pkt->pkt_has_store_s0 = true; } else { - pkt->pkt_has_store_s1 = 1; + pkt->pkt_has_store_s1 = true; } } @@ -422,9 +421,9 @@ static void decode_set_insn_attr_fields(Packet *pkt) */ static void decode_shuffle_for_execution(Packet *packet) { - int changed = 0; + bool changed = false; int i; - int flag; /* flag means we've seen a non-memory instruction */ + bool flag; /* flag means we've seen a non-memory instruction */ int n_mems; int last_insn = packet->num_insns - 1; @@ -437,7 +436,7 @@ static void decode_shuffle_for_execution(Packet *packet) } do { - changed = 0; + changed = false; /* * Stores go last, must not reorder. * Cannot shuffle stores past loads, either. @@ -445,13 +444,13 @@ static void decode_shuffle_for_execution(Packet *packet) * then a store, shuffle the store to the front. Don't shuffle * stores wrt each other or a load. 
*/ - for (flag = n_mems = 0, i = last_insn; i >= 0; i--) { + for (flag = false, n_mems = 0, i = last_insn; i >= 0; i--) { int opcode = packet->insn[i].opcode; if (flag && GET_ATTRIB(opcode, A_STORE)) { decode_send_insn_to(packet, i, last_insn - n_mems); n_mems++; - changed = 1; + changed = true; } else if (GET_ATTRIB(opcode, A_STORE)) { n_mems++; } else if (GET_ATTRIB(opcode, A_LOAD)) { @@ -466,7 +465,7 @@ static void decode_shuffle_for_execution(Packet *packet) * a .new value */ } else { - flag = 1; + flag = true; } } @@ -474,7 +473,7 @@ static void decode_shuffle_for_execution(Packet *packet) continue; } /* Compares go first, may be reordered wrt each other */ - for (flag = 0, i = 0; i < last_insn + 1; i++) { + for (flag = false, i = 0; i < last_insn + 1; i++) { int opcode = packet->insn[i].opcode; if ((strstr(opcode_wregs[opcode], "Pd4") || @@ -483,7 +482,7 @@ static void decode_shuffle_for_execution(Packet *packet) /* This should be a compare (not a store conditional) */ if (flag) { decode_send_insn_to(packet, i, 0); - changed = 1; + changed = true; continue; } } else if (GET_ATTRIB(opcode, A_IMPLICIT_WRITES_P3) && @@ -495,18 +494,18 @@ static void decode_shuffle_for_execution(Packet *packet) */ if (flag) { decode_send_insn_to(packet, i, 0); - changed = 1; + changed = true; continue; } } else if (GET_ATTRIB(opcode, A_IMPLICIT_WRITES_P0) && !GET_ATTRIB(opcode, A_NEWCMPJUMP)) { if (flag) { decode_send_insn_to(packet, i, 0); - changed = 1; + changed = true; continue; } } else { - flag = 1; + flag = true; } } if (changed) { @@ -543,7 +542,7 @@ static void decode_apply_extenders(Packet *packet) int i; for (i = 0; i < packet->num_insns; i++) { if (GET_ATTRIB(packet->insn[i].opcode, A_IT_EXTENDER)) { - packet->insn[i + 1].extension_valid = 1; + packet->insn[i + 1].extension_valid = true; apply_extender(packet, i + 1, packet->insn[i].immed[0]); } } @@ -764,7 +763,7 @@ static void decode_add_endloop_insn(Insn *insn, int loopnum) } } -static inline int decode_parsebits_is_loopend(uint32_t encoding32) +static bool decode_parsebits_is_loopend(uint32_t encoding32) { uint32_t bits = parse_bits(encoding32); return bits == 0x2; @@ -775,8 +774,11 @@ decode_set_slot_number(Packet *pkt) { int slot; int i; - int hit_mem_insn = 0; - int hit_duplex = 0; + bool hit_mem_insn = false; + bool hit_duplex = false; + bool slot0_found = false; + bool slot1_found = false; + int slot1_iidx = 0; /* * The slots are encoded in reverse order @@ -801,7 +803,7 @@ decode_set_slot_number(Packet *pkt) if ((GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE) || GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE_PACKET_RULES)) && !hit_mem_insn) { - hit_mem_insn = 1; + hit_mem_insn = true; pkt->insn[i].slot = 0; continue; } @@ -818,7 +820,7 @@ decode_set_slot_number(Packet *pkt) for (i = pkt->num_insns - 1; i >= 0; i--) { /* First subinsn always goes to slot 0 */ if (GET_ATTRIB(pkt->insn[i].opcode, A_SUBINSN) && !hit_duplex) { - hit_duplex = 1; + hit_duplex = true; pkt->insn[i].slot = 0; continue; } @@ -830,13 +832,10 @@ decode_set_slot_number(Packet *pkt) } /* Fix the exceptions - slot 1 is never empty, always aligns to slot 0 */ - int slot0_found = 0; - int slot1_found = 0; - int slot1_iidx = 0; for (i = pkt->num_insns - 1; i >= 0; i--) { /* Is slot0 used? 
*/ if (pkt->insn[i].slot == 0) { - int is_endloop = (pkt->insn[i].opcode == J2_endloop01); + bool is_endloop = (pkt->insn[i].opcode == J2_endloop01); is_endloop |= (pkt->insn[i].opcode == J2_endloop0); is_endloop |= (pkt->insn[i].opcode == J2_endloop1); @@ -845,17 +844,17 @@ decode_set_slot_number(Packet *pkt) * slot0 for endloop */ if (!is_endloop) { - slot0_found = 1; + slot0_found = true; } } /* Is slot1 used? */ if (pkt->insn[i].slot == 1) { - slot1_found = 1; + slot1_found = true; slot1_iidx = i; } } /* Is slot0 empty and slot1 used? */ - if ((slot0_found == 0) && (slot1_found == 1)) { + if ((!slot0_found) && slot1_found) { /* Then push it to slot0 */ pkt->insn[slot1_iidx].slot = 0; } @@ -873,7 +872,7 @@ int decode_packet(int max_words, const uint32_t *words, Packet *pkt, { int num_insns = 0; int words_read = 0; - int end_of_packet = 0; + bool end_of_packet = false; int new_insns = 0; uint32_t encoding32; @@ -890,7 +889,7 @@ int decode_packet(int max_words, const uint32_t *words, Packet *pkt, * decode works */ if (pkt->insn[num_insns].opcode == A4_ext) { - pkt->insn[num_insns + 1].extension_valid = 1; + pkt->insn[num_insns + 1].extension_valid = true; } num_insns += new_insns; words_read++; @@ -913,7 +912,7 @@ int decode_packet(int max_words, const uint32_t *words, Packet *pkt, decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 0); } if (words_read >= 3) { - uint32_t has_loop0, has_loop1; + bool has_loop0, has_loop1; has_loop0 = decode_parsebits_is_loopend(words[0]); has_loop1 = decode_parsebits_is_loopend(words[1]); if (has_loop0 && has_loop1) { diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c index 842d903710..d3b45d494f 100644 --- a/target/hexagon/fma_emu.c +++ b/target/hexagon/fma_emu.c @@ -19,7 +19,6 @@ #include "qemu/int128.h" #include "fpu/softfloat.h" #include "macros.h" -#include "conv_emu.h" #include "fma_emu.h" #define DF_INF_EXP 0x7ff @@ -64,7 +63,7 @@ typedef union { }; } Float; -static inline uint64_t float64_getmant(float64 f64) +static uint64_t float64_getmant(float64 f64) { Double a = { .i = f64 }; if (float64_is_normal(f64)) { @@ -91,7 +90,7 @@ int32_t float64_getexp(float64 f64) return -1; } -static inline uint64_t float32_getmant(float32 f32) +static uint64_t float32_getmant(float32 f32) { Float a = { .i = f32 }; if (float32_is_normal(f32)) { @@ -118,17 +117,17 @@ int32_t float32_getexp(float32 f32) return -1; } -static inline uint32_t int128_getw0(Int128 x) +static uint32_t int128_getw0(Int128 x) { return int128_getlo(x); } -static inline uint32_t int128_getw1(Int128 x) +static uint32_t int128_getw1(Int128 x) { return int128_getlo(x) >> 32; } -static inline Int128 int128_mul_6464(uint64_t ai, uint64_t bi) +static Int128 int128_mul_6464(uint64_t ai, uint64_t bi) { Int128 a, b; uint64_t pp0, pp1a, pp1b, pp1s, pp2; @@ -152,7 +151,7 @@ static inline Int128 int128_mul_6464(uint64_t ai, uint64_t bi) return int128_make128(ret_low, pp2 + (pp1s >> 32)); } -static inline Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow) +static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow) { Int128 ret = int128_sub(a, b); if (borrow != 0) { @@ -170,7 +169,7 @@ typedef struct { uint8_t sticky; } Accum; -static inline void accum_init(Accum *p) +static void accum_init(Accum *p) { p->mant = int128_zero(); p->exp = 0; @@ -180,7 +179,7 @@ static inline void accum_init(Accum *p) p->sticky = 0; } -static inline Accum accum_norm_left(Accum a) +static Accum accum_norm_left(Accum a) { a.exp--; a.mant = int128_lshift(a.mant, 1); @@ -190,6 +189,7 @@ static inline 
Accum accum_norm_left(Accum a) return a; } +/* This function is marked inline for performance reasons */ static inline Accum accum_norm_right(Accum a, int amt) { if (amt > 130) { @@ -226,7 +226,7 @@ static inline Accum accum_norm_right(Accum a, int amt) */ static Accum accum_add(Accum a, Accum b); -static inline Accum accum_sub(Accum a, Accum b, int negate) +static Accum accum_sub(Accum a, Accum b, int negate) { Accum ret; accum_init(&ret); @@ -329,7 +329,7 @@ static Accum accum_add(Accum a, Accum b) } /* Return an infinity with requested sign */ -static inline float64 infinite_float64(uint8_t sign) +static float64 infinite_float64(uint8_t sign) { if (sign) { return make_float64(DF_MINUS_INF); @@ -339,7 +339,7 @@ static inline float64 infinite_float64(uint8_t sign) } /* Return a maximum finite value with requested sign */ -static inline float64 maxfinite_float64(uint8_t sign) +static float64 maxfinite_float64(uint8_t sign) { if (sign) { return make_float64(DF_MINUS_MAXF); @@ -349,7 +349,7 @@ static inline float64 maxfinite_float64(uint8_t sign) } /* Return a zero value with requested sign */ -static inline float64 zero_float64(uint8_t sign) +static float64 zero_float64(uint8_t sign) { if (sign) { return make_float64(0x8000000000000000); @@ -369,7 +369,7 @@ float32 infinite_float32(uint8_t sign) } /* Return a maximum finite value with the requested sign */ -static inline float32 maxfinite_float32(uint8_t sign) +static float32 maxfinite_float32(uint8_t sign) { if (sign) { return make_float32(SF_MINUS_MAXF); @@ -379,7 +379,7 @@ static inline float32 maxfinite_float32(uint8_t sign) } /* Return a zero value with requested sign */ -static inline float32 zero_float32(uint8_t sign) +static float32 zero_float32(uint8_t sign) { if (sign) { return make_float32(0x80000000); @@ -389,7 +389,7 @@ static inline float32 zero_float32(uint8_t sign) } #define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \ -static inline SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \ +static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \ { \ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \ && ((a.guard | a.round | a.sticky) == 0)) { \ @@ -526,8 +526,8 @@ static bool is_inf_prod(float64 a, float64 b) (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a)))); } -static inline float64 special_fma(float64 a, float64 b, float64 c, - float_status *fp_status) +static float64 special_fma(float64 a, float64 b, float64 c, + float_status *fp_status) { float64 ret = make_float64(0); @@ -586,8 +586,8 @@ static inline float64 special_fma(float64 a, float64 b, float64 c, g_assert_not_reached(); } -static inline float32 special_fmaf(float32 a, float32 b, float32 c, - float_status *fp_status) +static float32 special_fmaf(float32 a, float32 b, float32 c, + float_status *fp_status) { float64 aa, bb, cc; aa = float32_to_float64(a, fp_status); diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h index e044deaff2..18fcdbc7e4 100644 --- a/target/hexagon/gen_tcg.h +++ b/target/hexagon/gen_tcg.h @@ -37,7 +37,10 @@ * _sp stack pointer relative r0 = memw(r29+#12) * _ap absolute set r0 = memw(r1=##variable) * _pr post increment register r0 = memw(r1++m1) + * _pbr post increment bit reverse r0 = memw(r1++m1:brev) * _pi post increment immediate r0 = memb(r1++#1) + * _pci post increment circular immediate r0 = memw(r1++#4:circ(m0)) + * _pcr post increment circular register r0 = memw(r1++I:circ(m0)) */ /* Macros for complex addressing modes */ @@ -51,12 +54,32 @@ fEA_REG(RxV); 
\ fPM_M(RxV, MuV); \ } while (0) +#define GET_EA_pbr \ + do { \ + gen_helper_fbrev(EA, RxV); \ + tcg_gen_add_tl(RxV, RxV, MuV); \ + } while (0) #define GET_EA_pi \ do { \ fEA_REG(RxV); \ fPM_I(RxV, siV); \ } while (0) - +#define GET_EA_pci \ + do { \ + TCGv tcgv_siV = tcg_const_tl(siV); \ + tcg_gen_mov_tl(EA, RxV); \ + gen_helper_fcircadd(RxV, RxV, tcgv_siV, MuV, \ + hex_gpr[HEX_REG_CS0 + MuN]); \ + tcg_temp_free(tcgv_siV); \ + } while (0) +#define GET_EA_pcr(SHIFT) \ + do { \ + TCGv ireg = tcg_temp_new(); \ + tcg_gen_mov_tl(EA, RxV); \ + gen_read_ireg(ireg, MuV, (SHIFT)); \ + gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \ + tcg_temp_free(ireg); \ + } while (0) /* Instructions with multiple definitions */ #define fGEN_TCG_LOAD_AP(RES, SIZE, SIGN) \ @@ -80,20 +103,230 @@ #define fGEN_TCG_L4_loadrd_ap(SHORTCODE) \ fGEN_TCG_LOAD_AP(RddV, 8, u) +#define fGEN_TCG_L2_loadrub_pci(SHORTCODE) SHORTCODE +#define fGEN_TCG_L2_loadrb_pci(SHORTCODE) SHORTCODE +#define fGEN_TCG_L2_loadruh_pci(SHORTCODE) SHORTCODE +#define fGEN_TCG_L2_loadrh_pci(SHORTCODE) SHORTCODE +#define fGEN_TCG_L2_loadri_pci(SHORTCODE) SHORTCODE +#define fGEN_TCG_L2_loadrd_pci(SHORTCODE) SHORTCODE + +#define fGEN_TCG_LOAD_pcr(SHIFT, LOAD) \ + do { \ + TCGv ireg = tcg_temp_new(); \ + tcg_gen_mov_tl(EA, RxV); \ + gen_read_ireg(ireg, MuV, SHIFT); \ + gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \ + LOAD; \ + tcg_temp_free(ireg); \ + } while (0) + +#define fGEN_TCG_L2_loadrub_pcr(SHORTCODE) \ + fGEN_TCG_LOAD_pcr(0, fLOAD(1, 1, u, EA, RdV)) +#define fGEN_TCG_L2_loadrb_pcr(SHORTCODE) \ + fGEN_TCG_LOAD_pcr(0, fLOAD(1, 1, s, EA, RdV)) +#define fGEN_TCG_L2_loadruh_pcr(SHORTCODE) \ + fGEN_TCG_LOAD_pcr(1, fLOAD(1, 2, u, EA, RdV)) +#define fGEN_TCG_L2_loadrh_pcr(SHORTCODE) \ + fGEN_TCG_LOAD_pcr(1, fLOAD(1, 2, s, EA, RdV)) +#define fGEN_TCG_L2_loadri_pcr(SHORTCODE) \ + fGEN_TCG_LOAD_pcr(2, fLOAD(1, 4, u, EA, RdV)) +#define fGEN_TCG_L2_loadrd_pcr(SHORTCODE) \ + fGEN_TCG_LOAD_pcr(3, fLOAD(1, 8, u, EA, RddV)) + #define fGEN_TCG_L2_loadrub_pr(SHORTCODE) SHORTCODE +#define fGEN_TCG_L2_loadrub_pbr(SHORTCODE) SHORTCODE #define fGEN_TCG_L2_loadrub_pi(SHORTCODE) SHORTCODE #define fGEN_TCG_L2_loadrb_pr(SHORTCODE) SHORTCODE -#define fGEN_TCG_L2_loadrb_pi(SHORTCODE) SHORTCODE; +#define fGEN_TCG_L2_loadrb_pbr(SHORTCODE) SHORTCODE +#define fGEN_TCG_L2_loadrb_pi(SHORTCODE) SHORTCODE #define fGEN_TCG_L2_loadruh_pr(SHORTCODE) SHORTCODE -#define fGEN_TCG_L2_loadruh_pi(SHORTCODE) SHORTCODE; +#define fGEN_TCG_L2_loadruh_pbr(SHORTCODE) SHORTCODE +#define fGEN_TCG_L2_loadruh_pi(SHORTCODE) SHORTCODE #define fGEN_TCG_L2_loadrh_pr(SHORTCODE) SHORTCODE +#define fGEN_TCG_L2_loadrh_pbr(SHORTCODE) SHORTCODE #define fGEN_TCG_L2_loadrh_pi(SHORTCODE) SHORTCODE #define fGEN_TCG_L2_loadri_pr(SHORTCODE) SHORTCODE +#define fGEN_TCG_L2_loadri_pbr(SHORTCODE) SHORTCODE #define fGEN_TCG_L2_loadri_pi(SHORTCODE) SHORTCODE #define fGEN_TCG_L2_loadrd_pr(SHORTCODE) SHORTCODE +#define fGEN_TCG_L2_loadrd_pbr(SHORTCODE) SHORTCODE #define fGEN_TCG_L2_loadrd_pi(SHORTCODE) SHORTCODE /* + * These instructions load 2 bytes and places them in + * two halves of the destination register. + * The GET_EA macro determines the addressing mode. + * The SIGN argument determines whether to zero-extend or + * sign-extend. 
+ */ +#define fGEN_TCG_loadbXw2(GET_EA, SIGN) \ + do { \ + TCGv tmp = tcg_temp_new(); \ + TCGv byte = tcg_temp_new(); \ + GET_EA; \ + fLOAD(1, 2, u, EA, tmp); \ + tcg_gen_movi_tl(RdV, 0); \ + for (int i = 0; i < 2; i++) { \ + gen_set_half(i, RdV, gen_get_byte(byte, i, tmp, (SIGN))); \ + } \ + tcg_temp_free(tmp); \ + tcg_temp_free(byte); \ + } while (0) + +#define fGEN_TCG_L2_loadbzw2_io(SHORTCODE) \ + fGEN_TCG_loadbXw2(fEA_RI(RsV, siV), false) +#define fGEN_TCG_L4_loadbzw2_ur(SHORTCODE) \ + fGEN_TCG_loadbXw2(fEA_IRs(UiV, RtV, uiV), false) +#define fGEN_TCG_L2_loadbsw2_io(SHORTCODE) \ + fGEN_TCG_loadbXw2(fEA_RI(RsV, siV), true) +#define fGEN_TCG_L4_loadbsw2_ur(SHORTCODE) \ + fGEN_TCG_loadbXw2(fEA_IRs(UiV, RtV, uiV), true) +#define fGEN_TCG_L4_loadbzw2_ap(SHORTCODE) \ + fGEN_TCG_loadbXw2(GET_EA_ap, false) +#define fGEN_TCG_L2_loadbzw2_pr(SHORTCODE) \ + fGEN_TCG_loadbXw2(GET_EA_pr, false) +#define fGEN_TCG_L2_loadbzw2_pbr(SHORTCODE) \ + fGEN_TCG_loadbXw2(GET_EA_pbr, false) +#define fGEN_TCG_L2_loadbzw2_pi(SHORTCODE) \ + fGEN_TCG_loadbXw2(GET_EA_pi, false) +#define fGEN_TCG_L4_loadbsw2_ap(SHORTCODE) \ + fGEN_TCG_loadbXw2(GET_EA_ap, true) +#define fGEN_TCG_L2_loadbsw2_pr(SHORTCODE) \ + fGEN_TCG_loadbXw2(GET_EA_pr, true) +#define fGEN_TCG_L2_loadbsw2_pbr(SHORTCODE) \ + fGEN_TCG_loadbXw2(GET_EA_pbr, true) +#define fGEN_TCG_L2_loadbsw2_pi(SHORTCODE) \ + fGEN_TCG_loadbXw2(GET_EA_pi, true) +#define fGEN_TCG_L2_loadbzw2_pci(SHORTCODE) \ + fGEN_TCG_loadbXw2(GET_EA_pci, false) +#define fGEN_TCG_L2_loadbsw2_pci(SHORTCODE) \ + fGEN_TCG_loadbXw2(GET_EA_pci, true) +#define fGEN_TCG_L2_loadbzw2_pcr(SHORTCODE) \ + fGEN_TCG_loadbXw2(GET_EA_pcr(1), false) +#define fGEN_TCG_L2_loadbsw2_pcr(SHORTCODE) \ + fGEN_TCG_loadbXw2(GET_EA_pcr(1), true) + +/* + * These instructions load 4 bytes and places them in + * four halves of the destination register pair. + * The GET_EA macro determines the addressing mode. + * The SIGN argument determines whether to zero-extend or + * sign-extend. 
+ */ +#define fGEN_TCG_loadbXw4(GET_EA, SIGN) \ + do { \ + TCGv tmp = tcg_temp_new(); \ + TCGv byte = tcg_temp_new(); \ + GET_EA; \ + fLOAD(1, 4, u, EA, tmp); \ + tcg_gen_movi_i64(RddV, 0); \ + for (int i = 0; i < 4; i++) { \ + gen_set_half_i64(i, RddV, gen_get_byte(byte, i, tmp, (SIGN))); \ + } \ + tcg_temp_free(tmp); \ + tcg_temp_free(byte); \ + } while (0) + +#define fGEN_TCG_L2_loadbzw4_io(SHORTCODE) \ + fGEN_TCG_loadbXw4(fEA_RI(RsV, siV), false) +#define fGEN_TCG_L4_loadbzw4_ur(SHORTCODE) \ + fGEN_TCG_loadbXw4(fEA_IRs(UiV, RtV, uiV), false) +#define fGEN_TCG_L2_loadbsw4_io(SHORTCODE) \ + fGEN_TCG_loadbXw4(fEA_RI(RsV, siV), true) +#define fGEN_TCG_L4_loadbsw4_ur(SHORTCODE) \ + fGEN_TCG_loadbXw4(fEA_IRs(UiV, RtV, uiV), true) +#define fGEN_TCG_L2_loadbzw4_pci(SHORTCODE) \ + fGEN_TCG_loadbXw4(GET_EA_pci, false) +#define fGEN_TCG_L2_loadbsw4_pci(SHORTCODE) \ + fGEN_TCG_loadbXw4(GET_EA_pci, true) +#define fGEN_TCG_L2_loadbzw4_pcr(SHORTCODE) \ + fGEN_TCG_loadbXw4(GET_EA_pcr(2), false) +#define fGEN_TCG_L2_loadbsw4_pcr(SHORTCODE) \ + fGEN_TCG_loadbXw4(GET_EA_pcr(2), true) +#define fGEN_TCG_L4_loadbzw4_ap(SHORTCODE) \ + fGEN_TCG_loadbXw4(GET_EA_ap, false) +#define fGEN_TCG_L2_loadbzw4_pr(SHORTCODE) \ + fGEN_TCG_loadbXw4(GET_EA_pr, false) +#define fGEN_TCG_L2_loadbzw4_pbr(SHORTCODE) \ + fGEN_TCG_loadbXw4(GET_EA_pbr, false) +#define fGEN_TCG_L2_loadbzw4_pi(SHORTCODE) \ + fGEN_TCG_loadbXw4(GET_EA_pi, false) +#define fGEN_TCG_L4_loadbsw4_ap(SHORTCODE) \ + fGEN_TCG_loadbXw4(GET_EA_ap, true) +#define fGEN_TCG_L2_loadbsw4_pr(SHORTCODE) \ + fGEN_TCG_loadbXw4(GET_EA_pr, true) +#define fGEN_TCG_L2_loadbsw4_pbr(SHORTCODE) \ + fGEN_TCG_loadbXw4(GET_EA_pbr, true) +#define fGEN_TCG_L2_loadbsw4_pi(SHORTCODE) \ + fGEN_TCG_loadbXw4(GET_EA_pi, true) + +/* + * These instructions load a half word, shift the destination right by 16 bits + * and place the loaded value in the high half word of the destination pair. + * The GET_EA macro determines the addressing mode. 
+ */ +#define fGEN_TCG_loadalignh(GET_EA) \ + do { \ + TCGv tmp = tcg_temp_new(); \ + TCGv_i64 tmp_i64 = tcg_temp_new_i64(); \ + GET_EA; \ + fLOAD(1, 2, u, EA, tmp); \ + tcg_gen_extu_i32_i64(tmp_i64, tmp); \ + tcg_gen_shri_i64(RyyV, RyyV, 16); \ + tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 48, 16); \ + tcg_temp_free(tmp); \ + tcg_temp_free_i64(tmp_i64); \ + } while (0) + +#define fGEN_TCG_L4_loadalignh_ur(SHORTCODE) \ + fGEN_TCG_loadalignh(fEA_IRs(UiV, RtV, uiV)) +#define fGEN_TCG_L2_loadalignh_io(SHORTCODE) \ + fGEN_TCG_loadalignh(fEA_RI(RsV, siV)) +#define fGEN_TCG_L2_loadalignh_pci(SHORTCODE) \ + fGEN_TCG_loadalignh(GET_EA_pci) +#define fGEN_TCG_L2_loadalignh_pcr(SHORTCODE) \ + fGEN_TCG_loadalignh(GET_EA_pcr(1)) +#define fGEN_TCG_L4_loadalignh_ap(SHORTCODE) \ + fGEN_TCG_loadalignh(GET_EA_ap) +#define fGEN_TCG_L2_loadalignh_pr(SHORTCODE) \ + fGEN_TCG_loadalignh(GET_EA_pr) +#define fGEN_TCG_L2_loadalignh_pbr(SHORTCODE) \ + fGEN_TCG_loadalignh(GET_EA_pbr) +#define fGEN_TCG_L2_loadalignh_pi(SHORTCODE) \ + fGEN_TCG_loadalignh(GET_EA_pi) + +/* Same as above, but loads a byte instead of half word */ +#define fGEN_TCG_loadalignb(GET_EA) \ + do { \ + TCGv tmp = tcg_temp_new(); \ + TCGv_i64 tmp_i64 = tcg_temp_new_i64(); \ + GET_EA; \ + fLOAD(1, 1, u, EA, tmp); \ + tcg_gen_extu_i32_i64(tmp_i64, tmp); \ + tcg_gen_shri_i64(RyyV, RyyV, 8); \ + tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 56, 8); \ + tcg_temp_free(tmp); \ + tcg_temp_free_i64(tmp_i64); \ + } while (0) + +#define fGEN_TCG_L2_loadalignb_io(SHORTCODE) \ + fGEN_TCG_loadalignb(fEA_RI(RsV, siV)) +#define fGEN_TCG_L4_loadalignb_ur(SHORTCODE) \ + fGEN_TCG_loadalignb(fEA_IRs(UiV, RtV, uiV)) +#define fGEN_TCG_L2_loadalignb_pci(SHORTCODE) \ + fGEN_TCG_loadalignb(GET_EA_pci) +#define fGEN_TCG_L2_loadalignb_pcr(SHORTCODE) \ + fGEN_TCG_loadalignb(GET_EA_pcr(0)) +#define fGEN_TCG_L4_loadalignb_ap(SHORTCODE) \ + fGEN_TCG_loadalignb(GET_EA_ap) +#define fGEN_TCG_L2_loadalignb_pr(SHORTCODE) \ + fGEN_TCG_loadalignb(GET_EA_pr) +#define fGEN_TCG_L2_loadalignb_pbr(SHORTCODE) \ + fGEN_TCG_loadalignb(GET_EA_pbr) +#define fGEN_TCG_L2_loadalignb_pi(SHORTCODE) \ + fGEN_TCG_loadalignb(GET_EA_pi) + +/* * Predicated loads * Here is a primer to understand the tag names * @@ -195,6 +428,191 @@ #define fGEN_TCG_S4_stored_locked(SHORTCODE) \ do { SHORTCODE; READ_PREG(PdV, PdN); } while (0) +#define fGEN_TCG_STORE(SHORTCODE) \ + do { \ + TCGv HALF = tcg_temp_new(); \ + TCGv BYTE = tcg_temp_new(); \ + SHORTCODE; \ + tcg_temp_free(HALF); \ + tcg_temp_free(BYTE); \ + } while (0) + +#define fGEN_TCG_STORE_pcr(SHIFT, STORE) \ + do { \ + TCGv ireg = tcg_temp_new(); \ + TCGv HALF = tcg_temp_new(); \ + TCGv BYTE = tcg_temp_new(); \ + tcg_gen_mov_tl(EA, RxV); \ + gen_read_ireg(ireg, MuV, SHIFT); \ + gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \ + STORE; \ + tcg_temp_free(ireg); \ + tcg_temp_free(HALF); \ + tcg_temp_free(BYTE); \ + } while (0) + +#define fGEN_TCG_S2_storerb_pbr(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerb_pci(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerb_pcr(SHORTCODE) \ + fGEN_TCG_STORE_pcr(0, fSTORE(1, 1, EA, fGETBYTE(0, RtV))) + +#define fGEN_TCG_S2_storerh_pbr(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerh_pci(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerh_pcr(SHORTCODE) \ + fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(0, RtV))) + +#define fGEN_TCG_S2_storerf_pbr(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define 
fGEN_TCG_S2_storerf_pci(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerf_pcr(SHORTCODE) \ + fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(1, RtV))) + +#define fGEN_TCG_S2_storeri_pbr(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storeri_pci(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storeri_pcr(SHORTCODE) \ + fGEN_TCG_STORE_pcr(2, fSTORE(1, 4, EA, RtV)) + +#define fGEN_TCG_S2_storerd_pbr(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerd_pci(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerd_pcr(SHORTCODE) \ + fGEN_TCG_STORE_pcr(3, fSTORE(1, 8, EA, RttV)) + +#define fGEN_TCG_S2_storerbnew_pbr(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerbnew_pci(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerbnew_pcr(SHORTCODE) \ + fGEN_TCG_STORE_pcr(0, fSTORE(1, 1, EA, fGETBYTE(0, NtN))) + +#define fGEN_TCG_S2_storerhnew_pbr(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerhnew_pci(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerhnew_pcr(SHORTCODE) \ + fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(0, NtN))) + +#define fGEN_TCG_S2_storerinew_pbr(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerinew_pci(SHORTCODE) \ + fGEN_TCG_STORE(SHORTCODE) +#define fGEN_TCG_S2_storerinew_pcr(SHORTCODE) \ + fGEN_TCG_STORE_pcr(2, fSTORE(1, 4, EA, NtN)) + +/* + * Mathematical operations with more than one definition require + * special handling + */ +#define fGEN_TCG_A5_ACS(SHORTCODE) \ + do { \ + gen_helper_vacsh_pred(PeV, cpu_env, RxxV, RssV, RttV); \ + gen_helper_vacsh_val(RxxV, cpu_env, RxxV, RssV, RttV); \ + } while (0) + +/* + * Approximate reciprocal + * r3,p1 = sfrecipa(r0, r1) + * + * The helper packs the 2 32-bit results into a 64-bit value, + * so unpack them into the proper results. + */ +#define fGEN_TCG_F2_sfrecipa(SHORTCODE) \ + do { \ + TCGv_i64 tmp = tcg_temp_new_i64(); \ + gen_helper_sfrecipa(tmp, cpu_env, RsV, RtV); \ + tcg_gen_extrh_i64_i32(RdV, tmp); \ + tcg_gen_extrl_i64_i32(PeV, tmp); \ + tcg_temp_free_i64(tmp); \ + } while (0) + +/* + * Approximation of the reciprocal square root + * r1,p0 = sfinvsqrta(r0) + * + * The helper packs the 2 32-bit results into a 64-bit value, + * so unpack them into the proper results. + */ +#define fGEN_TCG_F2_sfinvsqrta(SHORTCODE) \ + do { \ + TCGv_i64 tmp = tcg_temp_new_i64(); \ + gen_helper_sfinvsqrta(tmp, cpu_env, RsV); \ + tcg_gen_extrh_i64_i32(RdV, tmp); \ + tcg_gen_extrl_i64_i32(PeV, tmp); \ + tcg_temp_free_i64(tmp); \ + } while (0) + +/* + * Add or subtract with carry. + * Predicate register is used as an extra input and output. 
+ * r5:4 = add(r1:0, r3:2, p1):carry + */ +#define fGEN_TCG_A4_addp_c(SHORTCODE) \ + do { \ + TCGv_i64 carry = tcg_temp_new_i64(); \ + TCGv_i64 zero = tcg_const_i64(0); \ + tcg_gen_extu_i32_i64(carry, PxV); \ + tcg_gen_andi_i64(carry, carry, 1); \ + tcg_gen_add2_i64(RddV, carry, RssV, zero, carry, zero); \ + tcg_gen_add2_i64(RddV, carry, RddV, carry, RttV, zero); \ + tcg_gen_extrl_i64_i32(PxV, carry); \ + gen_8bitsof(PxV, PxV); \ + tcg_temp_free_i64(carry); \ + tcg_temp_free_i64(zero); \ + } while (0) + +/* r5:4 = sub(r1:0, r3:2, p1):carry */ +#define fGEN_TCG_A4_subp_c(SHORTCODE) \ + do { \ + TCGv_i64 carry = tcg_temp_new_i64(); \ + TCGv_i64 zero = tcg_const_i64(0); \ + TCGv_i64 not_RttV = tcg_temp_new_i64(); \ + tcg_gen_extu_i32_i64(carry, PxV); \ + tcg_gen_andi_i64(carry, carry, 1); \ + tcg_gen_not_i64(not_RttV, RttV); \ + tcg_gen_add2_i64(RddV, carry, RssV, zero, carry, zero); \ + tcg_gen_add2_i64(RddV, carry, RddV, carry, not_RttV, zero); \ + tcg_gen_extrl_i64_i32(PxV, carry); \ + gen_8bitsof(PxV, PxV); \ + tcg_temp_free_i64(carry); \ + tcg_temp_free_i64(zero); \ + tcg_temp_free_i64(not_RttV); \ + } while (0) + +/* + * Compare each of the 8 unsigned bytes + * The minimum is placed in each byte of the destination. + * Each bit of the predicate is set true if the bit from the first operand + * is greater than the bit from the second operand. + * r5:4,p1 = vminub(r1:0, r3:2) + */ +#define fGEN_TCG_A6_vminub_RdP(SHORTCODE) \ + do { \ + TCGv left = tcg_temp_new(); \ + TCGv right = tcg_temp_new(); \ + TCGv tmp = tcg_temp_new(); \ + tcg_gen_movi_tl(PeV, 0); \ + tcg_gen_movi_i64(RddV, 0); \ + for (int i = 0; i < 8; i++) { \ + gen_get_byte_i64(left, i, RttV, false); \ + gen_get_byte_i64(right, i, RssV, false); \ + tcg_gen_setcond_tl(TCG_COND_GT, tmp, left, right); \ + tcg_gen_deposit_tl(PeV, PeV, tmp, i, 1); \ + tcg_gen_umin_tl(tmp, left, right); \ + gen_set_byte_i64(i, RddV, tmp); \ + } \ + tcg_temp_free(left); \ + tcg_temp_free(right); \ + tcg_temp_free(tmp); \ + } while (0) + /* Floating point */ #define fGEN_TCG_F2_conv_sf2df(SHORTCODE) \ gen_helper_conv_sf2df(RddV, cpu_env, RsV) diff --git a/target/hexagon/gen_tcg_funcs.py b/target/hexagon/gen_tcg_funcs.py index db9f663a77..7ceb25b5f6 100755 --- a/target/hexagon/gen_tcg_funcs.py +++ b/target/hexagon/gen_tcg_funcs.py @@ -316,7 +316,7 @@ def genptr_dst_write(f, tag, regtype, regid): print("Bad register parse: ", regtype, regid) elif (regtype == "P"): if (regid in {"d", "e", "x"}): - f.write(" gen_log_pred_write(%s%sN, %s%sV);\n" % \ + f.write(" gen_log_pred_write(ctx, %s%sN, %s%sV);\n" % \ (regtype, regid, regtype, regid)) f.write(" ctx_log_pred_write(ctx, %s%sN);\n" % \ (regtype, regid)) diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c index 7481f4c1dd..797a6c0cc9 100644 --- a/target/hexagon/genptr.c +++ b/target/hexagon/genptr.c @@ -15,16 +15,16 @@ * along with this program; if not, see <http://www.gnu.org/licenses/>. 
*/ -#define QEMU_GENERATE #include "qemu/osdep.h" -#include "qemu/log.h" #include "cpu.h" #include "internal.h" #include "tcg/tcg-op.h" #include "insn.h" #include "opcodes.h" #include "translate.h" +#define QEMU_GENERATE /* Used internally by macros.h */ #include "macros.h" +#undef QEMU_GENERATE #include "gen_tcg.h" static inline TCGv gen_read_preg(TCGv pred, uint8_t num) @@ -35,20 +35,24 @@ static inline TCGv gen_read_preg(TCGv pred, uint8_t num) static inline void gen_log_predicated_reg_write(int rnum, TCGv val, int slot) { - TCGv one = tcg_const_tl(1); TCGv zero = tcg_const_tl(0); TCGv slot_mask = tcg_temp_new(); tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot); tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum], slot_mask, zero, val, hex_new_value[rnum]); -#if HEX_DEBUG - /* Do this so HELPER(debug_commit_end) will know */ - tcg_gen_movcond_tl(TCG_COND_EQ, hex_reg_written[rnum], slot_mask, zero, - one, hex_reg_written[rnum]); -#endif + if (HEX_DEBUG) { + /* + * Do this so HELPER(debug_commit_end) will know + * + * Note that slot_mask indicates the value is not written + * (i.e., slot was cancelled), so we create a true/false value before + * or'ing with hex_reg_written[rnum]. + */ + tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero); + tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask); + } - tcg_temp_free(one); tcg_temp_free(zero); tcg_temp_free(slot_mask); } @@ -56,45 +60,44 @@ static inline void gen_log_predicated_reg_write(int rnum, TCGv val, int slot) static inline void gen_log_reg_write(int rnum, TCGv val) { tcg_gen_mov_tl(hex_new_value[rnum], val); -#if HEX_DEBUG - /* Do this so HELPER(debug_commit_end) will know */ - tcg_gen_movi_tl(hex_reg_written[rnum], 1); -#endif + if (HEX_DEBUG) { + /* Do this so HELPER(debug_commit_end) will know */ + tcg_gen_movi_tl(hex_reg_written[rnum], 1); + } } static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val, int slot) { TCGv val32 = tcg_temp_new(); - TCGv one = tcg_const_tl(1); TCGv zero = tcg_const_tl(0); TCGv slot_mask = tcg_temp_new(); tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot); /* Low word */ tcg_gen_extrl_i64_i32(val32, val); - tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum], slot_mask, zero, - val32, hex_new_value[rnum]); -#if HEX_DEBUG - /* Do this so HELPER(debug_commit_end) will know */ - tcg_gen_movcond_tl(TCG_COND_EQ, hex_reg_written[rnum], + tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum], slot_mask, zero, - one, hex_reg_written[rnum]); -#endif - + val32, hex_new_value[rnum]); /* High word */ tcg_gen_extrh_i64_i32(val32, val); tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum + 1], slot_mask, zero, val32, hex_new_value[rnum + 1]); -#if HEX_DEBUG - /* Do this so HELPER(debug_commit_end) will know */ - tcg_gen_movcond_tl(TCG_COND_EQ, hex_reg_written[rnum + 1], - slot_mask, zero, - one, hex_reg_written[rnum + 1]); -#endif + if (HEX_DEBUG) { + /* + * Do this so HELPER(debug_commit_end) will know + * + * Note that slot_mask indicates the value is not written + * (i.e., slot was cancelled), so we create a true/false value before + * or'ing with hex_reg_written[rnum]. 
+ */ + tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero); + tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask); + tcg_gen_or_tl(hex_reg_written[rnum + 1], hex_reg_written[rnum + 1], + slot_mask); + } tcg_temp_free(val32); - tcg_temp_free(one); tcg_temp_free(zero); tcg_temp_free(slot_mask); } @@ -103,33 +106,41 @@ static void gen_log_reg_write_pair(int rnum, TCGv_i64 val) { /* Low word */ tcg_gen_extrl_i64_i32(hex_new_value[rnum], val); -#if HEX_DEBUG - /* Do this so HELPER(debug_commit_end) will know */ - tcg_gen_movi_tl(hex_reg_written[rnum], 1); -#endif + if (HEX_DEBUG) { + /* Do this so HELPER(debug_commit_end) will know */ + tcg_gen_movi_tl(hex_reg_written[rnum], 1); + } /* High word */ tcg_gen_extrh_i64_i32(hex_new_value[rnum + 1], val); -#if HEX_DEBUG - /* Do this so HELPER(debug_commit_end) will know */ - tcg_gen_movi_tl(hex_reg_written[rnum + 1], 1); -#endif + if (HEX_DEBUG) { + /* Do this so HELPER(debug_commit_end) will know */ + tcg_gen_movi_tl(hex_reg_written[rnum + 1], 1); + } } -static inline void gen_log_pred_write(int pnum, TCGv val) +static inline void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val) { TCGv zero = tcg_const_tl(0); TCGv base_val = tcg_temp_new(); TCGv and_val = tcg_temp_new(); TCGv pred_written = tcg_temp_new(); - /* Multiple writes to the same preg are and'ed together */ tcg_gen_andi_tl(base_val, val, 0xff); - tcg_gen_and_tl(and_val, base_val, hex_new_pred_value[pnum]); - tcg_gen_andi_tl(pred_written, hex_pred_written, 1 << pnum); - tcg_gen_movcond_tl(TCG_COND_NE, hex_new_pred_value[pnum], - pred_written, zero, - and_val, base_val); + + /* + * Section 6.1.3 of the Hexagon V67 Programmer's Reference Manual + * + * Multiple writes to the same preg are and'ed together + * If this is the first predicate write in the packet, do a + * straight assignment. Otherwise, do an and. 
+ */ + if (!test_bit(pnum, ctx->pregs_written)) { + tcg_gen_mov_tl(hex_new_pred_value[pnum], base_val); + } else { + tcg_gen_and_tl(hex_new_pred_value[pnum], + hex_new_pred_value[pnum], base_val); + } tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pnum); tcg_temp_free(zero); @@ -254,6 +265,61 @@ static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num, } } +static TCGv gen_get_byte(TCGv result, int N, TCGv src, bool sign) +{ + if (sign) { + tcg_gen_sextract_tl(result, src, N * 8, 8); + } else { + tcg_gen_extract_tl(result, src, N * 8, 8); + } + return result; +} + +static TCGv gen_get_byte_i64(TCGv result, int N, TCGv_i64 src, bool sign) +{ + TCGv_i64 res64 = tcg_temp_new_i64(); + if (sign) { + tcg_gen_sextract_i64(res64, src, N * 8, 8); + } else { + tcg_gen_extract_i64(res64, src, N * 8, 8); + } + tcg_gen_extrl_i64_i32(result, res64); + tcg_temp_free_i64(res64); + + return result; +} + +static inline TCGv gen_get_half(TCGv result, int N, TCGv src, bool sign) +{ + if (sign) { + tcg_gen_sextract_tl(result, src, N * 16, 16); + } else { + tcg_gen_extract_tl(result, src, N * 16, 16); + } + return result; +} + +static inline void gen_set_half(int N, TCGv result, TCGv src) +{ + tcg_gen_deposit_tl(result, result, src, N * 16, 16); +} + +static inline void gen_set_half_i64(int N, TCGv_i64 result, TCGv src) +{ + TCGv_i64 src64 = tcg_temp_new_i64(); + tcg_gen_extu_i32_i64(src64, src); + tcg_gen_deposit_i64(result, result, src64, N * 16, 16); + tcg_temp_free_i64(src64); +} + +static void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src) +{ + TCGv_i64 src64 = tcg_temp_new_i64(); + tcg_gen_extu_i32_i64(src64, src); + tcg_gen_deposit_i64(result, result, src64, N * 8, 8); + tcg_temp_free_i64(src64); +} + static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index) { tcg_gen_qemu_ld32u(dest, vaddr, mem_index); @@ -327,5 +393,85 @@ static inline void gen_store_conditional8(CPUHexagonState *env, tcg_gen_movi_tl(hex_llsc_addr, ~0); } +static inline void gen_store32(TCGv vaddr, TCGv src, int width, int slot) +{ + tcg_gen_mov_tl(hex_store_addr[slot], vaddr); + tcg_gen_movi_tl(hex_store_width[slot], width); + tcg_gen_mov_tl(hex_store_val32[slot], src); +} + +static inline void gen_store1(TCGv_env cpu_env, TCGv vaddr, TCGv src, + DisasContext *ctx, int slot) +{ + gen_store32(vaddr, src, 1, slot); + ctx->store_width[slot] = 1; +} + +static inline void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src, + DisasContext *ctx, int slot) +{ + TCGv tmp = tcg_const_tl(src); + gen_store1(cpu_env, vaddr, tmp, ctx, slot); + tcg_temp_free(tmp); +} + +static inline void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src, + DisasContext *ctx, int slot) +{ + gen_store32(vaddr, src, 2, slot); + ctx->store_width[slot] = 2; +} + +static inline void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src, + DisasContext *ctx, int slot) +{ + TCGv tmp = tcg_const_tl(src); + gen_store2(cpu_env, vaddr, tmp, ctx, slot); + tcg_temp_free(tmp); +} + +static inline void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src, + DisasContext *ctx, int slot) +{ + gen_store32(vaddr, src, 4, slot); + ctx->store_width[slot] = 4; +} + +static inline void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src, + DisasContext *ctx, int slot) +{ + TCGv tmp = tcg_const_tl(src); + gen_store4(cpu_env, vaddr, tmp, ctx, slot); + tcg_temp_free(tmp); +} + +static inline void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src, + DisasContext *ctx, int slot) +{ + tcg_gen_mov_tl(hex_store_addr[slot], vaddr); + 
tcg_gen_movi_tl(hex_store_width[slot], 8); + tcg_gen_mov_i64(hex_store_val64[slot], src); + ctx->store_width[slot] = 8; +} + +static inline void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src, + DisasContext *ctx, int slot) +{ + TCGv_i64 tmp = tcg_const_i64(src); + gen_store8(cpu_env, vaddr, tmp, ctx, slot); + tcg_temp_free_i64(tmp); +} + +static TCGv gen_8bitsof(TCGv result, TCGv value) +{ + TCGv zero = tcg_const_tl(0); + TCGv ones = tcg_const_tl(0xff); + tcg_gen_movcond_tl(TCG_COND_NE, result, value, zero, ones, zero); + tcg_temp_free(zero); + tcg_temp_free(ones); + + return result; +} + #include "tcg_funcs_generated.c.inc" #include "tcg_func_table_generated.c.inc" diff --git a/target/hexagon/helper.h b/target/hexagon/helper.h index a5f340ce67..ca201fb680 100644 --- a/target/hexagon/helper.h +++ b/target/hexagon/helper.h @@ -19,13 +19,16 @@ #include "helper_protos_generated.h.inc" DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_RETURN, noreturn, env, i32) -#if HEX_DEBUG DEF_HELPER_1(debug_start_packet, void, env) DEF_HELPER_FLAGS_3(debug_check_store_width, TCG_CALL_NO_WG, void, env, int, int) DEF_HELPER_FLAGS_3(debug_commit_end, TCG_CALL_NO_WG, void, env, int, int) -#endif DEF_HELPER_2(commit_store, void, env, int) DEF_HELPER_FLAGS_4(fcircadd, TCG_CALL_NO_RWG_SE, s32, s32, s32, s32, s32) +DEF_HELPER_FLAGS_1(fbrev, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_3(sfrecipa, i64, env, f32, f32) +DEF_HELPER_2(sfinvsqrta, i64, env, f32) +DEF_HELPER_4(vacsh_val, s64, env, s64, s64, s64) +DEF_HELPER_FLAGS_4(vacsh_pred, TCG_CALL_NO_RWG_SE, s32, env, s64, s64, s64) /* Floating point */ DEF_HELPER_2(conv_sf2df, f64, env, f32) @@ -38,21 +41,21 @@ DEF_HELPER_2(conv_ud2sf, f32, env, s64) DEF_HELPER_2(conv_ud2df, f64, env, s64) DEF_HELPER_2(conv_d2sf, f32, env, s64) DEF_HELPER_2(conv_d2df, f64, env, s64) -DEF_HELPER_2(conv_sf2uw, s32, env, f32) +DEF_HELPER_2(conv_sf2uw, i32, env, f32) DEF_HELPER_2(conv_sf2w, s32, env, f32) -DEF_HELPER_2(conv_sf2ud, s64, env, f32) +DEF_HELPER_2(conv_sf2ud, i64, env, f32) DEF_HELPER_2(conv_sf2d, s64, env, f32) -DEF_HELPER_2(conv_df2uw, s32, env, f64) +DEF_HELPER_2(conv_df2uw, i32, env, f64) DEF_HELPER_2(conv_df2w, s32, env, f64) -DEF_HELPER_2(conv_df2ud, s64, env, f64) +DEF_HELPER_2(conv_df2ud, i64, env, f64) DEF_HELPER_2(conv_df2d, s64, env, f64) -DEF_HELPER_2(conv_sf2uw_chop, s32, env, f32) +DEF_HELPER_2(conv_sf2uw_chop, i32, env, f32) DEF_HELPER_2(conv_sf2w_chop, s32, env, f32) -DEF_HELPER_2(conv_sf2ud_chop, s64, env, f32) +DEF_HELPER_2(conv_sf2ud_chop, i64, env, f32) DEF_HELPER_2(conv_sf2d_chop, s64, env, f32) -DEF_HELPER_2(conv_df2uw_chop, s32, env, f64) +DEF_HELPER_2(conv_df2uw_chop, i32, env, f64) DEF_HELPER_2(conv_df2w_chop, s32, env, f64) -DEF_HELPER_2(conv_df2ud_chop, s64, env, f64) +DEF_HELPER_2(conv_df2ud_chop, i64, env, f64) DEF_HELPER_2(conv_df2d_chop, s64, env, f64) DEF_HELPER_3(sfadd, f32, env, f32, f32) DEF_HELPER_3(sfsub, f32, env, f32, f32) diff --git a/target/hexagon/iclass.c b/target/hexagon/iclass.c index 378d8a6a75..6091286993 100644 --- a/target/hexagon/iclass.c +++ b/target/hexagon/iclass.c @@ -53,10 +53,6 @@ SlotMask find_iclass_slots(Opcode opcode, int itype) (opcode == Y2_isync) || (opcode == J2_pause) || (opcode == J4_hintjumpr)) { return SLOTS_2; - } else if ((itype == ICLASS_V2LDST) && (GET_ATTRIB(opcode, A_STORE))) { - return SLOTS_01; - } else if ((itype == ICLASS_V2LDST) && (!GET_ATTRIB(opcode, A_STORE))) { - return SLOTS_01; } else if (GET_ATTRIB(opcode, A_CRSLOT23)) { return SLOTS_23; } else if (GET_ATTRIB(opcode, 
A_RESTRICT_PREFERSLOT0)) { diff --git a/target/hexagon/imported/alu.idef b/target/hexagon/imported/alu.idef index 45cc529fbc..58477ae40a 100644 --- a/target/hexagon/imported/alu.idef +++ b/target/hexagon/imported/alu.idef @@ -153,6 +153,21 @@ Q6INSN(A2_subp,"Rdd32=sub(Rtt32,Rss32)",ATTRIBS(), "Sub", { RddV=RttV-RssV;}) +/* 64-bit with carry */ + +Q6INSN(A4_addp_c,"Rdd32=add(Rss32,Rtt32,Px4):carry",ATTRIBS(),"Add with Carry", +{ + RddV = RssV + RttV + fLSBOLD(PxV); + PxV = f8BITSOF(fCARRY_FROM_ADD(RssV,RttV,fLSBOLD(PxV))); +}) + +Q6INSN(A4_subp_c,"Rdd32=sub(Rss32,Rtt32,Px4):carry",ATTRIBS(),"Sub with Carry", +{ + RddV = RssV + ~RttV + fLSBOLD(PxV); + PxV = f8BITSOF(fCARRY_FROM_ADD(RssV,~RttV,fLSBOLD(PxV))); +}) + + /* NEG and ABS */ Q6INSN(A2_negsat,"Rd32=neg(Rs32):sat",ATTRIBS(), @@ -1240,6 +1255,35 @@ MINMAX(uw,WORD,UWORD,2) #undef VMINORMAX3 +Q6INSN(A5_ACS,"Rxx32,Pe4=vacsh(Rss32,Rtt32)",ATTRIBS(), +"Add Compare and Select elements of two vectors, record the maximums and the decisions ", +{ + fHIDE(int i;) + fHIDE(int xv;) + fHIDE(int sv;) + fHIDE(int tv;) + for (i = 0; i < 4; i++) { + xv = (int) fGETHALF(i,RxxV); + sv = (int) fGETHALF(i,RssV); + tv = (int) fGETHALF(i,RttV); + xv = xv + tv; //assumes 17bit datapath + sv = sv - tv; //assumes 17bit datapath + fSETBIT(i*2, PeV, (xv > sv)); + fSETBIT(i*2+1,PeV, (xv > sv)); + fSETHALF(i, RxxV, fSATH(fMAX(xv,sv))); + } +}) + +Q6INSN(A6_vminub_RdP,"Rdd32,Pe4=vminub(Rtt32,Rss32)",ATTRIBS(), +"Vector minimum of bytes, records minimum and decision vector", +{ + fHIDE(int i;) + for (i = 0; i < 8; i++) { + fSETBIT(i, PeV, (fGETUBYTE(i,RttV) > fGETUBYTE(i,RssV))); + fSETBYTE(i,RddV,fMIN(fGETUBYTE(i,RttV),fGETUBYTE(i,RssV))); + } +}) + /**********************************************/ /* Vector Min/Max */ /**********************************************/ diff --git a/target/hexagon/imported/compare.idef b/target/hexagon/imported/compare.idef index 3551467854..abd016ffb5 100644 --- a/target/hexagon/imported/compare.idef +++ b/target/hexagon/imported/compare.idef @@ -198,11 +198,11 @@ Q6INSN(C4_or_orn,"Pd4=or(Ps4,or(Pt4,!Pu4))",ATTRIBS(A_CRSLOT23), Q6INSN(C2_any8,"Pd4=any8(Ps4)",ATTRIBS(A_CRSLOT23), "Logical ANY of low 8 predicate bits", -{ PsV ? (PdV=0xff) : (PdV=0x00); }) +{ PdV = (PsV ? 0xff : 0x00); }) Q6INSN(C2_all8,"Pd4=all8(Ps4)",ATTRIBS(A_CRSLOT23), "Logical ALL of low 8 predicate bits", -{ (PsV==0xff) ? (PdV=0xff) : (PdV=0x00); }) +{ PdV = (PsV == 0xff ? 0xff : 0x00); }) Q6INSN(C2_vitpack,"Rd32=vitpack(Ps4,Pt4)",ATTRIBS(), "Pack the odd and even bits of two predicate registers", @@ -212,7 +212,7 @@ Q6INSN(C2_vitpack,"Rd32=vitpack(Ps4,Pt4)",ATTRIBS(), Q6INSN(C2_mux,"Rd32=mux(Pu4,Rs32,Rt32)",ATTRIBS(), "Scalar MUX", -{ (fLSBOLD(PuV)) ? (RdV=RsV):(RdV=RtV); }) +{ RdV = (fLSBOLD(PuV) ? RsV : RtV); }) Q6INSN(C2_cmovenewit,"if (Pu4.new) Rd32=#s12",ATTRIBS(A_ARCHV2), @@ -269,18 +269,18 @@ Q6INSN(C2_ccombinewf,"if (!Pu4) Rdd32=combine(Rs32,Rt32)",ATTRIBS(A_ARCHV2), Q6INSN(C2_muxii,"Rd32=mux(Pu4,#s8,#S8)",ATTRIBS(A_ARCHV2), "Scalar MUX immediates", -{ fIMMEXT(siV); (fLSBOLD(PuV)) ? (RdV=siV):(RdV=SiV); }) +{ fIMMEXT(siV); RdV = (fLSBOLD(PuV) ? siV : SiV); }) Q6INSN(C2_muxir,"Rd32=mux(Pu4,Rs32,#s8)",ATTRIBS(A_ARCHV2), "Scalar MUX register immediate", -{ fIMMEXT(siV); (fLSBOLD(PuV)) ? (RdV=RsV):(RdV=siV); }) +{ fIMMEXT(siV); RdV = (fLSBOLD(PuV) ? RsV : siV); }) Q6INSN(C2_muxri,"Rd32=mux(Pu4,#s8,Rs32)",ATTRIBS(A_ARCHV2), "Scalar MUX register immediate", -{ fIMMEXT(siV); (fLSBOLD(PuV)) ? (RdV=siV):(RdV=RsV); }) +{ fIMMEXT(siV); RdV = (fLSBOLD(PuV) ? 
siV : RsV); }) diff --git a/target/hexagon/imported/encode_pp.def b/target/hexagon/imported/encode_pp.def index c21cb730af..35ae3d2369 100644 --- a/target/hexagon/imported/encode_pp.def +++ b/target/hexagon/imported/encode_pp.def @@ -294,12 +294,14 @@ DEF_CLASS32(ICLASS_LD" ---- -------- PP------ --------",LD) DEF_CLASS32(ICLASS_LD" 0--- -------- PP------ --------",LD_ADDR_ROFFSET) +DEF_CLASS32(ICLASS_LD" 100- -------- PP----0- --------",LD_ADDR_POST_CIRC_IMMED) DEF_CLASS32(ICLASS_LD" 101- -------- PP00---- --------",LD_ADDR_POST_IMMED) DEF_CLASS32(ICLASS_LD" 101- -------- PP01---- --------",LD_ADDR_ABS_UPDATE_V4) DEF_CLASS32(ICLASS_LD" 101- -------- PP1----- --------",LD_ADDR_POST_IMMED_PRED_V2) DEF_CLASS32(ICLASS_LD" 110- -------- PP-0---- 0-------",LD_ADDR_POST_REG) DEF_CLASS32(ICLASS_LD" 110- -------- PP-1---- --------",LD_ADDR_ABS_PLUS_REG_V4) DEF_CLASS32(ICLASS_LD" 100- -------- PP----1- --------",LD_ADDR_POST_CREG_V2) +DEF_CLASS32(ICLASS_LD" 111- -------- PP------ 0-------",LD_ADDR_POST_BREV_REG) DEF_CLASS32(ICLASS_LD" 111- -------- PP------ 1-------",LD_ADDR_PRED_ABS_V4) DEF_FIELD32(ICLASS_LD" !!!- -------- PP------ --------",LD_Amode,"Amode") @@ -308,18 +310,24 @@ DEF_FIELD32(ICLASS_LD" ---- --!----- PP------ --------",LD_UN,"Unsigned") #define STD_LD_ENC(TAG,OPC) \ DEF_ENC32(L2_load##TAG##_io, ICLASS_LD" 0 ii "OPC" sssss PPiiiiii iiiddddd")\ +DEF_ENC32(L2_load##TAG##_pci, ICLASS_LD" 1 00 "OPC" xxxxx PPu0--0i iiiddddd")\ DEF_ENC32(L2_load##TAG##_pi, ICLASS_LD" 1 01 "OPC" xxxxx PP00---i iiiddddd")\ DEF_ENC32(L4_load##TAG##_ap, ICLASS_LD" 1 01 "OPC" eeeee PP01IIII -IIddddd")\ DEF_ENC32(L2_load##TAG##_pr, ICLASS_LD" 1 10 "OPC" xxxxx PPu0---- 0--ddddd")\ DEF_ENC32(L4_load##TAG##_ur, ICLASS_LD" 1 10 "OPC" ttttt PPi1IIII iIIddddd")\ +DEF_ENC32(L2_load##TAG##_pcr, ICLASS_LD" 1 00 "OPC" xxxxx PPu0--1- 0--ddddd")\ +DEF_ENC32(L2_load##TAG##_pbr, ICLASS_LD" 1 11 "OPC" xxxxx PPu0---- 0--ddddd") #define STD_LDX_ENC(TAG,OPC) \ DEF_ENC32(L2_load##TAG##_io, ICLASS_LD" 0 ii "OPC" sssss PPiiiiii iiiyyyyy")\ +DEF_ENC32(L2_load##TAG##_pci, ICLASS_LD" 1 00 "OPC" xxxxx PPu0--0i iiiyyyyy")\ DEF_ENC32(L2_load##TAG##_pi, ICLASS_LD" 1 01 "OPC" xxxxx PP00---i iiiyyyyy")\ DEF_ENC32(L4_load##TAG##_ap, ICLASS_LD" 1 01 "OPC" eeeee PP01IIII -IIyyyyy")\ DEF_ENC32(L2_load##TAG##_pr, ICLASS_LD" 1 10 "OPC" xxxxx PPu0---- 0--yyyyy")\ DEF_ENC32(L4_load##TAG##_ur, ICLASS_LD" 1 10 "OPC" ttttt PPi1IIII iIIyyyyy")\ +DEF_ENC32(L2_load##TAG##_pcr, ICLASS_LD" 1 00 "OPC" xxxxx PPu0--1- 0--yyyyy")\ +DEF_ENC32(L2_load##TAG##_pbr, ICLASS_LD" 1 11 "OPC" xxxxx PPu0---- 0--yyyyy") #define STD_PLD_ENC(TAG,OPC) \ @@ -334,6 +342,15 @@ DEF_ENC32(L4_pload##TAG##fnew_abs,ICLASS_LD" 1 11 "OPC" iiiii PP111tti 1--ddd /* 0 000 misc: dealloc,loadw_locked,dcfetch */ +STD_LD_ENC(bzw4,"0 101") +STD_LD_ENC(bzw2,"0 011") + +STD_LD_ENC(bsw4,"0 111") +STD_LD_ENC(bsw2,"0 001") + +STD_LDX_ENC(alignh,"0 010") +STD_LDX_ENC(alignb,"0 100") + STD_LD_ENC(rb, "1 000") STD_LD_ENC(rub, "1 001") STD_LD_ENC(rh, "1 010") @@ -351,6 +368,7 @@ STD_PLD_ENC(rd, "1 110") /* note dest reg field LSB=0, 1 is reserved */ DEF_CLASS32( ICLASS_LD" 0--0 000----- PP------ --------",LD_MISC) DEF_ANTICLASS32(ICLASS_LD" 0--0 000----- PP------ --------",LD_ADDR_ROFFSET) +DEF_ANTICLASS32(ICLASS_LD" 1000 000----- PP------ --------",LD_ADDR_POST_CIRC_IMMED) DEF_ANTICLASS32(ICLASS_LD" 1010 000----- PP------ --------",LD_ADDR_POST_IMMED) DEF_ANTICLASS32(ICLASS_LD" 1100 000----- PP------ --------",LD_ADDR_POST_REG) DEF_ANTICLASS32(ICLASS_LD" 1110 000----- PP------ 
--------",LD_ADDR_POST_REG) @@ -397,6 +415,7 @@ DEF_FIELD32(ICLASS_ST" ---! !!------ PP------ --------",ST_Type,"Type") DEF_FIELD32(ICLASS_ST" ---- --!----- PP------ --------",ST_UN,"Unsigned") DEF_CLASS32(ICLASS_ST" 0--1 -------- PP------ --------",ST_ADDR_ROFFSET) +DEF_CLASS32(ICLASS_ST" 1001 -------- PP------ ------0-",ST_ADDR_POST_CIRC_IMMED) DEF_CLASS32(ICLASS_ST" 1011 -------- PP0----- 0-----0-",ST_ADDR_POST_IMMED) DEF_CLASS32(ICLASS_ST" 1011 -------- PP0----- 1-------",ST_ADDR_ABS_UPDATE_V4) DEF_CLASS32(ICLASS_ST" 1011 -------- PP1----- --------",ST_ADDR_POST_IMMED_PRED_V2) @@ -404,6 +423,7 @@ DEF_CLASS32(ICLASS_ST" 1111 -------- PP------ 1-------",ST_ADDR_PRED_ABS_V4) DEF_CLASS32(ICLASS_ST" 1101 -------- PP------ 0-------",ST_ADDR_POST_REG) DEF_CLASS32(ICLASS_ST" 1101 -------- PP------ 1-------",ST_ADDR_ABS_PLUS_REG_V4) DEF_CLASS32(ICLASS_ST" 1001 -------- PP------ ------1-",ST_ADDR_POST_CREG_V2) +DEF_CLASS32(ICLASS_ST" 1111 -------- PP------ 0-------",ST_ADDR_POST_BREV_REG) DEF_CLASS32(ICLASS_ST" 0--0 1------- PP------ --------",ST_MISC_STORELIKE) DEF_CLASS32(ICLASS_ST" 1--0 0------- PP------ --------",ST_MISC_BUSOP) DEF_CLASS32(ICLASS_ST" 0--0 0------- PP------ --------",ST_MISC_CACHEOP) @@ -411,10 +431,13 @@ DEF_CLASS32(ICLASS_ST" 0--0 0------- PP------ --------",ST_MISC_CACHEOP) #define STD_ST_ENC(TAG,OPC,SRC) \ DEF_ENC32(S2_store##TAG##_io, ICLASS_ST" 0 ii "OPC" sssss PPi"SRC" iiiiiiii")\ +DEF_ENC32(S2_store##TAG##_pci, ICLASS_ST" 1 00 "OPC" xxxxx PPu"SRC" 0iiii-0-")\ DEF_ENC32(S2_store##TAG##_pi, ICLASS_ST" 1 01 "OPC" xxxxx PP0"SRC" 0iiii-0-")\ DEF_ENC32(S4_store##TAG##_ap, ICLASS_ST" 1 01 "OPC" eeeee PP0"SRC" 1-IIIIII")\ DEF_ENC32(S2_store##TAG##_pr, ICLASS_ST" 1 10 "OPC" xxxxx PPu"SRC" 0-------")\ DEF_ENC32(S4_store##TAG##_ur, ICLASS_ST" 1 10 "OPC" uuuuu PPi"SRC" 1iIIIIII")\ +DEF_ENC32(S2_store##TAG##_pcr, ICLASS_ST" 1 00 "OPC" xxxxx PPu"SRC" 0-----1-")\ +DEF_ENC32(S2_store##TAG##_pbr, ICLASS_ST" 1 11 "OPC" xxxxx PPu"SRC" 0-------") #define STD_PST_ENC(TAG,OPC,SRC) \ @@ -1017,6 +1040,8 @@ MPY_ENC(M7_dcmpyiwc_acc, "1010","xxxxx","1","0","1","0","10") +MPY_ENC(A5_ACS, "1010","xxxxx","0","1","0","1","ee") +MPY_ENC(A6_vminub_RdP, "1010","ddddd","0","1","1","1","ee") /* */ @@ -1028,6 +1053,7 @@ MPY_ENC(F2_sfmin, "1011","ddddd","0","0","0","1","01") MPY_ENC(F2_sfmpy, "1011","ddddd","0","0","1","0","00") MPY_ENC(F2_sffixupn, "1011","ddddd","0","0","1","1","00") MPY_ENC(F2_sffixupd, "1011","ddddd","0","0","1","1","01") +MPY_ENC(F2_sfrecipa, "1011","ddddd","1","1","1","1","ee") DEF_FIELDROW_DESC32(ICLASS_M" 1100 -------- PP------ --------","[#12] Rd=(Rs,Rt)") DEF_FIELD32(ICLASS_M" 1100 -------- PP------ --!-----",Mc_tH,"Rt is High") /*Rt high */ @@ -1641,6 +1667,7 @@ SH2_RR_ENC(F2_conv_sf2w, "1011","100","-","000","ddddd") SH2_RR_ENC(F2_conv_sf2uw_chop, "1011","011","-","001","ddddd") SH2_RR_ENC(F2_conv_sf2w_chop, "1011","100","-","001","ddddd") SH2_RR_ENC(F2_sffixupr, "1011","101","-","000","ddddd") +SH2_RR_ENC(F2_sfinvsqrta, "1011","111","-","0ee","ddddd") DEF_FIELDROW_DESC32(ICLASS_S2op" 1100 -------- PP------ --------","[#12] Rd=(Rs,#u6)") @@ -1740,11 +1767,14 @@ SH_RRR_ENC(S4_vxsubaddh, "0001","01-","-","110","ddddd") SH_RRR_ENC(S4_vxaddsubhr, "0001","11-","-","00-","ddddd") SH_RRR_ENC(S4_vxsubaddhr, "0001","11-","-","01-","ddddd") SH_RRR_ENC(S4_extractp_rp, "0001","11-","-","10-","ddddd") +SH_RRR_ENC(S2_cabacdecbin, "0001","11-","-","11-","ddddd") /* implicit P0 write */ DEF_FIELDROW_DESC32(ICLASS_S3op" 0010 -------- PP------ --------","[#2] Rdd=(Rss,Rtt,Pu)") 
SH_RRR_ENC(S2_valignrb, "0010","0--","-","-uu","ddddd") SH_RRR_ENC(S2_vsplicerb, "0010","100","-","-uu","ddddd") +SH_RRR_ENC(A4_addp_c, "0010","110","-","-xx","ddddd") +SH_RRR_ENC(A4_subp_c, "0010","111","-","-xx","ddddd") DEF_FIELDROW_DESC32(ICLASS_S3op" 0011 -------- PP------ --------","[#3] Rdd=(Rss,Rt)") diff --git a/target/hexagon/imported/float.idef b/target/hexagon/imported/float.idef index 76cecfebf5..3e75bc4604 100644 --- a/target/hexagon/imported/float.idef +++ b/target/hexagon/imported/float.idef @@ -146,6 +146,22 @@ Q6INSN(F2_sfimm_n,"Rd32=sfmake(#u10):neg",ATTRIBS(), }) +Q6INSN(F2_sfrecipa,"Rd32,Pe4=sfrecipa(Rs32,Rt32)",ATTRIBS(), +"Reciprocal Approximation for Division", +{ + fHIDE(int idx;) + fHIDE(int adjust;) + fHIDE(int mant;) + fHIDE(int exp;) + if (fSF_RECIP_COMMON(RsV,RtV,RdV,adjust)) { + PeV = adjust; + idx = (RtV >> 16) & 0x7f; + mant = (fSF_RECIP_LOOKUP(idx) << 15) | 1; + exp = fSF_BIAS() - (fSF_GETEXP(RtV) - fSF_BIAS()) - 1; + RdV = fMAKESF(fGETBIT(31,RtV),exp,mant); + } +}) + Q6INSN(F2_sffixupn,"Rd32=sffixupn(Rs32,Rt32)",ATTRIBS(), "Fix Up Numerator", { @@ -162,6 +178,22 @@ Q6INSN(F2_sffixupd,"Rd32=sffixupd(Rs32,Rt32)",ATTRIBS(), RdV = RtV; }) +Q6INSN(F2_sfinvsqrta,"Rd32,Pe4=sfinvsqrta(Rs32)",ATTRIBS(), +"Reciprocal Square Root Approximation", +{ + fHIDE(int idx;) + fHIDE(int adjust;) + fHIDE(int mant;) + fHIDE(int exp;) + if (fSF_INVSQRT_COMMON(RsV,RdV,adjust)) { + PeV = adjust; + idx = (RsV >> 17) & 0x7f; + mant = (fSF_INVSQRT_LOOKUP(idx) << 15); + exp = fSF_BIAS() - ((fSF_GETEXP(RsV) - fSF_BIAS()) >> 1) - 1; + RdV = fMAKESF(fGETBIT(31,RsV),exp,mant); + } +}) + Q6INSN(F2_sffixupr,"Rd32=sffixupr(Rs32)",ATTRIBS(), "Fix Up Radicand", { diff --git a/target/hexagon/imported/ldst.idef b/target/hexagon/imported/ldst.idef index 78a2ea441c..359d3b744e 100644 --- a/target/hexagon/imported/ldst.idef +++ b/target/hexagon/imported/ldst.idef @@ -25,7 +25,10 @@ Q6INSN(L2_##TAG##_io, OPER"(Rs32+#s11:"SHFT")", ATTRIB,DESCR,{fIMMEXT( Q6INSN(L4_##TAG##_ur, OPER"(Rt32<<#u2+#U6)", ATTRIB,DESCR,{fMUST_IMMEXT(UiV); fEA_IRs(UiV,RtV,uiV); SEMANTICS;})\ Q6INSN(L4_##TAG##_ap, OPER"(Re32=#U6)", ATTRIB,DESCR,{fMUST_IMMEXT(UiV); fEA_IMM(UiV); SEMANTICS; ReV=UiV; })\ Q6INSN(L2_##TAG##_pr, OPER"(Rx32++Mu2)", ATTRIB,DESCR,{fEA_REG(RxV); fPM_M(RxV,MuV); SEMANTICS;})\ +Q6INSN(L2_##TAG##_pbr, OPER"(Rx32++Mu2:brev)", ATTRIB,DESCR,{fEA_BREVR(RxV); fPM_M(RxV,MuV); SEMANTICS;})\ Q6INSN(L2_##TAG##_pi, OPER"(Rx32++#s4:"SHFT")", ATTRIB,DESCR,{fEA_REG(RxV); fPM_I(RxV,siV); SEMANTICS;})\ +Q6INSN(L2_##TAG##_pci, OPER"(Rx32++#s4:"SHFT":circ(Mu2))",ATTRIB,DESCR,{fEA_REG(RxV); fPM_CIRI(RxV,siV,MuV); SEMANTICS;})\ +Q6INSN(L2_##TAG##_pcr, OPER"(Rx32++I:circ(Mu2))", ATTRIB,DESCR,{fEA_REG(RxV); fPM_CIRR(RxV,fREAD_IREG(MuV)<<SCALE,MuV); SEMANTICS;}) /* The set of 32-bit load instructions */ STD_LD_AMODES(loadrub,"Rd32=memub","Load Unsigned Byte",ATTRIBS(A_LOAD),"0",fLOAD(1,1,u,EA,RdV),0) @@ -35,6 +38,68 @@ STD_LD_AMODES(loadrh, "Rd32=memh", "Load signed Half integer",ATTRIBS(A_LOAD),"1 STD_LD_AMODES(loadri, "Rd32=memw", "Load Word",ATTRIBS(A_LOAD),"2",fLOAD(1,4,u,EA,RdV),2) STD_LD_AMODES(loadrd, "Rdd32=memd","Load Double integer",ATTRIBS(A_LOAD),"3",fLOAD(1,8,u,EA,RddV),3) +/* These instructions do a load an unpack */ +STD_LD_AMODES(loadbzw2, "Rd32=memubh", "Load Bytes and Vector Zero-Extend (unpack)", +ATTRIBS(A_LOAD),"1", +{fHIDE(size2u_t tmpV; int i;) + fLOAD(1,2,u,EA,tmpV); + for (i=0;i<2;i++) { + fSETHALF(i,RdV,fGETUBYTE(i,tmpV)); + } +},1) + +STD_LD_AMODES(loadbzw4, "Rdd32=memubh", "Load Bytes and Vector 
Zero-Extend (unpack)", +ATTRIBS(A_LOAD),"2", +{fHIDE(size4u_t tmpV; int i;) + fLOAD(1,4,u,EA,tmpV); + for (i=0;i<4;i++) { + fSETHALF(i,RddV,fGETUBYTE(i,tmpV)); + } +},2) + + + +/* These instructions do a load an unpack */ +STD_LD_AMODES(loadbsw2, "Rd32=membh", "Load Bytes and Vector Sign-Extend (unpack)", +ATTRIBS(A_LOAD),"1", +{fHIDE(size2u_t tmpV; int i;) + fLOAD(1,2,u,EA,tmpV); + for (i=0;i<2;i++) { + fSETHALF(i,RdV,fGETBYTE(i,tmpV)); + } +},1) + +STD_LD_AMODES(loadbsw4, "Rdd32=membh", "Load Bytes and Vector Sign-Extend (unpack)", +ATTRIBS(A_LOAD),"2", +{fHIDE(size4u_t tmpV; int i;) + fLOAD(1,4,u,EA,tmpV); + for (i=0;i<4;i++) { + fSETHALF(i,RddV,fGETBYTE(i,tmpV)); + } +},2) + + + +STD_LD_AMODES(loadalignh, "Ryy32=memh_fifo", "Load Half-word into shifted vector", +ATTRIBS(A_LOAD),"1", +{ + fHIDE(size8u_t tmpV;) + fLOAD(1,2,u,EA,tmpV); + RyyV = (((size8u_t)RyyV)>>16)|(tmpV<<48); +},1) + + +STD_LD_AMODES(loadalignb, "Ryy32=memb_fifo", "Load byte into shifted vector", +ATTRIBS(A_LOAD),"0", +{ + fHIDE(size8u_t tmpV;) + fLOAD(1,1,u,EA,tmpV); + RyyV = (((size8u_t)RyyV)>>8)|(tmpV<<56); +},0) + + + + /* The set of addressing modes standard to all Store instructions */ #define STD_ST_AMODES(TAG,DEST,OPER,DESCR,ATTRIB,SHFT,SEMANTICS,SCALE)\ Q6INSN(S2_##TAG##_io, OPER"(Rs32+#s11:"SHFT")="DEST, ATTRIB,DESCR,{fIMMEXT(siV); fEA_RI(RsV,siV); SEMANTICS; })\ @@ -42,6 +107,9 @@ Q6INSN(S2_##TAG##_pi, OPER"(Rx32++#s4:"SHFT")="DEST, ATTRIB,DESCR,{fEA_REG( Q6INSN(S4_##TAG##_ap, OPER"(Re32=#U6)="DEST, ATTRIB,DESCR,{fMUST_IMMEXT(UiV); fEA_IMM(UiV); SEMANTICS; ReV=UiV; })\ Q6INSN(S2_##TAG##_pr, OPER"(Rx32++Mu2)="DEST, ATTRIB,DESCR,{fEA_REG(RxV); fPM_M(RxV,MuV); SEMANTICS; })\ Q6INSN(S4_##TAG##_ur, OPER"(Ru32<<#u2+#U6)="DEST, ATTRIB,DESCR,{fMUST_IMMEXT(UiV); fEA_IRs(UiV,RuV,uiV); SEMANTICS;})\ +Q6INSN(S2_##TAG##_pbr, OPER"(Rx32++Mu2:brev)="DEST, ATTRIB,DESCR,{fEA_BREVR(RxV); fPM_M(RxV,MuV); SEMANTICS; })\ +Q6INSN(S2_##TAG##_pci, OPER"(Rx32++#s4:"SHFT":circ(Mu2))="DEST, ATTRIB,DESCR,{fEA_REG(RxV); fPM_CIRI(RxV,siV,MuV); SEMANTICS;})\ +Q6INSN(S2_##TAG##_pcr, OPER"(Rx32++I:circ(Mu2))="DEST, ATTRIB,DESCR,{fEA_REG(RxV); fPM_CIRR(RxV,fREAD_IREG(MuV)<<SCALE,MuV); SEMANTICS;}) /* The set of 32-bit store instructions */ diff --git a/target/hexagon/imported/macros.def b/target/hexagon/imported/macros.def index 65292c7afa..32ed3bf8fc 100755 --- a/target/hexagon/imported/macros.def +++ b/target/hexagon/imported/macros.def @@ -92,6 +92,21 @@ DEF_MACRO( /* attribs */ ) + +DEF_MACRO( + fINSERT_RANGE, + { + int offset=LOWBIT; + int width=HIBIT-LOWBIT+1; + /* clear bits where new bits go */ + INREG &= ~(((fCONSTLL(1)<<width)-1)<<offset); + /* OR in new bits */ + INREG |= ((INVAL & ((fCONSTLL(1)<<width)-1)) << offset); + }, + /* attribs */ +) + + DEF_MACRO( f8BITSOF, ( (VAL) ? 
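The memubh/membh forms above load packed bytes and widen each byte into a halfword of the destination register pair. A minimal C model of the zero-extending 2-byte variant (loadbzw2), purely illustrative and not taken from the source:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Model of loadbzw2: load two bytes, zero-extend each into a halfword. */
static uint32_t loadbzw2(const uint8_t *mem)
{
    uint32_t rd = 0;
    for (int i = 0; i < 2; i++) {
        rd |= (uint32_t)mem[i] << (16 * i);   /* byte i -> halfword i */
    }
    return rd;
}

int main(void)
{
    const uint8_t buf[2] = { 0x80, 0x01 };
    printf("0x%08" PRIx32 "\n", loadbzw2(buf));   /* prints 0x00010080 */
    return 0;
}

The sign-extending membh forms differ only in treating each loaded byte as a signed int8_t before widening.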
0xff : 0x00), @@ -277,6 +292,12 @@ DEF_MACRO( /*************************************/ DEF_MACRO( + fREAD_IREG, /* read modifier register */ + (fSXTN(11,64,(((VAL) & 0xf0000000)>>21) | ((VAL>>17)&0x7f) )), /* behavior */ + () +) + +DEF_MACRO( fREAD_LR, /* read link register */ (READ_RREG(REG_LR)), /* behavior */ () @@ -307,6 +328,12 @@ DEF_MACRO( ) DEF_MACRO( + fREAD_CSREG, /* read CS register */ + (READ_RREG(REG_CSA+N)), /* behavior */ + () +) + +DEF_MACRO( fREAD_LC0, /* read loop count */ (READ_RREG(REG_LC0)), /* behavior */ () @@ -807,6 +834,12 @@ DEF_MACRO( ) DEF_MACRO( + fEA_BREVR, /* Calculate EA with bit reversed bottom of REGISTER */ + EA=fbrev(REG), + () +) + +DEF_MACRO( fEA_GPI, /* Calculate EA with Global Poitner + Immediate */ do { EA=fREAD_GP()+IMM; fGP_DOCHKPAGECROSS(fREAD_GP(),EA); } while (0), () @@ -825,6 +858,20 @@ DEF_MACRO( ) DEF_MACRO( + fPM_CIRI, /* Post Modify Register using Circular arithmetic by Immediate */ + do { fcirc_add(REG,siV,MuV); } while (0), + () +) + +DEF_MACRO( + fPM_CIRR, /* Post Modify Register using Circular arithmetic by register */ + do { fcirc_add(REG,VAL,MuV); } while (0), + () +) + + + +DEF_MACRO( fSCALE, /* scale by N */ (((size8s_t)(A))<<N), /* optional attributes */ diff --git a/target/hexagon/imported/shift.idef b/target/hexagon/imported/shift.idef index e328ab7329..b32c4e04d1 100644 --- a/target/hexagon/imported/shift.idef +++ b/target/hexagon/imported/shift.idef @@ -1029,6 +1029,53 @@ Q6INSN(S4_clbpaddi,"Rd32=add(clb(Rss32),#s6)",ATTRIBS(A_ARCHV2), { RdV = (fMAX(fCL1_8(RssV),fCL1_8(~RssV)))+siV;}) + +Q6INSN(S2_cabacdecbin,"Rdd32=decbin(Rss32,Rtt32)",ATTRIBS(A_ARCHV3),"CABAC decode bin", +{ + fHIDE(size4u_t state;) + fHIDE(size4u_t valMPS;) + fHIDE(size4u_t bitpos;) + fHIDE(size4u_t range;) + fHIDE(size4u_t offset;) + fHIDE(size4u_t rLPS;) + fHIDE(size4u_t rMPS;) + + state = fEXTRACTU_RANGE( fGETWORD(1,RttV) ,5,0); + valMPS = fEXTRACTU_RANGE( fGETWORD(1,RttV) ,8,8); + bitpos = fEXTRACTU_RANGE( fGETWORD(0,RttV) ,4,0); + range = fGETWORD(0,RssV); + offset = fGETWORD(1,RssV); + + /* calculate rLPS */ + range <<= bitpos; + offset <<= bitpos; + rLPS = rLPS_table_64x4[state][ (range >>29)&3]; + rLPS = rLPS << 23; /* left aligned */ + + /* calculate rMPS */ + rMPS= (range&0xff800000) - rLPS; + + /* most probable region */ + if (offset < rMPS) { + RddV = AC_next_state_MPS_64[state]; + fINSERT_RANGE(RddV,8,8,valMPS); + fINSERT_RANGE(RddV,31,23,(rMPS>>23)); + fSETWORD(1,RddV,offset); + fWRITE_P0(valMPS); + + + } + /* least probable region */ + else { + RddV = AC_next_state_LPS_64[state]; + fINSERT_RANGE(RddV,8,8,((!state)?(1-valMPS):(valMPS))); + fINSERT_RANGE(RddV,31,23,(rLPS>>23)); + fSETWORD(1,RddV,(offset-rMPS)); + fWRITE_P0((valMPS^1)); + } +}) + + Q6INSN(S2_clb,"Rd32=clb(Rs32)",ATTRIBS(), "Count leading bits", {RdV = fMAX(fCL1_4(RsV),fCL1_4(~RsV));}) diff --git a/target/hexagon/insn.h b/target/hexagon/insn.h index 5756a1d0ca..2e345912a8 100644 --- a/target/hexagon/insn.h +++ b/target/hexagon/insn.h @@ -40,14 +40,15 @@ struct Instruction { uint32_t iclass:6; uint32_t slot:3; - uint32_t part1:1; /* + uint32_t which_extended:1; /* If has an extender, which immediate */ + uint32_t new_value_producer_slot:4; + + bool part1; /* * cmp-jumps are split into two insns. 
* set for the compare and clear for the jump */ - uint32_t extension_valid:1; /* Has a constant extender attached */ - uint32_t which_extended:1; /* If has an extender, which immediate */ - uint32_t is_endloop:1; /* This is an end of loop */ - uint32_t new_value_producer_slot:4; + bool extension_valid; /* Has a constant extender attached */ + bool is_endloop; /* This is an end of loop */ int32_t immed[IMMEDS_MAX]; /* immediate field */ }; @@ -58,13 +59,13 @@ struct Packet { uint16_t encod_pkt_size_in_bytes; /* Pre-decodes about COF */ - uint32_t pkt_has_cof:1; /* Has any change-of-flow */ - uint32_t pkt_has_endloop:1; + bool pkt_has_cof; /* Has any change-of-flow */ + bool pkt_has_endloop; - uint32_t pkt_has_dczeroa:1; + bool pkt_has_dczeroa; - uint32_t pkt_has_store_s0:1; - uint32_t pkt_has_store_s1:1; + bool pkt_has_store_s0; + bool pkt_has_store_s1; Insn insn[INSTRUCTIONS_MAX]; }; diff --git a/target/hexagon/internal.h b/target/hexagon/internal.h index 2da85c8606..6b20affdfa 100644 --- a/target/hexagon/internal.h +++ b/target/hexagon/internal.h @@ -22,11 +22,12 @@ * Change HEX_DEBUG to 1 to turn on debugging output */ #define HEX_DEBUG 0 -#if HEX_DEBUG -#define HEX_DEBUG_LOG(...) qemu_log(__VA_ARGS__) -#else -#define HEX_DEBUG_LOG(...) do { } while (0) -#endif +#define HEX_DEBUG_LOG(...) \ + do { \ + if (HEX_DEBUG) { \ + qemu_log(__VA_ARGS__); \ + } \ + } while (0) int hexagon_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int hexagon_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h index cfcb8173ba..b726c3b791 100644 --- a/target/hexagon/macros.h +++ b/target/hexagon/macros.h @@ -133,6 +133,38 @@ CHECK_NOSHUF; \ tcg_gen_qemu_ld64(DST, VA, ctx->mem_idx); \ } while (0) + +#define MEM_STORE1_FUNC(X) \ + __builtin_choose_expr(TYPE_INT(X), \ + gen_store1i, \ + __builtin_choose_expr(TYPE_TCGV(X), \ + gen_store1, (void)0)) +#define MEM_STORE1(VA, DATA, SLOT) \ + MEM_STORE1_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT) + +#define MEM_STORE2_FUNC(X) \ + __builtin_choose_expr(TYPE_INT(X), \ + gen_store2i, \ + __builtin_choose_expr(TYPE_TCGV(X), \ + gen_store2, (void)0)) +#define MEM_STORE2(VA, DATA, SLOT) \ + MEM_STORE2_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT) + +#define MEM_STORE4_FUNC(X) \ + __builtin_choose_expr(TYPE_INT(X), \ + gen_store4i, \ + __builtin_choose_expr(TYPE_TCGV(X), \ + gen_store4, (void)0)) +#define MEM_STORE4(VA, DATA, SLOT) \ + MEM_STORE4_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT) + +#define MEM_STORE8_FUNC(X) \ + __builtin_choose_expr(TYPE_INT(X), \ + gen_store8i, \ + __builtin_choose_expr(TYPE_TCGV_I64(X), \ + gen_store8, (void)0)) +#define MEM_STORE8(VA, DATA, SLOT) \ + MEM_STORE8_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT) #else #define MEM_LOAD1s(VA) ((int8_t)mem_load1(env, slot, VA)) #define MEM_LOAD1u(VA) ((uint8_t)mem_load1(env, slot, VA)) @@ -190,6 +222,13 @@ static inline void gen_pred_cancel(TCGv pred, int slot_num) (((HIBIT) - (LOWBIT) + 1) ? \ extract64((INREG), (LOWBIT), ((HIBIT) - (LOWBIT) + 1)) : \ 0LL) +#define fINSERT_RANGE(INREG, HIBIT, LOWBIT, INVAL) \ + do { \ + int width = ((HIBIT) - (LOWBIT) + 1); \ + INREG = (width >= 0 ? \ + deposit64((INREG), (LOWBIT), width, (INVAL)) : \ + INREG); \ + } while (0) #define f8BITSOF(VAL) ((VAL) ? 
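The MEM_STOREn macros above select between the immediate and TCGv store helpers at compile time with __builtin_choose_expr. The same GCC/Clang idiom reduced to a standalone example; store_imm and store_reg are made-up stand-ins for gen_storeNi/gen_storeN:

#include <stdio.h>

/* Compile-time dispatch on the C type of the value, like MEM_STOREn_FUNC. */
#define IS_INT(X) __builtin_types_compatible_p(__typeof__(X), int)

static void store_imm(int v)  { printf("immediate store: %d\n", v); }
static void store_reg(long v) { printf("register store:  %ld\n", v); }

#define STORE(X) __builtin_choose_expr(IS_INT(X), store_imm, store_reg)(X)

int main(void)
{
    STORE(42);     /* int: resolves to store_imm at compile time */
    STORE(42L);    /* long: resolves to store_reg */
    return 0;
}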
0xff : 0x00) @@ -285,6 +324,39 @@ static inline void gen_logical_not(TCGv dest, TCGv src) #define fPCALIGN(IMM) IMM = (IMM & ~PCALIGN_MASK) +#ifdef QEMU_GENERATE +static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) +{ + /* + * Section 2.2.4 of the Hexagon V67 Programmer's Reference Manual + * + * The "I" value from a modifier register is divided into two pieces + * LSB bits 23:17 + * MSB bits 31:28 + * The value is signed + * + * At the end we shift the result according to the shift argument + */ + TCGv msb = tcg_temp_new(); + TCGv lsb = tcg_temp_new(); + + tcg_gen_extract_tl(lsb, val, 17, 7); + tcg_gen_sari_tl(msb, val, 21); + tcg_gen_deposit_tl(result, msb, lsb, 0, 7); + + tcg_gen_shli_tl(result, result, shift); + + tcg_temp_free(msb); + tcg_temp_free(lsb); + + return result; +} +#define fREAD_IREG(VAL, SHIFT) gen_read_ireg(ireg, (VAL), (SHIFT)) +#else +#define fREAD_IREG(VAL) \ + (fSXTN(11, 64, (((VAL) & 0xf0000000) >> 21) | ((VAL >> 17) & 0x7f))) +#endif + #define fREAD_LR() (READ_REG(HEX_REG_LR)) #define fWRITE_LR(A) WRITE_RREG(HEX_REG_LR, A) @@ -341,8 +413,6 @@ static inline void gen_logical_not(TCGv dest, TCGv src) #define fWRITE_LC0(VAL) WRITE_RREG(HEX_REG_LC0, VAL) #define fWRITE_LC1(VAL) WRITE_RREG(HEX_REG_LC1, VAL) -#define fCARRY_FROM_ADD(A, B, C) carry_from_add64(A, B, C) - #define fSET_OVERFLOW() SET_USR_FIELD(USR_OVF, 1) #define fSET_LPCFG(VAL) SET_USR_FIELD(USR_LPCFG, (VAL)) #define fGET_LPCFG (GET_USR_FIELD(USR_LPCFG)) @@ -402,6 +472,21 @@ static inline void gen_logical_not(TCGv dest, TCGv src) #define fCAST8S_16S(A) (int128_exts64(A)) #define fCAST16S_8S(A) (int128_getlo(A)) +#ifdef QEMU_GENERATE +#define fEA_RI(REG, IMM) tcg_gen_addi_tl(EA, REG, IMM) +#define fEA_RRs(REG, REG2, SCALE) \ + do { \ + TCGv tmp = tcg_temp_new(); \ + tcg_gen_shli_tl(tmp, REG2, SCALE); \ + tcg_gen_add_tl(EA, REG, tmp); \ + tcg_temp_free(tmp); \ + } while (0) +#define fEA_IRs(IMM, REG, SCALE) \ + do { \ + tcg_gen_shli_tl(EA, REG, SCALE); \ + tcg_gen_addi_tl(EA, EA, IMM); \ + } while (0) +#else #define fEA_RI(REG, IMM) \ do { \ EA = REG + IMM; \ @@ -414,12 +499,21 @@ static inline void gen_logical_not(TCGv dest, TCGv src) do { \ EA = IMM + (REG << SCALE); \ } while (0) +#endif #ifdef QEMU_GENERATE #define fEA_IMM(IMM) tcg_gen_movi_tl(EA, IMM) #define fEA_REG(REG) tcg_gen_mov_tl(EA, REG) +#define fEA_BREVR(REG) gen_helper_fbrev(EA, REG) #define fPM_I(REG, IMM) tcg_gen_addi_tl(REG, REG, IMM) #define fPM_M(REG, MVAL) tcg_gen_add_tl(REG, REG, MVAL) +#define fPM_CIRI(REG, IMM, MVAL) \ + do { \ + TCGv tcgv_siV = tcg_const_tl(siV); \ + gen_helper_fcircadd(REG, REG, tcgv_siV, MuV, \ + hex_gpr[HEX_REG_CS0 + MuN]); \ + tcg_temp_free(tcgv_siV); \ + } while (0) #else #define fEA_IMM(IMM) do { EA = (IMM); } while (0) #define fEA_REG(REG) do { EA = (REG); } while (0) @@ -496,23 +590,43 @@ static inline void gen_logical_not(TCGv dest, TCGv src) gen_load_locked##SIZE##SIGN(DST, EA, ctx->mem_idx); #endif +#ifdef QEMU_GENERATE +#define fSTORE(NUM, SIZE, EA, SRC) MEM_STORE##SIZE(EA, SRC, insn->slot) +#else #define fSTORE(NUM, SIZE, EA, SRC) MEM_STORE##SIZE(EA, SRC, slot) +#endif #ifdef QEMU_GENERATE #define fSTORE_LOCKED(NUM, SIZE, EA, SRC, PRED) \ gen_store_conditional##SIZE(env, ctx, PdN, PRED, EA, SRC); #endif +#ifdef QEMU_GENERATE +#define GETBYTE_FUNC(X) \ + __builtin_choose_expr(TYPE_TCGV(X), \ + gen_get_byte, \ + __builtin_choose_expr(TYPE_TCGV_I64(X), \ + gen_get_byte_i64, (void)0)) +#define fGETBYTE(N, SRC) GETBYTE_FUNC(SRC)(BYTE, N, SRC, true) +#define fGETUBYTE(N, SRC) 
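As the comment in gen_read_ireg notes, the "I" increment is an 11-bit signed value whose MSBs sit in bits 31:28 of the modifier register and whose LSBs sit in bits 23:17. A standalone C sketch of that extraction, mirroring the non-QEMU_GENERATE fREAD_IREG macro; the helper name here is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Extract the signed 11-bit "I" value from a modifier register. */
static int64_t read_ireg(uint32_t mu)
{
    uint32_t bits = ((mu & 0xf0000000u) >> 21) |   /* bits 31:28 -> 10:7 */
                    ((mu >> 17) & 0x7fu);          /* bits 23:17 -> 6:0  */
    /* sign-extend from 11 bits, like fSXTN(11, 64, ...) */
    return (bits & 0x400) ? (int64_t)bits - 0x800 : (int64_t)bits;
}

int main(void)
{
    /* An all-ones I field decodes to -1 */
    uint32_t mu = 0xf0000000u | (0x7fu << 17);
    printf("%lld\n", (long long)read_ireg(mu));    /* prints -1 */
    return 0;
}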
GETBYTE_FUNC(SRC)(BYTE, N, SRC, false) +#else #define fGETBYTE(N, SRC) ((int8_t)((SRC >> ((N) * 8)) & 0xff)) #define fGETUBYTE(N, SRC) ((uint8_t)((SRC >> ((N) * 8)) & 0xff)) +#endif #define fSETBYTE(N, DST, VAL) \ do { \ DST = (DST & ~(0x0ffLL << ((N) * 8))) | \ (((uint64_t)((VAL) & 0x0ffLL)) << ((N) * 8)); \ } while (0) + +#ifdef QEMU_GENERATE +#define fGETHALF(N, SRC) gen_get_half(HALF, N, SRC, true) +#define fGETUHALF(N, SRC) gen_get_half(HALF, N, SRC, false) +#else #define fGETHALF(N, SRC) ((int16_t)((SRC >> ((N) * 16)) & 0xffff)) #define fGETUHALF(N, SRC) ((uint16_t)((SRC >> ((N) * 16)) & 0xffff)) +#endif #define fSETHALF(N, DST, VAL) \ do { \ DST = (DST & ~(0x0ffffLL << ((N) * 16))) | \ diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build index bb0b4fb621..6fd9360b74 100644 --- a/target/hexagon/meson.build +++ b/target/hexagon/meson.build @@ -173,7 +173,6 @@ hexagon_ss.add(files( 'printinsn.c', 'arch.c', 'fma_emu.c', - 'conv_emu.c', )) target_arch += {'hexagon': hexagon_ss} diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c index 2c6d718579..63dd685658 100644 --- a/target/hexagon/op_helper.c +++ b/target/hexagon/op_helper.c @@ -25,7 +25,6 @@ #include "arch.h" #include "hex_arch_types.h" #include "fma_emu.h" -#include "conv_emu.h" #define SF_BIAS 127 #define SF_MANTBITS 23 @@ -35,7 +34,7 @@ static void QEMU_NORETURN do_raise_exception_err(CPUHexagonState *env, uint32_t exception, uintptr_t pc) { - CPUState *cs = CPU(hexagon_env_get_cpu(env)); + CPUState *cs = env_cpu(env); qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception); cs->exception_index = exception; cpu_loop_exit_restore(cs, pc); @@ -46,8 +45,8 @@ void QEMU_NORETURN HELPER(raise_exception)(CPUHexagonState *env, uint32_t excp) do_raise_exception_err(env, excp, 0); } -static inline void log_reg_write(CPUHexagonState *env, int rnum, - target_ulong val, uint32_t slot) +static void log_reg_write(CPUHexagonState *env, int rnum, + target_ulong val, uint32_t slot) { HEX_DEBUG_LOG("log_reg_write[%d] = " TARGET_FMT_ld " (0x" TARGET_FMT_lx ")", rnum, val, val); @@ -57,14 +56,13 @@ static inline void log_reg_write(CPUHexagonState *env, int rnum, HEX_DEBUG_LOG("\n"); env->new_value[rnum] = val; -#if HEX_DEBUG - /* Do this so HELPER(debug_commit_end) will know */ - env->reg_written[rnum] = 1; -#endif + if (HEX_DEBUG) { + /* Do this so HELPER(debug_commit_end) will know */ + env->reg_written[rnum] = 1; + } } -static inline void log_pred_write(CPUHexagonState *env, int pnum, - target_ulong val) +static void log_pred_write(CPUHexagonState *env, int pnum, target_ulong val) { HEX_DEBUG_LOG("log_pred_write[%d] = " TARGET_FMT_ld " (0x" TARGET_FMT_lx ")\n", @@ -79,8 +77,8 @@ static inline void log_pred_write(CPUHexagonState *env, int pnum, } } -static inline void log_store32(CPUHexagonState *env, target_ulong addr, - target_ulong val, int width, int slot) +static void log_store32(CPUHexagonState *env, target_ulong addr, + target_ulong val, int width, int slot) { HEX_DEBUG_LOG("log_store%d(0x" TARGET_FMT_lx ", %" PRId32 " [0x08%" PRIx32 "])\n", @@ -90,8 +88,8 @@ static inline void log_store32(CPUHexagonState *env, target_ulong addr, env->mem_log_stores[slot].data32 = val; } -static inline void log_store64(CPUHexagonState *env, target_ulong addr, - int64_t val, int width, int slot) +static void log_store64(CPUHexagonState *env, target_ulong addr, + int64_t val, int width, int slot) { HEX_DEBUG_LOG("log_store%d(0x" TARGET_FMT_lx ", %" PRId64 " [0x016%" PRIx64 "])\n", @@ -101,7 +99,7 @@ static inline void 
log_store64(CPUHexagonState *env, target_ulong addr, env->mem_log_stores[slot].data64 = val; } -static inline void write_new_pc(CPUHexagonState *env, target_ulong addr) +static void write_new_pc(CPUHexagonState *env, target_ulong addr) { HEX_DEBUG_LOG("write_new_pc(0x" TARGET_FMT_lx ")\n", addr); @@ -119,7 +117,6 @@ static inline void write_new_pc(CPUHexagonState *env, target_ulong addr) } } -#if HEX_DEBUG /* Handy place to set a breakpoint */ void HELPER(debug_start_packet)(CPUHexagonState *env) { @@ -130,14 +127,12 @@ void HELPER(debug_start_packet)(CPUHexagonState *env) env->reg_written[i] = 0; } } -#endif -static inline int32_t new_pred_value(CPUHexagonState *env, int pnum) +static int32_t new_pred_value(CPUHexagonState *env, int pnum) { return env->new_pred_value[pnum]; } -#if HEX_DEBUG /* Checks for bookkeeping errors between disassembly context and runtime */ void HELPER(debug_check_store_width)(CPUHexagonState *env, int slot, int check) { @@ -147,7 +142,6 @@ void HELPER(debug_check_store_width)(CPUHexagonState *env, int slot, int check) g_assert_not_reached(); } } -#endif void HELPER(commit_store)(CPUHexagonState *env, int slot_num) { @@ -173,7 +167,6 @@ void HELPER(commit_store)(CPUHexagonState *env, int slot_num) } } -#if HEX_DEBUG static void print_store(CPUHexagonState *env, int slot) { if (!(env->slot_cancelled & (1 << slot))) { @@ -257,35 +250,26 @@ void HELPER(debug_commit_end)(CPUHexagonState *env, int has_st0, int has_st1) env->gpr[HEX_REG_QEMU_INSN_CNT]); } -#endif - -static int32_t fcircadd_v4(int32_t RxV, int32_t offset, int32_t M, int32_t CS) -{ - int32_t length = M & 0x0001ffff; - uint32_t new_ptr = RxV + offset; - uint32_t start_addr = CS; - uint32_t end_addr = start_addr + length; - - if (new_ptr >= end_addr) { - new_ptr -= length; - } else if (new_ptr < start_addr) { - new_ptr += length; - } - - return new_ptr; -} int32_t HELPER(fcircadd)(int32_t RxV, int32_t offset, int32_t M, int32_t CS) { - int32_t K_const = (M >> 24) & 0xf; - int32_t length = M & 0x1ffff; - int32_t mask = (1 << (K_const + 2)) - 1; + int32_t K_const = sextract32(M, 24, 4); + int32_t length = sextract32(M, 0, 17); uint32_t new_ptr = RxV + offset; - uint32_t start_addr = RxV & (~mask); - uint32_t end_addr = start_addr | length; + uint32_t start_addr; + uint32_t end_addr; if (K_const == 0 && length >= 4) { - return fcircadd_v4(RxV, offset, M, CS); + start_addr = CS; + end_addr = start_addr + length; + } else { + /* + * Versions v3 and earlier used the K value to specify a power-of-2 size + * 2^(K+2) that is greater than the buffer length + */ + int32_t mask = (1 << (K_const + 2)) - 1; + start_addr = RxV & (~mask); + end_addr = start_addr | length; } if (new_ptr >= end_addr) { @@ -297,24 +281,103 @@ int32_t HELPER(fcircadd)(int32_t RxV, int32_t offset, int32_t M, int32_t CS) return new_ptr; } +uint32_t HELPER(fbrev)(uint32_t addr) +{ + /* + * Bit reverse the low 16 bits of the address + */ + return deposit32(addr, 0, 16, revbit16(addr)); +} + +static float32 build_float32(uint8_t sign, uint32_t exp, uint32_t mant) +{ + return make_float32( + ((sign & 1) << 31) | + ((exp & 0xff) << SF_MANTBITS) | + (mant & ((1 << SF_MANTBITS) - 1))); +} + /* - * Hexagon FP operations return ~0 insteat of NaN - * The hex_check_sfnan/hex_check_dfnan functions perform this check + * sfrecipa, sfinvsqrta have two 32-bit results + * r0,p0=sfrecipa(r1,r2) + * r0,p0=sfinvsqrta(r1) + * + * Since helpers can only return a single value, we pack the two results + * into a 64-bit value. 
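For the common case in HELPER(fcircadd) above (K == 0 and length >= 4), the post-incremented pointer is wrapped back into the circular buffer [CS, CS + length). A simplified standalone model of just that path, ignoring the legacy power-of-two handling kept for v3 and earlier:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap the post-incremented pointer into [start, start + length). */
static uint32_t circ_add(uint32_t rx, int32_t offset, uint32_t length, uint32_t cs)
{
    uint32_t start = cs;            /* buffer start comes from the CS register */
    uint32_t end = start + length;
    uint32_t p = rx + offset;

    if (p >= end) {
        p -= length;
    } else if (p < start) {
        p += length;
    }
    return p;
}

int main(void)
{
    /* 16-byte circular buffer at 0x1000: advancing 0x100c by 8 wraps to 0x1004 */
    printf("0x%" PRIx32 "\n", circ_add(0x100c, 8, 16, 0x1000));
    return 0;
}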
*/ -static float32 hex_check_sfnan(float32 x) +uint64_t HELPER(sfrecipa)(CPUHexagonState *env, float32 RsV, float32 RtV) { - if (float32_is_any_nan(x)) { - return make_float32(0xFFFFFFFFU); + int32_t PeV = 0; + float32 RdV; + int idx; + int adjust; + int mant; + int exp; + + arch_fpop_start(env); + if (arch_sf_recip_common(&RsV, &RtV, &RdV, &adjust, &env->fp_status)) { + PeV = adjust; + idx = (RtV >> 16) & 0x7f; + mant = (recip_lookup_table[idx] << 15) | 1; + exp = SF_BIAS - (float32_getexp(RtV) - SF_BIAS) - 1; + RdV = build_float32(extract32(RtV, 31, 1), exp, mant); + } + arch_fpop_end(env); + return ((uint64_t)RdV << 32) | PeV; +} + +uint64_t HELPER(sfinvsqrta)(CPUHexagonState *env, float32 RsV) +{ + int PeV = 0; + float32 RdV; + int idx; + int adjust; + int mant; + int exp; + + arch_fpop_start(env); + if (arch_sf_invsqrt_common(&RsV, &RdV, &adjust, &env->fp_status)) { + PeV = adjust; + idx = (RsV >> 17) & 0x7f; + mant = (invsqrt_lookup_table[idx] << 15); + exp = SF_BIAS - ((float32_getexp(RsV) - SF_BIAS) >> 1) - 1; + RdV = build_float32(extract32(RsV, 31, 1), exp, mant); + } + arch_fpop_end(env); + return ((uint64_t)RdV << 32) | PeV; +} + +int64_t HELPER(vacsh_val)(CPUHexagonState *env, + int64_t RxxV, int64_t RssV, int64_t RttV) +{ + for (int i = 0; i < 4; i++) { + int xv = sextract64(RxxV, i * 16, 16); + int sv = sextract64(RssV, i * 16, 16); + int tv = sextract64(RttV, i * 16, 16); + int max; + xv = xv + tv; + sv = sv - tv; + max = xv > sv ? xv : sv; + /* Note that fSATH can set the OVF bit in usr */ + RxxV = deposit64(RxxV, i * 16, 16, fSATH(max)); } - return x; + return RxxV; } -static float64 hex_check_dfnan(float64 x) +int32_t HELPER(vacsh_pred)(CPUHexagonState *env, + int64_t RxxV, int64_t RssV, int64_t RttV) { - if (float64_is_any_nan(x)) { - return make_float64(0xFFFFFFFFFFFFFFFFULL); + int32_t PeV = 0; + for (int i = 0; i < 4; i++) { + int xv = sextract64(RxxV, i * 16, 16); + int sv = sextract64(RssV, i * 16, 16); + int tv = sextract64(RttV, i * 16, 16); + xv = xv + tv; + sv = sv - tv; + PeV = deposit32(PeV, i * 2, 1, (xv > sv)); + PeV = deposit32(PeV, i * 2 + 1, 1, (xv > sv)); } - return x; + return PeV; } /* @@ -332,8 +395,8 @@ static void check_noshuf(CPUHexagonState *env, uint32_t slot) } } -static inline uint8_t mem_load1(CPUHexagonState *env, uint32_t slot, - target_ulong vaddr) +static uint8_t mem_load1(CPUHexagonState *env, uint32_t slot, + target_ulong vaddr) { uint8_t retval; check_noshuf(env, slot); @@ -341,8 +404,8 @@ static inline uint8_t mem_load1(CPUHexagonState *env, uint32_t slot, return retval; } -static inline uint16_t mem_load2(CPUHexagonState *env, uint32_t slot, - target_ulong vaddr) +static uint16_t mem_load2(CPUHexagonState *env, uint32_t slot, + target_ulong vaddr) { uint16_t retval; check_noshuf(env, slot); @@ -350,8 +413,8 @@ static inline uint16_t mem_load2(CPUHexagonState *env, uint32_t slot, return retval; } -static inline uint32_t mem_load4(CPUHexagonState *env, uint32_t slot, - target_ulong vaddr) +static uint32_t mem_load4(CPUHexagonState *env, uint32_t slot, + target_ulong vaddr) { uint32_t retval; check_noshuf(env, slot); @@ -359,8 +422,8 @@ static inline uint32_t mem_load4(CPUHexagonState *env, uint32_t slot, return retval; } -static inline uint64_t mem_load8(CPUHexagonState *env, uint32_t slot, - target_ulong vaddr) +static uint64_t mem_load8(CPUHexagonState *env, uint32_t slot, + target_ulong vaddr) { uint64_t retval; check_noshuf(env, slot); @@ -374,7 +437,6 @@ float64 HELPER(conv_sf2df)(CPUHexagonState *env, float32 RsV) float64 
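Because the sfrecipa and sfinvsqrta helpers can only return a single value, RdV is packed into the upper 32 bits and the predicate result into the lower 32. A trivial sketch of how a caller would split the packed value; this is illustrative only, not the actual generated TCG code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Split the packed result: RdV lives in bits 63:32, the predicate in the low word. */
static void unpack_rd_pe(uint64_t packed, uint32_t *rd, uint32_t *pe)
{
    *rd = (uint32_t)(packed >> 32);
    *pe = (uint32_t)packed;
}

int main(void)
{
    uint32_t rd, pe;
    unpack_rd_pe(((uint64_t)0x3f000000 << 32) | 0x1, &rd, &pe);
    printf("RdV=0x%08" PRIx32 " Pe=%" PRIu32 "\n", rd, pe);   /* RdV=0x3f000000 Pe=1 */
    return 0;
}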
out_f64; arch_fpop_start(env); out_f64 = float32_to_float64(RsV, &env->fp_status); - out_f64 = hex_check_dfnan(out_f64); arch_fpop_end(env); return out_f64; } @@ -384,7 +446,6 @@ float32 HELPER(conv_df2sf)(CPUHexagonState *env, float64 RssV) float32 out_f32; arch_fpop_start(env); out_f32 = float64_to_float32(RssV, &env->fp_status); - out_f32 = hex_check_sfnan(out_f32); arch_fpop_end(env); return out_f32; } @@ -394,7 +455,6 @@ float32 HELPER(conv_uw2sf)(CPUHexagonState *env, int32_t RsV) float32 RdV; arch_fpop_start(env); RdV = uint32_to_float32(RsV, &env->fp_status); - RdV = hex_check_sfnan(RdV); arch_fpop_end(env); return RdV; } @@ -404,7 +464,6 @@ float64 HELPER(conv_uw2df)(CPUHexagonState *env, int32_t RsV) float64 RddV; arch_fpop_start(env); RddV = uint32_to_float64(RsV, &env->fp_status); - RddV = hex_check_dfnan(RddV); arch_fpop_end(env); return RddV; } @@ -414,7 +473,6 @@ float32 HELPER(conv_w2sf)(CPUHexagonState *env, int32_t RsV) float32 RdV; arch_fpop_start(env); RdV = int32_to_float32(RsV, &env->fp_status); - RdV = hex_check_sfnan(RdV); arch_fpop_end(env); return RdV; } @@ -424,7 +482,6 @@ float64 HELPER(conv_w2df)(CPUHexagonState *env, int32_t RsV) float64 RddV; arch_fpop_start(env); RddV = int32_to_float64(RsV, &env->fp_status); - RddV = hex_check_dfnan(RddV); arch_fpop_end(env); return RddV; } @@ -434,7 +491,6 @@ float32 HELPER(conv_ud2sf)(CPUHexagonState *env, int64_t RssV) float32 RdV; arch_fpop_start(env); RdV = uint64_to_float32(RssV, &env->fp_status); - RdV = hex_check_sfnan(RdV); arch_fpop_end(env); return RdV; } @@ -444,7 +500,6 @@ float64 HELPER(conv_ud2df)(CPUHexagonState *env, int64_t RssV) float64 RddV; arch_fpop_start(env); RddV = uint64_to_float64(RssV, &env->fp_status); - RddV = hex_check_dfnan(RddV); arch_fpop_end(env); return RddV; } @@ -454,7 +509,6 @@ float32 HELPER(conv_d2sf)(CPUHexagonState *env, int64_t RssV) float32 RdV; arch_fpop_start(env); RdV = int64_to_float32(RssV, &env->fp_status); - RdV = hex_check_sfnan(RdV); arch_fpop_end(env); return RdV; } @@ -464,16 +518,21 @@ float64 HELPER(conv_d2df)(CPUHexagonState *env, int64_t RssV) float64 RddV; arch_fpop_start(env); RddV = int64_to_float64(RssV, &env->fp_status); - RddV = hex_check_dfnan(RddV); arch_fpop_end(env); return RddV; } -int32_t HELPER(conv_sf2uw)(CPUHexagonState *env, float32 RsV) +uint32_t HELPER(conv_sf2uw)(CPUHexagonState *env, float32 RsV) { - int32_t RdV; + uint32_t RdV; arch_fpop_start(env); - RdV = conv_sf_to_4u(RsV, &env->fp_status); + /* Hexagon checks the sign before rounding */ + if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) { + float_raise(float_flag_invalid, &env->fp_status); + RdV = 0; + } else { + RdV = float32_to_uint32(RsV, &env->fp_status); + } arch_fpop_end(env); return RdV; } @@ -482,16 +541,28 @@ int32_t HELPER(conv_sf2w)(CPUHexagonState *env, float32 RsV) { int32_t RdV; arch_fpop_start(env); - RdV = conv_sf_to_4s(RsV, &env->fp_status); + /* Hexagon returns -1 for NaN */ + if (float32_is_any_nan(RsV)) { + float_raise(float_flag_invalid, &env->fp_status); + RdV = -1; + } else { + RdV = float32_to_int32(RsV, &env->fp_status); + } arch_fpop_end(env); return RdV; } -int64_t HELPER(conv_sf2ud)(CPUHexagonState *env, float32 RsV) +uint64_t HELPER(conv_sf2ud)(CPUHexagonState *env, float32 RsV) { - int64_t RddV; + uint64_t RddV; arch_fpop_start(env); - RddV = conv_sf_to_8u(RsV, &env->fp_status); + /* Hexagon checks the sign before rounding */ + if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) { + float_raise(float_flag_invalid, &env->fp_status); + RddV = 0; + } else { + 
RddV = float32_to_uint64(RsV, &env->fp_status); + } arch_fpop_end(env); return RddV; } @@ -500,16 +571,28 @@ int64_t HELPER(conv_sf2d)(CPUHexagonState *env, float32 RsV) { int64_t RddV; arch_fpop_start(env); - RddV = conv_sf_to_8s(RsV, &env->fp_status); + /* Hexagon returns -1 for NaN */ + if (float32_is_any_nan(RsV)) { + float_raise(float_flag_invalid, &env->fp_status); + RddV = -1; + } else { + RddV = float32_to_int64(RsV, &env->fp_status); + } arch_fpop_end(env); return RddV; } -int32_t HELPER(conv_df2uw)(CPUHexagonState *env, float64 RssV) +uint32_t HELPER(conv_df2uw)(CPUHexagonState *env, float64 RssV) { - int32_t RdV; + uint32_t RdV; arch_fpop_start(env); - RdV = conv_df_to_4u(RssV, &env->fp_status); + /* Hexagon checks the sign before rounding */ + if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) { + float_raise(float_flag_invalid, &env->fp_status); + RdV = 0; + } else { + RdV = float64_to_uint32(RssV, &env->fp_status); + } arch_fpop_end(env); return RdV; } @@ -518,16 +601,28 @@ int32_t HELPER(conv_df2w)(CPUHexagonState *env, float64 RssV) { int32_t RdV; arch_fpop_start(env); - RdV = conv_df_to_4s(RssV, &env->fp_status); + /* Hexagon returns -1 for NaN */ + if (float64_is_any_nan(RssV)) { + float_raise(float_flag_invalid, &env->fp_status); + RdV = -1; + } else { + RdV = float64_to_int32(RssV, &env->fp_status); + } arch_fpop_end(env); return RdV; } -int64_t HELPER(conv_df2ud)(CPUHexagonState *env, float64 RssV) +uint64_t HELPER(conv_df2ud)(CPUHexagonState *env, float64 RssV) { - int64_t RddV; + uint64_t RddV; arch_fpop_start(env); - RddV = conv_df_to_8u(RssV, &env->fp_status); + /* Hexagon checks the sign before rounding */ + if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) { + float_raise(float_flag_invalid, &env->fp_status); + RddV = 0; + } else { + RddV = float64_to_uint64(RssV, &env->fp_status); + } arch_fpop_end(env); return RddV; } @@ -536,17 +631,28 @@ int64_t HELPER(conv_df2d)(CPUHexagonState *env, float64 RssV) { int64_t RddV; arch_fpop_start(env); - RddV = conv_df_to_8s(RssV, &env->fp_status); + /* Hexagon returns -1 for NaN */ + if (float64_is_any_nan(RssV)) { + float_raise(float_flag_invalid, &env->fp_status); + RddV = -1; + } else { + RddV = float64_to_int64(RssV, &env->fp_status); + } arch_fpop_end(env); return RddV; } -int32_t HELPER(conv_sf2uw_chop)(CPUHexagonState *env, float32 RsV) +uint32_t HELPER(conv_sf2uw_chop)(CPUHexagonState *env, float32 RsV) { - int32_t RdV; + uint32_t RdV; arch_fpop_start(env); - set_float_rounding_mode(float_round_to_zero, &env->fp_status); - RdV = conv_sf_to_4u(RsV, &env->fp_status); + /* Hexagon checks the sign before rounding */ + if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) { + float_raise(float_flag_invalid, &env->fp_status); + RdV = 0; + } else { + RdV = float32_to_uint32_round_to_zero(RsV, &env->fp_status); + } arch_fpop_end(env); return RdV; } @@ -555,18 +661,28 @@ int32_t HELPER(conv_sf2w_chop)(CPUHexagonState *env, float32 RsV) { int32_t RdV; arch_fpop_start(env); - set_float_rounding_mode(float_round_to_zero, &env->fp_status); - RdV = conv_sf_to_4s(RsV, &env->fp_status); + /* Hexagon returns -1 for NaN */ + if (float32_is_any_nan(RsV)) { + float_raise(float_flag_invalid, &env->fp_status); + RdV = -1; + } else { + RdV = float32_to_int32_round_to_zero(RsV, &env->fp_status); + } arch_fpop_end(env); return RdV; } -int64_t HELPER(conv_sf2ud_chop)(CPUHexagonState *env, float32 RsV) +uint64_t HELPER(conv_sf2ud_chop)(CPUHexagonState *env, float32 RsV) { - int64_t RddV; + uint64_t RddV; arch_fpop_start(env); - 
set_float_rounding_mode(float_round_to_zero, &env->fp_status); - RddV = conv_sf_to_8u(RsV, &env->fp_status); + /* Hexagon checks the sign before rounding */ + if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) { + float_raise(float_flag_invalid, &env->fp_status); + RddV = 0; + } else { + RddV = float32_to_uint64_round_to_zero(RsV, &env->fp_status); + } arch_fpop_end(env); return RddV; } @@ -575,18 +691,28 @@ int64_t HELPER(conv_sf2d_chop)(CPUHexagonState *env, float32 RsV) { int64_t RddV; arch_fpop_start(env); - set_float_rounding_mode(float_round_to_zero, &env->fp_status); - RddV = conv_sf_to_8s(RsV, &env->fp_status); + /* Hexagon returns -1 for NaN */ + if (float32_is_any_nan(RsV)) { + float_raise(float_flag_invalid, &env->fp_status); + RddV = -1; + } else { + RddV = float32_to_int64_round_to_zero(RsV, &env->fp_status); + } arch_fpop_end(env); return RddV; } -int32_t HELPER(conv_df2uw_chop)(CPUHexagonState *env, float64 RssV) +uint32_t HELPER(conv_df2uw_chop)(CPUHexagonState *env, float64 RssV) { - int32_t RdV; + uint32_t RdV; arch_fpop_start(env); - set_float_rounding_mode(float_round_to_zero, &env->fp_status); - RdV = conv_df_to_4u(RssV, &env->fp_status); + /* Hexagon checks the sign before rounding */ + if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) { + float_raise(float_flag_invalid, &env->fp_status); + RdV = 0; + } else { + RdV = float64_to_uint32_round_to_zero(RssV, &env->fp_status); + } arch_fpop_end(env); return RdV; } @@ -595,18 +721,28 @@ int32_t HELPER(conv_df2w_chop)(CPUHexagonState *env, float64 RssV) { int32_t RdV; arch_fpop_start(env); - set_float_rounding_mode(float_round_to_zero, &env->fp_status); - RdV = conv_df_to_4s(RssV, &env->fp_status); + /* Hexagon returns -1 for NaN */ + if (float64_is_any_nan(RssV)) { + float_raise(float_flag_invalid, &env->fp_status); + RdV = -1; + } else { + RdV = float64_to_int32_round_to_zero(RssV, &env->fp_status); + } arch_fpop_end(env); return RdV; } -int64_t HELPER(conv_df2ud_chop)(CPUHexagonState *env, float64 RssV) +uint64_t HELPER(conv_df2ud_chop)(CPUHexagonState *env, float64 RssV) { - int64_t RddV; + uint64_t RddV; arch_fpop_start(env); - set_float_rounding_mode(float_round_to_zero, &env->fp_status); - RddV = conv_df_to_8u(RssV, &env->fp_status); + /* Hexagon checks the sign before rounding */ + if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) { + float_raise(float_flag_invalid, &env->fp_status); + RddV = 0; + } else { + RddV = float64_to_uint64_round_to_zero(RssV, &env->fp_status); + } arch_fpop_end(env); return RddV; } @@ -615,8 +751,13 @@ int64_t HELPER(conv_df2d_chop)(CPUHexagonState *env, float64 RssV) { int64_t RddV; arch_fpop_start(env); - set_float_rounding_mode(float_round_to_zero, &env->fp_status); - RddV = conv_df_to_8s(RssV, &env->fp_status); + /* Hexagon returns -1 for NaN */ + if (float64_is_any_nan(RssV)) { + float_raise(float_flag_invalid, &env->fp_status); + RddV = -1; + } else { + RddV = float64_to_int64_round_to_zero(RssV, &env->fp_status); + } arch_fpop_end(env); return RddV; } @@ -626,7 +767,6 @@ float32 HELPER(sfadd)(CPUHexagonState *env, float32 RsV, float32 RtV) { float32 RdV; arch_fpop_start(env); RdV = float32_add(RsV, RtV, &env->fp_status); - RdV = hex_check_sfnan(RdV); arch_fpop_end(env); return RdV; } @@ -636,7 +776,6 @@ float32 HELPER(sfsub)(CPUHexagonState *env, float32 RsV, float32 RtV) { float32 RdV; arch_fpop_start(env); RdV = float32_sub(RsV, RtV, &env->fp_status); - RdV = hex_check_sfnan(RdV); arch_fpop_end(env); return RdV; } @@ -688,7 +827,6 @@ float32 HELPER(sfmax)(CPUHexagonState
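The rewritten conversion helpers encode the Hexagon-specific corner cases directly: negative inputs to the unsigned conversions produce 0 and NaN inputs to the signed conversions produce -1, raising the invalid flag in both cases. A plain C sketch of just that sign/NaN policy; rounding, saturation and status flags are deliberately left out:

#include <inttypes.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Hexagon-flavoured float32 -> uint32 policy (sketch only). */
static uint32_t sf_to_uw(float x)
{
    if (isnan(x)) {
        return 0;          /* the real helper defers NaN to float32_to_uint32() */
    }
    if (signbit(x)) {
        return 0;          /* negative input: result 0, invalid flag raised */
    }
    return (uint32_t)x;
}

/* Hexagon-flavoured float32 -> int32 policy (sketch only). */
static int32_t sf_to_w(float x)
{
    if (isnan(x)) {
        return -1;         /* NaN: result -1, invalid flag raised */
    }
    return (int32_t)x;
}

int main(void)
{
    printf("%" PRIu32 " %" PRId32 "\n", sf_to_uw(-1.5f), sf_to_w(NAN));  /* 0 -1 */
    return 0;
}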
*env, float32 RsV, float32 RtV) float32 RdV; arch_fpop_start(env); RdV = float32_maxnum(RsV, RtV, &env->fp_status); - RdV = hex_check_sfnan(RdV); arch_fpop_end(env); return RdV; } @@ -698,7 +836,6 @@ float32 HELPER(sfmin)(CPUHexagonState *env, float32 RsV, float32 RtV) float32 RdV; arch_fpop_start(env); RdV = float32_minnum(RsV, RtV, &env->fp_status); - RdV = hex_check_sfnan(RdV); arch_fpop_end(env); return RdV; } @@ -765,7 +902,6 @@ float64 HELPER(dfadd)(CPUHexagonState *env, float64 RssV, float64 RttV) float64 RddV; arch_fpop_start(env); RddV = float64_add(RssV, RttV, &env->fp_status); - RddV = hex_check_dfnan(RddV); arch_fpop_end(env); return RddV; } @@ -775,7 +911,6 @@ float64 HELPER(dfsub)(CPUHexagonState *env, float64 RssV, float64 RttV) float64 RddV; arch_fpop_start(env); RddV = float64_sub(RssV, RttV, &env->fp_status); - RddV = hex_check_dfnan(RddV); arch_fpop_end(env); return RddV; } @@ -788,7 +923,6 @@ float64 HELPER(dfmax)(CPUHexagonState *env, float64 RssV, float64 RttV) if (float64_is_any_nan(RssV) || float64_is_any_nan(RttV)) { float_raise(float_flag_invalid, &env->fp_status); } - RddV = hex_check_dfnan(RddV); arch_fpop_end(env); return RddV; } @@ -801,7 +935,6 @@ float64 HELPER(dfmin)(CPUHexagonState *env, float64 RssV, float64 RttV) if (float64_is_any_nan(RssV) || float64_is_any_nan(RttV)) { float_raise(float_flag_invalid, &env->fp_status); } - RddV = hex_check_dfnan(RddV); arch_fpop_end(env); return RddV; } @@ -877,7 +1010,6 @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV) float32 RdV; arch_fpop_start(env); RdV = internal_mpyf(RsV, RtV, &env->fp_status); - RdV = hex_check_sfnan(RdV); arch_fpop_end(env); return RdV; } @@ -887,7 +1019,6 @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV, { arch_fpop_start(env); RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status); - RxV = hex_check_sfnan(RxV); arch_fpop_end(env); return RxV; } @@ -919,7 +1050,6 @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV, RxV = check_nan(RxV, RsV, &env->fp_status); RxV = check_nan(RxV, RtV, &env->fp_status); tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status); - tmp = hex_check_sfnan(tmp); if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) { RxV = tmp; } @@ -934,12 +1064,11 @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV, arch_fpop_start(env); neg_RsV = float32_sub(float32_zero, RsV, &env->fp_status); RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status); - RxV = hex_check_sfnan(RxV); arch_fpop_end(env); return RxV; } -static inline bool is_inf_prod(int32_t a, int32_t b) +static bool is_inf_prod(int32_t a, int32_t b) { return (float32_is_infinity(a) && float32_is_infinity(b)) || (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) || @@ -949,8 +1078,8 @@ static inline bool is_inf_prod(int32_t a, int32_t b) float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV, float32 RsV, float32 RtV) { - int infinp; - int infminusinf; + bool infinp; + bool infminusinf; float32 tmp; arch_fpop_start(env); @@ -965,7 +1094,6 @@ float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV, RxV = check_nan(RxV, RsV, &env->fp_status); RxV = check_nan(RxV, RtV, &env->fp_status); tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status); - tmp = hex_check_sfnan(tmp); if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) { RxV = tmp; } @@ -983,8 +1111,8 @@ float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV, float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV, float32 RsV, float32 RtV) { - int infinp; - int 
infminusinf; + bool infinp; + bool infminusinf; float32 tmp; arch_fpop_start(env); @@ -1000,7 +1128,6 @@ float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV, RxV = check_nan(RxV, RtV, &env->fp_status); float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status); tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status); - tmp = hex_check_sfnan(tmp); if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) { RxV = tmp; } @@ -1024,13 +1151,11 @@ float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV) float64_is_normal(RttV)) { RddV = float64_mul(RssV, make_float64(0x4330000000000000), &env->fp_status); - RddV = hex_check_dfnan(RddV); } else if (float64_is_denormal(RttV) && (float64_getexp(RssV) >= 512) && float64_is_normal(RssV)) { RddV = float64_mul(RssV, make_float64(0x3cb0000000000000), &env->fp_status); - RddV = hex_check_dfnan(RddV); } else { RddV = RssV; } @@ -1043,7 +1168,6 @@ float64 HELPER(dfmpyhh)(CPUHexagonState *env, float64 RxxV, { arch_fpop_start(env); RxxV = internal_mpyhh(RssV, RttV, RxxV, &env->fp_status); - RxxV = hex_check_dfnan(RxxV); arch_fpop_end(env); return RxxV; } diff --git a/target/hexagon/reg_fields.c b/target/hexagon/reg_fields.c index bdcab79428..6713203725 100644 --- a/target/hexagon/reg_fields.c +++ b/target/hexagon/reg_fields.c @@ -18,10 +18,9 @@ #include "qemu/osdep.h" #include "reg_fields.h" -const RegField reg_field_info[] = { +const RegField reg_field_info[NUM_REG_FIELDS] = { #define DEF_REG_FIELD(TAG, START, WIDTH) \ { START, WIDTH }, #include "reg_fields_def.h.inc" - { 0, 0 } #undef DEF_REG_FIELD }; diff --git a/target/hexagon/reg_fields.h b/target/hexagon/reg_fields.h index d3c86c942f..9e2ad5d997 100644 --- a/target/hexagon/reg_fields.h +++ b/target/hexagon/reg_fields.h @@ -23,8 +23,6 @@ typedef struct { int width; } RegField; -extern const RegField reg_field_info[]; - enum { #define DEF_REG_FIELD(TAG, START, WIDTH) \ TAG, @@ -33,4 +31,6 @@ enum { #undef DEF_REG_FIELD }; +extern const RegField reg_field_info[NUM_REG_FIELDS]; + #endif diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c index eeaad5f8ba..9a37644182 100644 --- a/target/hexagon/translate.c +++ b/target/hexagon/translate.c @@ -35,9 +35,7 @@ TCGv hex_this_PC; TCGv hex_slot_cancelled; TCGv hex_branch_taken; TCGv hex_new_value[TOTAL_PER_THREAD_REGS]; -#if HEX_DEBUG TCGv hex_reg_written[TOTAL_PER_THREAD_REGS]; -#endif TCGv hex_new_pred_value[NUM_PREGS]; TCGv hex_pred_written; TCGv hex_store_addr[STORES_MAX]; @@ -54,19 +52,42 @@ static const char * const hexagon_prednames[] = { "p0", "p1", "p2", "p3" }; -void gen_exception(int excp) +static void gen_exception_raw(int excp) { TCGv_i32 helper_tmp = tcg_const_i32(excp); gen_helper_raise_exception(cpu_env, helper_tmp); tcg_temp_free_i32(helper_tmp); } -void gen_exception_debug(void) +static void gen_exec_counters(DisasContext *ctx) { - gen_exception(EXCP_DEBUG); + tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_PKT_CNT], + hex_gpr[HEX_REG_QEMU_PKT_CNT], ctx->num_packets); + tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_INSN_CNT], + hex_gpr[HEX_REG_QEMU_INSN_CNT], ctx->num_insns); +} + +static void gen_end_tb(DisasContext *ctx) +{ + gen_exec_counters(ctx); + tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC); + if (ctx->base.singlestep_enabled) { + gen_exception_raw(EXCP_DEBUG); + } else { + tcg_gen_exit_tb(NULL, 0); + } + ctx->base.is_jmp = DISAS_NORETURN; +} + +static void gen_exception_end_tb(DisasContext *ctx, int excp) +{ + gen_exec_counters(ctx); + tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC); + 
gen_exception_raw(excp); + ctx->base.is_jmp = DISAS_NORETURN; + } -#if HEX_DEBUG #define PACKET_BUFFER_LEN 1028 static void print_pkt(Packet *pkt) { @@ -75,10 +96,12 @@ static void print_pkt(Packet *pkt) HEX_DEBUG_LOG("%s", buf->str); g_string_free(buf, true); } -#define HEX_DEBUG_PRINT_PKT(pkt) print_pkt(pkt) -#else -#define HEX_DEBUG_PRINT_PKT(pkt) /* nothing */ -#endif +#define HEX_DEBUG_PRINT_PKT(pkt) \ + do { \ + if (HEX_DEBUG) { \ + print_pkt(pkt); \ + } \ + } while (0) static int read_packet_words(CPUHexagonState *env, DisasContext *ctx, uint32_t words[]) @@ -88,8 +111,8 @@ static int read_packet_words(CPUHexagonState *env, DisasContext *ctx, memset(words, 0, PACKET_WORDS_MAX * sizeof(uint32_t)); for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) { - words[nwords] = cpu_ldl_code(env, - ctx->base.pc_next + nwords * sizeof(uint32_t)); + words[nwords] = + translator_ldl(env, ctx->base.pc_next + nwords * sizeof(uint32_t)); found_end = is_packet_end(words[nwords]); } if (!found_end) { @@ -148,17 +171,18 @@ static void gen_start_packet(DisasContext *ctx, Packet *pkt) ctx->reg_log_idx = 0; bitmap_zero(ctx->regs_written, TOTAL_PER_THREAD_REGS); ctx->preg_log_idx = 0; + bitmap_zero(ctx->pregs_written, NUM_PREGS); for (i = 0; i < STORES_MAX; i++) { ctx->store_width[i] = 0; } tcg_gen_movi_tl(hex_pkt_has_store_s1, pkt->pkt_has_store_s1); - ctx->s1_store_processed = 0; + ctx->s1_store_processed = false; -#if HEX_DEBUG - /* Handy place to set a breakpoint before the packet executes */ - gen_helper_debug_start_packet(cpu_env); - tcg_gen_movi_tl(hex_this_PC, ctx->base.pc_next); -#endif + if (HEX_DEBUG) { + /* Handy place to set a breakpoint before the packet executes */ + gen_helper_debug_start_packet(cpu_env); + tcg_gen_movi_tl(hex_this_PC, ctx->base.pc_next); + } /* Initialize the runtime state for packet semantics */ if (need_pc(pkt)) { @@ -185,7 +209,7 @@ static void mark_implicit_reg_write(DisasContext *ctx, Insn *insn, int attrib, int rnum) { if (GET_ATTRIB(insn->opcode, attrib)) { - int is_predicated = GET_ATTRIB(insn->opcode, A_CONDEXEC); + bool is_predicated = GET_ATTRIB(insn->opcode, A_CONDEXEC); if (is_predicated && !is_preloaded(ctx, rnum)) { tcg_gen_mov_tl(hex_new_value[rnum], hex_gpr[rnum]); } @@ -202,7 +226,7 @@ static void mark_implicit_pred_write(DisasContext *ctx, Insn *insn, } } -static void mark_implicit_writes(DisasContext *ctx, Insn *insn) +static void mark_implicit_reg_writes(DisasContext *ctx, Insn *insn) { mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_FP, HEX_REG_FP); mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SP, HEX_REG_SP); @@ -211,7 +235,10 @@ static void mark_implicit_writes(DisasContext *ctx, Insn *insn) mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA0, HEX_REG_SA0); mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LC1, HEX_REG_LC1); mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA1, HEX_REG_SA1); +} +static void mark_implicit_pred_writes(DisasContext *ctx, Insn *insn) +{ mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P0, 0); mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P1, 1); mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P2, 2); @@ -222,11 +249,11 @@ static void gen_insn(CPUHexagonState *env, DisasContext *ctx, Insn *insn, Packet *pkt) { if (insn->generate) { - mark_implicit_writes(ctx, insn); + mark_implicit_reg_writes(ctx, insn); insn->generate(env, ctx, insn, pkt); + mark_implicit_pred_writes(ctx, insn); } else { - gen_exception(HEX_EXCP_INVALID_OPCODE); - ctx->base.is_jmp = 
DISAS_NORETURN; + gen_exception_end_tb(ctx, HEX_EXCP_INVALID_OPCODE); } } @@ -280,10 +307,11 @@ static void gen_pred_writes(DisasContext *ctx, Packet *pkt) for (i = 0; i < ctx->preg_log_idx; i++) { int pred_num = ctx->preg_log[i]; tcg_gen_mov_tl(hex_pred[pred_num], hex_new_pred_value[pred_num]); -#if HEX_DEBUG - /* Do this so HELPER(debug_commit_end) will know */ - tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pred_num); -#endif + if (HEX_DEBUG) { + /* Do this so HELPER(debug_commit_end) will know */ + tcg_gen_ori_tl(hex_pred_written, hex_pred_written, + 1 << pred_num); + } } } @@ -292,20 +320,16 @@ static void gen_pred_writes(DisasContext *ctx, Packet *pkt) tcg_temp_free(pval); } -#if HEX_DEBUG -static inline void gen_check_store_width(DisasContext *ctx, int slot_num) +static void gen_check_store_width(DisasContext *ctx, int slot_num) { - TCGv slot = tcg_const_tl(slot_num); - TCGv check = tcg_const_tl(ctx->store_width[slot_num]); - gen_helper_debug_check_store_width(cpu_env, slot, check); - tcg_temp_free(slot); - tcg_temp_free(check); + if (HEX_DEBUG) { + TCGv slot = tcg_const_tl(slot_num); + TCGv check = tcg_const_tl(ctx->store_width[slot_num]); + gen_helper_debug_check_store_width(cpu_env, slot, check); + tcg_temp_free(slot); + tcg_temp_free(check); + } } -#define HEX_DEBUG_GEN_CHECK_STORE_WIDTH(ctx, slot_num) \ - gen_check_store_width(ctx, slot_num) -#else -#define HEX_DEBUG_GEN_CHECK_STORE_WIDTH(ctx, slot_num) /* nothing */ -#endif static bool slot_is_predicated(Packet *pkt, int slot_num) { @@ -330,7 +354,7 @@ void process_store(DisasContext *ctx, Packet *pkt, int slot_num) if (slot_num == 1 && ctx->s1_store_processed) { return; } - ctx->s1_store_processed = 1; + ctx->s1_store_processed = true; if (is_predicated) { TCGv cancelled = tcg_temp_new(); @@ -355,25 +379,25 @@ void process_store(DisasContext *ctx, Packet *pkt, int slot_num) */ switch (ctx->store_width[slot_num]) { case 1: - HEX_DEBUG_GEN_CHECK_STORE_WIDTH(ctx, slot_num); + gen_check_store_width(ctx, slot_num); tcg_gen_qemu_st8(hex_store_val32[slot_num], hex_store_addr[slot_num], ctx->mem_idx); break; case 2: - HEX_DEBUG_GEN_CHECK_STORE_WIDTH(ctx, slot_num); + gen_check_store_width(ctx, slot_num); tcg_gen_qemu_st16(hex_store_val32[slot_num], hex_store_addr[slot_num], ctx->mem_idx); break; case 4: - HEX_DEBUG_GEN_CHECK_STORE_WIDTH(ctx, slot_num); + gen_check_store_width(ctx, slot_num); tcg_gen_qemu_st32(hex_store_val32[slot_num], hex_store_addr[slot_num], ctx->mem_idx); break; case 8: - HEX_DEBUG_GEN_CHECK_STORE_WIDTH(ctx, slot_num); + gen_check_store_width(ctx, slot_num); tcg_gen_qemu_st64(hex_store_val64[slot_num], hex_store_addr[slot_num], ctx->mem_idx); @@ -451,14 +475,6 @@ static void update_exec_counters(DisasContext *ctx, Packet *pkt) ctx->num_insns += num_real_insns; } -static void gen_exec_counters(DisasContext *ctx) -{ - tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_PKT_CNT], - hex_gpr[HEX_REG_QEMU_PKT_CNT], ctx->num_packets); - tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_INSN_CNT], - hex_gpr[HEX_REG_QEMU_INSN_CNT], ctx->num_insns); -} - static void gen_commit_packet(DisasContext *ctx, Packet *pkt) { gen_reg_writes(ctx); @@ -466,8 +482,7 @@ static void gen_commit_packet(DisasContext *ctx, Packet *pkt) process_store_log(ctx, pkt); process_dczeroa(ctx, pkt); update_exec_counters(ctx, pkt); -#if HEX_DEBUG - { + if (HEX_DEBUG) { TCGv has_st0 = tcg_const_tl(pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa); TCGv has_st1 = @@ -479,10 +494,9 @@ static void gen_commit_packet(DisasContext *ctx, Packet *pkt) tcg_temp_free(has_st0); 
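The debug-only paths are now guarded by an ordinary if (HEX_DEBUG) rather than #if HEX_DEBUG, so the debug code is always compiled and type-checked but eliminated as dead code when HEX_DEBUG is 0. The same idiom in isolation, with fprintf standing in for qemu_log:

#include <stdio.h>

#define HEX_DEBUG 0

#define HEX_DEBUG_LOG(...) \
    do { \
        if (HEX_DEBUG) { \
            fprintf(stderr, __VA_ARGS__); \
        } \
    } while (0)

int main(void)
{
    /* Always parsed and checked; the call is removed when HEX_DEBUG is 0. */
    HEX_DEBUG_LOG("value = %d\n", 42);
    return 0;
}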
tcg_temp_free(has_st1); } -#endif if (pkt->pkt_has_cof) { - ctx->base.is_jmp = DISAS_NORETURN; + gen_end_tb(ctx); } } @@ -495,8 +509,7 @@ static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx) nwords = read_packet_words(env, ctx, words); if (!nwords) { - gen_exception(HEX_EXCP_INVALID_PACKET); - ctx->base.is_jmp = DISAS_NORETURN; + gen_exception_end_tb(ctx, HEX_EXCP_INVALID_PACKET); return; } @@ -509,8 +522,7 @@ static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx) gen_commit_packet(ctx, &pkt); ctx->base.pc_next += pkt.encod_pkt_size_in_bytes; } else { - gen_exception(HEX_EXCP_INVALID_PACKET); - ctx->base.is_jmp = DISAS_NORETURN; + gen_exception_end_tb(ctx, HEX_EXCP_INVALID_PACKET); } } @@ -540,9 +552,7 @@ static bool hexagon_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, { DisasContext *ctx = container_of(dcbase, DisasContext, base); - tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next); - ctx->base.is_jmp = DISAS_NORETURN; - gen_exception_debug(); + gen_exception_end_tb(ctx, EXCP_DEBUG); /* * The address covered by the breakpoint must be included in * [tb->pc, tb->pc + tb->size) in order to for it to be @@ -589,14 +599,10 @@ static void hexagon_tr_translate_packet(DisasContextBase *dcbase, CPUState *cpu) * The CPU log is used to compare against LLDB single stepping, * so end the TLB after every packet. */ - HexagonCPU *hex_cpu = container_of(env, HexagonCPU, env); + HexagonCPU *hex_cpu = env_archcpu(env); if (hex_cpu->lldb_compat && qemu_loglevel_mask(CPU_LOG_TB_CPU)) { ctx->base.is_jmp = DISAS_TOO_MANY; } -#if HEX_DEBUG - /* When debugging, only put one packet per TB */ - ctx->base.is_jmp = DISAS_TOO_MANY; -#endif } } @@ -609,19 +615,12 @@ static void hexagon_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) gen_exec_counters(ctx); tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next); if (ctx->base.singlestep_enabled) { - gen_exception_debug(); + gen_exception_raw(EXCP_DEBUG); } else { tcg_gen_exit_tb(NULL, 0); } break; case DISAS_NORETURN: - gen_exec_counters(ctx); - tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC); - if (ctx->base.singlestep_enabled) { - gen_exception_debug(); - } else { - tcg_gen_exit_tb(NULL, 0); - } break; default: g_assert_not_reached(); @@ -654,9 +653,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) #define NAME_LEN 64 static char new_value_names[TOTAL_PER_THREAD_REGS][NAME_LEN]; -#if HEX_DEBUG static char reg_written_names[TOTAL_PER_THREAD_REGS][NAME_LEN]; -#endif static char new_pred_value_names[NUM_PREGS][NAME_LEN]; static char store_addr_names[STORES_MAX][NAME_LEN]; static char store_width_names[STORES_MAX][NAME_LEN]; @@ -669,11 +666,11 @@ void hexagon_translate_init(void) opcode_init(); -#if HEX_DEBUG - if (!qemu_logfile) { - qemu_set_log(qemu_loglevel); + if (HEX_DEBUG) { + if (!qemu_logfile) { + qemu_set_log(qemu_loglevel); + } } -#endif for (i = 0; i < TOTAL_PER_THREAD_REGS; i++) { hex_gpr[i] = tcg_global_mem_new(cpu_env, @@ -685,13 +682,13 @@ void hexagon_translate_init(void) offsetof(CPUHexagonState, new_value[i]), new_value_names[i]); -#if HEX_DEBUG - snprintf(reg_written_names[i], NAME_LEN, "reg_written_%s", - hexagon_regnames[i]); - hex_reg_written[i] = tcg_global_mem_new(cpu_env, - offsetof(CPUHexagonState, reg_written[i]), - reg_written_names[i]); -#endif + if (HEX_DEBUG) { + snprintf(reg_written_names[i], NAME_LEN, "reg_written_%s", + hexagon_regnames[i]); + hex_reg_written[i] = tcg_global_mem_new(cpu_env, + offsetof(CPUHexagonState, 
reg_written[i]), + reg_written_names[i]); + } } for (i = 0; i < NUM_PREGS; i++) { hex_pred[i] = tcg_global_mem_new(cpu_env, diff --git a/target/hexagon/translate.h b/target/hexagon/translate.h index 938f7fbb9f..703fd1345f 100644 --- a/target/hexagon/translate.h +++ b/target/hexagon/translate.h @@ -34,17 +34,16 @@ typedef struct DisasContext { DECLARE_BITMAP(regs_written, TOTAL_PER_THREAD_REGS); int preg_log[PRED_WRITES_MAX]; int preg_log_idx; + DECLARE_BITMAP(pregs_written, NUM_PREGS); uint8_t store_width[STORES_MAX]; - uint8_t s1_store_processed; + bool s1_store_processed; } DisasContext; static inline void ctx_log_reg_write(DisasContext *ctx, int rnum) { -#if HEX_DEBUG if (test_bit(rnum, ctx->regs_written)) { HEX_DEBUG_LOG("WARNING: Multiple writes to r%d\n", rnum); } -#endif ctx->reg_log[ctx->reg_log_idx] = rnum; ctx->reg_log_idx++; set_bit(rnum, ctx->regs_written); @@ -60,6 +59,7 @@ static inline void ctx_log_pred_write(DisasContext *ctx, int pnum) { ctx->preg_log[ctx->preg_log_idx] = pnum; ctx->preg_log_idx++; + set_bit(pnum, ctx->pregs_written); } static inline bool is_preloaded(DisasContext *ctx, int num) @@ -86,8 +86,5 @@ extern TCGv hex_llsc_addr; extern TCGv hex_llsc_val; extern TCGv_i64 hex_llsc_val_i64; -void gen_exception(int excp); -void gen_exception_debug(void); - void process_store(DisasContext *ctx, Packet *pkt, int slot_num); #endif diff --git a/target/i386/cpu-internal.h b/target/i386/cpu-internal.h new file mode 100644 index 0000000000..9baac5c0b4 --- /dev/null +++ b/target/i386/cpu-internal.h @@ -0,0 +1,70 @@ +/* + * i386 CPU internal definitions to be shared between cpu.c and cpu-sysemu.c + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef I386_CPU_INTERNAL_H +#define I386_CPU_INTERNAL_H + +typedef enum FeatureWordType { + CPUID_FEATURE_WORD, + MSR_FEATURE_WORD, +} FeatureWordType; + +typedef struct FeatureWordInfo { + FeatureWordType type; + /* feature flags names are taken from "Intel Processor Identification and + * the CPUID Instruction" and AMD's "CPUID Specification". + * In cases of disagreement between feature naming conventions, + * aliases may be added. 
+ */ + const char *feat_names[64]; + union { + /* If type==CPUID_FEATURE_WORD */ + struct { + uint32_t eax; /* Input EAX for CPUID */ + bool needs_ecx; /* CPUID instruction uses ECX as input */ + uint32_t ecx; /* Input ECX value for CPUID */ + int reg; /* output register (R_* constant) */ + } cpuid; + /* If type==MSR_FEATURE_WORD */ + struct { + uint32_t index; + } msr; + }; + uint64_t tcg_features; /* Feature flags supported by TCG */ + uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */ + uint64_t migratable_flags; /* Feature flags known to be migratable */ + /* Features that shouldn't be auto-enabled by "-cpu host" */ + uint64_t no_autoenable_flags; +} FeatureWordInfo; + +extern FeatureWordInfo feature_word_info[]; + +void x86_cpu_expand_features(X86CPU *cpu, Error **errp); + +#ifndef CONFIG_USER_ONLY +GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs); +void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp); + +void x86_cpu_apic_create(X86CPU *cpu, Error **errp); +void x86_cpu_apic_realize(X86CPU *cpu, Error **errp); +void x86_cpu_machine_reset_cb(void *opaque); +#endif /* !CONFIG_USER_ONLY */ + +#endif /* I386_CPU_INTERNAL_H */ diff --git a/target/i386/cpu-sysemu.c b/target/i386/cpu-sysemu.c new file mode 100644 index 0000000000..6477584313 --- /dev/null +++ b/target/i386/cpu-sysemu.c @@ -0,0 +1,352 @@ +/* + * i386 CPUID, CPU class, definitions, models: sysemu-only code + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "sysemu/xen.h" +#include "sysemu/whpx.h" +#include "kvm/kvm_i386.h" +#include "qapi/error.h" +#include "qapi/qapi-visit-run-state.h" +#include "qapi/qmp/qdict.h" +#include "qom/qom-qobject.h" +#include "qapi/qapi-commands-machine-target.h" +#include "hw/qdev-properties.h" + +#include "exec/address-spaces.h" +#include "hw/i386/apic_internal.h" + +#include "cpu-internal.h" + +/* Return a QDict containing keys for all properties that can be included + * in static expansion of CPU models. All properties set by x86_cpu_load_model() + * must be included in the dictionary. + */ +static QDict *x86_cpu_static_props(void) +{ + FeatureWord w; + int i; + static const char *props[] = { + "min-level", + "min-xlevel", + "family", + "model", + "stepping", + "model-id", + "vendor", + "lmce", + NULL, + }; + static QDict *d; + + if (d) { + return d; + } + + d = qdict_new(); + for (i = 0; props[i]; i++) { + qdict_put_null(d, props[i]); + } + + for (w = 0; w < FEATURE_WORDS; w++) { + FeatureWordInfo *fi = &feature_word_info[w]; + int bit; + for (bit = 0; bit < 64; bit++) { + if (!fi->feat_names[bit]) { + continue; + } + qdict_put_null(d, fi->feat_names[bit]); + } + } + + return d; +} + +/* Add an entry to @props dict, with the value for property. 
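FeatureWordInfo records where each feature word comes from: either a CPUID leaf (with an optional ECX subleaf and an output register) or an MSR index. An illustrative standalone lookup for the CPUID case, using the GCC/Clang cpuid.h helper and a local struct instead of QEMU's; the register encoding below is arbitrary, not QEMU's R_* constants:

#include <stdint.h>
#include <stdio.h>
#include <cpuid.h>

/* Illustrative descriptor mirroring the CPUID half of FeatureWordInfo. */
struct cpuid_desc {
    uint32_t eax;        /* input leaf */
    int needs_ecx;       /* whether the subleaf matters */
    uint32_t ecx;        /* input subleaf */
    int reg;             /* 0=EAX, 1=EBX, 2=ECX, 3=EDX (arbitrary encoding) */
};

static uint32_t read_feature_word(const struct cpuid_desc *d)
{
    unsigned int regs[4] = { 0, 0, 0, 0 };

    if (!__get_cpuid_count(d->eax, d->needs_ecx ? d->ecx : 0,
                           &regs[0], &regs[1], &regs[2], &regs[3])) {
        return 0;        /* leaf not supported by this host */
    }
    return regs[d->reg];
}

int main(void)
{
    /* Leaf 7, subleaf 0, EBX: structured extended feature flags */
    struct cpuid_desc d = { .eax = 7, .needs_ecx = 1, .ecx = 0, .reg = 1 };
    printf("0x%08x\n", read_feature_word(&d));
    return 0;
}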
*/ +static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) +{ + QObject *value = object_property_get_qobject(OBJECT(cpu), prop, + &error_abort); + + qdict_put_obj(props, prop, value); +} + +/* Convert CPU model data from X86CPU object to a property dictionary + * that can recreate exactly the same CPU model. + */ +static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) +{ + QDict *sprops = x86_cpu_static_props(); + const QDictEntry *e; + + for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { + const char *prop = qdict_entry_key(e); + x86_cpu_expand_prop(cpu, props, prop); + } +} + +/* Convert CPU model data from X86CPU object to a property dictionary + * that can recreate exactly the same CPU model, including every + * writeable QOM property. + */ +static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) +{ + ObjectPropertyIterator iter; + ObjectProperty *prop; + + object_property_iter_init(&iter, OBJECT(cpu)); + while ((prop = object_property_iter_next(&iter))) { + /* skip read-only or write-only properties */ + if (!prop->get || !prop->set) { + continue; + } + + /* "hotplugged" is the only property that is configurable + * on the command-line but will be set differently on CPUs + * created using "-cpu ... -smp ..." and by CPUs created + * on the fly by x86_cpu_from_model() for querying. Skip it. + */ + if (!strcmp(prop->name, "hotplugged")) { + continue; + } + x86_cpu_expand_prop(cpu, props, prop->name); + } +} + +static void object_apply_props(Object *obj, QDict *props, Error **errp) +{ + const QDictEntry *prop; + + for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { + if (!object_property_set_qobject(obj, qdict_entry_key(prop), + qdict_entry_value(prop), errp)) { + break; + } + } +} + +/* Create X86CPU object according to model+props specification */ +static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) +{ + X86CPU *xc = NULL; + X86CPUClass *xcc; + Error *err = NULL; + + xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); + if (xcc == NULL) { + error_setg(&err, "CPU model '%s' not found", model); + goto out; + } + + xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); + if (props) { + object_apply_props(OBJECT(xc), props, &err); + if (err) { + goto out; + } + } + + x86_cpu_expand_features(xc, &err); + if (err) { + goto out; + } + +out: + if (err) { + error_propagate(errp, err); + object_unref(OBJECT(xc)); + xc = NULL; + } + return xc; +} + +CpuModelExpansionInfo * +qmp_query_cpu_model_expansion(CpuModelExpansionType type, + CpuModelInfo *model, + Error **errp) +{ + X86CPU *xc = NULL; + Error *err = NULL; + CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); + QDict *props = NULL; + const char *base_name; + + xc = x86_cpu_from_model(model->name, + model->has_props ? + qobject_to(QDict, model->props) : + NULL, &err); + if (err) { + goto out; + } + + props = qdict_new(); + ret->model = g_new0(CpuModelInfo, 1); + ret->model->props = QOBJECT(props); + ret->model->has_props = true; + + switch (type) { + case CPU_MODEL_EXPANSION_TYPE_STATIC: + /* Static expansion will be based on "base" only */ + base_name = "base"; + x86_cpu_to_dict(xc, props); + break; + case CPU_MODEL_EXPANSION_TYPE_FULL: + /* As we don't return every single property, full expansion needs + * to keep the original model name+props, and add extra + * properties on top of that. 
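Taken together, the helpers above let the QMP code build a throwaway CPU object and read its effective configuration back as a dictionary. A minimal, hedged sketch of that flow (the model name "Skylake-Client" and the "avx512f" tweak are examples only, and error handling is trimmed):

static void example_static_expansion(Error **errp)
{
    QDict *in_props = qdict_new();
    QDict *out_props = qdict_new();
    X86CPU *xc;

    qdict_put_bool(in_props, "avx512f", false);      /* user-requested tweak */
    xc = x86_cpu_from_model("Skylake-Client", in_props, errp);
    if (xc) {
        x86_cpu_to_dict(xc, out_props);              /* static-props view */
        object_unref(OBJECT(xc));
    }
    qobject_unref(in_props);
    qobject_unref(out_props);
}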
+ */ + base_name = model->name; + x86_cpu_to_dict_full(xc, props); + break; + default: + error_setg(&err, "Unsupported expansion type"); + goto out; + } + + x86_cpu_to_dict(xc, props); + + ret->model->name = g_strdup(base_name); + +out: + object_unref(OBJECT(xc)); + if (err) { + error_propagate(errp, err); + qapi_free_CpuModelExpansionInfo(ret); + ret = NULL; + } + return ret; +} + +void cpu_clear_apic_feature(CPUX86State *env) +{ + env->features[FEAT_1_EDX] &= ~CPUID_APIC; +} + +bool cpu_is_bsp(X86CPU *cpu) +{ + return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; +} + +/* TODO: remove me, when reset over QOM tree is implemented */ +void x86_cpu_machine_reset_cb(void *opaque) +{ + X86CPU *cpu = opaque; + cpu_reset(CPU(cpu)); +} + +APICCommonClass *apic_get_class(void) +{ + const char *apic_type = "apic"; + + /* TODO: in-kernel irqchip for hvf */ + if (kvm_apic_in_kernel()) { + apic_type = "kvm-apic"; + } else if (xen_enabled()) { + apic_type = "xen-apic"; + } else if (whpx_apic_in_platform()) { + apic_type = "whpx-apic"; + } + + return APIC_COMMON_CLASS(object_class_by_name(apic_type)); +} + +void x86_cpu_apic_create(X86CPU *cpu, Error **errp) +{ + APICCommonState *apic; + ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); + + cpu->apic_state = DEVICE(object_new_with_class(apic_class)); + + object_property_add_child(OBJECT(cpu), "lapic", + OBJECT(cpu->apic_state)); + object_unref(OBJECT(cpu->apic_state)); + + qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); + /* TODO: convert to link<> */ + apic = APIC_COMMON(cpu->apic_state); + apic->cpu = cpu; + apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; +} + +void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) +{ + APICCommonState *apic; + static bool apic_mmio_map_once; + + if (cpu->apic_state == NULL) { + return; + } + qdev_realize(DEVICE(cpu->apic_state), NULL, errp); + + /* Map APIC MMIO area */ + apic = APIC_COMMON(cpu->apic_state); + if (!apic_mmio_map_once) { + memory_region_add_subregion_overlap(get_system_memory(), + apic->apicbase & + MSR_IA32_APICBASE_BASE, + &apic->io_memory, + 0x1000); + apic_mmio_map_once = true; + } +} + +GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + GuestPanicInformation *panic_info = NULL; + + if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { + panic_info = g_malloc0(sizeof(GuestPanicInformation)); + + panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; + + assert(HV_CRASH_PARAMS >= 5); + panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; + panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; + panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; + panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; + panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; + } + + return panic_info; +} +void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + CPUState *cs = CPU(obj); + GuestPanicInformation *panic_info; + + if (!cs->crash_occurred) { + error_setg(errp, "No crash occurred"); + return; + } + + panic_info = x86_cpu_get_crash_info(cs); + if (panic_info == NULL) { + error_setg(errp, "No crash information"); + return; + } + + visit_type_GuestPanicInformation(v, "crash-information", &panic_info, + errp); + qapi_free_GuestPanicInformation(panic_info); +} + diff --git a/target/i386/cpu.c b/target/i386/cpu.c index ad99cad0e7..c496bfa1c2 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@
-1,5 +1,5 @@ /* - * i386 CPUID helper functions + * i386 CPUID, CPU class, definitions, models * * Copyright (c) 2003 Fabrice Bellard * @@ -20,49 +20,26 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qemu/cutils.h" -#include "qemu/bitops.h" #include "qemu/qemu-print.h" - #include "cpu.h" -#include "tcg/tcg-cpu.h" #include "tcg/helper-tcg.h" -#include "exec/exec-all.h" -#include "sysemu/kvm.h" #include "sysemu/reset.h" #include "sysemu/hvf.h" -#include "sysemu/cpus.h" -#include "sysemu/xen.h" -#include "sysemu/whpx.h" #include "kvm/kvm_i386.h" #include "sev_i386.h" - -#include "qemu/error-report.h" -#include "qemu/module.h" -#include "qemu/option.h" -#include "qemu/config-file.h" -#include "qapi/error.h" #include "qapi/qapi-visit-machine.h" -#include "qapi/qapi-visit-run-state.h" -#include "qapi/qmp/qdict.h" #include "qapi/qmp/qerror.h" -#include "qapi/visitor.h" -#include "qom/qom-qobject.h" -#include "sysemu/arch_init.h" #include "qapi/qapi-commands-machine-target.h" - #include "standard-headers/asm-x86/kvm_para.h" - -#include "sysemu/sysemu.h" -#include "sysemu/tcg.h" #include "hw/qdev-properties.h" #include "hw/i386/topology.h" #ifndef CONFIG_USER_ONLY #include "exec/address-spaces.h" -#include "hw/i386/apic_internal.h" #include "hw/boards.h" #endif #include "disas/capstone.h" +#include "cpu-internal.h" /* Helpers for building CPUID[2] descriptors: */ @@ -595,8 +572,8 @@ static CPUCacheInfo legacy_l3_cache = { #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ -static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, - uint32_t vendor2, uint32_t vendor3) +void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, + uint32_t vendor2, uint32_t vendor3) { int i; for (i = 0; i < 4; i++) { @@ -677,40 +654,7 @@ static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ #define TCG_14_0_ECX_FEATURES 0 -typedef enum FeatureWordType { - CPUID_FEATURE_WORD, - MSR_FEATURE_WORD, -} FeatureWordType; - -typedef struct FeatureWordInfo { - FeatureWordType type; - /* feature flags names are taken from "Intel Processor Identification and - * the CPUID Instruction" and AMD's "CPUID Specification". - * In cases of disagreement between feature naming conventions, - * aliases may be added. 
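x86_cpu_vendor_words2str(), now exported by the hunk above, simply lays out the twelve vendor bytes in EBX, EDX, ECX order. As a worked example, an Intel host returns EBX=0x756e6547 ("Genu"), EDX=0x49656e69 ("ineI") and ECX=0x6c65746e ("ntel") from CPUID leaf 0, which concatenate to "GenuineIntel". A small usage sketch, as if it lived next to the function in cpu.c:

static void example_print_host_vendor(void)
{
    uint32_t ebx, ecx, edx;
    char vendor[CPUID_VENDOR_SZ + 1] = { 0 };

    host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
    /* e.g. ebx=0x756e6547 edx=0x49656e69 ecx=0x6c65746e -> "GenuineIntel" */
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
    printf("host vendor: %s\n", vendor);
}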
- */ - const char *feat_names[64]; - union { - /* If type==CPUID_FEATURE_WORD */ - struct { - uint32_t eax; /* Input EAX for CPUID */ - bool needs_ecx; /* CPUID instruction uses ECX as input */ - uint32_t ecx; /* Input ECX value for CPUID */ - int reg; /* output register (R_* constant) */ - } cpuid; - /* If type==MSR_FEATURE_WORD */ - struct { - uint32_t index; - } msr; - }; - uint64_t tcg_features; /* Feature flags supported by TCG */ - uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */ - uint64_t migratable_flags; /* Feature flags known to be migratable */ - /* Features that shouldn't be auto-enabled by "-cpu host" */ - uint64_t no_autoenable_flags; -} FeatureWordInfo; - -static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { +FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_1_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { @@ -1589,25 +1533,6 @@ void host_cpuid(uint32_t function, uint32_t count, *edx = vec[3]; } -void host_vendor_fms(char *vendor, int *family, int *model, int *stepping) -{ - uint32_t eax, ebx, ecx, edx; - - host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); - x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); - - host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); - if (family) { - *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); - } - if (model) { - *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); - } - if (stepping) { - *stepping = eax & 0x0F; - } -} - /* CPU class name definitions: */ /* Return type name for a given CPU model name @@ -1632,10 +1557,6 @@ static char *x86_cpu_class_get_model_name(X86CPUClass *cc) strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX)); } -typedef struct PropValue { - const char *prop, *value; -} PropValue; - typedef struct X86CPUVersionDefinition { X86CPUVersion version; const char *alias; @@ -4249,32 +4170,6 @@ static X86CPUDefinition builtin_x86_defs[] = { }, }; -/* KVM-specific features that are automatically added/removed - * from all CPU models when KVM is enabled. - */ -static PropValue kvm_default_props[] = { - { "kvmclock", "on" }, - { "kvm-nopiodelay", "on" }, - { "kvm-asyncpf", "on" }, - { "kvm-steal-time", "on" }, - { "kvm-pv-eoi", "on" }, - { "kvmclock-stable-bit", "on" }, - { "x2apic", "on" }, - { "kvm-msi-ext-dest-id", "off" }, - { "acpi", "off" }, - { "monitor", "off" }, - { "svm", "off" }, - { NULL, NULL }, -}; - -/* TCG-specific defaults that override all CPU models when using TCG - */ -static PropValue tcg_default_props[] = { - { "vme", "off" }, - { NULL, NULL }, -}; - - /* * We resolve CPU model aliases using -v1 when using "-machine * none", but this is just for compatibility while libvirt isn't @@ -4316,61 +4211,6 @@ static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) return v; } -void x86_cpu_change_kvm_default(const char *prop, const char *value) -{ - PropValue *pv; - for (pv = kvm_default_props; pv->prop; pv++) { - if (!strcmp(pv->prop, prop)) { - pv->value = value; - break; - } - } - - /* It is valid to call this function only for properties that - * are already present in the kvm_default_props table. - */ - assert(pv->prop); -} - -static bool lmce_supported(void) -{ - uint64_t mce_cap = 0; - -#ifdef CONFIG_KVM - if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { - return false; - } -#endif - - return !!(mce_cap & MCG_LMCE_P); -} - -#define CPUID_MODEL_ID_SZ 48 - -/** - * cpu_x86_fill_model_id: - * Get CPUID model ID string from host CPU. 
- * - * @str should have at least CPUID_MODEL_ID_SZ bytes - * - * The function does NOT add a null terminator to the string - * automatically. - */ -static int cpu_x86_fill_model_id(char *str) -{ - uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; - int i; - - for (i = 0; i < 3; i++) { - host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); - memcpy(str + i * 16 + 0, &eax, 4); - memcpy(str + i * 16 + 4, &ebx, 4); - memcpy(str + i * 16 + 8, &ecx, 4); - memcpy(str + i * 16 + 12, &edx, 4); - } - return 0; -} - static Property max_x86_cpu_properties[] = { DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), @@ -4393,62 +4233,25 @@ static void max_x86_cpu_class_init(ObjectClass *oc, void *data) static void max_x86_cpu_initfn(Object *obj) { X86CPU *cpu = X86_CPU(obj); - CPUX86State *env = &cpu->env; - KVMState *s = kvm_state; /* We can't fill the features array here because we don't know yet if * "migratable" is true or false. */ cpu->max_features = true; - - if (accel_uses_host_cpuid()) { - char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; - char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; - int family, model, stepping; - - host_vendor_fms(vendor, &family, &model, &stepping); - cpu_x86_fill_model_id(model_id); - - object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); - object_property_set_int(OBJECT(cpu), "family", family, &error_abort); - object_property_set_int(OBJECT(cpu), "model", model, &error_abort); - object_property_set_int(OBJECT(cpu), "stepping", stepping, - &error_abort); - object_property_set_str(OBJECT(cpu), "model-id", model_id, - &error_abort); - - if (kvm_enabled()) { - env->cpuid_min_level = - kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); - env->cpuid_min_xlevel = - kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); - env->cpuid_min_xlevel2 = - kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); - } else { - env->cpuid_min_level = - hvf_get_supported_cpuid(0x0, 0, R_EAX); - env->cpuid_min_xlevel = - hvf_get_supported_cpuid(0x80000000, 0, R_EAX); - env->cpuid_min_xlevel2 = - hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); - } - - if (lmce_supported()) { - object_property_set_bool(OBJECT(cpu), "lmce", true, &error_abort); - } - object_property_set_bool(OBJECT(cpu), "host-phys-bits", true, &error_abort); - } else { - object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD, - &error_abort); - object_property_set_int(OBJECT(cpu), "family", 6, &error_abort); - object_property_set_int(OBJECT(cpu), "model", 6, &error_abort); - object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort); - object_property_set_str(OBJECT(cpu), "model-id", - "QEMU TCG CPU version " QEMU_HW_VERSION, - &error_abort); - } - object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort); + + /* + * these defaults are used for TCG and all other accelerators + * besides KVM and HVF, which overwrite these values + */ + object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD, + &error_abort); + object_property_set_int(OBJECT(cpu), "family", 6, &error_abort); + object_property_set_int(OBJECT(cpu), "model", 6, &error_abort); + object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort); + object_property_set_str(OBJECT(cpu), "model-id", + "QEMU TCG CPU version " QEMU_HW_VERSION, + &error_abort); } static const TypeInfo max_x86_cpu_type_info = { @@ -4458,31 +4261,6 @@ static const TypeInfo max_x86_cpu_type_info = { .class_init = max_x86_cpu_class_init, }; -#if defined(CONFIG_KVM) || 
defined(CONFIG_HVF) -static void host_x86_cpu_class_init(ObjectClass *oc, void *data) -{ - X86CPUClass *xcc = X86_CPU_CLASS(oc); - - xcc->host_cpuid_required = true; - xcc->ordering = 8; - -#if defined(CONFIG_KVM) - xcc->model_description = - "KVM processor with all supported host features "; -#elif defined(CONFIG_HVF) - xcc->model_description = - "HVF processor with all supported host features "; -#endif -} - -static const TypeInfo host_x86_cpu_type_info = { - .name = X86_CPU_TYPE_NAME("host"), - .parent = X86_CPU_TYPE_NAME("max"), - .class_init = host_x86_cpu_class_init, -}; - -#endif - static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) { assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); @@ -4930,7 +4708,6 @@ static void x86_cpu_parse_featurestr(const char *typename, char *features, } } -static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); /* Build a list with the name of all features on a feature word array */ @@ -5201,7 +4978,7 @@ static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, return r; } -static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) +void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) { PropValue *pv; for (pv = props; pv->prop; pv++) { @@ -5248,8 +5025,6 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) { X86CPUDefinition *def = model->cpudef; CPUX86State *env = &cpu->env; - const char *vendor; - char host_vendor[CPUID_VENDOR_SZ + 1]; FeatureWord w; /*NOTE: any property set by this function should be returned by @@ -5276,20 +5051,6 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) /* legacy-cache defaults to 'off' if CPU model provides cache info */ cpu->legacy_cache = !def->cache_info; - /* Special cases not set in the X86CPUDefinition structs: */ - /* TODO: in-kernel irqchip for hvf */ - if (kvm_enabled()) { - if (!kvm_irqchip_in_kernel()) { - x86_cpu_change_kvm_default("x2apic", "off"); - } else if (kvm_irqchip_is_split() && kvm_enable_x2apic()) { - x86_cpu_change_kvm_default("kvm-msi-ext-dest-id", "on"); - } - - x86_cpu_apply_props(cpu, kvm_default_props); - } else if (tcg_enabled()) { - x86_cpu_apply_props(cpu, tcg_default_props); - } - env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; /* sysenter isn't supported in compatibility mode on AMD, @@ -5299,15 +5060,12 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) * KVM's sysenter/syscall emulation in compatibility mode and * when doing cross vendor migration */ - vendor = def->vendor; - if (accel_uses_host_cpuid()) { - uint32_t ebx = 0, ecx = 0, edx = 0; - host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); - x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); - vendor = host_vendor; - } - object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); + /* + * vendor property is set here but then overloaded with the + * host cpu vendor for KVM and HVF. + */ + object_property_set_str(OBJECT(cpu), "vendor", def->vendor, &error_abort); x86_cpu_apply_version_props(cpu, model); @@ -5319,207 +5077,6 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) memset(&env->user_features, 0, sizeof(env->user_features)); } -#ifndef CONFIG_USER_ONLY -/* Return a QDict containing keys for all properties that can be included - * in static expansion of CPU models. All properties set by x86_cpu_load_model() - * must be included in the dictionary. 
- */ -static QDict *x86_cpu_static_props(void) -{ - FeatureWord w; - int i; - static const char *props[] = { - "min-level", - "min-xlevel", - "family", - "model", - "stepping", - "model-id", - "vendor", - "lmce", - NULL, - }; - static QDict *d; - - if (d) { - return d; - } - - d = qdict_new(); - for (i = 0; props[i]; i++) { - qdict_put_null(d, props[i]); - } - - for (w = 0; w < FEATURE_WORDS; w++) { - FeatureWordInfo *fi = &feature_word_info[w]; - int bit; - for (bit = 0; bit < 64; bit++) { - if (!fi->feat_names[bit]) { - continue; - } - qdict_put_null(d, fi->feat_names[bit]); - } - } - - return d; -} - -/* Add an entry to @props dict, with the value for property. */ -static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) -{ - QObject *value = object_property_get_qobject(OBJECT(cpu), prop, - &error_abort); - - qdict_put_obj(props, prop, value); -} - -/* Convert CPU model data from X86CPU object to a property dictionary - * that can recreate exactly the same CPU model. - */ -static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) -{ - QDict *sprops = x86_cpu_static_props(); - const QDictEntry *e; - - for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { - const char *prop = qdict_entry_key(e); - x86_cpu_expand_prop(cpu, props, prop); - } -} - -/* Convert CPU model data from X86CPU object to a property dictionary - * that can recreate exactly the same CPU model, including every - * writeable QOM property. - */ -static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) -{ - ObjectPropertyIterator iter; - ObjectProperty *prop; - - object_property_iter_init(&iter, OBJECT(cpu)); - while ((prop = object_property_iter_next(&iter))) { - /* skip read-only or write-only properties */ - if (!prop->get || !prop->set) { - continue; - } - - /* "hotplugged" is the only property that is configurable - * on the command-line but will be set differently on CPUs - * created using "-cpu ... -smp ..." and by CPUs created - * on the fly by x86_cpu_from_model() for querying. Skip it. - */ - if (!strcmp(prop->name, "hotplugged")) { - continue; - } - x86_cpu_expand_prop(cpu, props, prop->name); - } -} - -static void object_apply_props(Object *obj, QDict *props, Error **errp) -{ - const QDictEntry *prop; - - for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { - if (!object_property_set_qobject(obj, qdict_entry_key(prop), - qdict_entry_value(prop), errp)) { - break; - } - } -} - -/* Create X86CPU object according to model+props specification */ -static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) -{ - X86CPU *xc = NULL; - X86CPUClass *xcc; - Error *err = NULL; - - xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); - if (xcc == NULL) { - error_setg(&err, "CPU model '%s' not found", model); - goto out; - } - - xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); - if (props) { - object_apply_props(OBJECT(xc), props, &err); - if (err) { - goto out; - } - } - - x86_cpu_expand_features(xc, &err); - if (err) { - goto out; - } - -out: - if (err) { - error_propagate(errp, err); - object_unref(OBJECT(xc)); - xc = NULL; - } - return xc; -} - -CpuModelExpansionInfo * -qmp_query_cpu_model_expansion(CpuModelExpansionType type, - CpuModelInfo *model, - Error **errp) -{ - X86CPU *xc = NULL; - Error *err = NULL; - CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); - QDict *props = NULL; - const char *base_name; - - xc = x86_cpu_from_model(model->name, - model->has_props ? 
- qobject_to(QDict, model->props) : - NULL, &err); - if (err) { - goto out; - } - - props = qdict_new(); - ret->model = g_new0(CpuModelInfo, 1); - ret->model->props = QOBJECT(props); - ret->model->has_props = true; - - switch (type) { - case CPU_MODEL_EXPANSION_TYPE_STATIC: - /* Static expansion will be based on "base" only */ - base_name = "base"; - x86_cpu_to_dict(xc, props); - break; - case CPU_MODEL_EXPANSION_TYPE_FULL: - /* As we don't return every single property, full expansion needs - * to keep the original model name+props, and add extra - * properties on top of that. - */ - base_name = model->name; - x86_cpu_to_dict_full(xc, props); - break; - default: - error_setg(&err, "Unsupported expansion type"); - goto out; - } - - x86_cpu_to_dict(xc, props); - - ret->model->name = g_strdup(base_name); - -out: - object_unref(OBJECT(xc)); - if (err) { - error_propagate(errp, err); - qapi_free_CpuModelExpansionInfo(ret); - ret = NULL; - } - return ret; -} -#endif /* !CONFIG_USER_ONLY */ - static gchar *x86_gdb_arch_name(CPUState *cs) { #ifdef TARGET_X86_64 @@ -5594,15 +5151,6 @@ static void x86_register_cpudef_types(X86CPUDefinition *def) } -#if !defined(CONFIG_USER_ONLY) - -void cpu_clear_apic_feature(CPUX86State *env) -{ - env->features[FEAT_1_EDX] &= ~CPUID_APIC; -} - -#endif /* !CONFIG_USER_ONLY */ - void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) @@ -6251,20 +5799,6 @@ static void x86_cpu_reset(DeviceState *dev) #endif } -#ifndef CONFIG_USER_ONLY -bool cpu_is_bsp(X86CPU *cpu) -{ - return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; -} - -/* TODO: remove me, when reset over QOM tree is implemented */ -static void x86_cpu_machine_reset_cb(void *opaque) -{ - X86CPU *cpu = opaque; - cpu_reset(CPU(cpu)); -} -#endif - static void mce_init(X86CPU *cpu) { CPUX86State *cenv = &cpu->env; @@ -6282,109 +5816,6 @@ static void mce_init(X86CPU *cpu) } } -#ifndef CONFIG_USER_ONLY -APICCommonClass *apic_get_class(void) -{ - const char *apic_type = "apic"; - - /* TODO: in-kernel irqchip for hvf */ - if (kvm_apic_in_kernel()) { - apic_type = "kvm-apic"; - } else if (xen_enabled()) { - apic_type = "xen-apic"; - } else if (whpx_apic_in_platform()) { - apic_type = "whpx-apic"; - } - - return APIC_COMMON_CLASS(object_class_by_name(apic_type)); -} - -static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) -{ - APICCommonState *apic; - ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); - - cpu->apic_state = DEVICE(object_new_with_class(apic_class)); - - object_property_add_child(OBJECT(cpu), "lapic", - OBJECT(cpu->apic_state)); - object_unref(OBJECT(cpu->apic_state)); - - qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); - /* TODO: convert to link<> */ - apic = APIC_COMMON(cpu->apic_state); - apic->cpu = cpu; - apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; -} - -static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) -{ - APICCommonState *apic; - static bool apic_mmio_map_once; - - if (cpu->apic_state == NULL) { - return; - } - qdev_realize(DEVICE(cpu->apic_state), NULL, errp); - - /* Map APIC MMIO area */ - apic = APIC_COMMON(cpu->apic_state); - if (!apic_mmio_map_once) { - memory_region_add_subregion_overlap(get_system_memory(), - apic->apicbase & - MSR_IA32_APICBASE_BASE, - &apic->io_memory, - 0x1000); - apic_mmio_map_once = true; - } -} - -static void x86_cpu_machine_done(Notifier *n, void *unused) -{ - X86CPU *cpu = container_of(n, X86CPU, machine_done); - MemoryRegion 
*smram = - (MemoryRegion *) object_resolve_path("/machine/smram", NULL); - - if (smram) { - cpu->smram = g_new(MemoryRegion, 1); - memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", - smram, 0, 4 * GiB); - memory_region_set_enabled(cpu->smram, true); - memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); - } -} -#else -static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) -{ -} -#endif - -/* Note: Only safe for use on x86(-64) hosts */ -static uint32_t x86_host_phys_bits(void) -{ - uint32_t eax; - uint32_t host_phys_bits; - - host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); - if (eax >= 0x80000008) { - host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); - /* Note: According to AMD doc 25481 rev 2.34 they have a field - * at 23:16 that can specify a maximum physical address bits for - * the guest that can override this value; but I've not seen - * anything with that set. - */ - host_phys_bits = eax & 0xff; - } else { - /* It's an odd 64 bit machine that doesn't have the leaf for - * physical address bits; fall back to 36 that's most older - * Intel. - */ - host_phys_bits = 36; - } - - return host_phys_bits; -} - static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) { if (*min < value) { @@ -6488,7 +5919,7 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu) /* Expand CPU configuration data, based on configured features * and host/accelerator capabilities when appropriate. */ -static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) +void x86_cpu_expand_features(X86CPU *cpu, Error **errp) { CPUX86State *env = &cpu->env; FeatureWord w; @@ -6702,27 +6133,19 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) Error *local_err = NULL; static bool ht_warned; - if (xcc->host_cpuid_required) { - if (!accel_uses_host_cpuid()) { - g_autofree char *name = x86_cpu_class_get_model_name(xcc); - error_setg(&local_err, "CPU model '%s' requires KVM", name); - goto out; - } + /* Process Hyper-V enlightenments */ + x86_cpu_hyperv_realize(cpu); + + cpu_exec_realizefn(cs, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; } - if (cpu->max_features && accel_uses_host_cpuid()) { - if (enable_cpu_pm) { - host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, - &cpu->mwait.ecx, &cpu->mwait.edx); - env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; - if (kvm_enabled() && kvm_has_waitpkg()) { - env->features[FEAT_7_0_ECX] |= CPUID_7_0_ECX_WAITPKG; - } - } - if (kvm_enabled() && cpu->ucode_rev == 0) { - cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state, - MSR_IA32_UCODE_REV); - } + if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { + g_autofree char *name = x86_cpu_class_get_model_name(xcc); + error_setg(&local_err, "CPU model '%s' requires KVM or HVF", name); + goto out; } if (cpu->ucode_rev == 0) { @@ -6774,30 +6197,6 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) * consumer AMD devices but nothing else. */ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { - if (accel_uses_host_cpuid()) { - uint32_t host_phys_bits = x86_host_phys_bits(); - static bool warned; - - /* Print a warning if the user set it to a value that's not the - * host value. 
- */ - if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && - !warned) { - warn_report("Host physical bits (%u)" - " does not match phys-bits property (%u)", - host_phys_bits, cpu->phys_bits); - warned = true; - } - - if (cpu->host_phys_bits) { - /* The user asked for us to use the host physical bits */ - cpu->phys_bits = host_phys_bits; - if (cpu->host_phys_bits_limit && - cpu->phys_bits > cpu->host_phys_bits_limit) { - cpu->phys_bits = cpu->host_phys_bits_limit; - } - } - } if (cpu->phys_bits && (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || cpu->phys_bits < 32)) { @@ -6806,9 +6205,10 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); return; } - /* 0 means it was not explicitly set by the user (or by machine - * compat_props or by the host code above). In this case, the default - * is the value used by TCG (40). + /* + * 0 means it was not explicitly set by the user (or by machine + * compat_props or by the host code in host-cpu.c). + * In this case, the default is the value used by TCG (40). */ if (cpu->phys_bits == 0) { cpu->phys_bits = TCG_PHYS_ADDR_BITS; @@ -6857,15 +6257,6 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) env->cache_info_amd.l3_cache = &legacy_l3_cache; } - /* Process Hyper-V enlightenments */ - x86_cpu_hyperv_realize(cpu); - - cpu_exec_realizefn(cs, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return; - } - #ifndef CONFIG_USER_ONLY MachineState *ms = MACHINE(qdev_get_machine()); qemu_register_reset(x86_cpu_machine_reset_cb, cpu); @@ -6880,33 +6271,6 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) mce_init(cpu); -#ifndef CONFIG_USER_ONLY - if (tcg_enabled()) { - cpu->cpu_as_mem = g_new(MemoryRegion, 1); - cpu->cpu_as_root = g_new(MemoryRegion, 1); - - /* Outer container... */ - memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull); - memory_region_set_enabled(cpu->cpu_as_root, true); - - /* ... with two regions inside: normal system memory with low - * priority, and... - */ - memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory", - get_system_memory(), 0, ~0ull); - memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0); - memory_region_set_enabled(cpu->cpu_as_mem, true); - - cs->num_ases = 2; - cpu_address_space_init(cs, 0, "cpu-memory", cs->memory); - cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root); - - /* ... SMRAM with higher priority, linked from /machine/smram. 
*/ - cpu->machine_done.notify = x86_cpu_machine_done; - qemu_add_machine_init_done_notifier(&cpu->machine_done); - } -#endif - qemu_init_vcpu(cs); /* @@ -6929,10 +6293,12 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) ht_warned = true; } +#ifndef CONFIG_USER_ONLY x86_cpu_apic_realize(cpu, &local_err); if (local_err != NULL) { goto out; } +#endif /* !CONFIG_USER_ONLY */ cpu_reset(cs); xcc->parent_realize(dev, &local_err); @@ -7056,52 +6422,6 @@ static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc, x86_cpu_register_bit_prop(xcc, name, w, bitnr); } -#if !defined(CONFIG_USER_ONLY) -static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) -{ - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - GuestPanicInformation *panic_info = NULL; - - if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { - panic_info = g_malloc0(sizeof(GuestPanicInformation)); - - panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; - - assert(HV_CRASH_PARAMS >= 5); - panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; - panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; - panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; - panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; - panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; - } - - return panic_info; -} -static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - CPUState *cs = CPU(obj); - GuestPanicInformation *panic_info; - - if (!cs->crash_occurred) { - error_setg(errp, "No crash occurred"); - return; - } - - panic_info = x86_cpu_get_crash_info(cs); - if (panic_info == NULL) { - error_setg(errp, "No crash information"); - return; - } - - visit_type_GuestPanicInformation(v, "crash-information", &panic_info, - errp); - qapi_free_GuestPanicInformation(panic_info); -} -#endif /* !CONFIG_USER_ONLY */ - static void x86_cpu_initfn(Object *obj) { X86CPU *cpu = X86_CPU(obj); @@ -7153,6 +6473,9 @@ static void x86_cpu_initfn(Object *obj) if (xcc->model) { x86_cpu_load_model(cpu, xcc->model); } + + /* if required, do accelerator-specific cpu initializations */ + accel_cpu_instance_init(CPU(obj)); } static int64_t x86_cpu_get_arch_id(CPUState *cs) @@ -7410,11 +6733,6 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data) cc->class_by_name = x86_cpu_class_by_name; cc->parse_features = x86_cpu_parse_featurestr; cc->has_work = x86_cpu_has_work; - -#ifdef CONFIG_TCG - tcg_cpu_common_class_init(cc); -#endif /* CONFIG_TCG */ - cc->dump_state = x86_cpu_dump_state; cc->set_pc = x86_cpu_set_pc; cc->gdb_read_register = x86_cpu_gdb_read_register; @@ -7525,9 +6843,6 @@ static void x86_cpu_register_types(void) } type_register_static(&max_x86_cpu_type_info); type_register_static(&x86_base_cpu_type_info); -#if defined(CONFIG_KVM) || defined(CONFIG_HVF) - type_register_static(&host_x86_cpu_type_info); -#endif } type_init(x86_cpu_register_types) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 570f916878..e6836393f7 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -303,6 +303,19 @@ typedef enum X86Seg { #define PG_ERROR_I_D_MASK 0x10 #define PG_ERROR_PK_MASK 0x20 +#define PG_MODE_PAE (1 << 0) +#define PG_MODE_LMA (1 << 1) +#define PG_MODE_NXE (1 << 2) +#define PG_MODE_PSE (1 << 3) +#define PG_MODE_LA57 (1 << 4) +#define PG_MODE_SVM_MASK MAKE_64BIT_MASK(0, 15) + +/* Bits of CR4 that do not affect the NPT page format. 
*/ +#define PG_MODE_WP (1 << 16) +#define PG_MODE_PKE (1 << 17) +#define PG_MODE_PKS (1 << 18) +#define PG_MODE_SMEP (1 << 19) + #define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ #define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */ @@ -1786,7 +1799,7 @@ struct X86CPU { #ifndef CONFIG_USER_ONLY -extern VMStateDescription vmstate_x86_cpu; +extern const VMStateDescription vmstate_x86_cpu; #endif int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); @@ -1817,7 +1830,10 @@ int cpu_x86_support_mca_broadcast(CPUX86State *env); int cpu_get_pic_interrupt(CPUX86State *s); /* MSDOS compatibility mode FPU exception support */ void x86_register_ferr_irq(qemu_irq irq); +void fpu_check_raise_ferr_irq(CPUX86State *s); void cpu_set_ignne(void); +void cpu_clear_ignne(void); + /* mpx_helper.c */ void cpu_sync_bndcs_hflags(CPUX86State *env); @@ -1926,13 +1942,20 @@ int cpu_x86_signal_handler(int host_signum, void *pinfo, void *puc); /* cpu.c */ +void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, + uint32_t vendor2, uint32_t vendor3); +typedef struct PropValue { + const char *prop, *value; +} PropValue; +void x86_cpu_apply_props(X86CPU *cpu, PropValue *props); + +/* cpu.c other functions (cpuid) */ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); void cpu_clear_apic_feature(CPUX86State *env); void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); -void host_vendor_fms(char *vendor, int *family, int *model, int *stepping); /* helper.c */ void x86_cpu_set_a20(X86CPU *cpu, int a20_state); @@ -1948,6 +1971,11 @@ static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs) return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs)); } +/* + * load efer and update the corresponding hflags. XXX: do consistency + * checks with cpuid bits? + */ +void cpu_load_efer(CPUX86State *env, uint64_t val); uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr); uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr); uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr); @@ -2044,21 +2072,6 @@ static inline uint32_t cpu_compute_eflags(CPUX86State *env) return eflags; } - -/* load efer and update the corresponding hflags. XXX: do consistency - checks with cpuid bits? 
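The PG_MODE_* flags introduced above condense the paging configuration into one integer so MMU/NPT code does not have to keep re-reading CR0/CR4/EFER. The real helper is get_pg_mode() in excp_helper.c (declared above); the following is only a hedged sketch of how such a value can be composed, and it deliberately leaves out some bits (PKE/PKS among others):

static int example_pg_mode(CPUX86State *env)
{
    int pg_mode = 0;

    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_LA57_MASK) {
        pg_mode |= PG_MODE_LA57;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
    }
    if (env->efer & MSR_EFER_NXE) {
        pg_mode |= PG_MODE_NXE;
    }
    /* bits that do not affect the NPT page format */
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    return pg_mode;
}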
*/ -static inline void cpu_load_efer(CPUX86State *env, uint64_t val) -{ - env->efer = val; - env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK); - if (env->efer & MSR_EFER_LMA) { - env->hflags |= HF_LMA_MASK; - } - if (env->efer & MSR_EFER_SVME) { - env->hflags |= HF_SVME_MASK; - } -} - static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env) { return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 }); @@ -2105,6 +2118,9 @@ static inline bool cpu_vmx_maybe_enabled(CPUX86State *env) ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK)); } +/* excp_helper.c */ +int get_pg_mode(CPUX86State *env); + /* fpu_helper.c */ void update_fp_status(CPUX86State *env); void update_mxcsr_status(CPUX86State *env); @@ -2130,24 +2146,21 @@ static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc) void helper_lock_init(void); /* svm_helper.c */ +#ifdef CONFIG_USER_ONLY +static inline void +cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, + uint64_t param, uintptr_t retaddr) +{ /* no-op */ } +#else void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, uint64_t param, uintptr_t retaddr); +#endif + /* apic.c */ void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip, TPRAccess access); - -/* Change the value of a KVM-specific default - * - * If value is NULL, no default will be set and the original - * value from the CPU model table will be kept. - * - * It is valid to call this function only for properties that - * are already present in the kvm_default_props table. - */ -void x86_cpu_change_kvm_default(const char *prop, const char *value); - /* Special values for X86CPUVersion: */ /* Resolve to latest CPU version */ diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c index 41e265fc67..098a2ad15a 100644 --- a/target/i386/gdbstub.c +++ b/target/i386/gdbstub.c @@ -78,6 +78,23 @@ static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; #define GDB_FORCE_64 0 #endif +static int gdb_read_reg_cs64(uint32_t hflags, GByteArray *buf, target_ulong val) +{ + if ((hflags & HF_CS64_MASK) || GDB_FORCE_64) { + return gdb_get_reg64(buf, val); + } + return gdb_get_reg32(buf, val); +} + +static int gdb_write_reg_cs64(uint32_t hflags, uint8_t *buf, target_ulong *val) +{ + if (hflags & HF_CS64_MASK) { + *val = ldq_p(buf); + return 8; + } + *val = ldl_p(buf); + return 4; +} int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { @@ -142,25 +159,14 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) return gdb_get_reg32(mem_buf, env->segs[R_FS].selector); case IDX_SEG_REGS + 5: return gdb_get_reg32(mem_buf, env->segs[R_GS].selector); - case IDX_SEG_REGS + 6: - if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) { - return gdb_get_reg64(mem_buf, env->segs[R_FS].base); - } - return gdb_get_reg32(mem_buf, env->segs[R_FS].base); - + return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_FS].base); case IDX_SEG_REGS + 7: - if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) { - return gdb_get_reg64(mem_buf, env->segs[R_GS].base); - } - return gdb_get_reg32(mem_buf, env->segs[R_GS].base); + return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_GS].base); case IDX_SEG_REGS + 8: #ifdef TARGET_X86_64 - if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) { - return gdb_get_reg64(mem_buf, env->kernelgsbase); - } - return gdb_get_reg32(mem_buf, env->kernelgsbase); + return gdb_read_reg_cs64(env->hflags, mem_buf, env->kernelgsbase); #else return 
gdb_get_reg32(mem_buf, 0); #endif @@ -188,45 +194,23 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) return gdb_get_reg32(mem_buf, env->mxcsr); case IDX_CTL_CR0_REG: - if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) { - return gdb_get_reg64(mem_buf, env->cr[0]); - } - return gdb_get_reg32(mem_buf, env->cr[0]); - + return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[0]); case IDX_CTL_CR2_REG: - if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) { - return gdb_get_reg64(mem_buf, env->cr[2]); - } - return gdb_get_reg32(mem_buf, env->cr[2]); - + return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[2]); case IDX_CTL_CR3_REG: - if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) { - return gdb_get_reg64(mem_buf, env->cr[3]); - } - return gdb_get_reg32(mem_buf, env->cr[3]); - + return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[3]); case IDX_CTL_CR4_REG: - if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) { - return gdb_get_reg64(mem_buf, env->cr[4]); - } - return gdb_get_reg32(mem_buf, env->cr[4]); - + return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[4]); case IDX_CTL_CR8_REG: -#ifdef CONFIG_SOFTMMU +#ifndef CONFIG_USER_ONLY tpr = cpu_get_apic_tpr(cpu->apic_state); #else tpr = 0; #endif - if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) { - return gdb_get_reg64(mem_buf, tpr); - } - return gdb_get_reg32(mem_buf, tpr); + return gdb_read_reg_cs64(env->hflags, mem_buf, tpr); case IDX_CTL_EFER_REG: - if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) { - return gdb_get_reg64(mem_buf, env->efer); - } - return gdb_get_reg32(mem_buf, env->efer); + return gdb_read_reg_cs64(env->hflags, mem_buf, env->efer); } } return 0; @@ -266,7 +250,8 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; - uint32_t tmp; + target_ulong tmp; + int len; /* N.B. GDB can't deal with changes in registers or sizes in the middle of a session. 
So if we're in 32-bit mode on a 64-bit cpu, still act @@ -329,30 +314,13 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf); case IDX_SEG_REGS + 5: return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf); - case IDX_SEG_REGS + 6: - if (env->hflags & HF_CS64_MASK) { - env->segs[R_FS].base = ldq_p(mem_buf); - return 8; - } - env->segs[R_FS].base = ldl_p(mem_buf); - return 4; - + return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_FS].base); case IDX_SEG_REGS + 7: - if (env->hflags & HF_CS64_MASK) { - env->segs[R_GS].base = ldq_p(mem_buf); - return 8; - } - env->segs[R_GS].base = ldl_p(mem_buf); - return 4; - + return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_GS].base); case IDX_SEG_REGS + 8: #ifdef TARGET_X86_64 - if (env->hflags & HF_CS64_MASK) { - env->kernelgsbase = ldq_p(mem_buf); - return 8; - } - env->kernelgsbase = ldl_p(mem_buf); + return gdb_write_reg_cs64(env->hflags, mem_buf, &env->kernelgsbase); #endif return 4; @@ -382,57 +350,46 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return 4; case IDX_CTL_CR0_REG: - if (env->hflags & HF_CS64_MASK) { - cpu_x86_update_cr0(env, ldq_p(mem_buf)); - return 8; - } - cpu_x86_update_cr0(env, ldl_p(mem_buf)); - return 4; + len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp); +#ifndef CONFIG_USER_ONLY + cpu_x86_update_cr0(env, tmp); +#endif + return len; case IDX_CTL_CR2_REG: - if (env->hflags & HF_CS64_MASK) { - env->cr[2] = ldq_p(mem_buf); - return 8; - } - env->cr[2] = ldl_p(mem_buf); - return 4; + len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp); +#ifndef CONFIG_USER_ONLY + env->cr[2] = tmp; +#endif + return len; case IDX_CTL_CR3_REG: - if (env->hflags & HF_CS64_MASK) { - cpu_x86_update_cr3(env, ldq_p(mem_buf)); - return 8; - } - cpu_x86_update_cr3(env, ldl_p(mem_buf)); - return 4; + len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp); +#ifndef CONFIG_USER_ONLY + cpu_x86_update_cr3(env, tmp); +#endif + return len; case IDX_CTL_CR4_REG: - if (env->hflags & HF_CS64_MASK) { - cpu_x86_update_cr4(env, ldq_p(mem_buf)); - return 8; - } - cpu_x86_update_cr4(env, ldl_p(mem_buf)); - return 4; + len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp); +#ifndef CONFIG_USER_ONLY + cpu_x86_update_cr4(env, tmp); +#endif + return len; case IDX_CTL_CR8_REG: - if (env->hflags & HF_CS64_MASK) { -#ifdef CONFIG_SOFTMMU - cpu_set_apic_tpr(cpu->apic_state, ldq_p(mem_buf)); + len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp); +#ifndef CONFIG_USER_ONLY + cpu_set_apic_tpr(cpu->apic_state, tmp); #endif - return 8; - } -#ifdef CONFIG_SOFTMMU - cpu_set_apic_tpr(cpu->apic_state, ldl_p(mem_buf)); -#endif - return 4; + return len; case IDX_CTL_EFER_REG: - if (env->hflags & HF_CS64_MASK) { - cpu_load_efer(env, ldq_p(mem_buf)); - return 8; - } - cpu_load_efer(env, ldl_p(mem_buf)); - return 4; - + len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp); +#ifndef CONFIG_USER_ONLY + cpu_load_efer(env, tmp); +#endif + return len; } } /* Unrecognised register. 
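The two helpers factored out above encode a single convention: a control or base register is exchanged with gdb as 64 bits when the CPU is executing 64-bit code (HF_CS64_MASK set) and as 32 bits otherwise, with the write helper also reporting how many bytes it consumed. A hedged sketch of using the pair, as if it lived in gdbstub.c; the function is hypothetical and only illustrates the calling convention:

static int example_roundtrip(CPUX86State *env, GByteArray *out, uint8_t *in,
                             target_ulong *value)
{
    int written = gdb_read_reg_cs64(env->hflags, out, *value);   /* 4 or 8 */
    int consumed = gdb_write_reg_cs64(env->hflags, in, value);   /* 4 or 8 */

    return written == consumed ? written : -1;
}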
*/ diff --git a/target/i386/hax/hax-mem.c b/target/i386/hax/hax-mem.c index 35495f5e82..8d44edbffd 100644 --- a/target/i386/hax/hax-mem.c +++ b/target/i386/hax/hax-mem.c @@ -293,7 +293,8 @@ static MemoryListener hax_memory_listener = { .priority = 10, }; -static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size) +static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size, + size_t max_size) { /* * We must register each RAM block with the HAXM kernel module, or @@ -304,7 +305,7 @@ static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size) * host physical pages for the RAM block as part of this registration * process, hence the name hax_populate_ram(). */ - if (hax_populate_ram((uint64_t)(uintptr_t)host, size) < 0) { + if (hax_populate_ram((uint64_t)(uintptr_t)host, max_size) < 0) { fprintf(stderr, "HAX failed to populate RAM\n"); abort(); } diff --git a/target/i386/helper.c b/target/i386/helper.c index 618ad1c409..533b29cb91 100644 --- a/target/i386/helper.c +++ b/target/i386/helper.c @@ -495,7 +495,7 @@ void cpu_report_tpr_access(CPUX86State *env, TPRAccess access) X86CPU *cpu = env_archcpu(env); CPUState *cs = env_cpu(env); - if (kvm_enabled() || whpx_enabled()) { + if (kvm_enabled() || whpx_enabled() || nvmm_enabled()) { env->tpr_access_type = access; cpu_interrupt(cs, CPU_INTERRUPT_TPR); @@ -574,6 +574,19 @@ void do_cpu_sipi(X86CPU *cpu) #endif #ifndef CONFIG_USER_ONLY + +void cpu_load_efer(CPUX86State *env, uint64_t val) +{ + env->efer = val; + env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK); + if (env->efer & MSR_EFER_LMA) { + env->hflags |= HF_LMA_MASK; + } + if (env->efer & MSR_EFER_SVME) { + env->hflags |= HF_SVME_MASK; + } +} + uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr) { X86CPU *cpu = X86_CPU(cs); diff --git a/target/i386/helper.h b/target/i386/helper.h index c2ae2f7e61..f3d8c3f949 100644 --- a/target/i386/helper.h +++ b/target/i386/helper.h @@ -42,13 +42,13 @@ DEF_HELPER_5(lcall_protected, void, env, int, tl, int, tl) DEF_HELPER_2(iret_real, void, env, int) DEF_HELPER_3(iret_protected, void, env, int, int) DEF_HELPER_3(lret_protected, void, env, int, int) -DEF_HELPER_2(read_crN, tl, env, int) -DEF_HELPER_3(write_crN, void, env, int, tl) -DEF_HELPER_2(lmsw, void, env, tl) DEF_HELPER_1(clts, void, env) + +#ifndef CONFIG_USER_ONLY DEF_HELPER_FLAGS_3(set_dr, TCG_CALL_NO_WG, void, env, int, tl) +#endif /* !CONFIG_USER_ONLY */ + DEF_HELPER_FLAGS_2(get_dr, TCG_CALL_NO_WG, tl, env, int) -DEF_HELPER_2(invlpg, void, env, tl) DEF_HELPER_1(sysenter, void, env) DEF_HELPER_2(sysexit, void, env, int) @@ -56,21 +56,22 @@ DEF_HELPER_2(sysexit, void, env, int) DEF_HELPER_2(syscall, void, env, int) DEF_HELPER_2(sysret, void, env, int) #endif -DEF_HELPER_2(hlt, void, env, int) -DEF_HELPER_2(monitor, void, env, tl) -DEF_HELPER_2(mwait, void, env, int) -DEF_HELPER_2(pause, void, env, int) -DEF_HELPER_1(debug, void, env) +DEF_HELPER_FLAGS_2(pause, TCG_CALL_NO_WG, noreturn, env, int) +DEF_HELPER_FLAGS_1(debug, TCG_CALL_NO_WG, noreturn, env) DEF_HELPER_1(reset_rf, void, env) -DEF_HELPER_3(raise_interrupt, void, env, int, int) -DEF_HELPER_2(raise_exception, void, env, int) +DEF_HELPER_FLAGS_3(raise_interrupt, TCG_CALL_NO_WG, noreturn, env, int, int) +DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, int) DEF_HELPER_1(cli, void, env) DEF_HELPER_1(sti, void, env) DEF_HELPER_1(clac, void, env) DEF_HELPER_1(stac, void, env) DEF_HELPER_3(boundw, void, env, tl, int) DEF_HELPER_3(boundl, void, env, tl, int) + +#ifndef CONFIG_USER_ONLY 
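The hax-mem.c hunk above tracks an interface change: ram_block_added notifiers now also receive the block's maximal size, and HAXM registers that full span up front so resizable blocks need no extra handling later. A hedged sketch of a notifier written against the new signature; the my_* names are invented and only the callback shape mirrors the patch:

static void my_ram_block_added(RAMBlockNotifier *n, void *host,
                               size_t size, size_t max_size)
{
    /*
     * Register the whole range the block may ever grow into, not just the
     * currently used part.
     */
    my_backend_register_memory((uintptr_t)host, max_size);   /* hypothetical */
}

static RAMBlockNotifier my_ram_notifier = {
    .ram_block_added = my_ram_block_added,
};

Such a notifier would be hooked up once at accelerator setup with ram_block_notifier_add(&my_ram_notifier).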
DEF_HELPER_1(rsm, void, env) +#endif /* !CONFIG_USER_ONLY */ + DEF_HELPER_2(into, void, env, int) DEF_HELPER_2(cmpxchg8b_unlocked, void, env, tl) DEF_HELPER_2(cmpxchg8b, void, env, tl) @@ -78,27 +79,23 @@ DEF_HELPER_2(cmpxchg8b, void, env, tl) DEF_HELPER_2(cmpxchg16b_unlocked, void, env, tl) DEF_HELPER_2(cmpxchg16b, void, env, tl) #endif -DEF_HELPER_1(single_step, void, env) +DEF_HELPER_FLAGS_1(single_step, TCG_CALL_NO_WG, noreturn, env) DEF_HELPER_1(rechecking_single_step, void, env) DEF_HELPER_1(cpuid, void, env) DEF_HELPER_1(rdtsc, void, env) DEF_HELPER_1(rdtscp, void, env) -DEF_HELPER_1(rdpmc, void, env) -DEF_HELPER_1(rdmsr, void, env) -DEF_HELPER_1(wrmsr, void, env) +DEF_HELPER_FLAGS_1(rdpmc, TCG_CALL_NO_WG, noreturn, env) -DEF_HELPER_2(check_iob, void, env, i32) -DEF_HELPER_2(check_iow, void, env, i32) -DEF_HELPER_2(check_iol, void, env, i32) +#ifndef CONFIG_USER_ONLY DEF_HELPER_3(outb, void, env, i32, i32) DEF_HELPER_2(inb, tl, env, i32) DEF_HELPER_3(outw, void, env, i32, i32) DEF_HELPER_2(inw, tl, env, i32) DEF_HELPER_3(outl, void, env, i32, i32) DEF_HELPER_2(inl, tl, env, i32) +DEF_HELPER_FLAGS_3(check_io, TCG_CALL_NO_WG, void, env, i32, i32) DEF_HELPER_FLAGS_4(bpt_io, TCG_CALL_NO_WG, void, env, i32, i32, tl) - -DEF_HELPER_3(svm_check_intercept_param, void, env, i32, i64) +DEF_HELPER_2(svm_check_intercept, void, env, i32) DEF_HELPER_4(svm_check_io, void, env, i32, i32, i32) DEF_HELPER_3(vmrun, void, env, int, int) DEF_HELPER_1(vmmcall, void, env) @@ -106,8 +103,15 @@ DEF_HELPER_2(vmload, void, env, int) DEF_HELPER_2(vmsave, void, env, int) DEF_HELPER_1(stgi, void, env) DEF_HELPER_1(clgi, void, env) -DEF_HELPER_1(skinit, void, env) -DEF_HELPER_2(invlpga, void, env, int) +DEF_HELPER_FLAGS_2(flush_page, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(hlt, TCG_CALL_NO_WG, noreturn, env, int) +DEF_HELPER_FLAGS_2(monitor, TCG_CALL_NO_WG, void, env, tl) +DEF_HELPER_FLAGS_2(mwait, TCG_CALL_NO_WG, noreturn, env, int) +DEF_HELPER_1(rdmsr, void, env) +DEF_HELPER_1(wrmsr, void, env) +DEF_HELPER_FLAGS_2(read_crN, TCG_CALL_NO_RWG, tl, env, int) +DEF_HELPER_FLAGS_3(write_crN, TCG_CALL_NO_RWG, void, env, int, tl) +#endif /* !CONFIG_USER_ONLY */ /* x86 FPU */ diff --git a/target/i386/host-cpu.c b/target/i386/host-cpu.c new file mode 100644 index 0000000000..4ea9e354ea --- /dev/null +++ b/target/i386/host-cpu.c @@ -0,0 +1,204 @@ +/* + * x86 host CPU functions, and "host" cpu type initialization + * + * Copyright 2021 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "host-cpu.h" +#include "qapi/error.h" +#include "sysemu/sysemu.h" + +/* Note: Only safe for use on x86(-64) hosts */ +static uint32_t host_cpu_phys_bits(void) +{ + uint32_t eax; + uint32_t host_phys_bits; + + host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); + if (eax >= 0x80000008) { + host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); + /* + * Note: According to AMD doc 25481 rev 2.34 they have a field + * at 23:16 that can specify a maximum physical address bits for + * the guest that can override this value; but I've not seen + * anything with that set. + */ + host_phys_bits = eax & 0xff; + } else { + /* + * It's an odd 64 bit machine that doesn't have the leaf for + * physical address bits; fall back to 36 that's most older + * Intel. 
+ */ + host_phys_bits = 36; + } + + return host_phys_bits; +} + +static void host_cpu_enable_cpu_pm(X86CPU *cpu) +{ + CPUX86State *env = &cpu->env; + + host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, + &cpu->mwait.ecx, &cpu->mwait.edx); + env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; +} + +static uint32_t host_cpu_adjust_phys_bits(X86CPU *cpu) +{ + uint32_t host_phys_bits = host_cpu_phys_bits(); + uint32_t phys_bits = cpu->phys_bits; + static bool warned; + + /* + * Print a warning if the user set it to a value that's not the + * host value. + */ + if (phys_bits != host_phys_bits && phys_bits != 0 && + !warned) { + warn_report("Host physical bits (%u)" + " does not match phys-bits property (%u)", + host_phys_bits, phys_bits); + warned = true; + } + + if (cpu->host_phys_bits) { + /* The user asked for us to use the host physical bits */ + phys_bits = host_phys_bits; + if (cpu->host_phys_bits_limit && + phys_bits > cpu->host_phys_bits_limit) { + phys_bits = cpu->host_phys_bits_limit; + } + } + + return phys_bits; +} + +bool host_cpu_realizefn(CPUState *cs, Error **errp) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + if (cpu->max_features && enable_cpu_pm) { + host_cpu_enable_cpu_pm(cpu); + } + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { + uint32_t phys_bits = host_cpu_adjust_phys_bits(cpu); + + if (phys_bits && + (phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || + phys_bits < 32)) { + error_setg(errp, "phys-bits should be between 32 and %u " + " (but is %u)", + TARGET_PHYS_ADDR_SPACE_BITS, phys_bits); + return false; + } + cpu->phys_bits = phys_bits; + } + return true; +} + +#define CPUID_MODEL_ID_SZ 48 +/** + * cpu_x86_fill_model_id: + * Get CPUID model ID string from host CPU. + * + * @str should have at least CPUID_MODEL_ID_SZ bytes + * + * The function does NOT add a null terminator to the string + * automatically. 
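As a worked example of the logic above (the numbers are hypothetical): if CPUID leaf 0x80000008 returns EAX = 0x00003027, bits 7:0 give 0x27, i.e. 39 host physical address bits; with "-cpu host,host-phys-bits=on,host-phys-bits-limit=36" the guest would then be clamped to phys-bits=36. A minimal sketch of just the decode step:

static uint32_t example_decode_phys_bits(void)
{
    uint32_t eax;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax < 0x80000008) {
        return 36;                  /* older CPUs: assume 36 bits */
    }
    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    return eax & 0xff;              /* e.g. 0x00003027 -> 0x27 = 39 bits */
}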
+ */ +static int host_cpu_fill_model_id(char *str) +{ + uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; + int i; + + for (i = 0; i < 3; i++) { + host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); + memcpy(str + i * 16 + 0, &eax, 4); + memcpy(str + i * 16 + 4, &ebx, 4); + memcpy(str + i * 16 + 8, &ecx, 4); + memcpy(str + i * 16 + 12, &edx, 4); + } + return 0; +} + +void host_cpu_vendor_fms(char *vendor, int *family, int *model, int *stepping) +{ + uint32_t eax, ebx, ecx, edx; + + host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); + x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); + + host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); + if (family) { + *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); + } + if (model) { + *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); + } + if (stepping) { + *stepping = eax & 0x0F; + } +} + +void host_cpu_instance_init(X86CPU *cpu) +{ + uint32_t ebx = 0, ecx = 0, edx = 0; + char vendor[CPUID_VENDOR_SZ + 1]; + + host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); + x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); + + object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); +} + +void host_cpu_max_instance_init(X86CPU *cpu) +{ + char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; + char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; + int family, model, stepping; + + /* Use max host physical address bits if -cpu max option is applied */ + object_property_set_bool(OBJECT(cpu), "host-phys-bits", true, &error_abort); + + host_cpu_vendor_fms(vendor, &family, &model, &stepping); + host_cpu_fill_model_id(model_id); + + object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); + object_property_set_int(OBJECT(cpu), "family", family, &error_abort); + object_property_set_int(OBJECT(cpu), "model", model, &error_abort); + object_property_set_int(OBJECT(cpu), "stepping", stepping, + &error_abort); + object_property_set_str(OBJECT(cpu), "model-id", model_id, + &error_abort); +} + +static void host_cpu_class_init(ObjectClass *oc, void *data) +{ + X86CPUClass *xcc = X86_CPU_CLASS(oc); + + xcc->host_cpuid_required = true; + xcc->ordering = 8; + xcc->model_description = + g_strdup_printf("processor with all supported host features "); +} + +static const TypeInfo host_cpu_type_info = { + .name = X86_CPU_TYPE_NAME("host"), + .parent = X86_CPU_TYPE_NAME("max"), + .class_init = host_cpu_class_init, +}; + +static void host_cpu_type_init(void) +{ + type_register_static(&host_cpu_type_info); +} + +type_init(host_cpu_type_init); diff --git a/target/i386/host-cpu.h b/target/i386/host-cpu.h new file mode 100644 index 0000000000..6a9bc918ba --- /dev/null +++ b/target/i386/host-cpu.h @@ -0,0 +1,19 @@ +/* + * x86 host CPU type initialization and host CPU functions + * + * Copyright 2021 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef HOST_CPU_H +#define HOST_CPU_H + +void host_cpu_instance_init(X86CPU *cpu); +void host_cpu_max_instance_init(X86CPU *cpu); +bool host_cpu_realizefn(CPUState *cs, Error **errp); + +void host_cpu_vendor_fms(char *vendor, int *family, int *model, int *stepping); + +#endif /* HOST_CPU_H */ diff --git a/target/i386/hvf/hvf-cpu.c b/target/i386/hvf/hvf-cpu.c new file mode 100644 index 0000000000..8fbc423888 --- /dev/null +++ b/target/i386/hvf/hvf-cpu.c @@ -0,0 +1,68 @@ +/* + * x86 HVF CPU type initialization + * + * Copyright 2021 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
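host_cpu_vendor_fms() above folds the extended family/model fields back into the usual values. As a worked example (the EAX value is hypothetical): CPUID.01H:EAX = 0x000906EA decodes to stepping = 0xA (10), model = 0xE | (0x9 << 4) = 0x9E (158) and family = 0x6 + 0x00 = 6, which is how a Coffee Lake part typically identifies itself. A short usage sketch, as if placed in host-cpu.c:

static void example_report_host_fms(void)
{
    char vendor[CPUID_VENDOR_SZ + 1];
    int family, model, stepping;

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);
    /*
     * e.g. EAX = 0x000906EA:
     *   stepping = EAX & 0xF                   = 0xA  (10)
     *   model    = 0xE | (ext. model 0x9 << 4) = 0x9E (158)
     *   family   = 0x6 + ext. family 0x00      = 6
     */
    printf("%s family %d model %d stepping %d\n",
           vendor, family, model, stepping);
}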
+ * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "host-cpu.h" +#include "qapi/error.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "sysemu/hvf.h" +#include "hw/core/accel-cpu.h" + +static void hvf_cpu_max_instance_init(X86CPU *cpu) +{ + CPUX86State *env = &cpu->env; + + host_cpu_max_instance_init(cpu); + + env->cpuid_min_level = + hvf_get_supported_cpuid(0x0, 0, R_EAX); + env->cpuid_min_xlevel = + hvf_get_supported_cpuid(0x80000000, 0, R_EAX); + env->cpuid_min_xlevel2 = + hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); +} + +static void hvf_cpu_instance_init(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + + host_cpu_instance_init(cpu); + + /* Special cases not set in the X86CPUDefinition structs: */ + /* TODO: in-kernel irqchip for hvf */ + + if (cpu->max_features) { + hvf_cpu_max_instance_init(cpu); + } +} + +static void hvf_cpu_accel_class_init(ObjectClass *oc, void *data) +{ + AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); + + acc->cpu_realizefn = host_cpu_realizefn; + acc->cpu_instance_init = hvf_cpu_instance_init; +} + +static const TypeInfo hvf_cpu_accel_type_info = { + .name = ACCEL_CPU_NAME("hvf"), + + .parent = TYPE_ACCEL_CPU, + .class_init = hvf_cpu_accel_class_init, + .abstract = true, +}; + +static void hvf_cpu_accel_register_types(void) +{ + type_register_static(&hvf_cpu_accel_type_info); +} + +type_init(hvf_cpu_accel_register_types); diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index 15f14ac69e..f044181d06 100644 --- a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -67,7 +67,6 @@ #include <Hypervisor/hv_vmx.h> #include <sys/sysctl.h> -#include "exec/address-spaces.h" #include "hw/i386/apic_internal.h" #include "qemu/main-loop.h" #include "qemu/accel.h" diff --git a/target/i386/hvf/meson.build b/target/i386/hvf/meson.build index e9eb5a5da8..d253d5fd10 100644 --- a/target/i386/hvf/meson.build +++ b/target/i386/hvf/meson.build @@ -10,4 +10,5 @@ i386_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files( 'x86_mmu.c', 'x86_task.c', 'x86hvf.c', + 'hvf-cpu.c', )) diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c index 882a6237ee..78fff04684 100644 --- a/target/i386/hvf/x86_mmu.c +++ b/target/i386/hvf/x86_mmu.c @@ -24,7 +24,6 @@ #include "x86_mmu.h" #include "vmcs.h" #include "vmx.h" -#include "exec/address-spaces.h" #define pte_present(pte) (pte & PT_PRESENT) #define pte_write_access(pte) (pte & PT_WRITE) diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c new file mode 100644 index 0000000000..c660ad4293 --- /dev/null +++ b/target/i386/kvm/kvm-cpu.c @@ -0,0 +1,151 @@ +/* + * x86 KVM CPU type initialization + * + * Copyright 2021 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "host-cpu.h" +#include "kvm-cpu.h" +#include "qapi/error.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" + +#include "kvm_i386.h" +#include "hw/core/accel-cpu.h" + +static bool kvm_cpu_realizefn(CPUState *cs, Error **errp) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + /* + * The realize order is important, since x86_cpu_realize() checks if + * nothing else has been set by the user (or by accelerators) in + * cpu->ucode_rev and cpu->phys_bits. 
+ * + * realize order: + * kvm_cpu -> host_cpu -> x86_cpu + */ + if (cpu->max_features) { + if (enable_cpu_pm && kvm_has_waitpkg()) { + env->features[FEAT_7_0_ECX] |= CPUID_7_0_ECX_WAITPKG; + } + if (cpu->ucode_rev == 0) { + cpu->ucode_rev = + kvm_arch_get_supported_msr_feature(kvm_state, + MSR_IA32_UCODE_REV); + } + } + return host_cpu_realizefn(cs, errp); +} + +/* + * KVM-specific features that are automatically added/removed + * from all CPU models when KVM is enabled. + */ +static PropValue kvm_default_props[] = { + { "kvmclock", "on" }, + { "kvm-nopiodelay", "on" }, + { "kvm-asyncpf", "on" }, + { "kvm-steal-time", "on" }, + { "kvm-pv-eoi", "on" }, + { "kvmclock-stable-bit", "on" }, + { "x2apic", "on" }, + { "kvm-msi-ext-dest-id", "off" }, + { "acpi", "off" }, + { "monitor", "off" }, + { "svm", "off" }, + { NULL, NULL }, +}; + +void x86_cpu_change_kvm_default(const char *prop, const char *value) +{ + PropValue *pv; + for (pv = kvm_default_props; pv->prop; pv++) { + if (!strcmp(pv->prop, prop)) { + pv->value = value; + break; + } + } + + /* + * It is valid to call this function only for properties that + * are already present in the kvm_default_props table. + */ + assert(pv->prop); +} + +static bool lmce_supported(void) +{ + uint64_t mce_cap = 0; + + if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { + return false; + } + return !!(mce_cap & MCG_LMCE_P); +} + +static void kvm_cpu_max_instance_init(X86CPU *cpu) +{ + CPUX86State *env = &cpu->env; + KVMState *s = kvm_state; + + host_cpu_max_instance_init(cpu); + + if (lmce_supported()) { + object_property_set_bool(OBJECT(cpu), "lmce", true, &error_abort); + } + + env->cpuid_min_level = + kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); + env->cpuid_min_xlevel = + kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); + env->cpuid_min_xlevel2 = + kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); +} + +static void kvm_cpu_instance_init(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + + host_cpu_instance_init(cpu); + + if (!kvm_irqchip_in_kernel()) { + x86_cpu_change_kvm_default("x2apic", "off"); + } else if (kvm_irqchip_is_split() && kvm_enable_x2apic()) { + x86_cpu_change_kvm_default("kvm-msi-ext-dest-id", "on"); + } + + /* Special cases not set in the X86CPUDefinition structs: */ + + x86_cpu_apply_props(cpu, kvm_default_props); + + if (cpu->max_features) { + kvm_cpu_max_instance_init(cpu); + } +} + +static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data) +{ + AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); + + acc->cpu_realizefn = kvm_cpu_realizefn; + acc->cpu_instance_init = kvm_cpu_instance_init; +} +static const TypeInfo kvm_cpu_accel_type_info = { + .name = ACCEL_CPU_NAME("kvm"), + + .parent = TYPE_ACCEL_CPU, + .class_init = kvm_cpu_accel_class_init, + .abstract = true, +}; +static void kvm_cpu_accel_register_types(void) +{ + type_register_static(&kvm_cpu_accel_type_info); +} +type_init(kvm_cpu_accel_register_types); diff --git a/target/i386/kvm/kvm-cpu.h b/target/i386/kvm/kvm-cpu.h new file mode 100644 index 0000000000..e858ca21e5 --- /dev/null +++ b/target/i386/kvm/kvm-cpu.h @@ -0,0 +1,41 @@ +/* + * i386 KVM CPU type and functions + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
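[Note: hvf-cpu.c above and kvm-cpu.c here follow the same accel-CPU registration recipe: an abstract subclass of TYPE_ACCEL_CPU named ACCEL_CPU_NAME("<accel>"), whose class_init fills in the AccelCPUClass hooks that the generic CPU creation code later invokes. As a sketch only, a hypothetical accelerator "foo" (name and callback functions invented here) would plug in the same way:

    static void foo_cpu_accel_class_init(ObjectClass *oc, void *data)
    {
        AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

        /* per-accelerator CPU hooks (callback names invented for this sketch) */
        acc->cpu_instance_init = foo_cpu_instance_init;
        acc->cpu_realizefn = foo_cpu_realizefn;
    }

    static const TypeInfo foo_cpu_accel_type_info = {
        .name = ACCEL_CPU_NAME("foo"),   /* ties the class to the "foo" accelerator */
        .parent = TYPE_ACCEL_CPU,
        .class_init = foo_cpu_accel_class_init,
        .abstract = true,
    };

    static void foo_cpu_accel_register_types(void)
    {
        type_register_static(&foo_cpu_accel_type_info);
    }
    type_init(foo_cpu_accel_register_types);
]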
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef KVM_CPU_H +#define KVM_CPU_H + +#ifdef CONFIG_KVM +/* + * Change the value of a KVM-specific default + * + * If value is NULL, no default will be set and the original + * value from the CPU model table will be kept. + * + * It is valid to call this function only for properties that + * are already present in the kvm_default_props table. + */ +void x86_cpu_change_kvm_default(const char *prop, const char *value); + +#else /* !CONFIG_KVM */ + +#define x86_cpu_change_kvm_default(a, b) + +#endif /* CONFIG_KVM */ + +#endif /* KVM_CPU_H */ diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 7fe9f52710..d972eb4705 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -22,6 +22,7 @@ #include "standard-headers/asm-x86/kvm_para.h" #include "cpu.h" +#include "host-cpu.h" #include "sysemu/sysemu.h" #include "sysemu/hw_accel.h" #include "sysemu/kvm_int.h" @@ -288,7 +289,7 @@ static bool host_tsx_broken(void) int family, model, stepping;\ char vendor[CPUID_VENDOR_SZ + 1]; - host_vendor_fms(vendor, &family, &model, &stepping); + host_cpu_vendor_fms(vendor, &family, &model, &stepping); /* Check if we are running on a Haswell host known to have broken TSX */ return !strcmp(vendor, CPUID_VENDOR_INTEL) && diff --git a/target/i386/kvm/meson.build b/target/i386/kvm/meson.build index 1d66559187..0a533411ca 100644 --- a/target/i386/kvm/meson.build +++ b/target/i386/kvm/meson.build @@ -1,3 +1,8 @@ i386_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) -i386_softmmu_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c')) + +i386_softmmu_ss.add(when: 'CONFIG_KVM', if_true: files( + 'kvm.c', + 'kvm-cpu.c', +)) + i386_softmmu_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'), if_false: files('hyperv-stub.c')) diff --git a/target/i386/machine.c b/target/i386/machine.c index 137604ddb8..f6f094f1c9 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -1396,7 +1396,7 @@ static const VMStateDescription vmstate_msr_tsx_ctrl = { } }; -VMStateDescription vmstate_x86_cpu = { +const VMStateDescription vmstate_x86_cpu = { .name = "cpu", .version_id = 12, .minimum_version_id = 11, diff --git a/target/i386/meson.build b/target/i386/meson.build index c4bf20b319..dac19ec00d 100644 --- a/target/i386/meson.build +++ b/target/i386/meson.build @@ -6,7 +6,11 @@ i386_ss.add(files( 'xsave_helper.c', 'cpu-dump.c', )) -i386_ss.add(when: 'CONFIG_SEV', if_true: files('sev.c'), if_false: files('sev-stub.c')) +i386_ss.add(when: 'CONFIG_SEV', if_true: files('host-cpu.c', 'sev.c'), if_false: files('sev-stub.c')) + +# x86 cpu type +i386_ss.add(when: 'CONFIG_KVM', if_true: files('host-cpu.c')) +i386_ss.add(when: 'CONFIG_HVF', if_true: files('host-cpu.c')) i386_softmmu_ss = ss.source_set() i386_softmmu_ss.add(files( @@ -14,13 +18,17 @@ i386_softmmu_ss.add(files( 'arch_memory_mapping.c', 'machine.c', 'monitor.c', + 'cpu-sysemu.c', )) +i386_user_ss = ss.source_set() subdir('kvm') subdir('hax') subdir('whpx') +subdir('nvmm') subdir('hvf') subdir('tcg') target_arch += {'i386': i386_ss} target_softmmu_arch += {'i386': i386_softmmu_ss} +target_user_arch += {'i386': i386_user_ss} diff 
--git a/target/i386/nvmm/meson.build b/target/i386/nvmm/meson.build new file mode 100644 index 0000000000..733e334083 --- /dev/null +++ b/target/i386/nvmm/meson.build @@ -0,0 +1,8 @@ +i386_softmmu_ss.add(when: 'CONFIG_NVMM', if_true: + files( + 'nvmm-all.c', + 'nvmm-accel-ops.c', + ) +) + +i386_softmmu_ss.add(when: 'CONFIG_NVMM', if_true: nvmm) diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c new file mode 100644 index 0000000000..f788f75289 --- /dev/null +++ b/target/i386/nvmm/nvmm-accel-ops.c @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2018-2019 Maxime Villard, All rights reserved. + * + * NetBSD Virtual Machine Monitor (NVMM) accelerator for QEMU. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "sysemu/kvm_int.h" +#include "qemu/main-loop.h" +#include "sysemu/cpus.h" +#include "qemu/guest-random.h" + +#include "sysemu/nvmm.h" +#include "nvmm-accel-ops.h" + +static void *qemu_nvmm_cpu_thread_fn(void *arg) +{ + CPUState *cpu = arg; + int r; + + assert(nvmm_enabled()); + + rcu_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + cpu->thread_id = qemu_get_thread_id(); + current_cpu = cpu; + + r = nvmm_init_vcpu(cpu); + if (r < 0) { + fprintf(stderr, "nvmm_init_vcpu failed: %s\n", strerror(-r)); + exit(1); + } + + /* signal CPU creation */ + cpu_thread_signal_created(cpu); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + do { + if (cpu_can_run(cpu)) { + r = nvmm_vcpu_exec(cpu); + if (r == EXCP_DEBUG) { + cpu_handle_guest_debug(cpu); + } + } + while (cpu_thread_is_idle(cpu)) { + qemu_cond_wait_iothread(cpu->halt_cond); + } + qemu_wait_io_event_common(cpu); + } while (!cpu->unplug || cpu_can_run(cpu)); + + nvmm_destroy_vcpu(cpu); + cpu_thread_signal_destroyed(cpu); + qemu_mutex_unlock_iothread(); + rcu_unregister_thread(); + return NULL; +} + +static void nvmm_start_vcpu_thread(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/NVMM", + cpu->cpu_index); + qemu_thread_create(cpu->thread, thread_name, qemu_nvmm_cpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); +} + +/* + * Abort the call to run the virtual processor by another thread, and to + * return the control to that thread. 
+ */ +static void nvmm_kick_vcpu_thread(CPUState *cpu) +{ + cpu->exit_request = 1; + cpus_kick_thread(cpu); +} + +static void nvmm_accel_ops_class_init(ObjectClass *oc, void *data) +{ + AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); + + ops->create_vcpu_thread = nvmm_start_vcpu_thread; + ops->kick_vcpu_thread = nvmm_kick_vcpu_thread; + + ops->synchronize_post_reset = nvmm_cpu_synchronize_post_reset; + ops->synchronize_post_init = nvmm_cpu_synchronize_post_init; + ops->synchronize_state = nvmm_cpu_synchronize_state; + ops->synchronize_pre_loadvm = nvmm_cpu_synchronize_pre_loadvm; +} + +static const TypeInfo nvmm_accel_ops_type = { + .name = ACCEL_OPS_NAME("nvmm"), + + .parent = TYPE_ACCEL_OPS, + .class_init = nvmm_accel_ops_class_init, + .abstract = true, +}; + +static void nvmm_accel_ops_register_types(void) +{ + type_register_static(&nvmm_accel_ops_type); +} +type_init(nvmm_accel_ops_register_types); diff --git a/target/i386/nvmm/nvmm-accel-ops.h b/target/i386/nvmm/nvmm-accel-ops.h new file mode 100644 index 0000000000..43e24adcaf --- /dev/null +++ b/target/i386/nvmm/nvmm-accel-ops.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018-2019 Maxime Villard, All rights reserved. + * + * NetBSD Virtual Machine Monitor (NVMM) accelerator for QEMU. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef NVMM_CPUS_H +#define NVMM_CPUS_H + +#include "sysemu/cpus.h" + +int nvmm_init_vcpu(CPUState *cpu); +int nvmm_vcpu_exec(CPUState *cpu); +void nvmm_destroy_vcpu(CPUState *cpu); + +void nvmm_cpu_synchronize_state(CPUState *cpu); +void nvmm_cpu_synchronize_post_reset(CPUState *cpu); +void nvmm_cpu_synchronize_post_init(CPUState *cpu); +void nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu); + +#endif /* NVMM_CPUS_H */ diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c new file mode 100644 index 0000000000..dfa690d65d --- /dev/null +++ b/target/i386/nvmm/nvmm-all.c @@ -0,0 +1,1226 @@ +/* + * Copyright (c) 2018-2019 Maxime Villard, All rights reserved. + * + * NetBSD Virtual Machine Monitor (NVMM) accelerator for QEMU. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/address-spaces.h" +#include "exec/ioport.h" +#include "qemu-common.h" +#include "qemu/accel.h" +#include "sysemu/nvmm.h" +#include "sysemu/cpus.h" +#include "sysemu/runstate.h" +#include "qemu/main-loop.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qemu/queue.h" +#include "migration/blocker.h" +#include "strings.h" + +#include "nvmm-accel-ops.h" + +#include <nvmm.h> + +struct qemu_vcpu { + struct nvmm_vcpu vcpu; + uint8_t tpr; + bool stop; + + /* Window-exiting for INTs/NMIs. */ + bool int_window_exit; + bool nmi_window_exit; + + /* The guest is in an interrupt shadow (POP SS, etc). 
*/ + bool int_shadow; +}; + +struct qemu_machine { + struct nvmm_capability cap; + struct nvmm_machine mach; +}; + +/* -------------------------------------------------------------------------- */ + +static bool nvmm_allowed; +static struct qemu_machine qemu_mach; + +static struct qemu_vcpu * +get_qemu_vcpu(CPUState *cpu) +{ + return (struct qemu_vcpu *)cpu->hax_vcpu; +} + +static struct nvmm_machine * +get_nvmm_mach(void) +{ + return &qemu_mach.mach; +} + +/* -------------------------------------------------------------------------- */ + +static void +nvmm_set_segment(struct nvmm_x64_state_seg *nseg, const SegmentCache *qseg) +{ + uint32_t attrib = qseg->flags; + + nseg->selector = qseg->selector; + nseg->limit = qseg->limit; + nseg->base = qseg->base; + nseg->attrib.type = __SHIFTOUT(attrib, DESC_TYPE_MASK); + nseg->attrib.s = __SHIFTOUT(attrib, DESC_S_MASK); + nseg->attrib.dpl = __SHIFTOUT(attrib, DESC_DPL_MASK); + nseg->attrib.p = __SHIFTOUT(attrib, DESC_P_MASK); + nseg->attrib.avl = __SHIFTOUT(attrib, DESC_AVL_MASK); + nseg->attrib.l = __SHIFTOUT(attrib, DESC_L_MASK); + nseg->attrib.def = __SHIFTOUT(attrib, DESC_B_MASK); + nseg->attrib.g = __SHIFTOUT(attrib, DESC_G_MASK); +} + +static void +nvmm_set_registers(CPUState *cpu) +{ + struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + struct nvmm_machine *mach = get_nvmm_mach(); + struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + struct nvmm_vcpu *vcpu = &qcpu->vcpu; + struct nvmm_x64_state *state = vcpu->state; + uint64_t bitmap; + size_t i; + int ret; + + assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); + + /* GPRs. */ + state->gprs[NVMM_X64_GPR_RAX] = env->regs[R_EAX]; + state->gprs[NVMM_X64_GPR_RCX] = env->regs[R_ECX]; + state->gprs[NVMM_X64_GPR_RDX] = env->regs[R_EDX]; + state->gprs[NVMM_X64_GPR_RBX] = env->regs[R_EBX]; + state->gprs[NVMM_X64_GPR_RSP] = env->regs[R_ESP]; + state->gprs[NVMM_X64_GPR_RBP] = env->regs[R_EBP]; + state->gprs[NVMM_X64_GPR_RSI] = env->regs[R_ESI]; + state->gprs[NVMM_X64_GPR_RDI] = env->regs[R_EDI]; +#ifdef TARGET_X86_64 + state->gprs[NVMM_X64_GPR_R8] = env->regs[R_R8]; + state->gprs[NVMM_X64_GPR_R9] = env->regs[R_R9]; + state->gprs[NVMM_X64_GPR_R10] = env->regs[R_R10]; + state->gprs[NVMM_X64_GPR_R11] = env->regs[R_R11]; + state->gprs[NVMM_X64_GPR_R12] = env->regs[R_R12]; + state->gprs[NVMM_X64_GPR_R13] = env->regs[R_R13]; + state->gprs[NVMM_X64_GPR_R14] = env->regs[R_R14]; + state->gprs[NVMM_X64_GPR_R15] = env->regs[R_R15]; +#endif + + /* RIP and RFLAGS. */ + state->gprs[NVMM_X64_GPR_RIP] = env->eip; + state->gprs[NVMM_X64_GPR_RFLAGS] = env->eflags; + + /* Segments. */ + nvmm_set_segment(&state->segs[NVMM_X64_SEG_CS], &env->segs[R_CS]); + nvmm_set_segment(&state->segs[NVMM_X64_SEG_DS], &env->segs[R_DS]); + nvmm_set_segment(&state->segs[NVMM_X64_SEG_ES], &env->segs[R_ES]); + nvmm_set_segment(&state->segs[NVMM_X64_SEG_FS], &env->segs[R_FS]); + nvmm_set_segment(&state->segs[NVMM_X64_SEG_GS], &env->segs[R_GS]); + nvmm_set_segment(&state->segs[NVMM_X64_SEG_SS], &env->segs[R_SS]); + + /* Special segments. */ + nvmm_set_segment(&state->segs[NVMM_X64_SEG_GDT], &env->gdt); + nvmm_set_segment(&state->segs[NVMM_X64_SEG_LDT], &env->ldt); + nvmm_set_segment(&state->segs[NVMM_X64_SEG_TR], &env->tr); + nvmm_set_segment(&state->segs[NVMM_X64_SEG_IDT], &env->idt); + + /* Control registers. 
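[Note: the __SHIFTOUT()/__SHIFTIN() macros used by nvmm_set_segment()/nvmm_get_segment() above come from NetBSD headers, not from QEMU. As far as I can tell they extract or insert a field relative to the lowest set bit of a mask, roughly equivalent to this sketch (macro names and mask value invented for illustration):

    #define LOWEST_SET_BIT(m)  ((m) & -(m))
    #define SHIFTOUT(x, mask)  (((x) & (mask)) / LOWEST_SET_BIT(mask))  /* field extract */
    #define SHIFTIN(v, mask)   ((v) * LOWEST_SET_BIT(mask))             /* field insert  */

    /* e.g. SHIFTOUT(0x0000A000, 0x0000F000) == 0xA,
            SHIFTIN(0xA, 0x0000F000)         == 0x0000A000 */
]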
*/ + state->crs[NVMM_X64_CR_CR0] = env->cr[0]; + state->crs[NVMM_X64_CR_CR2] = env->cr[2]; + state->crs[NVMM_X64_CR_CR3] = env->cr[3]; + state->crs[NVMM_X64_CR_CR4] = env->cr[4]; + state->crs[NVMM_X64_CR_CR8] = qcpu->tpr; + state->crs[NVMM_X64_CR_XCR0] = env->xcr0; + + /* Debug registers. */ + state->drs[NVMM_X64_DR_DR0] = env->dr[0]; + state->drs[NVMM_X64_DR_DR1] = env->dr[1]; + state->drs[NVMM_X64_DR_DR2] = env->dr[2]; + state->drs[NVMM_X64_DR_DR3] = env->dr[3]; + state->drs[NVMM_X64_DR_DR6] = env->dr[6]; + state->drs[NVMM_X64_DR_DR7] = env->dr[7]; + + /* FPU. */ + state->fpu.fx_cw = env->fpuc; + state->fpu.fx_sw = (env->fpus & ~0x3800) | ((env->fpstt & 0x7) << 11); + state->fpu.fx_tw = 0; + for (i = 0; i < 8; i++) { + state->fpu.fx_tw |= (!env->fptags[i]) << i; + } + state->fpu.fx_opcode = env->fpop; + state->fpu.fx_ip.fa_64 = env->fpip; + state->fpu.fx_dp.fa_64 = env->fpdp; + state->fpu.fx_mxcsr = env->mxcsr; + state->fpu.fx_mxcsr_mask = 0x0000FFFF; + assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs)); + memcpy(state->fpu.fx_87_ac, env->fpregs, sizeof(env->fpregs)); + for (i = 0; i < CPU_NB_REGS; i++) { + memcpy(&state->fpu.fx_xmm[i].xmm_bytes[0], + &env->xmm_regs[i].ZMM_Q(0), 8); + memcpy(&state->fpu.fx_xmm[i].xmm_bytes[8], + &env->xmm_regs[i].ZMM_Q(1), 8); + } + + /* MSRs. */ + state->msrs[NVMM_X64_MSR_EFER] = env->efer; + state->msrs[NVMM_X64_MSR_STAR] = env->star; +#ifdef TARGET_X86_64 + state->msrs[NVMM_X64_MSR_LSTAR] = env->lstar; + state->msrs[NVMM_X64_MSR_CSTAR] = env->cstar; + state->msrs[NVMM_X64_MSR_SFMASK] = env->fmask; + state->msrs[NVMM_X64_MSR_KERNELGSBASE] = env->kernelgsbase; +#endif + state->msrs[NVMM_X64_MSR_SYSENTER_CS] = env->sysenter_cs; + state->msrs[NVMM_X64_MSR_SYSENTER_ESP] = env->sysenter_esp; + state->msrs[NVMM_X64_MSR_SYSENTER_EIP] = env->sysenter_eip; + state->msrs[NVMM_X64_MSR_PAT] = env->pat; + state->msrs[NVMM_X64_MSR_TSC] = env->tsc; + + bitmap = + NVMM_X64_STATE_SEGS | + NVMM_X64_STATE_GPRS | + NVMM_X64_STATE_CRS | + NVMM_X64_STATE_DRS | + NVMM_X64_STATE_MSRS | + NVMM_X64_STATE_FPU; + + ret = nvmm_vcpu_setstate(mach, vcpu, bitmap); + if (ret == -1) { + error_report("NVMM: Failed to set virtual processor context," + " error=%d", errno); + } +} + +static void +nvmm_get_segment(SegmentCache *qseg, const struct nvmm_x64_state_seg *nseg) +{ + qseg->selector = nseg->selector; + qseg->limit = nseg->limit; + qseg->base = nseg->base; + + qseg->flags = + __SHIFTIN((uint32_t)nseg->attrib.type, DESC_TYPE_MASK) | + __SHIFTIN((uint32_t)nseg->attrib.s, DESC_S_MASK) | + __SHIFTIN((uint32_t)nseg->attrib.dpl, DESC_DPL_MASK) | + __SHIFTIN((uint32_t)nseg->attrib.p, DESC_P_MASK) | + __SHIFTIN((uint32_t)nseg->attrib.avl, DESC_AVL_MASK) | + __SHIFTIN((uint32_t)nseg->attrib.l, DESC_L_MASK) | + __SHIFTIN((uint32_t)nseg->attrib.def, DESC_B_MASK) | + __SHIFTIN((uint32_t)nseg->attrib.g, DESC_G_MASK); +} + +static void +nvmm_get_registers(CPUState *cpu) +{ + struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + struct nvmm_machine *mach = get_nvmm_mach(); + struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + struct nvmm_vcpu *vcpu = &qcpu->vcpu; + X86CPU *x86_cpu = X86_CPU(cpu); + struct nvmm_x64_state *state = vcpu->state; + uint64_t bitmap, tpr; + size_t i; + int ret; + + assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); + + bitmap = + NVMM_X64_STATE_SEGS | + NVMM_X64_STATE_GPRS | + NVMM_X64_STATE_CRS | + NVMM_X64_STATE_DRS | + NVMM_X64_STATE_MSRS | + NVMM_X64_STATE_FPU; + + ret = nvmm_vcpu_getstate(mach, vcpu, bitmap); + if (ret == -1) { + error_report("NVMM: Failed 
to get virtual processor context," + " error=%d", errno); + } + + /* GPRs. */ + env->regs[R_EAX] = state->gprs[NVMM_X64_GPR_RAX]; + env->regs[R_ECX] = state->gprs[NVMM_X64_GPR_RCX]; + env->regs[R_EDX] = state->gprs[NVMM_X64_GPR_RDX]; + env->regs[R_EBX] = state->gprs[NVMM_X64_GPR_RBX]; + env->regs[R_ESP] = state->gprs[NVMM_X64_GPR_RSP]; + env->regs[R_EBP] = state->gprs[NVMM_X64_GPR_RBP]; + env->regs[R_ESI] = state->gprs[NVMM_X64_GPR_RSI]; + env->regs[R_EDI] = state->gprs[NVMM_X64_GPR_RDI]; +#ifdef TARGET_X86_64 + env->regs[R_R8] = state->gprs[NVMM_X64_GPR_R8]; + env->regs[R_R9] = state->gprs[NVMM_X64_GPR_R9]; + env->regs[R_R10] = state->gprs[NVMM_X64_GPR_R10]; + env->regs[R_R11] = state->gprs[NVMM_X64_GPR_R11]; + env->regs[R_R12] = state->gprs[NVMM_X64_GPR_R12]; + env->regs[R_R13] = state->gprs[NVMM_X64_GPR_R13]; + env->regs[R_R14] = state->gprs[NVMM_X64_GPR_R14]; + env->regs[R_R15] = state->gprs[NVMM_X64_GPR_R15]; +#endif + + /* RIP and RFLAGS. */ + env->eip = state->gprs[NVMM_X64_GPR_RIP]; + env->eflags = state->gprs[NVMM_X64_GPR_RFLAGS]; + + /* Segments. */ + nvmm_get_segment(&env->segs[R_ES], &state->segs[NVMM_X64_SEG_ES]); + nvmm_get_segment(&env->segs[R_CS], &state->segs[NVMM_X64_SEG_CS]); + nvmm_get_segment(&env->segs[R_SS], &state->segs[NVMM_X64_SEG_SS]); + nvmm_get_segment(&env->segs[R_DS], &state->segs[NVMM_X64_SEG_DS]); + nvmm_get_segment(&env->segs[R_FS], &state->segs[NVMM_X64_SEG_FS]); + nvmm_get_segment(&env->segs[R_GS], &state->segs[NVMM_X64_SEG_GS]); + + /* Special segments. */ + nvmm_get_segment(&env->gdt, &state->segs[NVMM_X64_SEG_GDT]); + nvmm_get_segment(&env->ldt, &state->segs[NVMM_X64_SEG_LDT]); + nvmm_get_segment(&env->tr, &state->segs[NVMM_X64_SEG_TR]); + nvmm_get_segment(&env->idt, &state->segs[NVMM_X64_SEG_IDT]); + + /* Control registers. */ + env->cr[0] = state->crs[NVMM_X64_CR_CR0]; + env->cr[2] = state->crs[NVMM_X64_CR_CR2]; + env->cr[3] = state->crs[NVMM_X64_CR_CR3]; + env->cr[4] = state->crs[NVMM_X64_CR_CR4]; + tpr = state->crs[NVMM_X64_CR_CR8]; + if (tpr != qcpu->tpr) { + qcpu->tpr = tpr; + cpu_set_apic_tpr(x86_cpu->apic_state, tpr); + } + env->xcr0 = state->crs[NVMM_X64_CR_XCR0]; + + /* Debug registers. */ + env->dr[0] = state->drs[NVMM_X64_DR_DR0]; + env->dr[1] = state->drs[NVMM_X64_DR_DR1]; + env->dr[2] = state->drs[NVMM_X64_DR_DR2]; + env->dr[3] = state->drs[NVMM_X64_DR_DR3]; + env->dr[6] = state->drs[NVMM_X64_DR_DR6]; + env->dr[7] = state->drs[NVMM_X64_DR_DR7]; + + /* FPU. */ + env->fpuc = state->fpu.fx_cw; + env->fpstt = (state->fpu.fx_sw >> 11) & 0x7; + env->fpus = state->fpu.fx_sw & ~0x3800; + for (i = 0; i < 8; i++) { + env->fptags[i] = !((state->fpu.fx_tw >> i) & 1); + } + env->fpop = state->fpu.fx_opcode; + env->fpip = state->fpu.fx_ip.fa_64; + env->fpdp = state->fpu.fx_dp.fa_64; + env->mxcsr = state->fpu.fx_mxcsr; + assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs)); + memcpy(env->fpregs, state->fpu.fx_87_ac, sizeof(env->fpregs)); + for (i = 0; i < CPU_NB_REGS; i++) { + memcpy(&env->xmm_regs[i].ZMM_Q(0), + &state->fpu.fx_xmm[i].xmm_bytes[0], 8); + memcpy(&env->xmm_regs[i].ZMM_Q(1), + &state->fpu.fx_xmm[i].xmm_bytes[8], 8); + } + + /* MSRs. 
*/ + env->efer = state->msrs[NVMM_X64_MSR_EFER]; + env->star = state->msrs[NVMM_X64_MSR_STAR]; +#ifdef TARGET_X86_64 + env->lstar = state->msrs[NVMM_X64_MSR_LSTAR]; + env->cstar = state->msrs[NVMM_X64_MSR_CSTAR]; + env->fmask = state->msrs[NVMM_X64_MSR_SFMASK]; + env->kernelgsbase = state->msrs[NVMM_X64_MSR_KERNELGSBASE]; +#endif + env->sysenter_cs = state->msrs[NVMM_X64_MSR_SYSENTER_CS]; + env->sysenter_esp = state->msrs[NVMM_X64_MSR_SYSENTER_ESP]; + env->sysenter_eip = state->msrs[NVMM_X64_MSR_SYSENTER_EIP]; + env->pat = state->msrs[NVMM_X64_MSR_PAT]; + env->tsc = state->msrs[NVMM_X64_MSR_TSC]; + + x86_update_hflags(env); +} + +static bool +nvmm_can_take_int(CPUState *cpu) +{ + struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + struct nvmm_vcpu *vcpu = &qcpu->vcpu; + struct nvmm_machine *mach = get_nvmm_mach(); + + if (qcpu->int_window_exit) { + return false; + } + + if (qcpu->int_shadow || !(env->eflags & IF_MASK)) { + struct nvmm_x64_state *state = vcpu->state; + + /* Exit on interrupt window. */ + nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_INTR); + state->intr.int_window_exiting = 1; + nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_INTR); + + return false; + } + + return true; +} + +static bool +nvmm_can_take_nmi(CPUState *cpu) +{ + struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + + /* + * Contrary to INTs, NMIs always schedule an exit when they are + * completed. Therefore, if window-exiting is enabled, it means + * NMIs are blocked. + */ + if (qcpu->nmi_window_exit) { + return false; + } + + return true; +} + +/* + * Called before the VCPU is run. We inject events generated by the I/O + * thread, and synchronize the guest TPR. + */ +static void +nvmm_vcpu_pre_run(CPUState *cpu) +{ + struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + struct nvmm_machine *mach = get_nvmm_mach(); + struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + struct nvmm_vcpu *vcpu = &qcpu->vcpu; + X86CPU *x86_cpu = X86_CPU(cpu); + struct nvmm_x64_state *state = vcpu->state; + struct nvmm_vcpu_event *event = vcpu->event; + bool has_event = false; + bool sync_tpr = false; + uint8_t tpr; + int ret; + + qemu_mutex_lock_iothread(); + + tpr = cpu_get_apic_tpr(x86_cpu->apic_state); + if (tpr != qcpu->tpr) { + qcpu->tpr = tpr; + sync_tpr = true; + } + + /* + * Force the VCPU out of its inner loop to process any INIT requests + * or commit pending TPR access. + */ + if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { + cpu->exit_request = 1; + } + + if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) { + if (nvmm_can_take_nmi(cpu)) { + cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; + event->type = NVMM_VCPU_EVENT_INTR; + event->vector = 2; + has_event = true; + } + } + + if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) { + if (nvmm_can_take_int(cpu)) { + cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; + event->type = NVMM_VCPU_EVENT_INTR; + event->vector = cpu_get_pic_interrupt(env); + has_event = true; + } + } + + /* Don't want SMIs. 
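[Note: tying nvmm_can_take_int() above to the NVMM_VCPU_EXIT_INT_READY handling further down, the retry sequence looks roughly like this; a simplified sketch of my reading of the code, not part of the patch:

    /*
     * pre_run:  IF clear or interrupt shadow active
     *             -> leave CPU_INTERRUPT_HARD pending and set
     *                state->intr.int_window_exiting = 1
     * run:      guest executes until it could accept the interrupt
     * exit:     NVMM_VCPU_EXIT_INT_READY -> inner loop continues
     * pre_run:  nvmm_can_take_int() now succeeds -> the event is injected
     */
]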
*/ + if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { + cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; + } + + if (sync_tpr) { + ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_CRS); + if (ret == -1) { + error_report("NVMM: Failed to get CPU state," + " error=%d", errno); + } + + state->crs[NVMM_X64_CR_CR8] = qcpu->tpr; + + ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_CRS); + if (ret == -1) { + error_report("NVMM: Failed to set CPU state," + " error=%d", errno); + } + } + + if (has_event) { + ret = nvmm_vcpu_inject(mach, vcpu); + if (ret == -1) { + error_report("NVMM: Failed to inject event," + " error=%d", errno); + } + } + + qemu_mutex_unlock_iothread(); +} + +/* + * Called after the VCPU ran. We synchronize the host view of the TPR and + * RFLAGS. + */ +static void +nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit) +{ + struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + X86CPU *x86_cpu = X86_CPU(cpu); + uint64_t tpr; + + env->eflags = exit->exitstate.rflags; + qcpu->int_shadow = exit->exitstate.int_shadow; + qcpu->int_window_exit = exit->exitstate.int_window_exiting; + qcpu->nmi_window_exit = exit->exitstate.nmi_window_exiting; + + tpr = exit->exitstate.cr8; + if (qcpu->tpr != tpr) { + qcpu->tpr = tpr; + qemu_mutex_lock_iothread(); + cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr); + qemu_mutex_unlock_iothread(); + } +} + +/* -------------------------------------------------------------------------- */ + +static void +nvmm_io_callback(struct nvmm_io *io) +{ + MemTxAttrs attrs = { 0 }; + int ret; + + ret = address_space_rw(&address_space_io, io->port, attrs, io->data, + io->size, !io->in); + if (ret != MEMTX_OK) { + error_report("NVMM: I/O Transaction Failed " + "[%s, port=%u, size=%zu]", (io->in ? "in" : "out"), + io->port, io->size); + } + + /* Needed, otherwise infinite loop. */ + current_cpu->vcpu_dirty = false; +} + +static void +nvmm_mem_callback(struct nvmm_mem *mem) +{ + cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write); + + /* Needed, otherwise infinite loop. */ + current_cpu->vcpu_dirty = false; +} + +static struct nvmm_assist_callbacks nvmm_callbacks = { + .io = nvmm_io_callback, + .mem = nvmm_mem_callback +}; + +/* -------------------------------------------------------------------------- */ + +static int +nvmm_handle_mem(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu) +{ + int ret; + + ret = nvmm_assist_mem(mach, vcpu); + if (ret == -1) { + error_report("NVMM: Mem Assist Failed [gpa=%p]", + (void *)vcpu->exit->u.mem.gpa); + } + + return ret; +} + +static int +nvmm_handle_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu) +{ + int ret; + + ret = nvmm_assist_io(mach, vcpu); + if (ret == -1) { + error_report("NVMM: I/O Assist Failed [port=%d]", + (int)vcpu->exit->u.io.port); + } + + return ret; +} + +static int +nvmm_handle_rdmsr(struct nvmm_machine *mach, CPUState *cpu, + struct nvmm_vcpu_exit *exit) +{ + struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + struct nvmm_vcpu *vcpu = &qcpu->vcpu; + X86CPU *x86_cpu = X86_CPU(cpu); + struct nvmm_x64_state *state = vcpu->state; + uint64_t val; + int ret; + + switch (exit->u.rdmsr.msr) { + case MSR_IA32_APICBASE: + val = cpu_get_apic_base(x86_cpu->apic_state); + break; + case MSR_MTRRcap: + case MSR_MTRRdefType: + case MSR_MCG_CAP: + case MSR_MCG_STATUS: + val = 0; + break; + default: /* More MSRs to add? 
*/ + val = 0; + error_report("NVMM: Unexpected RDMSR 0x%x, ignored", + exit->u.rdmsr.msr); + break; + } + + ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_GPRS); + if (ret == -1) { + return -1; + } + + state->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF); + state->gprs[NVMM_X64_GPR_RDX] = (val >> 32); + state->gprs[NVMM_X64_GPR_RIP] = exit->u.rdmsr.npc; + + ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS); + if (ret == -1) { + return -1; + } + + return 0; +} + +static int +nvmm_handle_wrmsr(struct nvmm_machine *mach, CPUState *cpu, + struct nvmm_vcpu_exit *exit) +{ + struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + struct nvmm_vcpu *vcpu = &qcpu->vcpu; + X86CPU *x86_cpu = X86_CPU(cpu); + struct nvmm_x64_state *state = vcpu->state; + uint64_t val; + int ret; + + val = exit->u.wrmsr.val; + + switch (exit->u.wrmsr.msr) { + case MSR_IA32_APICBASE: + cpu_set_apic_base(x86_cpu->apic_state, val); + break; + case MSR_MTRRdefType: + case MSR_MCG_STATUS: + break; + default: /* More MSRs to add? */ + error_report("NVMM: Unexpected WRMSR 0x%x [val=0x%lx], ignored", + exit->u.wrmsr.msr, val); + break; + } + + ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_GPRS); + if (ret == -1) { + return -1; + } + + state->gprs[NVMM_X64_GPR_RIP] = exit->u.wrmsr.npc; + + ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS); + if (ret == -1) { + return -1; + } + + return 0; +} + +static int +nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu, + struct nvmm_vcpu_exit *exit) +{ + struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + int ret = 0; + + qemu_mutex_lock_iothread(); + + if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && + (env->eflags & IF_MASK)) && + !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) { + cpu->exception_index = EXCP_HLT; + cpu->halted = true; + ret = 1; + } + + qemu_mutex_unlock_iothread(); + + return ret; +} + +static int +nvmm_inject_ud(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu) +{ + struct nvmm_vcpu_event *event = vcpu->event; + + event->type = NVMM_VCPU_EVENT_EXCP; + event->vector = 6; + event->u.excp.error = 0; + + return nvmm_vcpu_inject(mach, vcpu); +} + +static int +nvmm_vcpu_loop(CPUState *cpu) +{ + struct CPUX86State *env = (CPUArchState *)cpu->env_ptr; + struct nvmm_machine *mach = get_nvmm_mach(); + struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + struct nvmm_vcpu *vcpu = &qcpu->vcpu; + X86CPU *x86_cpu = X86_CPU(cpu); + struct nvmm_vcpu_exit *exit = vcpu->exit; + int ret; + + /* + * Some asynchronous events must be handled outside of the inner + * VCPU loop. They are handled here. 
+ */ + if (cpu->interrupt_request & CPU_INTERRUPT_INIT) { + nvmm_cpu_synchronize_state(cpu); + do_cpu_init(x86_cpu); + /* set int/nmi windows back to the reset state */ + } + if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { + cpu->interrupt_request &= ~CPU_INTERRUPT_POLL; + apic_poll_irq(x86_cpu->apic_state); + } + if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) && + (env->eflags & IF_MASK)) || + (cpu->interrupt_request & CPU_INTERRUPT_NMI)) { + cpu->halted = false; + } + if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) { + nvmm_cpu_synchronize_state(cpu); + do_cpu_sipi(x86_cpu); + } + if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { + cpu->interrupt_request &= ~CPU_INTERRUPT_TPR; + nvmm_cpu_synchronize_state(cpu); + apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip, + env->tpr_access_type); + } + + if (cpu->halted) { + cpu->exception_index = EXCP_HLT; + qatomic_set(&cpu->exit_request, false); + return 0; + } + + qemu_mutex_unlock_iothread(); + cpu_exec_start(cpu); + + /* + * Inner VCPU loop. + */ + do { + if (cpu->vcpu_dirty) { + nvmm_set_registers(cpu); + cpu->vcpu_dirty = false; + } + + if (qcpu->stop) { + cpu->exception_index = EXCP_INTERRUPT; + qcpu->stop = false; + ret = 1; + break; + } + + nvmm_vcpu_pre_run(cpu); + + if (qatomic_read(&cpu->exit_request)) { + nvmm_vcpu_stop(vcpu); + } + + /* Read exit_request before the kernel reads the immediate exit flag */ + smp_rmb(); + ret = nvmm_vcpu_run(mach, vcpu); + if (ret == -1) { + error_report("NVMM: Failed to exec a virtual processor," + " error=%d", errno); + break; + } + + nvmm_vcpu_post_run(cpu, exit); + + switch (exit->reason) { + case NVMM_VCPU_EXIT_NONE: + break; + case NVMM_VCPU_EXIT_STOPPED: + /* + * The kernel cleared the immediate exit flag; cpu->exit_request + * must be cleared after + */ + smp_wmb(); + qcpu->stop = true; + break; + case NVMM_VCPU_EXIT_MEMORY: + ret = nvmm_handle_mem(mach, vcpu); + break; + case NVMM_VCPU_EXIT_IO: + ret = nvmm_handle_io(mach, vcpu); + break; + case NVMM_VCPU_EXIT_INT_READY: + case NVMM_VCPU_EXIT_NMI_READY: + case NVMM_VCPU_EXIT_TPR_CHANGED: + break; + case NVMM_VCPU_EXIT_HALTED: + ret = nvmm_handle_halted(mach, cpu, exit); + break; + case NVMM_VCPU_EXIT_SHUTDOWN: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + cpu->exception_index = EXCP_INTERRUPT; + ret = 1; + break; + case NVMM_VCPU_EXIT_RDMSR: + ret = nvmm_handle_rdmsr(mach, cpu, exit); + break; + case NVMM_VCPU_EXIT_WRMSR: + ret = nvmm_handle_wrmsr(mach, cpu, exit); + break; + case NVMM_VCPU_EXIT_MONITOR: + case NVMM_VCPU_EXIT_MWAIT: + ret = nvmm_inject_ud(mach, vcpu); + break; + default: + error_report("NVMM: Unexpected VM exit code 0x%lx [hw=0x%lx]", + exit->reason, exit->u.inv.hwcode); + nvmm_get_registers(cpu); + qemu_mutex_lock_iothread(); + qemu_system_guest_panicked(cpu_get_crash_info(cpu)); + qemu_mutex_unlock_iothread(); + ret = -1; + break; + } + } while (ret == 0); + + cpu_exec_end(cpu); + qemu_mutex_lock_iothread(); + + qatomic_set(&cpu->exit_request, false); + + return ret < 0; +} + +/* -------------------------------------------------------------------------- */ + +static void +do_nvmm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) +{ + nvmm_get_registers(cpu); + cpu->vcpu_dirty = true; +} + +static void +do_nvmm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) +{ + nvmm_set_registers(cpu); + cpu->vcpu_dirty = false; +} + +static void +do_nvmm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg) +{ + nvmm_set_registers(cpu); + cpu->vcpu_dirty = false; +} + +static 
void +do_nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg) +{ + cpu->vcpu_dirty = true; +} + +void nvmm_cpu_synchronize_state(CPUState *cpu) +{ + if (!cpu->vcpu_dirty) { + run_on_cpu(cpu, do_nvmm_cpu_synchronize_state, RUN_ON_CPU_NULL); + } +} + +void nvmm_cpu_synchronize_post_reset(CPUState *cpu) +{ + run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL); +} + +void nvmm_cpu_synchronize_post_init(CPUState *cpu) +{ + run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_init, RUN_ON_CPU_NULL); +} + +void nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu) +{ + run_on_cpu(cpu, do_nvmm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL); +} + +/* -------------------------------------------------------------------------- */ + +static Error *nvmm_migration_blocker; + +/* + * The nvmm_vcpu_stop() mechanism breaks races between entering the VMM + * and another thread signaling the vCPU thread to exit. + */ + +static void +nvmm_ipi_signal(int sigcpu) +{ + if (current_cpu) { + struct qemu_vcpu *qcpu = get_qemu_vcpu(current_cpu); + struct nvmm_vcpu *vcpu = &qcpu->vcpu; + nvmm_vcpu_stop(vcpu); + } +} + +static void +nvmm_init_cpu_signals(void) +{ + struct sigaction sigact; + sigset_t set; + + /* Install the IPI handler. */ + memset(&sigact, 0, sizeof(sigact)); + sigact.sa_handler = nvmm_ipi_signal; + sigaction(SIG_IPI, &sigact, NULL); + + /* Allow IPIs on the current thread. */ + sigprocmask(SIG_BLOCK, NULL, &set); + sigdelset(&set, SIG_IPI); + pthread_sigmask(SIG_SETMASK, &set, NULL); +} + +int +nvmm_init_vcpu(CPUState *cpu) +{ + struct nvmm_machine *mach = get_nvmm_mach(); + struct nvmm_vcpu_conf_cpuid cpuid; + struct nvmm_vcpu_conf_tpr tpr; + Error *local_error = NULL; + struct qemu_vcpu *qcpu; + int ret, err; + + nvmm_init_cpu_signals(); + + if (nvmm_migration_blocker == NULL) { + error_setg(&nvmm_migration_blocker, + "NVMM: Migration not supported"); + + (void)migrate_add_blocker(nvmm_migration_blocker, &local_error); + if (local_error) { + error_report_err(local_error); + migrate_del_blocker(nvmm_migration_blocker); + error_free(nvmm_migration_blocker); + return -EINVAL; + } + } + + qcpu = g_malloc0(sizeof(*qcpu)); + if (qcpu == NULL) { + error_report("NVMM: Failed to allocate VCPU context."); + return -ENOMEM; + } + + ret = nvmm_vcpu_create(mach, cpu->cpu_index, &qcpu->vcpu); + if (ret == -1) { + err = errno; + error_report("NVMM: Failed to create a virtual processor," + " error=%d", err); + g_free(qcpu); + return -err; + } + + memset(&cpuid, 0, sizeof(cpuid)); + cpuid.mask = 1; + cpuid.leaf = 0x00000001; + cpuid.u.mask.set.edx = CPUID_MCE | CPUID_MCA | CPUID_MTRR; + ret = nvmm_vcpu_configure(mach, &qcpu->vcpu, NVMM_VCPU_CONF_CPUID, + &cpuid); + if (ret == -1) { + err = errno; + error_report("NVMM: Failed to configure a virtual processor," + " error=%d", err); + g_free(qcpu); + return -err; + } + + ret = nvmm_vcpu_configure(mach, &qcpu->vcpu, NVMM_VCPU_CONF_CALLBACKS, + &nvmm_callbacks); + if (ret == -1) { + err = errno; + error_report("NVMM: Failed to configure a virtual processor," + " error=%d", err); + g_free(qcpu); + return -err; + } + + if (qemu_mach.cap.arch.vcpu_conf_support & NVMM_CAP_ARCH_VCPU_CONF_TPR) { + memset(&tpr, 0, sizeof(tpr)); + tpr.exit_changed = 1; + ret = nvmm_vcpu_configure(mach, &qcpu->vcpu, NVMM_VCPU_CONF_TPR, &tpr); + if (ret == -1) { + err = errno; + error_report("NVMM: Failed to configure a virtual processor," + " error=%d", err); + g_free(qcpu); + return -err; + } + } + + cpu->vcpu_dirty = true; + cpu->hax_vcpu = (struct hax_vcpu_state *)qcpu; + + 
return 0; +} + +int +nvmm_vcpu_exec(CPUState *cpu) +{ + int ret, fatal; + + while (1) { + if (cpu->exception_index >= EXCP_INTERRUPT) { + ret = cpu->exception_index; + cpu->exception_index = -1; + break; + } + + fatal = nvmm_vcpu_loop(cpu); + + if (fatal) { + error_report("NVMM: Failed to execute a VCPU."); + abort(); + } + } + + return ret; +} + +void +nvmm_destroy_vcpu(CPUState *cpu) +{ + struct nvmm_machine *mach = get_nvmm_mach(); + struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + + nvmm_vcpu_destroy(mach, &qcpu->vcpu); + g_free(cpu->hax_vcpu); +} + +/* -------------------------------------------------------------------------- */ + +static void +nvmm_update_mapping(hwaddr start_pa, ram_addr_t size, uintptr_t hva, + bool add, bool rom, const char *name) +{ + struct nvmm_machine *mach = get_nvmm_mach(); + int ret, prot; + + if (add) { + prot = PROT_READ | PROT_EXEC; + if (!rom) { + prot |= PROT_WRITE; + } + ret = nvmm_gpa_map(mach, hva, start_pa, size, prot); + } else { + ret = nvmm_gpa_unmap(mach, hva, start_pa, size); + } + + if (ret == -1) { + error_report("NVMM: Failed to %s GPA range '%s' PA:%p, " + "Size:%p bytes, HostVA:%p, error=%d", + (add ? "map" : "unmap"), name, (void *)(uintptr_t)start_pa, + (void *)size, (void *)hva, errno); + } +} + +static void +nvmm_process_section(MemoryRegionSection *section, int add) +{ + MemoryRegion *mr = section->mr; + hwaddr start_pa = section->offset_within_address_space; + ram_addr_t size = int128_get64(section->size); + unsigned int delta; + uintptr_t hva; + + if (!memory_region_is_ram(mr)) { + return; + } + + /* Adjust start_pa and size so that they are page-aligned. */ + delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask); + delta &= ~qemu_real_host_page_mask; + if (delta > size) { + return; + } + start_pa += delta; + size -= delta; + size &= qemu_real_host_page_mask; + if (!size || (start_pa & ~qemu_real_host_page_mask)) { + return; + } + + hva = (uintptr_t)memory_region_get_ram_ptr(mr) + + section->offset_within_region + delta; + + nvmm_update_mapping(start_pa, size, hva, add, + memory_region_is_rom(mr), mr->name); +} + +static void +nvmm_region_add(MemoryListener *listener, MemoryRegionSection *section) +{ + memory_region_ref(section->mr); + nvmm_process_section(section, 1); +} + +static void +nvmm_region_del(MemoryListener *listener, MemoryRegionSection *section) +{ + nvmm_process_section(section, 0); + memory_region_unref(section->mr); +} + +static void +nvmm_transaction_begin(MemoryListener *listener) +{ + /* nothing */ +} + +static void +nvmm_transaction_commit(MemoryListener *listener) +{ + /* nothing */ +} + +static void +nvmm_log_sync(MemoryListener *listener, MemoryRegionSection *section) +{ + MemoryRegion *mr = section->mr; + + if (!memory_region_is_ram(mr)) { + return; + } + + memory_region_set_dirty(mr, 0, int128_get64(section->size)); +} + +static MemoryListener nvmm_memory_listener = { + .begin = nvmm_transaction_begin, + .commit = nvmm_transaction_commit, + .region_add = nvmm_region_add, + .region_del = nvmm_region_del, + .log_sync = nvmm_log_sync, + .priority = 10, +}; + +static void +nvmm_ram_block_added(RAMBlockNotifier *n, void *host, size_t size) +{ + struct nvmm_machine *mach = get_nvmm_mach(); + uintptr_t hva = (uintptr_t)host; + int ret; + + ret = nvmm_hva_map(mach, hva, size); + + if (ret == -1) { + error_report("NVMM: Failed to map HVA, HostVA:%p " + "Size:%p bytes, error=%d", + (void *)hva, (void *)size, errno); + } +} + +static struct RAMBlockNotifier nvmm_ram_notifier = { + .ram_block_added = 
nvmm_ram_block_added +}; + +/* -------------------------------------------------------------------------- */ + +static int +nvmm_accel_init(MachineState *ms) +{ + int ret, err; + + ret = nvmm_init(); + if (ret == -1) { + err = errno; + error_report("NVMM: Initialization failed, error=%d", errno); + return -err; + } + + ret = nvmm_capability(&qemu_mach.cap); + if (ret == -1) { + err = errno; + error_report("NVMM: Unable to fetch capability, error=%d", errno); + return -err; + } + if (qemu_mach.cap.version < NVMM_KERN_VERSION) { + error_report("NVMM: Unsupported version %u", qemu_mach.cap.version); + return -EPROGMISMATCH; + } + if (qemu_mach.cap.state_size != sizeof(struct nvmm_x64_state)) { + error_report("NVMM: Wrong state size %u", qemu_mach.cap.state_size); + return -EPROGMISMATCH; + } + + ret = nvmm_machine_create(&qemu_mach.mach); + if (ret == -1) { + err = errno; + error_report("NVMM: Machine creation failed, error=%d", errno); + return -err; + } + + memory_listener_register(&nvmm_memory_listener, &address_space_memory); + ram_block_notifier_add(&nvmm_ram_notifier); + + printf("NetBSD Virtual Machine Monitor accelerator is operational\n"); + return 0; +} + +int +nvmm_enabled(void) +{ + return nvmm_allowed; +} + +static void +nvmm_accel_class_init(ObjectClass *oc, void *data) +{ + AccelClass *ac = ACCEL_CLASS(oc); + ac->name = "NVMM"; + ac->init_machine = nvmm_accel_init; + ac->allowed = &nvmm_allowed; +} + +static const TypeInfo nvmm_accel_type = { + .name = ACCEL_CLASS_NAME("nvmm"), + .parent = TYPE_ACCEL, + .class_init = nvmm_accel_class_init, +}; + +static void +nvmm_type_init(void) +{ + type_register_static(&nvmm_accel_type); +} + +type_init(nvmm_type_init); diff --git a/target/i386/sev.c b/target/i386/sev.c index 72b9e2ab40..41f7800b5f 100644 --- a/target/i386/sev.c +++ b/target/i386/sev.c @@ -30,7 +30,6 @@ #include "trace.h" #include "migration/blocker.h" #include "qom/object.h" -#include "exec/address-spaces.h" #include "monitor/monitor.h" #include "exec/confidential-guest-support.h" #include "hw/i386/pc.h" @@ -181,7 +180,8 @@ sev_set_guest_state(SevGuestState *sev, SevState new_state) } static void -sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size) +sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size, + size_t max_size) { int r; struct kvm_enc_region range; @@ -198,19 +198,20 @@ sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size) } range.addr = (__u64)(unsigned long)host; - range.size = size; + range.size = max_size; - trace_kvm_memcrypt_register_region(host, size); + trace_kvm_memcrypt_register_region(host, max_size); r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_REG_REGION, &range); if (r) { error_report("%s: failed to register region (%p+%#zx) error '%s'", - __func__, host, size, strerror(errno)); + __func__, host, max_size, strerror(errno)); exit(1); } } static void -sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size) +sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size, + size_t max_size) { int r; struct kvm_enc_region range; @@ -227,13 +228,13 @@ sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size) } range.addr = (__u64)(unsigned long)host; - range.size = size; + range.size = max_size; - trace_kvm_memcrypt_unregister_region(host, size); + trace_kvm_memcrypt_unregister_region(host, max_size); r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_UNREG_REGION, &range); if (r) { error_report("%s: failed to unregister region (%p+%#zx)", - __func__, host, size); + __func__, host, max_size); } 
} diff --git a/target/i386/svm.h b/target/i386/svm.h index ae30fc6f79..87965e5bc2 100644 --- a/target/i386/svm.h +++ b/target/i386/svm.h @@ -132,16 +132,6 @@ #define SVM_NPT_ENABLED (1 << 0) -#define SVM_NPT_PAE (1 << 0) -#define SVM_NPT_LMA (1 << 1) -#define SVM_NPT_NXE (1 << 2) -#define SVM_NPT_PSE (1 << 3) - -#define SVM_NPTEXIT_P (1ULL << 0) -#define SVM_NPTEXIT_RW (1ULL << 1) -#define SVM_NPTEXIT_US (1ULL << 2) -#define SVM_NPTEXIT_RSVD (1ULL << 3) -#define SVM_NPTEXIT_ID (1ULL << 4) #define SVM_NPTEXIT_GPA (1ULL << 32) #define SVM_NPTEXIT_GPT (1ULL << 33) diff --git a/target/i386/tcg/bpt_helper.c b/target/i386/tcg/bpt_helper.c index 979230ac12..83cd89581e 100644 --- a/target/i386/tcg/bpt_helper.c +++ b/target/i386/tcg/bpt_helper.c @@ -19,224 +19,10 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "helper-tcg.h" - -#ifndef CONFIG_USER_ONLY -static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index) -{ - return (dr7 >> (index * 2)) & 1; -} - -static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index) -{ - return (dr7 >> (index * 2)) & 2; - -} -static inline bool hw_breakpoint_enabled(unsigned long dr7, int index) -{ - return hw_global_breakpoint_enabled(dr7, index) || - hw_local_breakpoint_enabled(dr7, index); -} - -static inline int hw_breakpoint_type(unsigned long dr7, int index) -{ - return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3; -} - -static inline int hw_breakpoint_len(unsigned long dr7, int index) -{ - int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3); - return (len == 2) ? 8 : len + 1; -} - -static int hw_breakpoint_insert(CPUX86State *env, int index) -{ - CPUState *cs = env_cpu(env); - target_ulong dr7 = env->dr[7]; - target_ulong drN = env->dr[index]; - int err = 0; - - switch (hw_breakpoint_type(dr7, index)) { - case DR7_TYPE_BP_INST: - if (hw_breakpoint_enabled(dr7, index)) { - err = cpu_breakpoint_insert(cs, drN, BP_CPU, - &env->cpu_breakpoint[index]); - } - break; - - case DR7_TYPE_IO_RW: - /* Notice when we should enable calls to bpt_io. */ - return hw_breakpoint_enabled(env->dr[7], index) - ? HF_IOBPT_MASK : 0; - - case DR7_TYPE_DATA_WR: - if (hw_breakpoint_enabled(dr7, index)) { - err = cpu_watchpoint_insert(cs, drN, - hw_breakpoint_len(dr7, index), - BP_CPU | BP_MEM_WRITE, - &env->cpu_watchpoint[index]); - } - break; - - case DR7_TYPE_DATA_RW: - if (hw_breakpoint_enabled(dr7, index)) { - err = cpu_watchpoint_insert(cs, drN, - hw_breakpoint_len(dr7, index), - BP_CPU | BP_MEM_ACCESS, - &env->cpu_watchpoint[index]); - } - break; - } - if (err) { - env->cpu_breakpoint[index] = NULL; - } - return 0; -} - -static void hw_breakpoint_remove(CPUX86State *env, int index) -{ - CPUState *cs = env_cpu(env); - - switch (hw_breakpoint_type(env->dr[7], index)) { - case DR7_TYPE_BP_INST: - if (env->cpu_breakpoint[index]) { - cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]); - env->cpu_breakpoint[index] = NULL; - } - break; - - case DR7_TYPE_DATA_WR: - case DR7_TYPE_DATA_RW: - if (env->cpu_breakpoint[index]) { - cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]); - env->cpu_breakpoint[index] = NULL; - } - break; - - case DR7_TYPE_IO_RW: - /* HF_IOBPT_MASK cleared elsewhere. */ - break; - } -} - -void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7) -{ - target_ulong old_dr7 = env->dr[7]; - int iobpt = 0; - int i; - - new_dr7 |= DR7_FIXED_1; - - /* If nothing is changing except the global/local enable bits, - then we can make the change more efficient. 
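[Note: the fold-and-xor trick that follows is easier to check with concrete bits (values invented): for each breakpoint i, DR7 bit 2i is the local enable and bit 2i+1 the global enable, so "dr7 | dr7 * 2" copies every local bit onto the neighbouring global position before the xor compares the combined enable state.

    /* Example: only the local-enable bit of breakpoint 1 (bit 2) gets turned on. */
    unsigned old_dr7 = 0x00, new_dr7 = 0x04;
    unsigned mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff;
    /* mod == 0x0c; for i == 1, mod & (2 << i * 2) == 0x08, so only breakpoint 1
       has hw_breakpoint_remove()/hw_breakpoint_insert() re-run for it. */
]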
*/ - if (((old_dr7 ^ new_dr7) & ~0xff) == 0) { - /* Fold the global and local enable bits together into the - global fields, then xor to show which registers have - changed collective enable state. */ - int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff; - - for (i = 0; i < DR7_MAX_BP; i++) { - if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) { - hw_breakpoint_remove(env, i); - } - } - env->dr[7] = new_dr7; - for (i = 0; i < DR7_MAX_BP; i++) { - if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) { - iobpt |= hw_breakpoint_insert(env, i); - } else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW - && hw_breakpoint_enabled(new_dr7, i)) { - iobpt |= HF_IOBPT_MASK; - } - } - } else { - for (i = 0; i < DR7_MAX_BP; i++) { - hw_breakpoint_remove(env, i); - } - env->dr[7] = new_dr7; - for (i = 0; i < DR7_MAX_BP; i++) { - iobpt |= hw_breakpoint_insert(env, i); - } - } - - env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt; -} - -static bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update) -{ - target_ulong dr6; - int reg; - bool hit_enabled = false; - - dr6 = env->dr[6] & ~0xf; - for (reg = 0; reg < DR7_MAX_BP; reg++) { - bool bp_match = false; - bool wp_match = false; - - switch (hw_breakpoint_type(env->dr[7], reg)) { - case DR7_TYPE_BP_INST: - if (env->dr[reg] == env->eip) { - bp_match = true; - } - break; - case DR7_TYPE_DATA_WR: - case DR7_TYPE_DATA_RW: - if (env->cpu_watchpoint[reg] && - env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) { - wp_match = true; - } - break; - case DR7_TYPE_IO_RW: - break; - } - if (bp_match || wp_match) { - dr6 |= 1 << reg; - if (hw_breakpoint_enabled(env->dr[7], reg)) { - hit_enabled = true; - } - } - } - - if (hit_enabled || force_dr6_update) { - env->dr[6] = dr6; - } - - return hit_enabled; -} - -void breakpoint_handler(CPUState *cs) -{ - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - CPUBreakpoint *bp; - - if (cs->watchpoint_hit) { - if (cs->watchpoint_hit->flags & BP_CPU) { - cs->watchpoint_hit = NULL; - if (check_hw_breakpoints(env, false)) { - raise_exception(env, EXCP01_DB); - } else { - cpu_loop_exit_noexc(cs); - } - } - } else { - QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { - if (bp->pc == env->eip) { - if (bp->flags & BP_CPU) { - check_hw_breakpoints(env, true); - raise_exception(env, EXCP01_DB); - } - break; - } - } - } -} -#endif - -void helper_single_step(CPUX86State *env) +void QEMU_NORETURN helper_single_step(CPUX86State *env) { #ifndef CONFIG_USER_ONLY check_hw_breakpoints(env, true); @@ -252,41 +38,6 @@ void helper_rechecking_single_step(CPUX86State *env) } } -void helper_set_dr(CPUX86State *env, int reg, target_ulong t0) -{ -#ifndef CONFIG_USER_ONLY - switch (reg) { - case 0: case 1: case 2: case 3: - if (hw_breakpoint_enabled(env->dr[7], reg) - && hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) { - hw_breakpoint_remove(env, reg); - env->dr[reg] = t0; - hw_breakpoint_insert(env, reg); - } else { - env->dr[reg] = t0; - } - return; - case 4: - if (env->cr[4] & CR4_DE_MASK) { - break; - } - /* fallthru */ - case 6: - env->dr[6] = t0 | DR6_FIXED_1; - return; - case 5: - if (env->cr[4] & CR4_DE_MASK) { - break; - } - /* fallthru */ - case 7: - cpu_x86_update_dr7(env, t0); - return; - } - raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); -#endif -} - target_ulong helper_get_dr(CPUX86State *env, int reg) { switch (reg) { @@ -307,30 +58,3 @@ target_ulong helper_get_dr(CPUX86State *env, int reg) } raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); } - -/* 
Check if Port I/O is trapped by a breakpoint. */ -void helper_bpt_io(CPUX86State *env, uint32_t port, - uint32_t size, target_ulong next_eip) -{ -#ifndef CONFIG_USER_ONLY - target_ulong dr7 = env->dr[7]; - int i, hit = 0; - - for (i = 0; i < DR7_MAX_BP; ++i) { - if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW - && hw_breakpoint_enabled(dr7, i)) { - int bpt_len = hw_breakpoint_len(dr7, i); - if (port + size - 1 >= env->dr[i] - && port <= env->dr[i] + bpt_len - 1) { - hit |= 1 << i; - } - } - } - - if (hit) { - env->dr[6] = (env->dr[6] & ~0xf) | hit; - env->eip = next_eip; - raise_exception(env, EXCP01_DB); - } -#endif -} diff --git a/target/i386/tcg/excp_helper.c b/target/i386/tcg/excp_helper.c index 1e71e44510..bdae887d0a 100644 --- a/target/i386/tcg/excp_helper.c +++ b/target/i386/tcg/excp_helper.c @@ -25,12 +25,13 @@ #include "exec/helper-proto.h" #include "helper-tcg.h" -void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend) +void QEMU_NORETURN helper_raise_interrupt(CPUX86State *env, int intno, + int next_eip_addend) { raise_interrupt(env, intno, 1, 0, next_eip_addend); } -void helper_raise_exception(CPUX86State *env, int exception_index) +void QEMU_NORETURN helper_raise_exception(CPUX86State *env, int exception_index) { raise_exception(env, exception_index); } @@ -116,597 +117,25 @@ void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int, raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0); } -void raise_exception_err(CPUX86State *env, int exception_index, - int error_code) +void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index, + int error_code) { raise_interrupt2(env, exception_index, 0, error_code, 0, 0); } -void raise_exception_err_ra(CPUX86State *env, int exception_index, - int error_code, uintptr_t retaddr) +void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index, + int error_code, uintptr_t retaddr) { raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr); } -void raise_exception(CPUX86State *env, int exception_index) +void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index) { raise_interrupt2(env, exception_index, 0, 0, 0, 0); } -void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr) +void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index, + uintptr_t retaddr) { raise_interrupt2(env, exception_index, 0, 0, 0, retaddr); } - -#if !defined(CONFIG_USER_ONLY) -static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type, - int *prot) -{ - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits); - uint64_t ptep, pte; - uint64_t exit_info_1 = 0; - target_ulong pde_addr, pte_addr; - uint32_t page_offset; - int page_size; - - if (likely(!(env->hflags2 & HF2_NPT_MASK))) { - return gphys; - } - - if (!(env->nested_pg_mode & SVM_NPT_NXE)) { - rsvd_mask |= PG_NX_MASK; - } - - if (env->nested_pg_mode & SVM_NPT_PAE) { - uint64_t pde, pdpe; - target_ulong pdpe_addr; - -#ifdef TARGET_X86_64 - if (env->nested_pg_mode & SVM_NPT_LMA) { - uint64_t pml5e; - uint64_t pml4e_addr, pml4e; - - pml5e = env->nested_cr3; - ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; - - pml4e_addr = (pml5e & PG_ADDRESS_MASK) + - (((gphys >> 39) & 0x1ff) << 3); - pml4e = x86_ldq_phys(cs, pml4e_addr); - if (!(pml4e & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pml4e & (rsvd_mask | PG_PSE_MASK)) { - goto do_fault_rsvd; - } - if (!(pml4e & 
PG_ACCESSED_MASK)) { - pml4e |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pml4e_addr, pml4e); - } - ptep &= pml4e ^ PG_NX_MASK; - pdpe_addr = (pml4e & PG_ADDRESS_MASK) + - (((gphys >> 30) & 0x1ff) << 3); - pdpe = x86_ldq_phys(cs, pdpe_addr); - if (!(pdpe & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pdpe & rsvd_mask) { - goto do_fault_rsvd; - } - ptep &= pdpe ^ PG_NX_MASK; - if (!(pdpe & PG_ACCESSED_MASK)) { - pdpe |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pdpe_addr, pdpe); - } - if (pdpe & PG_PSE_MASK) { - /* 1 GB page */ - page_size = 1024 * 1024 * 1024; - pte_addr = pdpe_addr; - pte = pdpe; - goto do_check_protect; - } - } else -#endif - { - pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18); - pdpe = x86_ldq_phys(cs, pdpe_addr); - if (!(pdpe & PG_PRESENT_MASK)) { - goto do_fault; - } - rsvd_mask |= PG_HI_USER_MASK; - if (pdpe & (rsvd_mask | PG_NX_MASK)) { - goto do_fault_rsvd; - } - ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; - } - - pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3); - pde = x86_ldq_phys(cs, pde_addr); - if (!(pde & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pde & rsvd_mask) { - goto do_fault_rsvd; - } - ptep &= pde ^ PG_NX_MASK; - if (pde & PG_PSE_MASK) { - /* 2 MB page */ - page_size = 2048 * 1024; - pte_addr = pde_addr; - pte = pde; - goto do_check_protect; - } - /* 4 KB page */ - if (!(pde & PG_ACCESSED_MASK)) { - pde |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pde_addr, pde); - } - pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3); - pte = x86_ldq_phys(cs, pte_addr); - if (!(pte & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pte & rsvd_mask) { - goto do_fault_rsvd; - } - /* combine pde and pte nx, user and rw protections */ - ptep &= pte ^ PG_NX_MASK; - page_size = 4096; - } else { - uint32_t pde; - - /* page directory entry */ - pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc); - pde = x86_ldl_phys(cs, pde_addr); - if (!(pde & PG_PRESENT_MASK)) { - goto do_fault; - } - ptep = pde | PG_NX_MASK; - - /* if host cr4 PSE bit is set, then we use a 4MB page */ - if ((pde & PG_PSE_MASK) && (env->nested_pg_mode & SVM_NPT_PSE)) { - page_size = 4096 * 1024; - pte_addr = pde_addr; - - /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved. - * Leave bits 20-13 in place for setting accessed/dirty bits below. 
- */ - pte = pde | ((pde & 0x1fe000LL) << (32 - 13)); - rsvd_mask = 0x200000; - goto do_check_protect_pse36; - } - - if (!(pde & PG_ACCESSED_MASK)) { - pde |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pde_addr, pde); - } - - /* page directory entry */ - pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc); - pte = x86_ldl_phys(cs, pte_addr); - if (!(pte & PG_PRESENT_MASK)) { - goto do_fault; - } - /* combine pde and pte user and rw protections */ - ptep &= pte | PG_NX_MASK; - page_size = 4096; - rsvd_mask = 0; - } - - do_check_protect: - rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; - do_check_protect_pse36: - if (pte & rsvd_mask) { - goto do_fault_rsvd; - } - ptep ^= PG_NX_MASK; - - if (!(ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if (ptep & PG_NX_MASK) { - if (access_type == MMU_INST_FETCH) { - goto do_fault_protect; - } - *prot &= ~PAGE_EXEC; - } - if (!(ptep & PG_RW_MASK)) { - if (access_type == MMU_DATA_STORE) { - goto do_fault_protect; - } - *prot &= ~PAGE_WRITE; - } - - pte &= PG_ADDRESS_MASK & ~(page_size - 1); - page_offset = gphys & (page_size - 1); - return pte + page_offset; - - do_fault_rsvd: - exit_info_1 |= SVM_NPTEXIT_RSVD; - do_fault_protect: - exit_info_1 |= SVM_NPTEXIT_P; - do_fault: - x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), - gphys); - exit_info_1 |= SVM_NPTEXIT_US; - if (access_type == MMU_DATA_STORE) { - exit_info_1 |= SVM_NPTEXIT_RW; - } else if (access_type == MMU_INST_FETCH) { - exit_info_1 |= SVM_NPTEXIT_ID; - } - if (prot) { - exit_info_1 |= SVM_NPTEXIT_GPA; - } else { /* page table access */ - exit_info_1 |= SVM_NPTEXIT_GPT; - } - cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr); -} - -/* return value: - * -1 = cannot handle fault - * 0 = nothing more to do - * 1 = generate PF fault - */ -static int handle_mmu_fault(CPUState *cs, vaddr addr, int size, - int is_write1, int mmu_idx) -{ - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - uint64_t ptep, pte; - int32_t a20_mask; - target_ulong pde_addr, pte_addr; - int error_code = 0; - int is_dirty, prot, page_size, is_write, is_user; - hwaddr paddr; - uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits); - uint32_t page_offset; - target_ulong vaddr; - uint32_t pkr; - - is_user = mmu_idx == MMU_USER_IDX; -#if defined(DEBUG_MMU) - printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n", - addr, is_write1, is_user, env->eip); -#endif - is_write = is_write1 & 1; - - a20_mask = x86_get_a20_mask(env); - if (!(env->cr[0] & CR0_PG_MASK)) { - pte = addr; -#ifdef TARGET_X86_64 - if (!(env->hflags & HF_LMA_MASK)) { - /* Without long mode we can only address 32bits in real mode */ - pte = (uint32_t)pte; - } -#endif - prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - page_size = 4096; - goto do_mapping; - } - - if (!(env->efer & MSR_EFER_NXE)) { - rsvd_mask |= PG_NX_MASK; - } - - if (env->cr[4] & CR4_PAE_MASK) { - uint64_t pde, pdpe; - target_ulong pdpe_addr; - -#ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) { - bool la57 = env->cr[4] & CR4_LA57_MASK; - uint64_t pml5e_addr, pml5e; - uint64_t pml4e_addr, pml4e; - int32_t sext; - - /* test virtual address sign extension */ - sext = la57 ? 
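The 4 MB large-page path above rebuilds a 40-bit physical address from a 32-bit page-directory entry: bits 31-22 come straight from the PDE and PDE bits 20-13 supply physical bits 39-32. A minimal standalone sketch of that reconstruction, using ad-hoc masks rather than QEMU's PG_* constants (the names below are illustrative only):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Rebuild the 4 MiB page base from a PSE(-36 style) page-directory entry:
     * PDE bits 31-22 give physical bits 31-22, PDE bits 20-13 give physical
     * bits 39-32 (bit 21 is reserved).  This mirrors the
     * pde | ((pde & 0x1fe000LL) << (32 - 13)) step in the walk above.
     */
    static uint64_t pse_page_base(uint32_t pde)
    {
        uint64_t base = pde & 0xffc00000u;                   /* bits 31-22 */
        base |= (uint64_t)(pde & 0x001fe000u) << (32 - 13);  /* bits 39-32 */
        return base;
    }

    int main(void)
    {
        uint32_t pde = 0x12c00083u | (0x5au << 13);  /* present, RW, PS, high byte 0x5a */
        printf("page base = 0x%" PRIx64 "\n", pse_page_base(pde));  /* 0x5a12c00000 */
        return 0;
    }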
(int64_t)addr >> 56 : (int64_t)addr >> 47; - if (sext != 0 && sext != -1) { - env->error_code = 0; - cs->exception_index = EXCP0D_GPF; - return 1; - } - - if (la57) { - pml5e_addr = ((env->cr[3] & ~0xfff) + - (((addr >> 48) & 0x1ff) << 3)) & a20_mask; - pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL); - pml5e = x86_ldq_phys(cs, pml5e_addr); - if (!(pml5e & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pml5e & (rsvd_mask | PG_PSE_MASK)) { - goto do_fault_rsvd; - } - if (!(pml5e & PG_ACCESSED_MASK)) { - pml5e |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pml5e_addr, pml5e); - } - ptep = pml5e ^ PG_NX_MASK; - } else { - pml5e = env->cr[3]; - ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; - } - - pml4e_addr = ((pml5e & PG_ADDRESS_MASK) + - (((addr >> 39) & 0x1ff) << 3)) & a20_mask; - pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, false); - pml4e = x86_ldq_phys(cs, pml4e_addr); - if (!(pml4e & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pml4e & (rsvd_mask | PG_PSE_MASK)) { - goto do_fault_rsvd; - } - if (!(pml4e & PG_ACCESSED_MASK)) { - pml4e |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pml4e_addr, pml4e); - } - ptep &= pml4e ^ PG_NX_MASK; - pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) & - a20_mask; - pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL); - pdpe = x86_ldq_phys(cs, pdpe_addr); - if (!(pdpe & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pdpe & rsvd_mask) { - goto do_fault_rsvd; - } - ptep &= pdpe ^ PG_NX_MASK; - if (!(pdpe & PG_ACCESSED_MASK)) { - pdpe |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pdpe_addr, pdpe); - } - if (pdpe & PG_PSE_MASK) { - /* 1 GB page */ - page_size = 1024 * 1024 * 1024; - pte_addr = pdpe_addr; - pte = pdpe; - goto do_check_protect; - } - } else -#endif - { - /* XXX: load them when cr3 is loaded ? 
*/ - pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & - a20_mask; - pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, false); - pdpe = x86_ldq_phys(cs, pdpe_addr); - if (!(pdpe & PG_PRESENT_MASK)) { - goto do_fault; - } - rsvd_mask |= PG_HI_USER_MASK; - if (pdpe & (rsvd_mask | PG_NX_MASK)) { - goto do_fault_rsvd; - } - ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; - } - - pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) & - a20_mask; - pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL); - pde = x86_ldq_phys(cs, pde_addr); - if (!(pde & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pde & rsvd_mask) { - goto do_fault_rsvd; - } - ptep &= pde ^ PG_NX_MASK; - if (pde & PG_PSE_MASK) { - /* 2 MB page */ - page_size = 2048 * 1024; - pte_addr = pde_addr; - pte = pde; - goto do_check_protect; - } - /* 4 KB page */ - if (!(pde & PG_ACCESSED_MASK)) { - pde |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pde_addr, pde); - } - pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) & - a20_mask; - pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL); - pte = x86_ldq_phys(cs, pte_addr); - if (!(pte & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pte & rsvd_mask) { - goto do_fault_rsvd; - } - /* combine pde and pte nx, user and rw protections */ - ptep &= pte ^ PG_NX_MASK; - page_size = 4096; - } else { - uint32_t pde; - - /* page directory entry */ - pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & - a20_mask; - pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL); - pde = x86_ldl_phys(cs, pde_addr); - if (!(pde & PG_PRESENT_MASK)) { - goto do_fault; - } - ptep = pde | PG_NX_MASK; - - /* if PSE bit is set, then we use a 4MB page */ - if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { - page_size = 4096 * 1024; - pte_addr = pde_addr; - - /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved. - * Leave bits 20-13 in place for setting accessed/dirty bits below. - */ - pte = pde | ((pde & 0x1fe000LL) << (32 - 13)); - rsvd_mask = 0x200000; - goto do_check_protect_pse36; - } - - if (!(pde & PG_ACCESSED_MASK)) { - pde |= PG_ACCESSED_MASK; - x86_stl_phys_notdirty(cs, pde_addr, pde); - } - - /* page directory entry */ - pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & - a20_mask; - pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL); - pte = x86_ldl_phys(cs, pte_addr); - if (!(pte & PG_PRESENT_MASK)) { - goto do_fault; - } - /* combine pde and pte user and rw protections */ - ptep &= pte | PG_NX_MASK; - page_size = 4096; - rsvd_mask = 0; - } - -do_check_protect: - rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; -do_check_protect_pse36: - if (pte & rsvd_mask) { - goto do_fault_rsvd; - } - ptep ^= PG_NX_MASK; - - /* can the page can be put in the TLB? prot will tell us */ - if (is_user && !(ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - - prot = 0; - if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) { - prot |= PAGE_READ; - if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) { - prot |= PAGE_WRITE; - } - } - if (!(ptep & PG_NX_MASK) && - (mmu_idx == MMU_USER_IDX || - !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) { - prot |= PAGE_EXEC; - } - - if (!(env->hflags & HF_LMA_MASK)) { - pkr = 0; - } else if (ptep & PG_USER_MASK) { - pkr = env->cr[4] & CR4_PKE_MASK ? env->pkru : 0; - } else { - pkr = env->cr[4] & CR4_PKS_MASK ? 
env->pkrs : 0; - } - if (pkr) { - uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT; - uint32_t pkr_ad = (pkr >> pk * 2) & 1; - uint32_t pkr_wd = (pkr >> pk * 2) & 2; - uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - - if (pkr_ad) { - pkr_prot &= ~(PAGE_READ | PAGE_WRITE); - } else if (pkr_wd && (is_user || env->cr[0] & CR0_WP_MASK)) { - pkr_prot &= ~PAGE_WRITE; - } - - prot &= pkr_prot; - if ((pkr_prot & (1 << is_write1)) == 0) { - assert(is_write1 != 2); - error_code |= PG_ERROR_PK_MASK; - goto do_fault_protect; - } - } - - if ((prot & (1 << is_write1)) == 0) { - goto do_fault_protect; - } - - /* yes, it can! */ - is_dirty = is_write && !(pte & PG_DIRTY_MASK); - if (!(pte & PG_ACCESSED_MASK) || is_dirty) { - pte |= PG_ACCESSED_MASK; - if (is_dirty) { - pte |= PG_DIRTY_MASK; - } - x86_stl_phys_notdirty(cs, pte_addr, pte); - } - - if (!(pte & PG_DIRTY_MASK)) { - /* only set write access if already dirty... otherwise wait - for dirty access */ - assert(!is_write); - prot &= ~PAGE_WRITE; - } - - do_mapping: - pte = pte & a20_mask; - - /* align to page_size */ - pte &= PG_ADDRESS_MASK & ~(page_size - 1); - page_offset = addr & (page_size - 1); - paddr = get_hphys(cs, pte + page_offset, is_write1, &prot); - - /* Even if 4MB pages, we map only one 4KB page in the cache to - avoid filling it too fast */ - vaddr = addr & TARGET_PAGE_MASK; - paddr &= TARGET_PAGE_MASK; - - assert(prot & (1 << is_write1)); - tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env), - prot, mmu_idx, page_size); - return 0; - do_fault_rsvd: - error_code |= PG_ERROR_RSVD_MASK; - do_fault_protect: - error_code |= PG_ERROR_P_MASK; - do_fault: - error_code |= (is_write << PG_ERROR_W_BIT); - if (is_user) - error_code |= PG_ERROR_U_MASK; - if (is_write1 == 2 && - (((env->efer & MSR_EFER_NXE) && - (env->cr[4] & CR4_PAE_MASK)) || - (env->cr[4] & CR4_SMEP_MASK))) - error_code |= PG_ERROR_I_D_MASK; - if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) { - /* cr2 is not modified in case of exceptions */ - x86_stq_phys(cs, - env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), - addr); - } else { - env->cr[2] = addr; - } - env->error_code = error_code; - cs->exception_index = EXCP0E_PAGE; - return 1; -} -#endif - -bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - -#ifdef CONFIG_USER_ONLY - /* user mode only emulation */ - env->cr[2] = addr; - env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT; - env->error_code |= PG_ERROR_U_MASK; - cs->exception_index = EXCP0E_PAGE; - env->exception_is_int = 0; - env->exception_next_eip = -1; - cpu_loop_exit_restore(cs, retaddr); -#else - env->retaddr = retaddr; - if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) { - /* FIXME: On error in get_hphys we have already jumped out. 
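The protection-key step above selects PKRU for user pages and PKRS for supervisor pages, then consults the two-bit access-disable/write-disable pair for the page's key before the final permission check. A self-contained sketch of that rights calculation, with simplified PROT_* flags standing in for QEMU's PAGE_* bits:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PROT_READ  1
    #define PROT_WRITE 2
    #define PROT_EXEC  4

    /*
     * Apply an x86 protection-key register (PKRU or PKRS layout: two bits
     * per key, AD in the low bit, WD in the high bit) to a page's rights,
     * following the same shifts and masks as the walk above.
     */
    static int apply_pkey(uint32_t pkr, unsigned key, bool user_access,
                          bool cr0_wp, int prot)
    {
        uint32_t ad = (pkr >> (key * 2)) & 1;
        uint32_t wd = (pkr >> (key * 2)) & 2;

        if (ad) {
            prot &= ~(PROT_READ | PROT_WRITE);   /* access disable */
        } else if (wd && (user_access || cr0_wp)) {
            prot &= ~PROT_WRITE;                 /* write disable */
        }
        return prot;
    }

    int main(void)
    {
        uint32_t pkru = 2u << (5 * 2);  /* key 5: WD set, AD clear */
        int prot = apply_pkey(pkru, 5, true, true,
                              PROT_READ | PROT_WRITE | PROT_EXEC);
        printf("prot = %d\n", prot);    /* write stripped: prints 5 */
        return 0;
    }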
*/ - g_assert(!probe); - raise_exception_err_ra(env, cs->exception_index, - env->error_code, retaddr); - } - return true; -#endif -} diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c index 60ed93520a..1b30f1bb73 100644 --- a/target/i386/tcg/fpu_helper.c +++ b/target/i386/tcg/fpu_helper.c @@ -21,17 +21,10 @@ #include <math.h> #include "cpu.h" #include "exec/helper-proto.h" -#include "qemu/host-utils.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" #include "fpu/softfloat.h" #include "fpu/softfloat-macros.h" #include "helper-tcg.h" -#ifdef CONFIG_SOFTMMU -#include "hw/irq.h" -#endif - /* float macros */ #define FT0 (env->ft0) #define ST0 (env->fpregs[env->fpstt].d) @@ -75,36 +68,6 @@ #define floatx80_ln2_d make_floatx80(0x3ffe, 0xb17217f7d1cf79abLL) #define floatx80_pi_d make_floatx80(0x4000, 0xc90fdaa22168c234LL) -#if !defined(CONFIG_USER_ONLY) -static qemu_irq ferr_irq; - -void x86_register_ferr_irq(qemu_irq irq) -{ - ferr_irq = irq; -} - -static void cpu_clear_ignne(void) -{ - CPUX86State *env = &X86_CPU(first_cpu)->env; - env->hflags2 &= ~HF2_IGNNE_MASK; -} - -void cpu_set_ignne(void) -{ - CPUX86State *env = &X86_CPU(first_cpu)->env; - env->hflags2 |= HF2_IGNNE_MASK; - /* - * We get here in response to a write to port F0h. The chipset should - * deassert FP_IRQ and FERR# instead should stay signaled until FPSW_SE is - * cleared, because FERR# and FP_IRQ are two separate pins on real - * hardware. However, we don't model FERR# as a qemu_irq, so we just - * do directly what the chipset would do, i.e. deassert FP_IRQ. - */ - qemu_irq_lower(ferr_irq); -} -#endif - - static inline void fpush(CPUX86State *env) { env->fpstt = (env->fpstt - 1) & 7; @@ -117,8 +80,7 @@ static inline void fpop(CPUX86State *env) env->fpstt = (env->fpstt + 1) & 7; } -static inline floatx80 helper_fldt(CPUX86State *env, target_ulong ptr, - uintptr_t retaddr) +static floatx80 do_fldt(CPUX86State *env, target_ulong ptr, uintptr_t retaddr) { CPU_LDoubleU temp; @@ -127,8 +89,8 @@ static inline floatx80 helper_fldt(CPUX86State *env, target_ulong ptr, return temp.d; } -static inline void helper_fstt(CPUX86State *env, floatx80 f, target_ulong ptr, - uintptr_t retaddr) +static void do_fstt(CPUX86State *env, floatx80 f, target_ulong ptr, + uintptr_t retaddr) { CPU_LDoubleU temp; @@ -203,8 +165,8 @@ static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr) raise_exception_ra(env, EXCP10_COPR, retaddr); } #if !defined(CONFIG_USER_ONLY) - else if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) { - qemu_irq_raise(ferr_irq); + else { + fpu_check_raise_ferr_irq(env); } #endif } @@ -405,14 +367,14 @@ void helper_fldt_ST0(CPUX86State *env, target_ulong ptr) int new_fpstt; new_fpstt = (env->fpstt - 1) & 7; - env->fpregs[new_fpstt].d = helper_fldt(env, ptr, GETPC()); + env->fpregs[new_fpstt].d = do_fldt(env, ptr, GETPC()); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ } void helper_fstt_ST0(CPUX86State *env, target_ulong ptr) { - helper_fstt(env, ST0, ptr, GETPC()); + do_fstt(env, ST0, ptr, GETPC()); } void helper_fpush(CPUX86State *env) @@ -2458,17 +2420,18 @@ void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32) do_fldenv(env, ptr, data32, GETPC()); } -void helper_fsave(CPUX86State *env, target_ulong ptr, int data32) +static void do_fsave(CPUX86State *env, target_ulong ptr, int data32, + uintptr_t retaddr) { floatx80 tmp; int i; - do_fstenv(env, ptr, data32, GETPC()); + do_fstenv(env, ptr, data32, retaddr); ptr += (14 << data32); for (i = 0; i < 
8; i++) { tmp = ST(i); - helper_fstt(env, tmp, ptr, GETPC()); + do_fstt(env, tmp, ptr, retaddr); ptr += 10; } @@ -2486,30 +2449,41 @@ void helper_fsave(CPUX86State *env, target_ulong ptr, int data32) env->fptags[7] = 1; } -void helper_frstor(CPUX86State *env, target_ulong ptr, int data32) +void helper_fsave(CPUX86State *env, target_ulong ptr, int data32) +{ + do_fsave(env, ptr, data32, GETPC()); +} + +static void do_frstor(CPUX86State *env, target_ulong ptr, int data32, + uintptr_t retaddr) { floatx80 tmp; int i; - do_fldenv(env, ptr, data32, GETPC()); + do_fldenv(env, ptr, data32, retaddr); ptr += (14 << data32); for (i = 0; i < 8; i++) { - tmp = helper_fldt(env, ptr, GETPC()); + tmp = do_fldt(env, ptr, retaddr); ST(i) = tmp; ptr += 10; } } +void helper_frstor(CPUX86State *env, target_ulong ptr, int data32) +{ + do_frstor(env, ptr, data32, GETPC()); +} + #if defined(CONFIG_USER_ONLY) void cpu_x86_fsave(CPUX86State *env, target_ulong ptr, int data32) { - helper_fsave(env, ptr, data32); + do_fsave(env, ptr, data32, 0); } void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32) { - helper_frstor(env, ptr, data32); + do_frstor(env, ptr, data32, 0); } #endif @@ -2539,7 +2513,7 @@ static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra) addr = ptr + XO(legacy.fpregs); for (i = 0; i < 8; i++) { floatx80 tmp = ST(i); - helper_fstt(env, tmp, addr, ra); + do_fstt(env, tmp, addr, ra); addr += 16; } } @@ -2594,10 +2568,8 @@ static void do_xsave_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra) cpu_stq_data_ra(env, ptr, env->pkru, ra); } -void helper_fxsave(CPUX86State *env, target_ulong ptr) +static void do_fxsave(CPUX86State *env, target_ulong ptr, uintptr_t ra) { - uintptr_t ra = GETPC(); - /* The operand must be 16 byte aligned */ if (ptr & 0xf) { raise_exception_ra(env, EXCP0D_GPF, ra); @@ -2616,6 +2588,11 @@ void helper_fxsave(CPUX86State *env, target_ulong ptr) } } +void helper_fxsave(CPUX86State *env, target_ulong ptr) +{ + do_fxsave(env, ptr, GETPC()); +} + static uint64_t get_xinuse(CPUX86State *env) { uint64_t inuse = -1; @@ -2703,7 +2680,7 @@ static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra) addr = ptr + XO(legacy.fpregs); for (i = 0; i < 8; i++) { - floatx80 tmp = helper_fldt(env, addr, ra); + floatx80 tmp = do_fldt(env, addr, ra); ST(i) = tmp; addr += 16; } @@ -2758,10 +2735,8 @@ static void do_xrstor_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra) env->pkru = cpu_ldq_data_ra(env, ptr, ra); } -void helper_fxrstor(CPUX86State *env, target_ulong ptr) +static void do_fxrstor(CPUX86State *env, target_ulong ptr, uintptr_t ra) { - uintptr_t ra = GETPC(); - /* The operand must be 16 byte aligned */ if (ptr & 0xf) { raise_exception_ra(env, EXCP0D_GPF, ra); @@ -2780,15 +2755,20 @@ void helper_fxrstor(CPUX86State *env, target_ulong ptr) } } +void helper_fxrstor(CPUX86State *env, target_ulong ptr) +{ + do_fxrstor(env, ptr, GETPC()); +} + #if defined(CONFIG_USER_ONLY) void cpu_x86_fxsave(CPUX86State *env, target_ulong ptr) { - helper_fxsave(env, ptr); + do_fxsave(env, ptr, 0); } void cpu_x86_fxrstor(CPUX86State *env, target_ulong ptr) { - helper_fxrstor(env, ptr); + do_fxrstor(env, ptr, 0); } #endif diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h index bcdfca06f6..2510cc244e 100644 --- a/target/i386/tcg/helper-tcg.h +++ b/target/i386/tcg/helper-tcg.h @@ -76,16 +76,27 @@ extern const uint8_t parity_table[256]; /* misc_helper.c */ void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask); +void 
do_pause(CPUX86State *env) QEMU_NORETURN; -/* svm_helper.c */ +/* sysemu/svm_helper.c */ +#ifndef CONFIG_USER_ONLY void QEMU_NORETURN cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1, uintptr_t retaddr); void do_vmexit(CPUX86State *env); +#endif /* seg_helper.c */ void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw); +void do_interrupt_all(X86CPU *cpu, int intno, int is_int, + int error_code, target_ulong next_eip, int is_hw); +void handle_even_inj(CPUX86State *env, int intno, int is_int, + int error_code, int is_hw, int rm); +int exception_has_error_code(int intno); /* smm_helper.c */ void do_smm_enter(X86CPU *cpu); +/* bpt_helper.c */ +bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update); + #endif /* I386_HELPER_TCG_H */ diff --git a/target/i386/tcg/meson.build b/target/i386/tcg/meson.build index 6a1a73cdbf..f9110e890c 100644 --- a/target/i386/tcg/meson.build +++ b/target/i386/tcg/meson.build @@ -8,7 +8,8 @@ i386_ss.add(when: 'CONFIG_TCG', if_true: files( 'misc_helper.c', 'mpx_helper.c', 'seg_helper.c', - 'smm_helper.c', - 'svm_helper.c', 'tcg-cpu.c', 'translate.c'), if_false: files('tcg-stub.c')) + +subdir('sysemu') +subdir('user') diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c index a25428c36e..baffa5d7ba 100644 --- a/target/i386/tcg/misc_helper.c +++ b/target/i386/tcg/misc_helper.c @@ -18,12 +18,9 @@ */ #include "qemu/osdep.h" -#include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" -#include "exec/cpu_ldst.h" -#include "exec/address-spaces.h" #include "helper-tcg.h" /* @@ -39,69 +36,6 @@ void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask) (eflags & update_mask) | 0x2; } -void helper_outb(CPUX86State *env, uint32_t port, uint32_t data) -{ -#ifdef CONFIG_USER_ONLY - fprintf(stderr, "outb: port=0x%04x, data=%02x\n", port, data); -#else - address_space_stb(&address_space_io, port, data, - cpu_get_mem_attrs(env), NULL); -#endif -} - -target_ulong helper_inb(CPUX86State *env, uint32_t port) -{ -#ifdef CONFIG_USER_ONLY - fprintf(stderr, "inb: port=0x%04x\n", port); - return 0; -#else - return address_space_ldub(&address_space_io, port, - cpu_get_mem_attrs(env), NULL); -#endif -} - -void helper_outw(CPUX86State *env, uint32_t port, uint32_t data) -{ -#ifdef CONFIG_USER_ONLY - fprintf(stderr, "outw: port=0x%04x, data=%04x\n", port, data); -#else - address_space_stw(&address_space_io, port, data, - cpu_get_mem_attrs(env), NULL); -#endif -} - -target_ulong helper_inw(CPUX86State *env, uint32_t port) -{ -#ifdef CONFIG_USER_ONLY - fprintf(stderr, "inw: port=0x%04x\n", port); - return 0; -#else - return address_space_lduw(&address_space_io, port, - cpu_get_mem_attrs(env), NULL); -#endif -} - -void helper_outl(CPUX86State *env, uint32_t port, uint32_t data) -{ -#ifdef CONFIG_USER_ONLY - fprintf(stderr, "outl: port=0x%04x, data=%08x\n", port, data); -#else - address_space_stl(&address_space_io, port, data, - cpu_get_mem_attrs(env), NULL); -#endif -} - -target_ulong helper_inl(CPUX86State *env, uint32_t port) -{ -#ifdef CONFIG_USER_ONLY - fprintf(stderr, "inl: port=0x%04x\n", port); - return 0; -#else - return address_space_ldl(&address_space_io, port, - cpu_get_mem_attrs(env), NULL); -#endif -} - void helper_into(CPUX86State *env, int next_eip_addend) { int eflags; @@ -126,84 +60,6 @@ void helper_cpuid(CPUX86State *env) env->regs[R_EDX] = edx; } -#if defined(CONFIG_USER_ONLY) -target_ulong helper_read_crN(CPUX86State *env, int reg) -{ - return 0; -} - -void 
helper_write_crN(CPUX86State *env, int reg, target_ulong t0) -{ -} -#else -target_ulong helper_read_crN(CPUX86State *env, int reg) -{ - target_ulong val; - - cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0, GETPC()); - switch (reg) { - default: - val = env->cr[reg]; - break; - case 8: - if (!(env->hflags2 & HF2_VINTR_MASK)) { - val = cpu_get_apic_tpr(env_archcpu(env)->apic_state); - } else { - val = env->v_tpr; - } - break; - } - return val; -} - -void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) -{ - cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0, GETPC()); - switch (reg) { - case 0: - cpu_x86_update_cr0(env, t0); - break; - case 3: - cpu_x86_update_cr3(env, t0); - break; - case 4: - if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) && - (env->hflags & HF_CS64_MASK)) { - raise_exception_ra(env, EXCP0D_GPF, GETPC()); - } - cpu_x86_update_cr4(env, t0); - break; - case 8: - if (!(env->hflags2 & HF2_VINTR_MASK)) { - qemu_mutex_lock_iothread(); - cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0); - qemu_mutex_unlock_iothread(); - } - env->v_tpr = t0 & 0x0f; - break; - default: - env->cr[reg] = t0; - break; - } -} -#endif - -void helper_lmsw(CPUX86State *env, target_ulong t0) -{ - /* only 4 lower bits of CR0 are modified. PE cannot be set to zero - if already set to one. */ - t0 = (env->cr[0] & ~0xe) | (t0 & 0xf); - helper_write_crN(env, 0, t0); -} - -void helper_invlpg(CPUX86State *env, target_ulong addr) -{ - X86CPU *cpu = env_archcpu(env); - - cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0, GETPC()); - tlb_flush_page(CPU(cpu), addr); -} - void helper_rdtsc(CPUX86State *env) { uint64_t val; @@ -224,7 +80,7 @@ void helper_rdtscp(CPUX86State *env) env->regs[R_ECX] = (uint32_t)(env->tsc_aux); } -void helper_rdpmc(CPUX86State *env) +void QEMU_NORETURN helper_rdpmc(CPUX86State *env) { if (((env->cr[4] & CR4_PCE_MASK) == 0 ) && ((env->hflags & HF_CPL_MASK) != 0)) { @@ -237,414 +93,24 @@ void helper_rdpmc(CPUX86State *env) raise_exception_err(env, EXCP06_ILLOP, 0); } -#if defined(CONFIG_USER_ONLY) -void helper_wrmsr(CPUX86State *env) +void QEMU_NORETURN do_pause(CPUX86State *env) { -} - -void helper_rdmsr(CPUX86State *env) -{ -} -#else -void helper_wrmsr(CPUX86State *env) -{ - uint64_t val; CPUState *cs = env_cpu(env); - cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC()); - - val = ((uint32_t)env->regs[R_EAX]) | - ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); - - switch ((uint32_t)env->regs[R_ECX]) { - case MSR_IA32_SYSENTER_CS: - env->sysenter_cs = val & 0xffff; - break; - case MSR_IA32_SYSENTER_ESP: - env->sysenter_esp = val; - break; - case MSR_IA32_SYSENTER_EIP: - env->sysenter_eip = val; - break; - case MSR_IA32_APICBASE: - cpu_set_apic_base(env_archcpu(env)->apic_state, val); - break; - case MSR_EFER: - { - uint64_t update_mask; - - update_mask = 0; - if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) { - update_mask |= MSR_EFER_SCE; - } - if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { - update_mask |= MSR_EFER_LME; - } - if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) { - update_mask |= MSR_EFER_FFXSR; - } - if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) { - update_mask |= MSR_EFER_NXE; - } - if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { - update_mask |= MSR_EFER_SVME; - } - if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) { - update_mask |= MSR_EFER_FFXSR; - } - cpu_load_efer(env, (env->efer & ~update_mask) | - (val & update_mask)); - } - break; - case MSR_STAR: - env->star = 
val; - break; - case MSR_PAT: - env->pat = val; - break; - case MSR_IA32_PKRS: - if (val & 0xFFFFFFFF00000000ull) { - goto error; - } - env->pkrs = val; - tlb_flush(cs); - break; - case MSR_VM_HSAVE_PA: - env->vm_hsave = val; - break; -#ifdef TARGET_X86_64 - case MSR_LSTAR: - env->lstar = val; - break; - case MSR_CSTAR: - env->cstar = val; - break; - case MSR_FMASK: - env->fmask = val; - break; - case MSR_FSBASE: - env->segs[R_FS].base = val; - break; - case MSR_GSBASE: - env->segs[R_GS].base = val; - break; - case MSR_KERNELGSBASE: - env->kernelgsbase = val; - break; -#endif - case MSR_MTRRphysBase(0): - case MSR_MTRRphysBase(1): - case MSR_MTRRphysBase(2): - case MSR_MTRRphysBase(3): - case MSR_MTRRphysBase(4): - case MSR_MTRRphysBase(5): - case MSR_MTRRphysBase(6): - case MSR_MTRRphysBase(7): - env->mtrr_var[((uint32_t)env->regs[R_ECX] - - MSR_MTRRphysBase(0)) / 2].base = val; - break; - case MSR_MTRRphysMask(0): - case MSR_MTRRphysMask(1): - case MSR_MTRRphysMask(2): - case MSR_MTRRphysMask(3): - case MSR_MTRRphysMask(4): - case MSR_MTRRphysMask(5): - case MSR_MTRRphysMask(6): - case MSR_MTRRphysMask(7): - env->mtrr_var[((uint32_t)env->regs[R_ECX] - - MSR_MTRRphysMask(0)) / 2].mask = val; - break; - case MSR_MTRRfix64K_00000: - env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - - MSR_MTRRfix64K_00000] = val; - break; - case MSR_MTRRfix16K_80000: - case MSR_MTRRfix16K_A0000: - env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - - MSR_MTRRfix16K_80000 + 1] = val; - break; - case MSR_MTRRfix4K_C0000: - case MSR_MTRRfix4K_C8000: - case MSR_MTRRfix4K_D0000: - case MSR_MTRRfix4K_D8000: - case MSR_MTRRfix4K_E0000: - case MSR_MTRRfix4K_E8000: - case MSR_MTRRfix4K_F0000: - case MSR_MTRRfix4K_F8000: - env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - - MSR_MTRRfix4K_C0000 + 3] = val; - break; - case MSR_MTRRdefType: - env->mtrr_deftype = val; - break; - case MSR_MCG_STATUS: - env->mcg_status = val; - break; - case MSR_MCG_CTL: - if ((env->mcg_cap & MCG_CTL_P) - && (val == 0 || val == ~(uint64_t)0)) { - env->mcg_ctl = val; - } - break; - case MSR_TSC_AUX: - env->tsc_aux = val; - break; - case MSR_IA32_MISC_ENABLE: - env->msr_ia32_misc_enable = val; - break; - case MSR_IA32_BNDCFGS: - /* FIXME: #GP if reserved bits are set. */ - /* FIXME: Extend highest implemented bit of linear address. */ - env->msr_bndcfgs = val; - cpu_sync_bndcs_hflags(env); - break; - default: - if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL - && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + - (4 * env->mcg_cap & 0xff)) { - uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL; - if ((offset & 0x3) != 0 - || (val == 0 || val == ~(uint64_t)0)) { - env->mce_banks[offset] = val; - } - break; - } - /* XXX: exception? 
*/ - break; - } - return; -error: - raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); -} - -void helper_rdmsr(CPUX86State *env) -{ - X86CPU *x86_cpu = env_archcpu(env); - uint64_t val; - - cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC()); - - switch ((uint32_t)env->regs[R_ECX]) { - case MSR_IA32_SYSENTER_CS: - val = env->sysenter_cs; - break; - case MSR_IA32_SYSENTER_ESP: - val = env->sysenter_esp; - break; - case MSR_IA32_SYSENTER_EIP: - val = env->sysenter_eip; - break; - case MSR_IA32_APICBASE: - val = cpu_get_apic_base(env_archcpu(env)->apic_state); - break; - case MSR_EFER: - val = env->efer; - break; - case MSR_STAR: - val = env->star; - break; - case MSR_PAT: - val = env->pat; - break; - case MSR_IA32_PKRS: - val = env->pkrs; - break; - case MSR_VM_HSAVE_PA: - val = env->vm_hsave; - break; - case MSR_IA32_PERF_STATUS: - /* tsc_increment_by_tick */ - val = 1000ULL; - /* CPU multiplier */ - val |= (((uint64_t)4ULL) << 40); - break; -#ifdef TARGET_X86_64 - case MSR_LSTAR: - val = env->lstar; - break; - case MSR_CSTAR: - val = env->cstar; - break; - case MSR_FMASK: - val = env->fmask; - break; - case MSR_FSBASE: - val = env->segs[R_FS].base; - break; - case MSR_GSBASE: - val = env->segs[R_GS].base; - break; - case MSR_KERNELGSBASE: - val = env->kernelgsbase; - break; - case MSR_TSC_AUX: - val = env->tsc_aux; - break; -#endif - case MSR_SMI_COUNT: - val = env->msr_smi_count; - break; - case MSR_MTRRphysBase(0): - case MSR_MTRRphysBase(1): - case MSR_MTRRphysBase(2): - case MSR_MTRRphysBase(3): - case MSR_MTRRphysBase(4): - case MSR_MTRRphysBase(5): - case MSR_MTRRphysBase(6): - case MSR_MTRRphysBase(7): - val = env->mtrr_var[((uint32_t)env->regs[R_ECX] - - MSR_MTRRphysBase(0)) / 2].base; - break; - case MSR_MTRRphysMask(0): - case MSR_MTRRphysMask(1): - case MSR_MTRRphysMask(2): - case MSR_MTRRphysMask(3): - case MSR_MTRRphysMask(4): - case MSR_MTRRphysMask(5): - case MSR_MTRRphysMask(6): - case MSR_MTRRphysMask(7): - val = env->mtrr_var[((uint32_t)env->regs[R_ECX] - - MSR_MTRRphysMask(0)) / 2].mask; - break; - case MSR_MTRRfix64K_00000: - val = env->mtrr_fixed[0]; - break; - case MSR_MTRRfix16K_80000: - case MSR_MTRRfix16K_A0000: - val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - - MSR_MTRRfix16K_80000 + 1]; - break; - case MSR_MTRRfix4K_C0000: - case MSR_MTRRfix4K_C8000: - case MSR_MTRRfix4K_D0000: - case MSR_MTRRfix4K_D8000: - case MSR_MTRRfix4K_E0000: - case MSR_MTRRfix4K_E8000: - case MSR_MTRRfix4K_F0000: - case MSR_MTRRfix4K_F8000: - val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - - MSR_MTRRfix4K_C0000 + 3]; - break; - case MSR_MTRRdefType: - val = env->mtrr_deftype; - break; - case MSR_MTRRcap: - if (env->features[FEAT_1_EDX] & CPUID_MTRR) { - val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | - MSR_MTRRcap_WC_SUPPORTED; - } else { - /* XXX: exception? */ - val = 0; - } - break; - case MSR_MCG_CAP: - val = env->mcg_cap; - break; - case MSR_MCG_CTL: - if (env->mcg_cap & MCG_CTL_P) { - val = env->mcg_ctl; - } else { - val = 0; - } - break; - case MSR_MCG_STATUS: - val = env->mcg_status; - break; - case MSR_IA32_MISC_ENABLE: - val = env->msr_ia32_misc_enable; - break; - case MSR_IA32_BNDCFGS: - val = env->msr_bndcfgs; - break; - case MSR_IA32_UCODE_REV: - val = x86_cpu->ucode_rev; - break; - default: - if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL - && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + - (4 * env->mcg_cap & 0xff)) { - uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL; - val = env->mce_banks[offset]; - break; - } - /* XXX: exception? 
*/ - val = 0; - break; - } - env->regs[R_EAX] = (uint32_t)(val); - env->regs[R_EDX] = (uint32_t)(val >> 32); -} -#endif - -static void do_pause(X86CPU *cpu) -{ - CPUState *cs = CPU(cpu); - /* Just let another CPU run. */ cs->exception_index = EXCP_INTERRUPT; cpu_loop_exit(cs); } -static void do_hlt(X86CPU *cpu) -{ - CPUState *cs = CPU(cpu); - CPUX86State *env = &cpu->env; - - env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */ - cs->halted = 1; - cs->exception_index = EXCP_HLT; - cpu_loop_exit(cs); -} - -void helper_hlt(CPUX86State *env, int next_eip_addend) +void QEMU_NORETURN helper_pause(CPUX86State *env, int next_eip_addend) { - X86CPU *cpu = env_archcpu(env); - - cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC()); - env->eip += next_eip_addend; - - do_hlt(cpu); -} - -void helper_monitor(CPUX86State *env, target_ulong ptr) -{ - if ((uint32_t)env->regs[R_ECX] != 0) { - raise_exception_ra(env, EXCP0D_GPF, GETPC()); - } - /* XXX: store address? */ - cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC()); -} - -void helper_mwait(CPUX86State *env, int next_eip_addend) -{ - CPUState *cs = env_cpu(env); - X86CPU *cpu = env_archcpu(env); - - if ((uint32_t)env->regs[R_ECX] != 0) { - raise_exception_ra(env, EXCP0D_GPF, GETPC()); - } - cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC()); - env->eip += next_eip_addend; - - /* XXX: not complete but not completely erroneous */ - if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) { - do_pause(cpu); - } else { - do_hlt(cpu); - } -} - -void helper_pause(CPUX86State *env, int next_eip_addend) -{ - X86CPU *cpu = env_archcpu(env); - cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0, GETPC()); env->eip += next_eip_addend; - do_pause(cpu); + do_pause(env); } -void helper_debug(CPUX86State *env) +void QEMU_NORETURN helper_debug(CPUX86State *env) { CPUState *cs = env_cpu(env); diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c index d180a381d1..2f6cdc8239 100644 --- a/target/i386/tcg/seg_helper.c +++ b/target/i386/tcg/seg_helper.c @@ -26,49 +26,7 @@ #include "exec/cpu_ldst.h" #include "exec/log.h" #include "helper-tcg.h" - -//#define DEBUG_PCALL - -#ifdef DEBUG_PCALL -# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__) -# define LOG_PCALL_STATE(cpu) \ - log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP) -#else -# define LOG_PCALL(...) do { } while (0) -# define LOG_PCALL_STATE(cpu) do { } while (0) -#endif - -/* - * TODO: Convert callers to compute cpu_mmu_index_kernel once - * and use *_mmuidx_ra directly. 
- */ -#define cpu_ldub_kernel_ra(e, p, r) \ - cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) -#define cpu_lduw_kernel_ra(e, p, r) \ - cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) -#define cpu_ldl_kernel_ra(e, p, r) \ - cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) -#define cpu_ldq_kernel_ra(e, p, r) \ - cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) - -#define cpu_stb_kernel_ra(e, p, v, r) \ - cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) -#define cpu_stw_kernel_ra(e, p, v, r) \ - cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) -#define cpu_stl_kernel_ra(e, p, v, r) \ - cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) -#define cpu_stq_kernel_ra(e, p, v, r) \ - cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) - -#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0) -#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0) -#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0) -#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0) - -#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0) -#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0) -#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0) -#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0) +#include "seg_helper.h" /* return non zero if error */ static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr, @@ -531,7 +489,7 @@ static inline unsigned int get_sp_mask(unsigned int e2) } } -static int exception_has_error_code(int intno) +int exception_has_error_code(int intno) { switch (intno) { case 8: @@ -977,72 +935,6 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int, #endif #ifdef TARGET_X86_64 -#if defined(CONFIG_USER_ONLY) -void helper_syscall(CPUX86State *env, int next_eip_addend) -{ - CPUState *cs = env_cpu(env); - - cs->exception_index = EXCP_SYSCALL; - env->exception_is_int = 0; - env->exception_next_eip = env->eip + next_eip_addend; - cpu_loop_exit(cs); -} -#else -void helper_syscall(CPUX86State *env, int next_eip_addend) -{ - int selector; - - if (!(env->efer & MSR_EFER_SCE)) { - raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); - } - selector = (env->star >> 32) & 0xffff; - if (env->hflags & HF_LMA_MASK) { - int code64; - - env->regs[R_ECX] = env->eip + next_eip_addend; - env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK; - - code64 = env->hflags & HF_CS64_MASK; - - env->eflags &= ~(env->fmask | RF_MASK); - cpu_load_eflags(env, env->eflags, 0); - cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | - DESC_L_MASK); - cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_W_MASK | DESC_A_MASK); - if (code64) { - env->eip = env->lstar; - } else { - env->eip = env->cstar; - } - } else { - env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend); - - env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK); - cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_W_MASK | DESC_A_MASK); - env->eip = (uint32_t)env->star; - } -} -#endif -#endif - -#ifdef TARGET_X86_64 void helper_sysret(CPUX86State *env, int dflag) { int cpl, selector; @@ -1136,84 +1028,13 @@ static 
void do_interrupt_real(CPUX86State *env, int intno, int is_int, env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); } -#if defined(CONFIG_USER_ONLY) -/* fake user mode interrupt. is_int is TRUE if coming from the int - * instruction. next_eip is the env->eip value AFTER the interrupt - * instruction. It is only relevant if is_int is TRUE or if intno - * is EXCP_SYSCALL. - */ -static void do_interrupt_user(CPUX86State *env, int intno, int is_int, - int error_code, target_ulong next_eip) -{ - if (is_int) { - SegmentCache *dt; - target_ulong ptr; - int dpl, cpl, shift; - uint32_t e2; - - dt = &env->idt; - if (env->hflags & HF_LMA_MASK) { - shift = 4; - } else { - shift = 3; - } - ptr = dt->base + (intno << shift); - e2 = cpu_ldl_kernel(env, ptr + 4); - - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - cpl = env->hflags & HF_CPL_MASK; - /* check privilege if software int */ - if (dpl < cpl) { - raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2); - } - } - - /* Since we emulate only user space, we cannot do more than - exiting the emulation with the suitable exception and error - code. So update EIP for INT 0x80 and EXCP_SYSCALL. */ - if (is_int || intno == EXCP_SYSCALL) { - env->eip = next_eip; - } -} - -#else - -static void handle_even_inj(CPUX86State *env, int intno, int is_int, - int error_code, int is_hw, int rm) -{ - CPUState *cs = env_cpu(env); - uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, - control.event_inj)); - - if (!(event_inj & SVM_EVTINJ_VALID)) { - int type; - - if (is_int) { - type = SVM_EVTINJ_TYPE_SOFT; - } else { - type = SVM_EVTINJ_TYPE_EXEPT; - } - event_inj = intno | type | SVM_EVTINJ_VALID; - if (!rm && exception_has_error_code(intno)) { - event_inj |= SVM_EVTINJ_VALID_ERR; - x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, - control.event_inj_err), - error_code); - } - x86_stl_phys(cs, - env->vm_vmcb + offsetof(struct vmcb, control.event_inj), - event_inj); - } -} -#endif - /* * Begin execution of an interruption. is_int is TRUE if coming from * the int instruction. next_eip is the env->eip value AFTER the interrupt * instruction. It is only relevant if is_int is TRUE. 
*/ -static void do_interrupt_all(X86CPU *cpu, int intno, int is_int, - int error_code, target_ulong next_eip, int is_hw) +void do_interrupt_all(X86CPU *cpu, int intno, int is_int, + int error_code, target_ulong next_eip, int is_hw) { CPUX86State *env = &cpu->env; @@ -1289,36 +1110,6 @@ static void do_interrupt_all(X86CPU *cpu, int intno, int is_int, #endif } -void x86_cpu_do_interrupt(CPUState *cs) -{ - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - -#if defined(CONFIG_USER_ONLY) - /* if user mode only, we simulate a fake exception - which will be handled outside the cpu execution - loop */ - do_interrupt_user(env, cs->exception_index, - env->exception_is_int, - env->error_code, - env->exception_next_eip); - /* successfully delivered */ - env->old_exception = -1; -#else - if (cs->exception_index == EXCP_VMEXIT) { - assert(env->old_exception == -1); - do_vmexit(env); - } else { - do_interrupt_all(cpu, cs->exception_index, - env->exception_is_int, - env->error_code, - env->exception_next_eip, 0); - /* successfully delivered */ - env->old_exception = -1; - } -#endif -} - void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) { do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); @@ -1351,7 +1142,11 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request) case CPU_INTERRUPT_SMI: cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0); cs->interrupt_request &= ~CPU_INTERRUPT_SMI; +#ifdef CONFIG_USER_ONLY + cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode"); +#else do_smm_enter(cpu); +#endif /* CONFIG_USER_ONLY */ break; case CPU_INTERRUPT_NMI: cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0); @@ -2621,62 +2416,3 @@ void helper_verw(CPUX86State *env, target_ulong selector1) } CC_SRC = eflags | CC_Z; } - -#if defined(CONFIG_USER_ONLY) -void cpu_x86_load_seg(CPUX86State *env, X86Seg seg_reg, int selector) -{ - if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { - int dpl = (env->eflags & VM_MASK) ? 
3 : 0; - selector &= 0xffff; - cpu_x86_load_seg_cache(env, seg_reg, selector, - (selector << 4), 0xffff, - DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | - DESC_A_MASK | (dpl << DESC_DPL_SHIFT)); - } else { - helper_load_seg(env, seg_reg, selector); - } -} -#endif - -/* check if Port I/O is allowed in TSS */ -static inline void check_io(CPUX86State *env, int addr, int size, - uintptr_t retaddr) -{ - int io_offset, val, mask; - - /* TSS must be a valid 32 bit one */ - if (!(env->tr.flags & DESC_P_MASK) || - ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || - env->tr.limit < 103) { - goto fail; - } - io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr); - io_offset += (addr >> 3); - /* Note: the check needs two bytes */ - if ((io_offset + 1) > env->tr.limit) { - goto fail; - } - val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr); - val >>= (addr & 7); - mask = (1 << size) - 1; - /* all bits must be zero to allow the I/O */ - if ((val & mask) != 0) { - fail: - raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); - } -} - -void helper_check_iob(CPUX86State *env, uint32_t t0) -{ - check_io(env, t0, 1, GETPC()); -} - -void helper_check_iow(CPUX86State *env, uint32_t t0) -{ - check_io(env, t0, 2, GETPC()); -} - -void helper_check_iol(CPUX86State *env, uint32_t t0) -{ - check_io(env, t0, 4, GETPC()); -} diff --git a/target/i386/tcg/seg_helper.h b/target/i386/tcg/seg_helper.h new file mode 100644 index 0000000000..ebf1035277 --- /dev/null +++ b/target/i386/tcg/seg_helper.h @@ -0,0 +1,66 @@ +/* + * x86 segmentation related helpers macros + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef SEG_HELPER_H +#define SEG_HELPER_H + +//#define DEBUG_PCALL + +#ifdef DEBUG_PCALL +# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__) +# define LOG_PCALL_STATE(cpu) \ + log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP) +#else +# define LOG_PCALL(...) do { } while (0) +# define LOG_PCALL_STATE(cpu) do { } while (0) +#endif + +/* + * TODO: Convert callers to compute cpu_mmu_index_kernel once + * and use *_mmuidx_ra directly. 
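The removed check_io() fetches the I/O map base from offset 0x66 of the 32-bit TSS and then requires every bitmap bit covering the accessed ports to be clear. A sketch of the bitmap test itself, assuming the bitmap is already available as a host byte array rather than being read through the guest TSS with cpu_lduw_kernel_ra():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Return true if a 'size'-byte access at 'port' is permitted, i.e. all
     * corresponding bits of the TSS I/O permission bitmap are zero.  As in
     * check_io(), a 16-bit read covers the (at most) two bytes involved.
     */
    static bool io_access_allowed(const uint8_t *bitmap, unsigned port,
                                  unsigned size)
    {
        unsigned byte = port >> 3;
        uint16_t val = bitmap[byte] | ((uint16_t)bitmap[byte + 1] << 8);
        uint16_t mask = (1u << size) - 1;
        return ((val >> (port & 7)) & mask) == 0;
    }

    int main(void)
    {
        uint8_t bitmap[16] = { 0 };
        bitmap[0x60 >> 3] = 0x02;  /* deny port 0x61 only */
        printf("%d %d\n", io_access_allowed(bitmap, 0x60, 1),   /* 1 */
                          io_access_allowed(bitmap, 0x60, 2));  /* 0 */
        return 0;
    }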
+ */ +#define cpu_ldub_kernel_ra(e, p, r) \ + cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) +#define cpu_lduw_kernel_ra(e, p, r) \ + cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) +#define cpu_ldl_kernel_ra(e, p, r) \ + cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) +#define cpu_ldq_kernel_ra(e, p, r) \ + cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) + +#define cpu_stb_kernel_ra(e, p, v, r) \ + cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) +#define cpu_stw_kernel_ra(e, p, v, r) \ + cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) +#define cpu_stl_kernel_ra(e, p, v, r) \ + cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) +#define cpu_stq_kernel_ra(e, p, v, r) \ + cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) + +#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0) +#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0) +#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0) +#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0) + +#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0) +#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0) +#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0) +#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0) + +#endif /* SEG_HELPER_H */ diff --git a/target/i386/tcg/sysemu/bpt_helper.c b/target/i386/tcg/sysemu/bpt_helper.c new file mode 100644 index 0000000000..9bdf7e170b --- /dev/null +++ b/target/i386/tcg/sysemu/bpt_helper.c @@ -0,0 +1,293 @@ +/* + * i386 breakpoint helpers - sysemu code + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "tcg/helper-tcg.h" + + +static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index) +{ + return (dr7 >> (index * 2)) & 1; +} + +static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index) +{ + return (dr7 >> (index * 2)) & 2; + +} +static inline bool hw_breakpoint_enabled(unsigned long dr7, int index) +{ + return hw_global_breakpoint_enabled(dr7, index) || + hw_local_breakpoint_enabled(dr7, index); +} + +static inline int hw_breakpoint_type(unsigned long dr7, int index) +{ + return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3; +} + +static inline int hw_breakpoint_len(unsigned long dr7, int index) +{ + int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3); + return (len == 2) ? 
8 : len + 1; +} + +static int hw_breakpoint_insert(CPUX86State *env, int index) +{ + CPUState *cs = env_cpu(env); + target_ulong dr7 = env->dr[7]; + target_ulong drN = env->dr[index]; + int err = 0; + + switch (hw_breakpoint_type(dr7, index)) { + case DR7_TYPE_BP_INST: + if (hw_breakpoint_enabled(dr7, index)) { + err = cpu_breakpoint_insert(cs, drN, BP_CPU, + &env->cpu_breakpoint[index]); + } + break; + + case DR7_TYPE_IO_RW: + /* Notice when we should enable calls to bpt_io. */ + return hw_breakpoint_enabled(env->dr[7], index) + ? HF_IOBPT_MASK : 0; + + case DR7_TYPE_DATA_WR: + if (hw_breakpoint_enabled(dr7, index)) { + err = cpu_watchpoint_insert(cs, drN, + hw_breakpoint_len(dr7, index), + BP_CPU | BP_MEM_WRITE, + &env->cpu_watchpoint[index]); + } + break; + + case DR7_TYPE_DATA_RW: + if (hw_breakpoint_enabled(dr7, index)) { + err = cpu_watchpoint_insert(cs, drN, + hw_breakpoint_len(dr7, index), + BP_CPU | BP_MEM_ACCESS, + &env->cpu_watchpoint[index]); + } + break; + } + if (err) { + env->cpu_breakpoint[index] = NULL; + } + return 0; +} + +static void hw_breakpoint_remove(CPUX86State *env, int index) +{ + CPUState *cs = env_cpu(env); + + switch (hw_breakpoint_type(env->dr[7], index)) { + case DR7_TYPE_BP_INST: + if (env->cpu_breakpoint[index]) { + cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]); + env->cpu_breakpoint[index] = NULL; + } + break; + + case DR7_TYPE_DATA_WR: + case DR7_TYPE_DATA_RW: + if (env->cpu_breakpoint[index]) { + cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]); + env->cpu_breakpoint[index] = NULL; + } + break; + + case DR7_TYPE_IO_RW: + /* HF_IOBPT_MASK cleared elsewhere. */ + break; + } +} + +void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7) +{ + target_ulong old_dr7 = env->dr[7]; + int iobpt = 0; + int i; + + new_dr7 |= DR7_FIXED_1; + + /* If nothing is changing except the global/local enable bits, + then we can make the change more efficient. */ + if (((old_dr7 ^ new_dr7) & ~0xff) == 0) { + /* Fold the global and local enable bits together into the + global fields, then xor to show which registers have + changed collective enable state. 
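The helpers above unpack DR7: the local/global enable bits occupy the low byte (two bits per breakpoint), and the type/length pairs sit at four-bit strides above the type and length base shifts, with length encoding 2 meaning an 8-byte range. A standalone decode sketch; the shift values 16 and 18 are assumptions mirroring DR7_TYPE_SHIFT and DR7_LEN_SHIFT:

    #include <stdint.h>
    #include <stdio.h>

    #define DR7_TYPE_SHIFT 16   /* assumed, matching the helpers above */
    #define DR7_LEN_SHIFT  18

    static void decode_dr7(uint32_t dr7, int index)
    {
        int local  = (dr7 >> (index * 2)) & 1;
        int global = ((dr7 >> (index * 2)) & 2) != 0;
        int type   = (dr7 >> (DR7_TYPE_SHIFT + index * 4)) & 3;
        int len    = (dr7 >> (DR7_LEN_SHIFT + index * 4)) & 3;
        int bytes  = (len == 2) ? 8 : len + 1;  /* encoding 2 means 8 bytes */

        printf("bp%d: L=%d G=%d type=%d len=%d bytes\n",
               index, local, global, type, bytes);
    }

    int main(void)
    {
        /* breakpoint 1: global enable, type 3 (data r/w), len 3 (4 bytes) */
        uint32_t dr7 = (2u << 2) | (3u << (DR7_TYPE_SHIFT + 4))
                                 | (3u << (DR7_LEN_SHIFT + 4));
        decode_dr7(dr7, 1);  /* prints: bp1: L=0 G=1 type=3 len=4 bytes */
        return 0;
    }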
*/ + int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff; + + for (i = 0; i < DR7_MAX_BP; i++) { + if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) { + hw_breakpoint_remove(env, i); + } + } + env->dr[7] = new_dr7; + for (i = 0; i < DR7_MAX_BP; i++) { + if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) { + iobpt |= hw_breakpoint_insert(env, i); + } else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW + && hw_breakpoint_enabled(new_dr7, i)) { + iobpt |= HF_IOBPT_MASK; + } + } + } else { + for (i = 0; i < DR7_MAX_BP; i++) { + hw_breakpoint_remove(env, i); + } + env->dr[7] = new_dr7; + for (i = 0; i < DR7_MAX_BP; i++) { + iobpt |= hw_breakpoint_insert(env, i); + } + } + + env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt; +} + +bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update) +{ + target_ulong dr6; + int reg; + bool hit_enabled = false; + + dr6 = env->dr[6] & ~0xf; + for (reg = 0; reg < DR7_MAX_BP; reg++) { + bool bp_match = false; + bool wp_match = false; + + switch (hw_breakpoint_type(env->dr[7], reg)) { + case DR7_TYPE_BP_INST: + if (env->dr[reg] == env->eip) { + bp_match = true; + } + break; + case DR7_TYPE_DATA_WR: + case DR7_TYPE_DATA_RW: + if (env->cpu_watchpoint[reg] && + env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) { + wp_match = true; + } + break; + case DR7_TYPE_IO_RW: + break; + } + if (bp_match || wp_match) { + dr6 |= 1 << reg; + if (hw_breakpoint_enabled(env->dr[7], reg)) { + hit_enabled = true; + } + } + } + + if (hit_enabled || force_dr6_update) { + env->dr[6] = dr6; + } + + return hit_enabled; +} + +void breakpoint_handler(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + CPUBreakpoint *bp; + + if (cs->watchpoint_hit) { + if (cs->watchpoint_hit->flags & BP_CPU) { + cs->watchpoint_hit = NULL; + if (check_hw_breakpoints(env, false)) { + raise_exception(env, EXCP01_DB); + } else { + cpu_loop_exit_noexc(cs); + } + } + } else { + QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { + if (bp->pc == env->eip) { + if (bp->flags & BP_CPU) { + check_hw_breakpoints(env, true); + raise_exception(env, EXCP01_DB); + } + break; + } + } + } +} + +void helper_set_dr(CPUX86State *env, int reg, target_ulong t0) +{ + switch (reg) { + case 0: case 1: case 2: case 3: + if (hw_breakpoint_enabled(env->dr[7], reg) + && hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) { + hw_breakpoint_remove(env, reg); + env->dr[reg] = t0; + hw_breakpoint_insert(env, reg); + } else { + env->dr[reg] = t0; + } + return; + case 4: + if (env->cr[4] & CR4_DE_MASK) { + break; + } + /* fallthru */ + case 6: + env->dr[6] = t0 | DR6_FIXED_1; + return; + case 5: + if (env->cr[4] & CR4_DE_MASK) { + break; + } + /* fallthru */ + case 7: + cpu_x86_update_dr7(env, t0); + return; + } + raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); +} + +/* Check if Port I/O is trapped by a breakpoint. 
*/ +void helper_bpt_io(CPUX86State *env, uint32_t port, + uint32_t size, target_ulong next_eip) +{ + target_ulong dr7 = env->dr[7]; + int i, hit = 0; + + for (i = 0; i < DR7_MAX_BP; ++i) { + if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW + && hw_breakpoint_enabled(dr7, i)) { + int bpt_len = hw_breakpoint_len(dr7, i); + if (port + size - 1 >= env->dr[i] + && port <= env->dr[i] + bpt_len - 1) { + hit |= 1 << i; + } + } + } + + if (hit) { + env->dr[6] = (env->dr[6] & ~0xf) | hit; + env->eip = next_eip; + raise_exception(env, EXCP01_DB); + } +} diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c new file mode 100644 index 0000000000..b6d940e04e --- /dev/null +++ b/target/i386/tcg/sysemu/excp_helper.c @@ -0,0 +1,471 @@ +/* + * x86 exception helpers - sysemu code + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "tcg/helper-tcg.h" + +int get_pg_mode(CPUX86State *env) +{ + int pg_mode = 0; + if (env->cr[0] & CR0_WP_MASK) { + pg_mode |= PG_MODE_WP; + } + if (env->cr[4] & CR4_PAE_MASK) { + pg_mode |= PG_MODE_PAE; + } + if (env->cr[4] & CR4_PSE_MASK) { + pg_mode |= PG_MODE_PSE; + } + if (env->cr[4] & CR4_PKE_MASK) { + pg_mode |= PG_MODE_PKE; + } + if (env->cr[4] & CR4_PKS_MASK) { + pg_mode |= PG_MODE_PKS; + } + if (env->cr[4] & CR4_SMEP_MASK) { + pg_mode |= PG_MODE_SMEP; + } + if (env->cr[4] & CR4_LA57_MASK) { + pg_mode |= PG_MODE_LA57; + } + if (env->hflags & HF_LMA_MASK) { + pg_mode |= PG_MODE_LMA; + } + if (env->efer & MSR_EFER_NXE) { + pg_mode |= PG_MODE_NXE; + } + return pg_mode; +} + +#define PG_ERROR_OK (-1) + +typedef hwaddr (*MMUTranslateFunc)(CPUState *cs, hwaddr gphys, MMUAccessType access_type, + int *prot); + +#define GET_HPHYS(cs, gpa, access_type, prot) \ + (get_hphys_func ? get_hphys_func(cs, gpa, access_type, prot) : gpa) + +static int mmu_translate(CPUState *cs, hwaddr addr, MMUTranslateFunc get_hphys_func, + uint64_t cr3, int is_write1, int mmu_idx, int pg_mode, + hwaddr *xlat, int *page_size, int *prot) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + uint64_t ptep, pte; + int32_t a20_mask; + target_ulong pde_addr, pte_addr; + int error_code = 0; + int is_dirty, is_write, is_user; + uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits); + uint32_t page_offset; + uint32_t pkr; + + is_user = (mmu_idx == MMU_USER_IDX); + is_write = is_write1 & 1; + a20_mask = x86_get_a20_mask(env); + + if (!(pg_mode & PG_MODE_NXE)) { + rsvd_mask |= PG_NX_MASK; + } + + if (pg_mode & PG_MODE_PAE) { + uint64_t pde, pdpe; + target_ulong pdpe_addr; + +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + bool la57 = pg_mode & PG_MODE_LA57; + uint64_t pml5e_addr, pml5e; + uint64_t pml4e_addr, pml4e; + int32_t sext; + + /* test virtual address sign extension */ + sext = la57 ? 
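helper_bpt_io() above reports an I/O breakpoint hit when the accessed port range overlaps a debug register's watched range; the condition is a closed-interval intersection test. A small sketch of just that check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * True if the access [port, port + size - 1] overlaps the watched range
     * [base, base + len - 1], spelled the same way as in helper_bpt_io().
     */
    static bool io_bp_hit(uint32_t port, uint32_t size, uint32_t base,
                          uint32_t len)
    {
        return port + size - 1 >= base && port <= base + len - 1;
    }

    int main(void)
    {
        /* A 2-byte access at 0x60 overlaps a 1-byte watch on 0x61. */
        printf("%d %d\n", io_bp_hit(0x60, 2, 0x61, 1),   /* 1 */
                          io_bp_hit(0x60, 1, 0x61, 1));  /* 0 */
        return 0;
    }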
(int64_t)addr >> 56 : (int64_t)addr >> 47; + if (get_hphys_func && sext != 0 && sext != -1) { + env->error_code = 0; + cs->exception_index = EXCP0D_GPF; + return 1; + } + + if (la57) { + pml5e_addr = ((cr3 & ~0xfff) + + (((addr >> 48) & 0x1ff) << 3)) & a20_mask; + pml5e_addr = GET_HPHYS(cs, pml5e_addr, MMU_DATA_STORE, NULL); + pml5e = x86_ldq_phys(cs, pml5e_addr); + if (!(pml5e & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pml5e & (rsvd_mask | PG_PSE_MASK)) { + goto do_fault_rsvd; + } + if (!(pml5e & PG_ACCESSED_MASK)) { + pml5e |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pml5e_addr, pml5e); + } + ptep = pml5e ^ PG_NX_MASK; + } else { + pml5e = cr3; + ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; + } + + pml4e_addr = ((pml5e & PG_ADDRESS_MASK) + + (((addr >> 39) & 0x1ff) << 3)) & a20_mask; + pml4e_addr = GET_HPHYS(cs, pml4e_addr, MMU_DATA_STORE, NULL); + pml4e = x86_ldq_phys(cs, pml4e_addr); + if (!(pml4e & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pml4e & (rsvd_mask | PG_PSE_MASK)) { + goto do_fault_rsvd; + } + if (!(pml4e & PG_ACCESSED_MASK)) { + pml4e |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pml4e_addr, pml4e); + } + ptep &= pml4e ^ PG_NX_MASK; + pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) & + a20_mask; + pdpe_addr = GET_HPHYS(cs, pdpe_addr, MMU_DATA_STORE, NULL); + pdpe = x86_ldq_phys(cs, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pdpe & rsvd_mask) { + goto do_fault_rsvd; + } + ptep &= pdpe ^ PG_NX_MASK; + if (!(pdpe & PG_ACCESSED_MASK)) { + pdpe |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pdpe_addr, pdpe); + } + if (pdpe & PG_PSE_MASK) { + /* 1 GB page */ + *page_size = 1024 * 1024 * 1024; + pte_addr = pdpe_addr; + pte = pdpe; + goto do_check_protect; + } + } else +#endif + { + /* XXX: load them when cr3 is loaded ? 
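For orientation only, not taken from the patch: the long-mode walk in mmu_translate() above indexes each paging level with 9 bits of the virtual address and 8-byte entries, which is where the repeated "(addr >> N) & 0x1ff ... << 3" expressions come from. A small self-contained sketch of the index split:

#include <stdint.h>
#include <stdio.h>

/* level 0 = PTE (bits 20:12), 1 = PDE, 2 = PDPTE, 3 = PML4E, 4 = PML5E */
static unsigned pt_index(uint64_t addr, int level)
{
    return (addr >> (12 + 9 * level)) & 0x1ff;
}

int main(void)
{
    uint64_t addr = 0x00007f1234567890ull;

    printf("PML4E %u, PDPTE %u, PDE %u, PTE %u, page offset 0x%x\n",
           pt_index(addr, 3), pt_index(addr, 2),
           pt_index(addr, 1), pt_index(addr, 0),
           (unsigned)(addr & 0xfff));
    /* Each entry sits at table_base + index * 8, i.e. the "(... << 3)"
     * term in the pml5e_addr/pml4e_addr/pdpe_addr computations. */
    return 0;
}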
*/ + pdpe_addr = ((cr3 & ~0x1f) + ((addr >> 27) & 0x18)) & + a20_mask; + pdpe_addr = GET_HPHYS(cs, pdpe_addr, MMU_DATA_STORE, NULL); + pdpe = x86_ldq_phys(cs, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + goto do_fault; + } + rsvd_mask |= PG_HI_USER_MASK; + if (pdpe & (rsvd_mask | PG_NX_MASK)) { + goto do_fault_rsvd; + } + ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; + } + + pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) & + a20_mask; + pde_addr = GET_HPHYS(cs, pde_addr, MMU_DATA_STORE, NULL); + pde = x86_ldq_phys(cs, pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pde & rsvd_mask) { + goto do_fault_rsvd; + } + ptep &= pde ^ PG_NX_MASK; + if (pde & PG_PSE_MASK) { + /* 2 MB page */ + *page_size = 2048 * 1024; + pte_addr = pde_addr; + pte = pde; + goto do_check_protect; + } + /* 4 KB page */ + if (!(pde & PG_ACCESSED_MASK)) { + pde |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pde_addr, pde); + } + pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) & + a20_mask; + pte_addr = GET_HPHYS(cs, pte_addr, MMU_DATA_STORE, NULL); + pte = x86_ldq_phys(cs, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pte & rsvd_mask) { + goto do_fault_rsvd; + } + /* combine pde and pte nx, user and rw protections */ + ptep &= pte ^ PG_NX_MASK; + *page_size = 4096; + } else { + uint32_t pde; + + /* page directory entry */ + pde_addr = ((cr3 & ~0xfff) + ((addr >> 20) & 0xffc)) & + a20_mask; + pde_addr = GET_HPHYS(cs, pde_addr, MMU_DATA_STORE, NULL); + pde = x86_ldl_phys(cs, pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + goto do_fault; + } + ptep = pde | PG_NX_MASK; + + /* if PSE bit is set, then we use a 4MB page */ + if ((pde & PG_PSE_MASK) && (pg_mode & PG_MODE_PSE)) { + *page_size = 4096 * 1024; + pte_addr = pde_addr; + + /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved. + * Leave bits 20-13 in place for setting accessed/dirty bits below. + */ + pte = pde | ((pde & 0x1fe000LL) << (32 - 13)); + rsvd_mask = 0x200000; + goto do_check_protect_pse36; + } + + if (!(pde & PG_ACCESSED_MASK)) { + pde |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pde_addr, pde); + } + + /* page directory entry */ + pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & + a20_mask; + pte_addr = GET_HPHYS(cs, pte_addr, MMU_DATA_STORE, NULL); + pte = x86_ldl_phys(cs, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + goto do_fault; + } + /* combine pde and pte user and rw protections */ + ptep &= pte | PG_NX_MASK; + *page_size = 4096; + rsvd_mask = 0; + } + +do_check_protect: + rsvd_mask |= (*page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; +do_check_protect_pse36: + if (pte & rsvd_mask) { + goto do_fault_rsvd; + } + ptep ^= PG_NX_MASK; + + /* can the page can be put in the TLB? prot will tell us */ + if (is_user && !(ptep & PG_USER_MASK)) { + goto do_fault_protect; + } + + *prot = 0; + if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) { + *prot |= PAGE_READ; + if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) { + *prot |= PAGE_WRITE; + } + } + if (!(ptep & PG_NX_MASK) && + (mmu_idx == MMU_USER_IDX || + !((pg_mode & PG_MODE_SMEP) && (ptep & PG_USER_MASK)))) { + *prot |= PAGE_EXEC; + } + + if (!(env->hflags & HF_LMA_MASK)) { + pkr = 0; + } else if (ptep & PG_USER_MASK) { + pkr = pg_mode & PG_MODE_PKE ? env->pkru : 0; + } else { + pkr = pg_mode & PG_MODE_PKS ? 
env->pkrs : 0; + } + if (pkr) { + uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT; + uint32_t pkr_ad = (pkr >> pk * 2) & 1; + uint32_t pkr_wd = (pkr >> pk * 2) & 2; + uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + + if (pkr_ad) { + pkr_prot &= ~(PAGE_READ | PAGE_WRITE); + } else if (pkr_wd && (is_user || (pg_mode & PG_MODE_WP))) { + pkr_prot &= ~PAGE_WRITE; + } + + *prot &= pkr_prot; + if ((pkr_prot & (1 << is_write1)) == 0) { + assert(is_write1 != 2); + error_code |= PG_ERROR_PK_MASK; + goto do_fault_protect; + } + } + + if ((*prot & (1 << is_write1)) == 0) { + goto do_fault_protect; + } + + /* yes, it can! */ + is_dirty = is_write && !(pte & PG_DIRTY_MASK); + if (!(pte & PG_ACCESSED_MASK) || is_dirty) { + pte |= PG_ACCESSED_MASK; + if (is_dirty) { + pte |= PG_DIRTY_MASK; + } + x86_stl_phys_notdirty(cs, pte_addr, pte); + } + + if (!(pte & PG_DIRTY_MASK)) { + /* only set write access if already dirty... otherwise wait + for dirty access */ + assert(!is_write); + *prot &= ~PAGE_WRITE; + } + + pte = pte & a20_mask; + + /* align to page_size */ + pte &= PG_ADDRESS_MASK & ~(*page_size - 1); + page_offset = addr & (*page_size - 1); + *xlat = GET_HPHYS(cs, pte + page_offset, is_write1, prot); + return PG_ERROR_OK; + + do_fault_rsvd: + error_code |= PG_ERROR_RSVD_MASK; + do_fault_protect: + error_code |= PG_ERROR_P_MASK; + do_fault: + error_code |= (is_write << PG_ERROR_W_BIT); + if (is_user) + error_code |= PG_ERROR_U_MASK; + if (is_write1 == 2 && + (((pg_mode & PG_MODE_NXE) && (pg_mode & PG_MODE_PAE)) || + (pg_mode & PG_MODE_SMEP))) + error_code |= PG_ERROR_I_D_MASK; + return error_code; +} + +static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type, + int *prot) +{ + CPUX86State *env = &X86_CPU(cs)->env; + uint64_t exit_info_1; + int page_size; + int next_prot; + hwaddr hphys; + + if (likely(!(env->hflags2 & HF2_NPT_MASK))) { + return gphys; + } + + exit_info_1 = mmu_translate(cs, gphys, NULL, env->nested_cr3, + access_type, MMU_USER_IDX, env->nested_pg_mode, + &hphys, &page_size, &next_prot); + if (exit_info_1 == PG_ERROR_OK) { + if (prot) { + *prot &= next_prot; + } + return hphys; + } + + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), + gphys); + if (prot) { + exit_info_1 |= SVM_NPTEXIT_GPA; + } else { /* page table access */ + exit_info_1 |= SVM_NPTEXIT_GPT; + } + cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr); +} + +/* return value: + * -1 = cannot handle fault + * 0 = nothing more to do + * 1 = generate PF fault + */ +static int handle_mmu_fault(CPUState *cs, vaddr addr, int size, + int is_write1, int mmu_idx) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + int error_code = PG_ERROR_OK; + int pg_mode, prot, page_size; + hwaddr paddr; + hwaddr vaddr; + +#if defined(DEBUG_MMU) + printf("MMU fault: addr=%" VADDR_PRIx " w=%d mmu=%d eip=" TARGET_FMT_lx "\n", + addr, is_write1, mmu_idx, env->eip); +#endif + + if (!(env->cr[0] & CR0_PG_MASK)) { + paddr = addr; +#ifdef TARGET_X86_64 + if (!(env->hflags & HF_LMA_MASK)) { + /* Without long mode we can only address 32bits in real mode */ + paddr = (uint32_t)paddr; + } +#endif + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + page_size = 4096; + } else { + pg_mode = get_pg_mode(env); + error_code = mmu_translate(cs, addr, get_hphys, env->cr[3], is_write1, + mmu_idx, pg_mode, + &paddr, &page_size, &prot); + } + + if (error_code == PG_ERROR_OK) { + /* Even if 4MB pages, we map only one 4KB page in the cache to + avoid filling it too fast */ + vaddr = addr & 
TARGET_PAGE_MASK; + paddr &= TARGET_PAGE_MASK; + + assert(prot & (1 << is_write1)); + tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env), + prot, mmu_idx, page_size); + return 0; + } else { + if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) { + /* cr2 is not modified in case of exceptions */ + x86_stq_phys(cs, + env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), + addr); + } else { + env->cr[2] = addr; + } + env->error_code = error_code; + cs->exception_index = EXCP0E_PAGE; + return 1; + } +} + +bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + env->retaddr = retaddr; + if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) { + /* FIXME: On error in get_hphys we have already jumped out. */ + g_assert(!probe); + raise_exception_err_ra(env, cs->exception_index, + env->error_code, retaddr); + } + return true; +} diff --git a/target/i386/tcg/sysemu/fpu_helper.c b/target/i386/tcg/sysemu/fpu_helper.c new file mode 100644 index 0000000000..1c3610da3b --- /dev/null +++ b/target/i386/tcg/sysemu/fpu_helper.c @@ -0,0 +1,57 @@ +/* + * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers (sysemu code) + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "hw/irq.h" + +static qemu_irq ferr_irq; + +void x86_register_ferr_irq(qemu_irq irq) +{ + ferr_irq = irq; +} + +void fpu_check_raise_ferr_irq(CPUX86State *env) +{ + if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) { + qemu_irq_raise(ferr_irq); + return; + } +} + +void cpu_clear_ignne(void) +{ + CPUX86State *env = &X86_CPU(first_cpu)->env; + env->hflags2 &= ~HF2_IGNNE_MASK; +} + +void cpu_set_ignne(void) +{ + CPUX86State *env = &X86_CPU(first_cpu)->env; + env->hflags2 |= HF2_IGNNE_MASK; + /* + * We get here in response to a write to port F0h. The chipset should + * deassert FP_IRQ and FERR# instead should stay signaled until FPSW_SE is + * cleared, because FERR# and FP_IRQ are two separate pins on real + * hardware. However, we don't model FERR# as a qemu_irq, so we just + * do directly what the chipset would do, i.e. deassert FP_IRQ. 
+ */ + qemu_irq_lower(ferr_irq); +} diff --git a/target/i386/tcg/sysemu/meson.build b/target/i386/tcg/sysemu/meson.build new file mode 100644 index 0000000000..2e444e766a --- /dev/null +++ b/target/i386/tcg/sysemu/meson.build @@ -0,0 +1,10 @@ +i386_softmmu_ss.add(when: ['CONFIG_TCG', 'CONFIG_SOFTMMU'], if_true: files( + 'tcg-cpu.c', + 'smm_helper.c', + 'excp_helper.c', + 'bpt_helper.c', + 'misc_helper.c', + 'fpu_helper.c', + 'svm_helper.c', + 'seg_helper.c', +)) diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/sysemu/misc_helper.c new file mode 100644 index 0000000000..0cef2f1a4c --- /dev/null +++ b/target/i386/tcg/sysemu/misc_helper.c @@ -0,0 +1,490 @@ +/* + * x86 misc helpers - sysemu code + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" +#include "exec/address-spaces.h" +#include "tcg/helper-tcg.h" + +void helper_outb(CPUX86State *env, uint32_t port, uint32_t data) +{ + address_space_stb(&address_space_io, port, data, + cpu_get_mem_attrs(env), NULL); +} + +target_ulong helper_inb(CPUX86State *env, uint32_t port) +{ + return address_space_ldub(&address_space_io, port, + cpu_get_mem_attrs(env), NULL); +} + +void helper_outw(CPUX86State *env, uint32_t port, uint32_t data) +{ + address_space_stw(&address_space_io, port, data, + cpu_get_mem_attrs(env), NULL); +} + +target_ulong helper_inw(CPUX86State *env, uint32_t port) +{ + return address_space_lduw(&address_space_io, port, + cpu_get_mem_attrs(env), NULL); +} + +void helper_outl(CPUX86State *env, uint32_t port, uint32_t data) +{ + address_space_stl(&address_space_io, port, data, + cpu_get_mem_attrs(env), NULL); +} + +target_ulong helper_inl(CPUX86State *env, uint32_t port) +{ + return address_space_ldl(&address_space_io, port, + cpu_get_mem_attrs(env), NULL); +} + +target_ulong helper_read_crN(CPUX86State *env, int reg) +{ + target_ulong val; + + switch (reg) { + default: + val = env->cr[reg]; + break; + case 8: + if (!(env->hflags2 & HF2_VINTR_MASK)) { + val = cpu_get_apic_tpr(env_archcpu(env)->apic_state); + } else { + val = env->v_tpr; + } + break; + } + return val; +} + +void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) +{ + switch (reg) { + case 0: + cpu_x86_update_cr0(env, t0); + break; + case 3: + cpu_x86_update_cr3(env, t0); + break; + case 4: + if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) && + (env->hflags & HF_CS64_MASK)) { + raise_exception_ra(env, EXCP0D_GPF, GETPC()); + } + cpu_x86_update_cr4(env, t0); + break; + case 8: + if (!(env->hflags2 & HF2_VINTR_MASK)) { + qemu_mutex_lock_iothread(); + cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0); + qemu_mutex_unlock_iothread(); + } + env->v_tpr = t0 & 0x0f; + break; + default: + env->cr[reg] = t0; + break; + } +} + +void helper_wrmsr(CPUX86State 
*env) +{ + uint64_t val; + CPUState *cs = env_cpu(env); + + cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC()); + + val = ((uint32_t)env->regs[R_EAX]) | + ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); + + switch ((uint32_t)env->regs[R_ECX]) { + case MSR_IA32_SYSENTER_CS: + env->sysenter_cs = val & 0xffff; + break; + case MSR_IA32_SYSENTER_ESP: + env->sysenter_esp = val; + break; + case MSR_IA32_SYSENTER_EIP: + env->sysenter_eip = val; + break; + case MSR_IA32_APICBASE: + cpu_set_apic_base(env_archcpu(env)->apic_state, val); + break; + case MSR_EFER: + { + uint64_t update_mask; + + update_mask = 0; + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) { + update_mask |= MSR_EFER_SCE; + } + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { + update_mask |= MSR_EFER_LME; + } + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) { + update_mask |= MSR_EFER_FFXSR; + } + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) { + update_mask |= MSR_EFER_NXE; + } + if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { + update_mask |= MSR_EFER_SVME; + } + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) { + update_mask |= MSR_EFER_FFXSR; + } + cpu_load_efer(env, (env->efer & ~update_mask) | + (val & update_mask)); + } + break; + case MSR_STAR: + env->star = val; + break; + case MSR_PAT: + env->pat = val; + break; + case MSR_IA32_PKRS: + if (val & 0xFFFFFFFF00000000ull) { + goto error; + } + env->pkrs = val; + tlb_flush(cs); + break; + case MSR_VM_HSAVE_PA: + env->vm_hsave = val; + break; +#ifdef TARGET_X86_64 + case MSR_LSTAR: + env->lstar = val; + break; + case MSR_CSTAR: + env->cstar = val; + break; + case MSR_FMASK: + env->fmask = val; + break; + case MSR_FSBASE: + env->segs[R_FS].base = val; + break; + case MSR_GSBASE: + env->segs[R_GS].base = val; + break; + case MSR_KERNELGSBASE: + env->kernelgsbase = val; + break; +#endif + case MSR_MTRRphysBase(0): + case MSR_MTRRphysBase(1): + case MSR_MTRRphysBase(2): + case MSR_MTRRphysBase(3): + case MSR_MTRRphysBase(4): + case MSR_MTRRphysBase(5): + case MSR_MTRRphysBase(6): + case MSR_MTRRphysBase(7): + env->mtrr_var[((uint32_t)env->regs[R_ECX] - + MSR_MTRRphysBase(0)) / 2].base = val; + break; + case MSR_MTRRphysMask(0): + case MSR_MTRRphysMask(1): + case MSR_MTRRphysMask(2): + case MSR_MTRRphysMask(3): + case MSR_MTRRphysMask(4): + case MSR_MTRRphysMask(5): + case MSR_MTRRphysMask(6): + case MSR_MTRRphysMask(7): + env->mtrr_var[((uint32_t)env->regs[R_ECX] - + MSR_MTRRphysMask(0)) / 2].mask = val; + break; + case MSR_MTRRfix64K_00000: + env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - + MSR_MTRRfix64K_00000] = val; + break; + case MSR_MTRRfix16K_80000: + case MSR_MTRRfix16K_A0000: + env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - + MSR_MTRRfix16K_80000 + 1] = val; + break; + case MSR_MTRRfix4K_C0000: + case MSR_MTRRfix4K_C8000: + case MSR_MTRRfix4K_D0000: + case MSR_MTRRfix4K_D8000: + case MSR_MTRRfix4K_E0000: + case MSR_MTRRfix4K_E8000: + case MSR_MTRRfix4K_F0000: + case MSR_MTRRfix4K_F8000: + env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - + MSR_MTRRfix4K_C0000 + 3] = val; + break; + case MSR_MTRRdefType: + env->mtrr_deftype = val; + break; + case MSR_MCG_STATUS: + env->mcg_status = val; + break; + case MSR_MCG_CTL: + if ((env->mcg_cap & MCG_CTL_P) + && (val == 0 || val == ~(uint64_t)0)) { + env->mcg_ctl = val; + } + break; + case MSR_TSC_AUX: + env->tsc_aux = val; + break; + case MSR_IA32_MISC_ENABLE: + env->msr_ia32_misc_enable = val; + break; + case MSR_IA32_BNDCFGS: + /* FIXME: #GP if reserved bits are set. 
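As an illustration, separate from the patch: the MSR_MTRRphysBase/Mask cases above rely on the variable-range MTRRs being laid out as (base, mask) MSR pairs starting at 0x200, so the pair number is (msr - 0x200) / 2 and the low bit selects base versus mask. A tiny sketch with an assumed constant name:

#include <stdint.h>
#include <stdio.h>

#define MTRR_PHYSBASE0 0x200u                 /* IA32_MTRR_PHYSBASE0 */

int main(void)
{
    uint32_t msr = 0x205;                     /* IA32_MTRR_PHYSMASK2 */
    unsigned pair = (msr - MTRR_PHYSBASE0) / 2;
    unsigned is_mask = (msr - MTRR_PHYSBASE0) & 1;

    printf("MSR 0x%x -> variable MTRR pair %u, %s register\n",
           msr, pair, is_mask ? "mask" : "base");
    return 0;
}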
*/ + /* FIXME: Extend highest implemented bit of linear address. */ + env->msr_bndcfgs = val; + cpu_sync_bndcs_hflags(env); + break; + default: + if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL + && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + + (4 * env->mcg_cap & 0xff)) { + uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL; + if ((offset & 0x3) != 0 + || (val == 0 || val == ~(uint64_t)0)) { + env->mce_banks[offset] = val; + } + break; + } + /* XXX: exception? */ + break; + } + return; +error: + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); +} + +void helper_rdmsr(CPUX86State *env) +{ + X86CPU *x86_cpu = env_archcpu(env); + uint64_t val; + + cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC()); + + switch ((uint32_t)env->regs[R_ECX]) { + case MSR_IA32_SYSENTER_CS: + val = env->sysenter_cs; + break; + case MSR_IA32_SYSENTER_ESP: + val = env->sysenter_esp; + break; + case MSR_IA32_SYSENTER_EIP: + val = env->sysenter_eip; + break; + case MSR_IA32_APICBASE: + val = cpu_get_apic_base(env_archcpu(env)->apic_state); + break; + case MSR_EFER: + val = env->efer; + break; + case MSR_STAR: + val = env->star; + break; + case MSR_PAT: + val = env->pat; + break; + case MSR_IA32_PKRS: + val = env->pkrs; + break; + case MSR_VM_HSAVE_PA: + val = env->vm_hsave; + break; + case MSR_IA32_PERF_STATUS: + /* tsc_increment_by_tick */ + val = 1000ULL; + /* CPU multiplier */ + val |= (((uint64_t)4ULL) << 40); + break; +#ifdef TARGET_X86_64 + case MSR_LSTAR: + val = env->lstar; + break; + case MSR_CSTAR: + val = env->cstar; + break; + case MSR_FMASK: + val = env->fmask; + break; + case MSR_FSBASE: + val = env->segs[R_FS].base; + break; + case MSR_GSBASE: + val = env->segs[R_GS].base; + break; + case MSR_KERNELGSBASE: + val = env->kernelgsbase; + break; + case MSR_TSC_AUX: + val = env->tsc_aux; + break; +#endif + case MSR_SMI_COUNT: + val = env->msr_smi_count; + break; + case MSR_MTRRphysBase(0): + case MSR_MTRRphysBase(1): + case MSR_MTRRphysBase(2): + case MSR_MTRRphysBase(3): + case MSR_MTRRphysBase(4): + case MSR_MTRRphysBase(5): + case MSR_MTRRphysBase(6): + case MSR_MTRRphysBase(7): + val = env->mtrr_var[((uint32_t)env->regs[R_ECX] - + MSR_MTRRphysBase(0)) / 2].base; + break; + case MSR_MTRRphysMask(0): + case MSR_MTRRphysMask(1): + case MSR_MTRRphysMask(2): + case MSR_MTRRphysMask(3): + case MSR_MTRRphysMask(4): + case MSR_MTRRphysMask(5): + case MSR_MTRRphysMask(6): + case MSR_MTRRphysMask(7): + val = env->mtrr_var[((uint32_t)env->regs[R_ECX] - + MSR_MTRRphysMask(0)) / 2].mask; + break; + case MSR_MTRRfix64K_00000: + val = env->mtrr_fixed[0]; + break; + case MSR_MTRRfix16K_80000: + case MSR_MTRRfix16K_A0000: + val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - + MSR_MTRRfix16K_80000 + 1]; + break; + case MSR_MTRRfix4K_C0000: + case MSR_MTRRfix4K_C8000: + case MSR_MTRRfix4K_D0000: + case MSR_MTRRfix4K_D8000: + case MSR_MTRRfix4K_E0000: + case MSR_MTRRfix4K_E8000: + case MSR_MTRRfix4K_F0000: + case MSR_MTRRfix4K_F8000: + val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - + MSR_MTRRfix4K_C0000 + 3]; + break; + case MSR_MTRRdefType: + val = env->mtrr_deftype; + break; + case MSR_MTRRcap: + if (env->features[FEAT_1_EDX] & CPUID_MTRR) { + val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | + MSR_MTRRcap_WC_SUPPORTED; + } else { + /* XXX: exception? 
*/ + val = 0; + } + break; + case MSR_MCG_CAP: + val = env->mcg_cap; + break; + case MSR_MCG_CTL: + if (env->mcg_cap & MCG_CTL_P) { + val = env->mcg_ctl; + } else { + val = 0; + } + break; + case MSR_MCG_STATUS: + val = env->mcg_status; + break; + case MSR_IA32_MISC_ENABLE: + val = env->msr_ia32_misc_enable; + break; + case MSR_IA32_BNDCFGS: + val = env->msr_bndcfgs; + break; + case MSR_IA32_UCODE_REV: + val = x86_cpu->ucode_rev; + break; + default: + if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL + && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + + (4 * env->mcg_cap & 0xff)) { + uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL; + val = env->mce_banks[offset]; + break; + } + /* XXX: exception? */ + val = 0; + break; + } + env->regs[R_EAX] = (uint32_t)(val); + env->regs[R_EDX] = (uint32_t)(val >> 32); +} + +void helper_flush_page(CPUX86State *env, target_ulong addr) +{ + tlb_flush_page(env_cpu(env), addr); +} + +static void QEMU_NORETURN do_hlt(CPUX86State *env) +{ + CPUState *cs = env_cpu(env); + + env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */ + cs->halted = 1; + cs->exception_index = EXCP_HLT; + cpu_loop_exit(cs); +} + +void QEMU_NORETURN helper_hlt(CPUX86State *env, int next_eip_addend) +{ + cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC()); + env->eip += next_eip_addend; + + do_hlt(env); +} + +void helper_monitor(CPUX86State *env, target_ulong ptr) +{ + if ((uint32_t)env->regs[R_ECX] != 0) { + raise_exception_ra(env, EXCP0D_GPF, GETPC()); + } + /* XXX: store address? */ + cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC()); +} + +void QEMU_NORETURN helper_mwait(CPUX86State *env, int next_eip_addend) +{ + CPUState *cs = env_cpu(env); + + if ((uint32_t)env->regs[R_ECX] != 0) { + raise_exception_ra(env, EXCP0D_GPF, GETPC()); + } + cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC()); + env->eip += next_eip_addend; + + /* XXX: not complete but not completely erroneous */ + if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) { + do_pause(env); + } else { + do_hlt(env); + } +} diff --git a/target/i386/tcg/sysemu/seg_helper.c b/target/i386/tcg/sysemu/seg_helper.c new file mode 100644 index 0000000000..82c0856c41 --- /dev/null +++ b/target/i386/tcg/sysemu/seg_helper.c @@ -0,0 +1,154 @@ +/* + * x86 segmentation related helpers: (sysemu-only code) + * TSS, interrupts, system calls, jumps and call/task gates, descriptors + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" +#include "tcg/helper-tcg.h" +#include "../seg_helper.h" + +#ifdef TARGET_X86_64 +void helper_syscall(CPUX86State *env, int next_eip_addend) +{ + int selector; + + if (!(env->efer & MSR_EFER_SCE)) { + raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); + } + selector = (env->star >> 32) & 0xffff; + if (env->hflags & HF_LMA_MASK) { + int code64; + + env->regs[R_ECX] = env->eip + next_eip_addend; + env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK; + + code64 = env->hflags & HF_CS64_MASK; + + env->eflags &= ~(env->fmask | RF_MASK); + cpu_load_eflags(env, env->eflags, 0); + cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | + DESC_L_MASK); + cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_W_MASK | DESC_A_MASK); + if (code64) { + env->eip = env->lstar; + } else { + env->eip = env->cstar; + } + } else { + env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend); + + env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK); + cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_W_MASK | DESC_A_MASK); + env->eip = (uint32_t)env->star; + } +} +#endif /* TARGET_X86_64 */ + +void handle_even_inj(CPUX86State *env, int intno, int is_int, + int error_code, int is_hw, int rm) +{ + CPUState *cs = env_cpu(env); + uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, + control.event_inj)); + + if (!(event_inj & SVM_EVTINJ_VALID)) { + int type; + + if (is_int) { + type = SVM_EVTINJ_TYPE_SOFT; + } else { + type = SVM_EVTINJ_TYPE_EXEPT; + } + event_inj = intno | type | SVM_EVTINJ_VALID; + if (!rm && exception_has_error_code(intno)) { + event_inj |= SVM_EVTINJ_VALID_ERR; + x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, + control.event_inj_err), + error_code); + } + x86_stl_phys(cs, + env->vm_vmcb + offsetof(struct vmcb, control.event_inj), + event_inj); + } +} + +void x86_cpu_do_interrupt(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + if (cs->exception_index == EXCP_VMEXIT) { + assert(env->old_exception == -1); + do_vmexit(env); + } else { + do_interrupt_all(cpu, cs->exception_index, + env->exception_is_int, + env->error_code, + env->exception_next_eip, 0); + /* successfully delivered */ + env->old_exception = -1; + } +} + +/* check if Port I/O is allowed in TSS */ +void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size) +{ + uintptr_t retaddr = GETPC(); + uint32_t io_offset, val, mask; + + /* TSS must be a valid 32 bit one */ + if (!(env->tr.flags & DESC_P_MASK) || + ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || + env->tr.limit < 103) { + goto fail; + } + io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr); + io_offset += (addr >> 3); + /* Note: the check needs two bytes */ + if ((io_offset + 1) > env->tr.limit) { + goto fail; + } + val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr); + val >>= (addr & 7); + mask = (1 << size) - 1; + /* all bits must be zero to allow the I/O */ + if ((val & mask) != 0) { + fail: + 
raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); + } +} diff --git a/target/i386/tcg/smm_helper.c b/target/i386/tcg/sysemu/smm_helper.c index 62d027abd3..a45b5651c3 100644 --- a/target/i386/tcg/smm_helper.c +++ b/target/i386/tcg/sysemu/smm_helper.c @@ -1,5 +1,5 @@ /* - * x86 SMM helpers + * x86 SMM helpers (sysemu-only) * * Copyright (c) 2003 Fabrice Bellard * @@ -18,27 +18,14 @@ */ #include "qemu/osdep.h" -#include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/log.h" -#include "helper-tcg.h" +#include "tcg/helper-tcg.h" /* SMM support */ -#if defined(CONFIG_USER_ONLY) - -void do_smm_enter(X86CPU *cpu) -{ -} - -void helper_rsm(CPUX86State *env) -{ -} - -#else - #ifdef TARGET_X86_64 #define SMM_REVISION_ID 0x00020064 #else @@ -330,5 +317,3 @@ void helper_rsm(CPUX86State *env) qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n"); log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP); } - -#endif /* !CONFIG_USER_ONLY */ diff --git a/target/i386/tcg/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c index 0145afceae..9d671297cf 100644 --- a/target/i386/tcg/svm_helper.c +++ b/target/i386/tcg/sysemu/svm_helper.c @@ -1,5 +1,5 @@ /* - * x86 SVM helpers + * x86 SVM helpers (sysemu only) * * Copyright (c) 2003 Fabrice Bellard * @@ -22,66 +22,10 @@ #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" -#include "helper-tcg.h" +#include "tcg/helper-tcg.h" /* Secure Virtual Machine helpers */ -#if defined(CONFIG_USER_ONLY) - -void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) -{ -} - -void helper_vmmcall(CPUX86State *env) -{ -} - -void helper_vmload(CPUX86State *env, int aflag) -{ -} - -void helper_vmsave(CPUX86State *env, int aflag) -{ -} - -void helper_stgi(CPUX86State *env) -{ -} - -void helper_clgi(CPUX86State *env) -{ -} - -void helper_skinit(CPUX86State *env) -{ -} - -void helper_invlpga(CPUX86State *env, int aflag) -{ -} - -void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1, - uintptr_t retaddr) -{ - assert(0); -} - -void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type, - uint64_t param) -{ -} - -void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, - uint64_t param, uintptr_t retaddr) -{ -} - -void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param, - uint32_t next_eip_addend) -{ -} -#else - static inline void svm_save_seg(CPUX86State *env, hwaddr addr, const SegmentCache *sc) { @@ -219,18 +163,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) control.nested_cr3)); env->hflags2 |= HF2_NPT_MASK; - if (env->cr[4] & CR4_PAE_MASK) { - env->nested_pg_mode |= SVM_NPT_PAE; - } - if (env->cr[4] & CR4_PSE_MASK) { - env->nested_pg_mode |= SVM_NPT_PSE; - } - if (env->hflags & HF_LMA_MASK) { - env->nested_pg_mode |= SVM_NPT_LMA; - } - if (env->efer & MSR_EFER_NXE) { - env->nested_pg_mode |= SVM_NPT_NXE; - } + env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK; } /* enable intercepts */ @@ -479,31 +412,6 @@ void helper_clgi(CPUX86State *env) env->hflags2 &= ~HF2_GIF_MASK; } -void helper_skinit(CPUX86State *env) -{ - cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC()); - /* XXX: not implemented */ - raise_exception(env, EXCP06_ILLOP); -} - -void helper_invlpga(CPUX86State *env, int aflag) -{ - X86CPU *cpu = env_archcpu(env); - target_ulong addr; - - cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC()); - - if (aflag == 2) { - addr = env->regs[R_EAX]; - } else { - addr = (uint32_t)env->regs[R_EAX]; - 
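A standalone sketch, not taken from the patch, of the TSS I/O permission bitmap test that helper_check_io() above performs: the 16-bit word at TSS offset 0x66 gives the bitmap's offset, port N is governed by bit (N & 7) of byte (N >> 3), and an access is allowed only if every covered bit is clear. The tss[] buffer below is a made-up stand-in for guest memory, and the real helper additionally validates the TSS descriptor type and limit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool io_allowed(const uint8_t *tss, uint32_t port, uint32_t size)
{
    uint32_t iomap_base = tss[0x66] | (tss[0x67] << 8);   /* little endian */
    uint32_t off = iomap_base + (port >> 3);
    /* Read two bytes so an access straddling a byte boundary is still
     * checked in one go, mirroring the 16-bit load in the helper. */
    uint32_t bits = (tss[off] | (tss[off + 1] << 8)) >> (port & 7);

    return (bits & ((1u << size) - 1)) == 0;
}

int main(void)
{
    uint8_t tss[0x200] = { 0 };

    tss[0x66] = 0x68;                                /* bitmap at offset 0x68 */
    tss[0x68 + (0x60 >> 3)] |= 1 << (0x60 & 7);      /* deny port 0x60 */

    printf("port 0x60: %s\n", io_allowed(tss, 0x60, 1) ? "ok" : "#GP");
    printf("port 0x61: %s\n", io_allowed(tss, 0x61, 1) ? "ok" : "#GP");
    return 0;
}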
} - - /* XXX: could use the ASID to see if it is needed to do the - flush */ - tlb_flush_page(CPU(cpu), addr); -} - void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, uint64_t param, uintptr_t retaddr) { @@ -580,10 +488,9 @@ void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, } } -void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type, - uint64_t param) +void helper_svm_check_intercept(CPUX86State *env, uint32_t type) { - cpu_svm_check_intercept_param(env, type, param, GETPC()); + cpu_svm_check_intercept_param(env, type, 0, GETPC()); } void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param, @@ -796,5 +703,3 @@ void do_vmexit(CPUX86State *env) host's code segment or non-canonical (in the case of long mode), a #GP fault is delivered inside the host. */ } - -#endif diff --git a/target/i386/tcg/sysemu/tcg-cpu.c b/target/i386/tcg/sysemu/tcg-cpu.c new file mode 100644 index 0000000000..c223c0fe9b --- /dev/null +++ b/target/i386/tcg/sysemu/tcg-cpu.c @@ -0,0 +1,83 @@ +/* + * i386 TCG cpu class initialization functions specific to sysemu + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "tcg/helper-tcg.h" + +#include "sysemu/sysemu.h" +#include "qemu/units.h" +#include "exec/address-spaces.h" + +#include "tcg/tcg-cpu.h" + +static void tcg_cpu_machine_done(Notifier *n, void *unused) +{ + X86CPU *cpu = container_of(n, X86CPU, machine_done); + MemoryRegion *smram = + (MemoryRegion *) object_resolve_path("/machine/smram", NULL); + + if (smram) { + cpu->smram = g_new(MemoryRegion, 1); + memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", + smram, 0, 4 * GiB); + memory_region_set_enabled(cpu->smram, true); + memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, + cpu->smram, 1); + } +} + +bool tcg_cpu_realizefn(CPUState *cs, Error **errp) +{ + X86CPU *cpu = X86_CPU(cs); + + /* + * The realize order is important, since x86_cpu_realize() checks if + * nothing else has been set by the user (or by accelerators) in + * cpu->ucode_rev and cpu->phys_bits, and the memory regions + * initialized here are needed for the vcpu initialization. + * + * realize order: + * tcg_cpu -> host_cpu -> x86_cpu + */ + cpu->cpu_as_mem = g_new(MemoryRegion, 1); + cpu->cpu_as_root = g_new(MemoryRegion, 1); + + /* Outer container... */ + memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull); + memory_region_set_enabled(cpu->cpu_as_root, true); + + /* + * ... with two regions inside: normal system memory with low + * priority, and... 
+ */ + memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory", + get_system_memory(), 0, ~0ull); + memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0); + memory_region_set_enabled(cpu->cpu_as_mem, true); + + cs->num_ases = 2; + cpu_address_space_init(cs, 0, "cpu-memory", cs->memory); + cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root); + + /* ... SMRAM with higher priority, linked from /machine/smram. */ + cpu->machine_done.notify = tcg_cpu_machine_done; + qemu_add_machine_init_done_notifier(&cpu->machine_done); + return true; +} diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c index 1e125d2175..ba39531aa5 100644 --- a/target/i386/tcg/tcg-cpu.c +++ b/target/i386/tcg/tcg-cpu.c @@ -19,14 +19,11 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "tcg-cpu.h" -#include "exec/exec-all.h" -#include "sysemu/runstate.h" #include "helper-tcg.h" +#include "qemu/accel.h" +#include "hw/core/accel-cpu.h" -#if !defined(CONFIG_USER_ONLY) -#include "hw/i386/apic.h" -#endif +#include "tcg-cpu.h" /* Frob eflags into and out of the CPU temporary format. */ @@ -72,7 +69,52 @@ static struct TCGCPUOps x86_tcg_ops = { #endif /* !CONFIG_USER_ONLY */ }; -void tcg_cpu_common_class_init(CPUClass *cc) +static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) { + /* for x86, all cpus use the same set of operations */ cc->tcg_ops = &x86_tcg_ops; } + +static void tcg_cpu_class_init(CPUClass *cc) +{ + cc->init_accel_cpu = tcg_cpu_init_ops; +} + +/* + * TCG-specific defaults that override all CPU models when using TCG + */ +static PropValue tcg_default_props[] = { + { "vme", "off" }, + { NULL, NULL }, +}; + +static void tcg_cpu_instance_init(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + /* Special cases not set in the X86CPUDefinition structs: */ + x86_cpu_apply_props(cpu, tcg_default_props); +} + +static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data) +{ + AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); + +#ifndef CONFIG_USER_ONLY + acc->cpu_realizefn = tcg_cpu_realizefn; +#endif /* CONFIG_USER_ONLY */ + + acc->cpu_class_init = tcg_cpu_class_init; + acc->cpu_instance_init = tcg_cpu_instance_init; +} +static const TypeInfo tcg_cpu_accel_type_info = { + .name = ACCEL_CPU_NAME("tcg"), + + .parent = TYPE_ACCEL_CPU, + .class_init = tcg_cpu_accel_class_init, + .abstract = true, +}; +static void tcg_cpu_accel_register_types(void) +{ + type_register_static(&tcg_cpu_accel_type_info); +} +type_init(tcg_cpu_accel_register_types); diff --git a/target/i386/tcg/tcg-cpu.h b/target/i386/tcg/tcg-cpu.h index 81f02e562e..36bd300af0 100644 --- a/target/i386/tcg/tcg-cpu.h +++ b/target/i386/tcg/tcg-cpu.h @@ -1,15 +1,24 @@ /* - * i386 TCG CPU class initialization + * i386 TCG cpu class initialization functions * - * Copyright 2020 SUSE LLC + * Copyright (c) 2003 Fabrice Bellard * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ - #ifndef TCG_CPU_H #define TCG_CPU_H -void tcg_cpu_common_class_init(CPUClass *cc); +bool tcg_cpu_realizefn(CPUState *cs, Error **errp); #endif /* TCG_CPU_H */ diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index 880bc45561..834186bcae 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -39,16 +39,7 @@ #define PREFIX_DATA 0x08 #define PREFIX_ADR 0x10 #define PREFIX_VEX 0x20 - -#ifdef TARGET_X86_64 -#define CODE64(s) ((s)->code64) -#define REX_X(s) ((s)->rex_x) -#define REX_B(s) ((s)->rex_b) -#else -#define CODE64(s) 0 -#define REX_X(s) 0 -#define REX_B(s) 0 -#endif +#define PREFIX_REX 0x40 #ifdef TARGET_X86_64 # define ctztl ctz64 @@ -85,42 +76,38 @@ static TCGv_i64 cpu_bndu[4]; typedef struct DisasContext { DisasContextBase base; - /* current insn context */ - int override; /* -1 if no override */ - int prefix; + target_ulong pc; /* pc = eip + cs_base */ + target_ulong pc_start; /* pc at TB entry */ + target_ulong cs_base; /* base of CS segment */ + MemOp aflag; MemOp dflag; - target_ulong pc_start; - target_ulong pc; /* pc = eip + cs_base */ - /* current block context */ - target_ulong cs_base; /* base of CS segment */ - int pe; /* protected mode */ - int code32; /* 32 bit code segment */ -#ifdef TARGET_X86_64 - int lma; /* long mode active */ - int code64; /* 64 bit code segment */ - int rex_x, rex_b; + + int8_t override; /* -1 if no override, else R_CS, R_DS, etc */ + uint8_t prefix; + +#ifndef CONFIG_USER_ONLY + uint8_t cpl; /* code priv level */ + uint8_t iopl; /* i/o priv level */ #endif - int vex_l; /* vex vector length */ - int vex_v; /* vex vvvv register, without 1's complement. */ - int ss32; /* 32 bit stack segment */ - CCOp cc_op; /* current CC operation */ - bool cc_op_dirty; + uint8_t vex_l; /* vex vector length */ + uint8_t vex_v; /* vex vvvv register, without 1's complement. */ + uint8_t popl_esp_hack; /* for correct popl with esp base handling */ + uint8_t rip_offset; /* only used in x86_64, but left for simplicity */ + #ifdef TARGET_X86_64 - bool x86_64_hregs; + uint8_t rex_r; + uint8_t rex_x; + uint8_t rex_b; + bool rex_w; #endif - int addseg; /* non zero if either DS/ES/SS have a non zero base */ - int f_st; /* currently unused */ - int vm86; /* vm86 mode */ - int cpl; - int iopl; - int tf; /* TF cpu flag */ - int jmp_opt; /* use direct block chaining for direct jumps */ - int repz_opt; /* optimize jumps within repz instructions */ + bool jmp_opt; /* use direct block chaining for direct jumps */ + bool repz_opt; /* optimize jumps within repz instructions */ + bool cc_op_dirty; + + CCOp cc_op; /* current CC operation */ int mem_index; /* select memory access functions */ - uint64_t flags; /* all execution flags */ - int popl_esp_hack; /* for correct popl with esp base handling */ - int rip_offset; /* only used in x86_64, but left for simplicity */ + uint32_t flags; /* all execution flags */ int cpuid_features; int cpuid_ext_features; int cpuid_ext2_features; @@ -146,11 +133,96 @@ typedef struct DisasContext { sigjmp_buf jmpbuf; } DisasContext; +/* The environment in which user-only runs is constrained. 
*/ +#ifdef CONFIG_USER_ONLY +#define PE(S) true +#define CPL(S) 3 +#define IOPL(S) 0 +#define SVME(S) false +#define GUEST(S) false +#else +#define PE(S) (((S)->flags & HF_PE_MASK) != 0) +#define CPL(S) ((S)->cpl) +#define IOPL(S) ((S)->iopl) +#define SVME(S) (((S)->flags & HF_SVME_MASK) != 0) +#define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0) +#endif +#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64) +#define VM86(S) false +#define CODE32(S) true +#define SS32(S) true +#define ADDSEG(S) false +#else +#define VM86(S) (((S)->flags & HF_VM_MASK) != 0) +#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0) +#define SS32(S) (((S)->flags & HF_SS32_MASK) != 0) +#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0) +#endif +#if !defined(TARGET_X86_64) +#define CODE64(S) false +#define LMA(S) false +#elif defined(CONFIG_USER_ONLY) +#define CODE64(S) true +#define LMA(S) true +#else +#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0) +#define LMA(S) (((S)->flags & HF_LMA_MASK) != 0) +#endif + +#ifdef TARGET_X86_64 +#define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0) +#define REX_W(S) ((S)->rex_w) +#define REX_R(S) ((S)->rex_r + 0) +#define REX_X(S) ((S)->rex_x + 0) +#define REX_B(S) ((S)->rex_b + 0) +#else +#define REX_PREFIX(S) false +#define REX_W(S) false +#define REX_R(S) 0 +#define REX_X(S) 0 +#define REX_B(S) 0 +#endif + +/* + * Many sysemu-only helpers are not reachable for user-only. + * Define stub generators here, so that we need not either sprinkle + * ifdefs through the translator, nor provide the helper function. + */ +#define STUB_HELPER(NAME, ...) \ + static inline void gen_helper_##NAME(__VA_ARGS__) \ + { qemu_build_not_reached(); } + +#ifdef CONFIG_USER_ONLY +STUB_HELPER(clgi, TCGv_env env) +STUB_HELPER(flush_page, TCGv_env env, TCGv addr) +STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs) +STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port) +STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port) +STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port) +STUB_HELPER(monitor, TCGv_env env, TCGv addr) +STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs) +STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val) +STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val) +STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val) +STUB_HELPER(rdmsr, TCGv_env env) +STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg) +STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val) +STUB_HELPER(stgi, TCGv_env env) +STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type) +STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag) +STUB_HELPER(vmmcall, TCGv_env env) +STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs) +STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag) +STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val) +STUB_HELPER(wrmsr, TCGv_env env) +#endif + static void gen_eob(DisasContext *s); static void gen_jr(DisasContext *s, TCGv dest); static void gen_jmp(DisasContext *s, target_ulong eip); static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num); static void gen_op(DisasContext *s1, int op, MemOp ot, int d); +static void gen_exception_gpf(DisasContext *s); /* i386 arith/logic operations */ enum { @@ -309,14 +381,10 @@ static void gen_update_cc_op(DisasContext *s) */ static inline bool byte_reg_is_xH(DisasContext *s, int reg) { - if (reg < 4) { - return false; - } -#ifdef TARGET_X86_64 - if (reg >= 8 || s->x86_64_hregs) { + /* Any time the REX prefix is present, byte registers are uniform */ + if (reg < 4 || 
REX_PREFIX(s)) { return false; } -#endif return true; } @@ -333,7 +401,7 @@ static inline MemOp mo_pushpop(DisasContext *s, MemOp ot) /* Select the size of the stack pointer. */ static inline MemOp mo_stacksize(DisasContext *s) { - return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; + return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16; } /* Select only size 64 else 32. Used for SSE operand sizes. */ @@ -466,7 +534,7 @@ static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, #endif case MO_32: /* 32 bit address */ - if (ovr_seg < 0 && s->addseg) { + if (ovr_seg < 0 && ADDSEG(s)) { ovr_seg = def_seg; } if (ovr_seg < 0) { @@ -479,7 +547,7 @@ static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, tcg_gen_ext16u_tl(s->A0, a0); a0 = s->A0; if (ovr_seg < 0) { - if (s->addseg) { + if (ADDSEG(s)) { ovr_seg = def_seg; } else { return; @@ -612,37 +680,40 @@ static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n) } } -static void gen_check_io(DisasContext *s, MemOp ot, target_ulong cur_eip, +/* + * Validate that access to [port, port + 1<<ot) is allowed. + * Raise #GP, or VMM exit if not. + */ +static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port, uint32_t svm_flags) { - target_ulong next_eip; - - if (s->pe && (s->cpl > s->iopl || s->vm86)) { - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - switch (ot) { - case MO_8: - gen_helper_check_iob(cpu_env, s->tmp2_i32); - break; - case MO_16: - gen_helper_check_iow(cpu_env, s->tmp2_i32); - break; - case MO_32: - gen_helper_check_iol(cpu_env, s->tmp2_i32); - break; - default: - tcg_abort(); - } +#ifdef CONFIG_USER_ONLY + /* + * We do not implement the ioperm(2) syscall, so the TSS check + * will always fail. + */ + gen_exception_gpf(s); + return false; +#else + if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) { + gen_helper_check_io(cpu_env, port, tcg_constant_i32(1 << ot)); } - if(s->flags & HF_GUEST_MASK) { + if (GUEST(s)) { + target_ulong cur_eip = s->base.pc_next - s->cs_base; + target_ulong next_eip = s->pc - s->cs_base; + gen_update_cc_op(s); gen_jmp_im(s, cur_eip); - svm_flags |= (1 << (4 + ot)); - next_eip = s->pc - s->cs_base; - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - gen_helper_svm_check_io(cpu_env, s->tmp2_i32, - tcg_const_i32(svm_flags), - tcg_const_i32(next_eip - cur_eip)); + if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { + svm_flags |= SVM_IOIO_REP_MASK; + } + svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot); + gen_helper_svm_check_io(cpu_env, port, + tcg_constant_i32(svm_flags), + tcg_constant_i32(next_eip - cur_eip)); } + return true; +#endif } static inline void gen_movs(DisasContext *s, MemOp ot) @@ -1117,16 +1188,20 @@ static inline void gen_cmps(DisasContext *s, MemOp ot) static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) { if (s->flags & HF_IOBPT_MASK) { +#ifdef CONFIG_USER_ONLY + /* user-mode cpu should not be in IOBPT mode */ + g_assert_not_reached(); +#else TCGv_i32 t_size = tcg_const_i32(1 << ot); TCGv t_next = tcg_const_tl(s->pc - s->cs_base); gen_helper_bpt_io(cpu_env, t_port, t_size, t_next); tcg_temp_free_i32(t_size); tcg_temp_free(t_next); +#endif /* CONFIG_USER_ONLY */ } } - static inline void gen_ins(DisasContext *s, MemOp ot) { gen_string_movl_A0_EDI(s); @@ -1272,6 +1347,42 @@ static void gen_illegal_opcode(DisasContext *s) gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base); } +/* Generate #GP for the current instruction. 
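For reference, not part of the patch: the rewritten byte_reg_is_xH() above encodes the x86-64 byte-register rule that any REX prefix switches encodings 4-7 from AH/CH/DH/BH to the uniform low bytes SPL/BPL/SIL/DIL. A small sketch of the mapping:

#include <stdbool.h>
#include <stdio.h>

static const char *byte_reg_name(int reg, bool rex_prefix)
{
    static const char *uniform[16] = {
        "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
        "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b"
    };
    static const char *legacy[8] = {
        "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
    };

    if (!rex_prefix && reg < 8) {
        return legacy[reg];                  /* encodings 4-7 are the xH regs */
    }
    return uniform[reg];
}

int main(void)
{
    printf("encoding 4 without REX: %s\n", byte_reg_name(4, false));  /* ah  */
    printf("encoding 4 with REX:    %s\n", byte_reg_name(4, true));   /* spl */
    return 0;
}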
*/ +static void gen_exception_gpf(DisasContext *s) +{ + gen_exception(s, EXCP0D_GPF, s->pc_start - s->cs_base); +} + +/* Check for cpl == 0; if not, raise #GP and return false. */ +static bool check_cpl0(DisasContext *s) +{ + if (CPL(s) == 0) { + return true; + } + gen_exception_gpf(s); + return false; +} + +/* If vm86, check for iopl == 3; if not, raise #GP and return false. */ +static bool check_vm86_iopl(DisasContext *s) +{ + if (!VM86(s) || IOPL(s) == 3) { + return true; + } + gen_exception_gpf(s); + return false; +} + +/* Check for iopl allowing access; if not, raise #GP and return false. */ +static bool check_iopl(DisasContext *s) +{ + if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) { + return true; + } + gen_exception_gpf(s); + return false; +} + /* if d == OR_TMP0, it means memory operand (address in A0) */ static void gen_op(DisasContext *s1, int op, MemOp ot, int d) { @@ -2305,14 +2416,14 @@ static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg) call this function with seg_reg == R_CS */ static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg) { - if (s->pe && !s->vm86) { + if (PE(s) && !VM86(s)) { tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), s->tmp2_i32); /* abort translation because the addseg value may change or because ss32 may change. For R_SS, translation must always stop as a special handling must be done to disable hardware interrupts for the next instruction */ - if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) { + if (seg_reg == R_SS || (CODE32(s) && seg_reg < R_FS)) { s->base.is_jmp = DISAS_TOO_MANY; } } else { @@ -2323,28 +2434,13 @@ static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg) } } -static inline int svm_is_rep(int prefixes) -{ - return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0); -} - -static inline void -gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start, - uint32_t type, uint64_t param) +static void gen_svm_check_intercept(DisasContext *s, uint32_t type) { /* no SVM activated; fast case */ - if (likely(!(s->flags & HF_GUEST_MASK))) + if (likely(!GUEST(s))) { return; - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type), - tcg_const_i64(param)); -} - -static inline void -gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type) -{ - gen_svm_check_intercept_param(s, pc_start, type, 0); + } + gen_helper_svm_check_intercept(cpu_env, tcg_constant_i32(type)); } static inline void gen_stack_update(DisasContext *s, int addend) @@ -2363,7 +2459,7 @@ static void gen_push_v(DisasContext *s, TCGv val) tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size); if (!CODE64(s)) { - if (s->addseg) { + if (ADDSEG(s)) { new_esp = s->tmp4; tcg_gen_mov_tl(new_esp, s->A0); } @@ -2392,12 +2488,12 @@ static inline void gen_pop_update(DisasContext *s, MemOp ot) static inline void gen_stack_A0(DisasContext *s) { - gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1); + gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1); } static void gen_pusha(DisasContext *s) { - MemOp s_ot = s->ss32 ? MO_32 : MO_16; + MemOp s_ot = SS32(s) ? MO_32 : MO_16; MemOp d_ot = s->dflag; int size = 1 << d_ot; int i; @@ -2413,7 +2509,7 @@ static void gen_pusha(DisasContext *s) static void gen_popa(DisasContext *s) { - MemOp s_ot = s->ss32 ? MO_32 : MO_16; + MemOp s_ot = SS32(s) ? 
MO_32 : MO_16; MemOp d_ot = s->dflag; int size = 1 << d_ot; int i; @@ -2435,7 +2531,7 @@ static void gen_popa(DisasContext *s) static void gen_enter(DisasContext *s, int esp_addend, int level) { MemOp d_ot = mo_pushpop(s, s->dflag); - MemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; + MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16; int size = 1 << d_ot; /* Push BP; compute FrameTemp into T1. */ @@ -2518,10 +2614,10 @@ static void gen_interrupt(DisasContext *s, int intno, s->base.is_jmp = DISAS_NORETURN; } -static void gen_debug(DisasContext *s, target_ulong cur_eip) +static void gen_debug(DisasContext *s) { gen_update_cc_op(s); - gen_jmp_im(s, cur_eip); + gen_jmp_im(s, s->base.pc_next - s->cs_base); gen_helper_debug(cpu_env); s->base.is_jmp = DISAS_NORETURN; } @@ -2587,7 +2683,7 @@ do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr) } else if (recheck_tf) { gen_helper_rechecking_single_step(cpu_env); tcg_gen_exit_tb(NULL, 0); - } else if (s->tf) { + } else if (s->flags & HF_TF_MASK) { gen_helper_single_step(cpu_env); } else if (jr) { tcg_gen_lookup_and_goto_ptr(); @@ -3030,7 +3126,7 @@ static const struct SSEOpHelper_eppi sse_op_table7[256] = { }; static void gen_sse(CPUX86State *env, DisasContext *s, int b, - target_ulong pc_start, int rex_r) + target_ulong pc_start) { int b1, op1_offset, op2_offset, is_xmm, val; int modrm, mod, rm, reg; @@ -3100,8 +3196,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7); - if (is_xmm) - reg |= rex_r; + if (is_xmm) { + reg |= REX_R(s); + } mod = (modrm >> 6) & 3; if (sse_fn_epp == SSE_SPECIAL) { b |= (b1 << 8); @@ -3635,7 +3732,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_ld16u_tl(s->T0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val))); } - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); gen_op_mov_reg_v(s, ot, reg, s->T0); break; case 0x1d6: /* movq ea, xmm */ @@ -3679,7 +3776,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, offsetof(CPUX86State, fpregs[rm].mmx)); gen_helper_pmovmskb_mmx(s->tmp2_i32, cpu_env, s->ptr0); } - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32); break; @@ -3691,7 +3788,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } modrm = x86_ldub_code(env, s); rm = modrm & 7; - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); mod = (modrm >> 6) & 3; if (b1 >= 2) { goto unknown_op; @@ -3767,7 +3864,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, /* Various integer extensions at 0f 38 f[0-f]. 
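An illustrative aside, separate from the patch: every "reg = ((modrm >> 3) & 7) | REX_R(s)" and "rm = (modrm & 7) | REX_B(s)" above widens a 3-bit ModRM field to 4 bits, with the REX bit supplying bit 3 of the final register number; the rex_r/rex_b values are kept pre-shifted so they can be OR-ed in directly. A minimal decode sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t rex   = 0x44;                    /* 0100WRXB with R = 1 */
    uint8_t modrm = 0xd8;                    /* mod = 3, reg = 011, rm = 000 */

    int rex_r = (rex & 0x4) << 1;            /* pre-shifted to bit 3, i.e. 8 */
    int rex_b = (rex & 0x1) << 3;

    int reg = ((modrm >> 3) & 7) | rex_r;    /* 3 | 8 = 11 -> r11 */
    int rm  = (modrm & 7) | rex_b;           /* 0 | 0 = 0  -> rax */

    printf("reg=%d rm=%d\n", reg, rm);
    return 0;
}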
*/ b = modrm | (b1 << 8); modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); switch (b) { case 0x3f0: /* crc32 Gd,Eb */ @@ -4121,7 +4218,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, b = modrm; modrm = x86_ldub_code(env, s); rm = modrm & 7; - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); mod = (modrm >> 6) & 3; if (b1 >= 2) { goto unknown_op; @@ -4141,7 +4238,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, rm = (modrm & 7) | REX_B(s); if (mod != 3) gen_lea_modrm(env, s, modrm); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); val = x86_ldub_code(env, s); switch (b) { case 0x14: /* pextrb */ @@ -4310,7 +4407,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, /* Various integer extensions at 0f 3a f[0-f]. */ b = modrm | (b1 << 8); modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); switch (b) { case 0x3f0: /* rorx Gy,Ey, Ib */ @@ -4484,27 +4581,25 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) MemOp ot, aflag, dflag; int modrm, reg, rm, mod, op, opreg, val; target_ulong next_eip, tval; - int rex_w, rex_r; target_ulong pc_start = s->base.pc_next; s->pc_start = s->pc = pc_start; s->override = -1; #ifdef TARGET_X86_64 + s->rex_w = false; + s->rex_r = 0; s->rex_x = 0; s->rex_b = 0; - s->x86_64_hregs = false; #endif s->rip_offset = 0; /* for relative ip address */ s->vex_l = 0; s->vex_v = 0; if (sigsetjmp(s->jmpbuf, 0) != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + gen_exception_gpf(s); return s->pc; } prefixes = 0; - rex_w = -1; - rex_r = 0; next_byte: b = x86_ldub_code(env, s); @@ -4547,12 +4642,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x40 ... 0x4f: if (CODE64(s)) { /* REX prefix */ - rex_w = (b >> 3) & 1; - rex_r = (b & 0x4) << 1; + prefixes |= PREFIX_REX; + s->rex_w = (b >> 3) & 1; + s->rex_r = (b & 0x4) << 1; s->rex_x = (b & 0x2) << 2; - REX_B(s) = (b & 0x1) << 3; - /* select uniform byte register addressing */ - s->x86_64_hregs = true; + s->rex_b = (b & 0x1) << 3; goto next_byte; } break; @@ -4561,7 +4655,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xc4: /* 3-byte VEX */ /* VEX prefixes cannot be used except in 32-bit mode. Otherwise the instruction is LES or LDS. */ - if (s->code32 && !s->vm86) { + if (CODE32(s) && !VM86(s)) { static const int pp_prefix[4] = { 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ }; @@ -4576,27 +4670,24 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ - | PREFIX_LOCK | PREFIX_DATA)) { + | PREFIX_LOCK | PREFIX_DATA | PREFIX_REX)) { goto illegal_op; } #ifdef TARGET_X86_64 - if (s->x86_64_hregs) { - goto illegal_op; - } + s->rex_r = (~vex2 >> 4) & 8; #endif - rex_r = (~vex2 >> 4) & 8; if (b == 0xc5) { /* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */ vex3 = vex2; b = x86_ldub_code(env, s) | 0x100; } else { /* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */ + vex3 = x86_ldub_code(env, s); #ifdef TARGET_X86_64 s->rex_x = (~vex2 >> 3) & 8; s->rex_b = (~vex2 >> 2) & 8; + s->rex_w = (vex3 >> 7) & 1; #endif - vex3 = x86_ldub_code(env, s); - rex_w = (vex3 >> 7) & 1; switch (vex2 & 0x1f) { case 0x01: /* Implied 0f leading opcode bytes. 
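A rough sketch, not from the patch, of why the VEX handling above complements the prefix byte: in a 3-byte VEX prefix the byte after C4 stores R, X and B inverted in bits 7:5, so each bit is complemented and shifted down until it lands at position 3, ready to be OR-ed into a register number exactly like a REX bit.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t vex2 = 0x61;         /* bits 7:5 = 011 -> R = 1, X = 0, B = 0 */
    unsigned inv = ~vex2 & 0xff; /* complemented copy of the field bits */

    int rex_r = (inv >> 4) & 8;
    int rex_x = (inv >> 3) & 8;
    int rex_b = (inv >> 2) & 8;

    printf("R=%d X=%d B=%d\n", !!rex_r, !!rex_x, !!rex_b);   /* R=1 X=0 B=0 */
    return 0;
}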
*/ b = x86_ldub_code(env, s) | 0x100; @@ -4623,18 +4714,18 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* In 64-bit mode, the default data size is 32-bit. Select 64-bit data with rex_w, and 16-bit data with 0x66; rex_w takes precedence over 0x66 if both are present. */ - dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32); + dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32); /* In 64-bit mode, 0x67 selects 32-bit addressing. */ aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64); } else { /* In 16/32-bit mode, 0x66 selects the opposite data size. */ - if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) { + if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) { dflag = MO_32; } else { dflag = MO_16; } /* In 16/32-bit mode, 0x67 selects the opposite addressing. */ - if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) { + if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) { aflag = MO_32; } else { aflag = MO_16; @@ -4674,7 +4765,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) switch(f) { case 0: /* OP Ev, Gv */ modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { @@ -4696,7 +4787,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 1: /* OP Gv, Ev */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); @@ -5019,7 +5110,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* operand size for jumps is 64 bit */ ot = MO_64; } else if (op == 3 || op == 5) { - ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16; + ot = dflag != MO_16 ? 
MO_32 + REX_W(s) : MO_16; } else if (op == 6) { /* default push size is 64 bit */ ot = mo_pushpop(s, dflag); @@ -5068,7 +5159,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, s->T0, s->A0); do_lcall: - if (s->pe && !s->vm86) { + if (PE(s) && !VM86(s)) { tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1, tcg_const_i32(dflag - 1), @@ -5098,7 +5189,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, s->T0, s->A0); do_ljmp: - if (s->pe && !s->vm86) { + if (PE(s) && !VM86(s)) { tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1, tcg_const_tl(s->pc - s->cs_base)); @@ -5122,7 +5213,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_op_mov_v_reg(s, ot, s->T1, reg); @@ -5194,7 +5285,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x6b: ot = dflag; modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); if (b == 0x69) s->rip_offset = insn_const_size(ot); else if (b == 0x6b) @@ -5246,7 +5337,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x1c1: /* xadd Ev, Gv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); mod = (modrm >> 6) & 3; gen_op_mov_v_reg(s, ot, s->T0, reg); if (mod == 3) { @@ -5278,7 +5369,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); mod = (modrm >> 6) & 3; oldv = tcg_temp_new(); newv = tcg_temp_new(); @@ -5476,7 +5567,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (s->base.is_jmp) { gen_jmp_im(s, s->pc - s->cs_base); if (reg == R_SS) { - s->tf = 0; + s->flags &= ~HF_TF_MASK; gen_eob_inhibit_irq(s, true); } else { gen_eob(s); @@ -5500,7 +5591,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x89: /* mov Gv, Ev */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); /* generate a generic store */ gen_ldst_modrm(env, s, modrm, ot, reg, 1); @@ -5526,7 +5617,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x8b: /* mov Ev, Gv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_op_mov_reg_v(s, ot, reg, s->T0); @@ -5542,7 +5633,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (s->base.is_jmp) { gen_jmp_im(s, s->pc - s->cs_base); if (reg == R_SS) { - s->tf = 0; + s->flags &= ~HF_TF_MASK; gen_eob_inhibit_irq(s, true); } else { gen_eob(s); @@ -5576,7 +5667,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) s_ot = b & 8 ? 
MO_SIGN | ot : ot; modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); @@ -5615,7 +5706,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); { AddressParts a = gen_lea_modrm_0(env, s, modrm); TCGv ea = gen_lea_modrm_1(s, a); @@ -5697,7 +5788,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x87: /* xchg Ev, Gv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); mod = (modrm >> 6) & 3; if (mod == 3) { rm = (modrm & 7) | REX_B(s); @@ -5734,7 +5825,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) do_lxx: ot = dflag != MO_16 ? MO_32 : MO_16; modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; @@ -5817,7 +5908,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; @@ -6395,9 +6486,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x6c: /* insS */ case 0x6d: ot = mo_b_d32(b, dflag); - tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]); - gen_check_io(s, ot, pc_start - s->cs_base, - SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4); + tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); + tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32); + if (!gen_check_io(s, ot, s->tmp2_i32, + SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) { + break; + } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } @@ -6414,9 +6508,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x6e: /* outsS */ case 0x6f: ot = mo_b_d32(b, dflag); - tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]); - gen_check_io(s, ot, pc_start - s->cs_base, - svm_is_rep(prefixes) | 4); + tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); + tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32); + if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) { + break; + } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } @@ -6438,13 +6534,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xe5: ot = mo_b_d32(b, dflag); val = x86_ldub_code(env, s); - tcg_gen_movi_tl(s->T0, val); - gen_check_io(s, ot, pc_start - s->cs_base, - SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); + tcg_gen_movi_i32(s->tmp2_i32, val); + if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) { + break; + } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } - tcg_gen_movi_i32(s->tmp2_i32, val); gen_helper_in_func(ot, s->T1, s->tmp2_i32); gen_op_mov_reg_v(s, ot, R_EAX, s->T1); gen_bpt_io(s, s->tmp2_i32, ot); @@ -6456,15 +6552,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xe7: ot = mo_b_d32(b, dflag); val = x86_ldub_code(env, s); - tcg_gen_movi_tl(s->T0, val); - gen_check_io(s, ot, pc_start - s->cs_base, - svm_is_rep(prefixes)); - gen_op_mov_v_reg(s, ot, s->T1, R_EAX); - + tcg_gen_movi_i32(s->tmp2_i32, val); + if (!gen_check_io(s, ot, s->tmp2_i32, 0)) { + break; + } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } - tcg_gen_movi_i32(s->tmp2_i32, val); + 
gen_op_mov_v_reg(s, ot, s->T1, R_EAX); tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); gen_bpt_io(s, s->tmp2_i32, ot); @@ -6475,13 +6570,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xec: case 0xed: ot = mo_b_d32(b, dflag); - tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]); - gen_check_io(s, ot, pc_start - s->cs_base, - SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); + tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); + tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32); + if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) { + break; + } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); gen_helper_in_func(ot, s->T1, s->tmp2_i32); gen_op_mov_reg_v(s, ot, R_EAX, s->T1); gen_bpt_io(s, s->tmp2_i32, ot); @@ -6492,15 +6588,15 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xee: case 0xef: ot = mo_b_d32(b, dflag); - tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]); - gen_check_io(s, ot, pc_start - s->cs_base, - svm_is_rep(prefixes)); - gen_op_mov_v_reg(s, ot, s->T1, R_EAX); - + tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); + tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32); + if (!gen_check_io(s, ot, s->tmp2_i32, 0)) { + break; + } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + gen_op_mov_v_reg(s, ot, s->T1, R_EAX); tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); gen_bpt_io(s, s->tmp2_i32, ot); @@ -6531,7 +6627,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xca: /* lret im */ val = x86_ldsw_code(env, s); do_lret: - if (s->pe && !s->vm86) { + if (PE(s) && !VM86(s)) { gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1), @@ -6556,23 +6652,18 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) val = 0; goto do_lret; case 0xcf: /* iret */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET); - if (!s->pe) { - /* real mode */ - gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); - set_cc_op(s, CC_OP_EFLAGS); - } else if (s->vm86) { - if (s->iopl != 3) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); - set_cc_op(s, CC_OP_EFLAGS); + gen_svm_check_intercept(s, SVM_EXIT_IRET); + if (!PE(s) || VM86(s)) { + /* real mode or vm86 mode */ + if (!check_vm86_iopl(s)) { + break; } + gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); } else { gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1), tcg_const_i32(s->pc - s->cs_base)); - set_cc_op(s, CC_OP_EFLAGS); } + set_cc_op(s, CC_OP_EFLAGS); gen_eob(s); break; case 0xe8: /* call im */ @@ -6676,29 +6767,25 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } ot = dflag; modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); gen_cmovcc1(env, s, ot, b, modrm, reg); break; /************************/ /* flags */ case 0x9c: /* pushf */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF); - if (s->vm86 && s->iopl != 3) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { + gen_svm_check_intercept(s, SVM_EXIT_PUSHF); + if (check_vm86_iopl(s)) { gen_update_cc_op(s); gen_helper_read_eflags(s->T0, cpu_env); gen_push_v(s, s->T0); } break; case 0x9d: /* popf */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF); - if (s->vm86 && s->iopl != 3) { 
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { + gen_svm_check_intercept(s, SVM_EXIT_POPF); + if (check_vm86_iopl(s)) { ot = gen_pop_T0(s); - if (s->cpl == 0) { + if (CPL(s) == 0) { if (dflag != MO_16) { gen_helper_write_eflags(cpu_env, s->T0, tcg_const_i32((TF_MASK | AC_MASK | @@ -6713,7 +6800,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) & 0xffff)); } } else { - if (s->cpl <= s->iopl) { + if (CPL(s) <= IOPL(s)) { if (dflag != MO_16) { gen_helper_write_eflags(cpu_env, s->T0, tcg_const_i32((TF_MASK | @@ -6826,7 +6913,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) do_btx: ot = dflag; modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(s, MO_32, s->T1, reg); @@ -6931,7 +7018,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x1bd: /* bsr / lzcnt */ ot = dflag; modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_extu(ot, s->T0); @@ -7056,9 +7143,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0xcd: /* int N */ val = x86_ldub_code(env, s); - if (s->vm86 && s->iopl != 3) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { + if (check_vm86_iopl(s)) { gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); } break; @@ -7071,33 +7156,21 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; #ifdef WANT_ICEBP case 0xf1: /* icebp (undocumented, exits to external debugger) */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP); - gen_debug(s, pc_start - s->cs_base); + gen_svm_check_intercept(s, SVM_EXIT_ICEBP); + gen_debug(s); break; #endif case 0xfa: /* cli */ - if (!s->vm86) { - if (s->cpl <= s->iopl) { - gen_helper_cli(cpu_env); - } else { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } - } else { - if (s->iopl == 3) { - gen_helper_cli(cpu_env); - } else { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } + if (check_iopl(s)) { + gen_helper_cli(cpu_env); } break; case 0xfb: /* sti */ - if (s->vm86 ? 
s->iopl == 3 : s->cpl <= s->iopl) { + if (check_iopl(s)) { gen_helper_sti(cpu_env); /* interruptions are enabled only the first insn after sti */ gen_jmp_im(s, s->pc - s->cs_base); gen_eob_inhibit_irq(s, true); - } else { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } break; case 0x62: /* bound */ @@ -7189,15 +7262,15 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0x130: /* wrmsr */ case 0x132: /* rdmsr */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { + if (check_cpl0(s)) { gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); if (b & 2) { gen_helper_rdmsr(cpu_env); } else { gen_helper_wrmsr(cpu_env); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); } } break; @@ -7216,13 +7289,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_rdpmc(cpu_env); + s->base.is_jmp = DISAS_NORETURN; break; case 0x134: /* sysenter */ /* For Intel SYSENTER is valid on 64-bit */ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; - if (!s->pe) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!PE(s)) { + gen_exception_gpf(s); } else { gen_helper_sysenter(cpu_env); gen_eob(s); @@ -7232,8 +7306,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* For Intel SYSEXIT is valid on 64-bit */ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; - if (!s->pe) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!PE(s)) { + gen_exception_gpf(s); } else { gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1)); gen_eob(s); @@ -7251,12 +7325,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_eob_worker(s, false, true); break; case 0x107: /* sysret */ - if (!s->pe) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!PE(s)) { + gen_exception_gpf(s); } else { gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1)); /* condition codes are modified only in long mode */ - if (s->lma) { + if (LMA(s)) { set_cc_op(s, CC_OP_EFLAGS); } /* TF handling for the sysret insn is different. The TF bit is @@ -7273,9 +7347,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_helper_cpuid(cpu_env); break; case 0xf4: /* hlt */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { + if (check_cpl0(s)) { gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start)); @@ -7288,42 +7360,38 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) op = (modrm >> 3) & 7; switch(op) { case 0: /* sldt */ - if (!s->pe || s->vm86) + if (!PE(s) || VM86(s)) goto illegal_op; - gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ); + gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ); tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, ldt.selector)); ot = mod == 3 ? 
dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 2: /* lldt */ - if (!s->pe || s->vm86) + if (!PE(s) || VM86(s)) goto illegal_op; - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE); + if (check_cpl0(s)) { + gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); gen_helper_lldt(cpu_env, s->tmp2_i32); } break; case 1: /* str */ - if (!s->pe || s->vm86) + if (!PE(s) || VM86(s)) goto illegal_op; - gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ); + gen_svm_check_intercept(s, SVM_EXIT_TR_READ); tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, tr.selector)); ot = mod == 3 ? dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 3: /* ltr */ - if (!s->pe || s->vm86) + if (!PE(s) || VM86(s)) goto illegal_op; - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE); + if (check_cpl0(s)) { + gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); gen_helper_ltr(cpu_env, s->tmp2_i32); @@ -7331,7 +7399,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 4: /* verr */ case 5: /* verw */ - if (!s->pe || s->vm86) + if (!PE(s) || VM86(s)) goto illegal_op; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_update_cc_op(s); @@ -7351,7 +7419,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) modrm = x86_ldub_code(env, s); switch (modrm) { CASE_MODRM_MEM_OP(0): /* sgdt */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ); + gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.limit)); @@ -7365,7 +7433,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0xc8: /* monitor */ - if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) { + if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) { goto illegal_op; } gen_update_cc_op(s); @@ -7377,18 +7445,18 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0xc9: /* mwait */ - if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) { + if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start)); - gen_eob(s); + s->base.is_jmp = DISAS_NORETURN; break; case 0xca: /* clac */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) - || s->cpl != 0) { + || CPL(s) != 0) { goto illegal_op; } gen_helper_clac(cpu_env); @@ -7398,7 +7466,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xcb: /* stac */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) - || s->cpl != 0) { + || CPL(s) != 0) { goto illegal_op; } gen_helper_stac(cpu_env); @@ -7407,7 +7475,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; CASE_MODRM_MEM_OP(1): /* sidt */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ); + gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit)); gen_op_st_v(s, MO_16, s->T0, s->A0); @@ -7436,8 +7504,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) | 
PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!check_cpl0(s)) { break; } tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], @@ -7450,11 +7517,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0xd8: /* VMRUN */ - if (!(s->flags & HF_SVME_MASK) || !s->pe) { + if (!SVME(s) || !PE(s)) { goto illegal_op; } - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!check_cpl0(s)) { break; } gen_update_cc_op(s); @@ -7466,7 +7532,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0xd9: /* VMMCALL */ - if (!(s->flags & HF_SVME_MASK)) { + if (!SVME(s)) { goto illegal_op; } gen_update_cc_op(s); @@ -7475,11 +7541,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0xda: /* VMLOAD */ - if (!(s->flags & HF_SVME_MASK) || !s->pe) { + if (!SVME(s) || !PE(s)) { goto illegal_op; } - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!check_cpl0(s)) { break; } gen_update_cc_op(s); @@ -7488,11 +7553,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0xdb: /* VMSAVE */ - if (!(s->flags & HF_SVME_MASK) || !s->pe) { + if (!SVME(s) || !PE(s)) { goto illegal_op; } - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!check_cpl0(s)) { break; } gen_update_cc_op(s); @@ -7501,13 +7565,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0xdc: /* STGI */ - if ((!(s->flags & HF_SVME_MASK) - && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) - || !s->pe) { + if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) + || !PE(s)) { goto illegal_op; } - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!check_cpl0(s)) { break; } gen_update_cc_op(s); @@ -7517,11 +7579,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0xdd: /* CLGI */ - if (!(s->flags & HF_SVME_MASK) || !s->pe) { + if (!SVME(s) || !PE(s)) { goto illegal_op; } - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!check_cpl0(s)) { break; } gen_update_cc_op(s); @@ -7530,35 +7591,37 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0xde: /* SKINIT */ - if ((!(s->flags & HF_SVME_MASK) - && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) - || !s->pe) { + if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) + || !PE(s)) { goto illegal_op; } - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_skinit(cpu_env); - break; + gen_svm_check_intercept(s, SVM_EXIT_SKINIT); + /* If not intercepted, not implemented -- raise #UD. 
*/ + goto illegal_op; case 0xdf: /* INVLPGA */ - if (!(s->flags & HF_SVME_MASK) || !s->pe) { + if (!SVME(s) || !PE(s)) { goto illegal_op; } - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!check_cpl0(s)) { break; } - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1)); + gen_svm_check_intercept(s, SVM_EXIT_INVLPGA); + if (s->aflag == MO_64) { + tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]); + } else { + tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]); + } + gen_helper_flush_page(cpu_env, s->A0); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); break; CASE_MODRM_MEM_OP(2): /* lgdt */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!check_cpl0(s)) { break; } - gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE); + gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE); gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_16, s->T1, s->A0); gen_add_A0_im(s, 2); @@ -7571,11 +7634,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; CASE_MODRM_MEM_OP(3): /* lidt */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!check_cpl0(s)) { break; } - gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE); + gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE); gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_16, s->T1, s->A0); gen_add_A0_im(s, 2); @@ -7588,7 +7650,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; CASE_MODRM_OP(4): /* smsw */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0); + gen_svm_check_intercept(s, SVM_EXIT_READ_CR0); tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0])); /* * In 32-bit mode, the higher 16 bits of the destination @@ -7616,27 +7678,33 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64); break; + CASE_MODRM_OP(6): /* lmsw */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!check_cpl0(s)) { break; } - gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); + gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); - gen_helper_lmsw(cpu_env, s->T0); + /* + * Only the 4 lower bits of CR0 are modified. + * PE cannot be set to zero if already set to one. 
+ */ + tcg_gen_ld_tl(s->T1, cpu_env, offsetof(CPUX86State, cr[0])); + tcg_gen_andi_tl(s->T0, s->T0, 0xf); + tcg_gen_andi_tl(s->T1, s->T1, ~0xe); + tcg_gen_or_tl(s->T0, s->T0, s->T1); + gen_helper_write_crN(cpu_env, tcg_constant_i32(0), s->T0); gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(7): /* invlpg */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + if (!check_cpl0(s)) { break; } - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); + gen_svm_check_intercept(s, SVM_EXIT_INVLPG); gen_lea_modrm(env, s, modrm); - gen_helper_invlpg(cpu_env, s->A0); + gen_helper_flush_page(cpu_env, s->A0); gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; @@ -7644,9 +7712,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xf8: /* swapgs */ #ifdef TARGET_X86_64 if (CODE64(s)) { - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { + if (check_cpl0(s)) { tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]); tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env, offsetof(CPUX86State, kernelgsbase)); @@ -7680,10 +7746,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x108: /* invd */ case 0x109: /* wbinvd */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD); + if (check_cpl0(s)) { + gen_svm_check_intercept(s, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD); /* nothing to do */ } break; @@ -7695,7 +7759,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) d_ot = dflag; modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); @@ -7717,7 +7781,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) TCGLabel *label1; TCGv t0, t1, t2, a0; - if (!s->pe || s->vm86) + if (!PE(s) || VM86(s)) goto illegal_op; t0 = tcg_temp_local_new(); t1 = tcg_temp_local_new(); @@ -7765,11 +7829,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) { TCGLabel *label1; TCGv t0; - if (!s->pe || s->vm86) + if (!PE(s) || VM86(s)) goto illegal_op; ot = dflag != MO_16 ? MO_32 : MO_16; modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); t0 = tcg_temp_local_new(); gen_update_cc_op(s); @@ -7810,7 +7874,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) modrm = x86_ldub_code(env, s); if (s->flags & HF_MPX_EN_MASK) { mod = (modrm >> 6) & 3; - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); if (prefixes & PREFIX_REPZ) { /* bndcl */ if (reg >= 4 @@ -7900,7 +7964,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) modrm = x86_ldub_code(env, s); if (s->flags & HF_MPX_EN_MASK) { mod = (modrm >> 6) & 3; - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); if (mod != 3 && (prefixes & PREFIX_REPZ)) { /* bndmk */ if (reg >= 4 @@ -8002,65 +8066,59 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) modrm = x86_ldub_code(env, s); gen_nop_modrm(env, s, modrm); break; + case 0x120: /* mov reg, crN */ case 0x122: /* mov crN, reg */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - modrm = x86_ldub_code(env, s); - /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). 
- * AMD documentation (24594.pdf) and testing of - * intel 386 and 486 processors all show that the mod bits - * are assumed to be 1's, regardless of actual values. - */ - rm = (modrm & 7) | REX_B(s); - reg = ((modrm >> 3) & 7) | rex_r; - if (CODE64(s)) - ot = MO_64; - else - ot = MO_32; - if ((prefixes & PREFIX_LOCK) && (reg == 0) && + if (!check_cpl0(s)) { + break; + } + modrm = x86_ldub_code(env, s); + /* + * Ignore the mod bits (assume (modrm&0xc0)==0xc0). + * AMD documentation (24594.pdf) and testing of Intel 386 and 486 + * processors all show that the mod bits are assumed to be 1's, + * regardless of actual values. + */ + rm = (modrm & 7) | REX_B(s); + reg = ((modrm >> 3) & 7) | REX_R(s); + switch (reg) { + case 0: + if ((prefixes & PREFIX_LOCK) && (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) { reg = 8; } - switch(reg) { - case 0: - case 2: - case 3: - case 4: - case 8: - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - if (b & 2) { - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_op_mov_v_reg(s, ot, s->T0, rm); - gen_helper_write_crN(cpu_env, tcg_const_i32(reg), - s->T0); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } else { - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_read_crN(s->T0, cpu_env, tcg_const_i32(reg)); - gen_op_mov_reg_v(s, ot, rm, s->T0); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_jmp(s, s->pc - s->cs_base); - } - } - break; - default: - goto unknown_op; + break; + case 2: + case 3: + case 4: + break; + default: + goto unknown_op; + } + ot = (CODE64(s) ? MO_64 : MO_32); + + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + if (b & 2) { + gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg); + gen_op_mov_v_reg(s, ot, s->T0, rm); + gen_helper_write_crN(cpu_env, tcg_constant_i32(reg), s->T0); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } else { + gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg); + gen_helper_read_crN(s->T0, cpu_env, tcg_constant_i32(reg)); + gen_op_mov_reg_v(s, ot, rm, s->T0); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_jmp(s, s->pc - s->cs_base); } } break; + case 0x121: /* mov reg, drN */ case 0x123: /* mov drN, reg */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { + if (check_cpl0(s)) { modrm = x86_ldub_code(env, s); /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). * AMD documentation (24594.pdf) and testing of @@ -8068,7 +8126,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) * are assumed to be 1's, regardless of actual values. 
*/ rm = (modrm & 7) | REX_B(s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); if (CODE64(s)) ot = MO_64; else @@ -8077,14 +8135,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } if (b & 2) { - gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); + gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg); gen_op_mov_v_reg(s, ot, s->T0, rm); tcg_gen_movi_i32(s->tmp2_i32, reg); gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0); gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } else { - gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); + gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg); tcg_gen_movi_i32(s->tmp2_i32, reg); gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32); gen_op_mov_reg_v(s, ot, rm, s->T0); @@ -8092,10 +8150,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } break; case 0x106: /* clts */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); + if (check_cpl0(s)) { + gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0); gen_helper_clts(cpu_env); /* abort block because static cpu state changed */ gen_jmp_im(s, s->pc - s->cs_base); @@ -8111,7 +8167,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); /* generate a generic store */ gen_ldst_modrm(env, s, modrm, ot, reg, 1); break; @@ -8322,12 +8378,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_nop_modrm(env, s, modrm); break; case 0x1aa: /* rsm */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM); + gen_svm_check_intercept(s, SVM_EXIT_RSM); if (!(s->flags & HF_SMM_MASK)) goto illegal_op; +#ifdef CONFIG_USER_ONLY + /* we should not be in SMM mode */ + g_assert_not_reached(); +#else gen_update_cc_op(s); gen_jmp_im(s, s->pc - s->cs_base); gen_helper_rsm(cpu_env); +#endif /* CONFIG_USER_ONLY */ gen_eob(s); break; case 0x1b8: /* SSE4.2 popcnt */ @@ -8338,7 +8399,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | rex_r; + reg = ((modrm >> 3) & 7) | REX_R(s); if (s->prefix & PREFIX_DATA) { ot = MO_16; @@ -8366,7 +8427,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x1c2: case 0x1c4 ... 0x1c6: case 0x1d0 ... 0x1fe: - gen_sse(env, s, b, pc_start, rex_r); + gen_sse(env, s, b, pc_start); break; default: goto unknown_op; @@ -8466,20 +8527,31 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) DisasContext *dc = container_of(dcbase, DisasContext, base); CPUX86State *env = cpu->env_ptr; uint32_t flags = dc->base.tb->flags; - target_ulong cs_base = dc->base.tb->cs_base; - - dc->pe = (flags >> HF_PE_SHIFT) & 1; - dc->code32 = (flags >> HF_CS32_SHIFT) & 1; - dc->ss32 = (flags >> HF_SS32_SHIFT) & 1; - dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1; - dc->f_st = 0; - dc->vm86 = (flags >> VM_SHIFT) & 1; - dc->cpl = (flags >> HF_CPL_SHIFT) & 3; - dc->iopl = (flags >> IOPL_SHIFT) & 3; - dc->tf = (flags >> TF_SHIFT) & 1; + int cpl = (flags >> HF_CPL_SHIFT) & 3; + int iopl = (flags >> IOPL_SHIFT) & 3; + + dc->cs_base = dc->base.tb->cs_base; + dc->flags = flags; +#ifndef CONFIG_USER_ONLY + dc->cpl = cpl; + dc->iopl = iopl; +#endif + + /* We make some simplifying assumptions; validate they're correct. 
*/ + g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0)); + g_assert(CPL(dc) == cpl); + g_assert(IOPL(dc) == iopl); + g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0)); + g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0)); + g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0)); + g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0)); + g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0)); + g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0)); + g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0)); + g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0)); + dc->cc_op = CC_OP_DYNAMIC; dc->cc_op_dirty = false; - dc->cs_base = cs_base; dc->popl_esp_hack = 0; /* select memory access functions */ dc->mem_index = 0; @@ -8492,29 +8564,14 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX]; dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX]; dc->cpuid_xsave_features = env->features[FEAT_XSAVE]; -#ifdef TARGET_X86_64 - dc->lma = (flags >> HF_LMA_SHIFT) & 1; - dc->code64 = (flags >> HF_CS64_SHIFT) & 1; -#endif - dc->flags = flags; - dc->jmp_opt = !(dc->tf || dc->base.singlestep_enabled || - (flags & HF_INHIBIT_IRQ_MASK)); - /* Do not optimize repz jumps at all in icount mode, because - rep movsS instructions are execured with different paths - in !repz_opt and repz_opt modes. The first one was used - always except single step mode. And this setting - disables jumps optimization and control paths become - equivalent in run and single step modes. - Now there will be no jump optimization for repz in - record/replay modes and there will always be an - additional step for ecx=0 when icount is enabled. + dc->jmp_opt = !(dc->base.singlestep_enabled || + (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))); + /* + * If jmp_opt, we want to handle each string instruction individually. + * For icount also disable repz optimization so that each iteration + * is accounted separately. */ dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT); -#if 0 - /* check addseg logic */ - if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32)) - printf("ERROR addseg\n"); -#endif dc->T0 = tcg_temp_new(); dc->T1 = tcg_temp_new(); @@ -8548,8 +8605,7 @@ static bool i386_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, /* If RF is set, suppress an internally generated breakpoint. */ int flags = dc->base.tb->flags & HF_RF_MASK ? 
BP_GDB : BP_ANY; if (bp->flags & flags) { - gen_debug(dc, dc->base.pc_next - dc->cs_base); - dc->base.is_jmp = DISAS_NORETURN; + gen_debug(dc); /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order to for it to be properly cleared -- thus we increment the PC here so that @@ -8572,13 +8628,14 @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) */ if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) { gen_exception(dc, EXCP_VSYSCALL, dc->base.pc_next); + dc->base.pc_next = dc->pc + 1; return; } #endif pc_next = disas_insn(dc, cpu); - if (dc->tf || (dc->base.tb->flags & HF_INHIBIT_IRQ_MASK)) { + if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) { /* if single step mode, we generate only one instruction and generate an exception */ /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear diff --git a/target/i386/tcg/user/excp_helper.c b/target/i386/tcg/user/excp_helper.c new file mode 100644 index 0000000000..a89b5228fd --- /dev/null +++ b/target/i386/tcg/user/excp_helper.c @@ -0,0 +1,39 @@ +/* + * x86 exception helpers - user-mode specific code + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "tcg/helper-tcg.h" + +bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + env->cr[2] = addr; + env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT; + env->error_code |= PG_ERROR_U_MASK; + cs->exception_index = EXCP0E_PAGE; + env->exception_is_int = 0; + env->exception_next_eip = -1; + cpu_loop_exit_restore(cs, retaddr); +} diff --git a/target/i386/tcg/user/meson.build b/target/i386/tcg/user/meson.build new file mode 100644 index 0000000000..1df6bc4343 --- /dev/null +++ b/target/i386/tcg/user/meson.build @@ -0,0 +1,4 @@ +i386_user_ss.add(when: ['CONFIG_TCG', 'CONFIG_USER_ONLY'], if_true: files( + 'excp_helper.c', + 'seg_helper.c', +)) diff --git a/target/i386/tcg/user/seg_helper.c b/target/i386/tcg/user/seg_helper.c new file mode 100644 index 0000000000..67481b0aa8 --- /dev/null +++ b/target/i386/tcg/user/seg_helper.c @@ -0,0 +1,109 @@ +/* + * x86 segmentation related helpers (user-mode code): + * TSS, interrupts, system calls, jumps and call/task gates, descriptors + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "tcg/helper-tcg.h" +#include "tcg/seg_helper.h" + +#ifdef TARGET_X86_64 +void helper_syscall(CPUX86State *env, int next_eip_addend) +{ + CPUState *cs = env_cpu(env); + + cs->exception_index = EXCP_SYSCALL; + env->exception_is_int = 0; + env->exception_next_eip = env->eip + next_eip_addend; + cpu_loop_exit(cs); +} +#endif /* TARGET_X86_64 */ + +/* + * fake user mode interrupt. is_int is TRUE if coming from the int + * instruction. next_eip is the env->eip value AFTER the interrupt + * instruction. It is only relevant if is_int is TRUE or if intno + * is EXCP_SYSCALL. + */ +static void do_interrupt_user(CPUX86State *env, int intno, int is_int, + int error_code, target_ulong next_eip) +{ + if (is_int) { + SegmentCache *dt; + target_ulong ptr; + int dpl, cpl, shift; + uint32_t e2; + + dt = &env->idt; + if (env->hflags & HF_LMA_MASK) { + shift = 4; + } else { + shift = 3; + } + ptr = dt->base + (intno << shift); + e2 = cpu_ldl_kernel(env, ptr + 4); + + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + /* check privilege if software int */ + if (dpl < cpl) { + raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2); + } + } + + /* Since we emulate only user space, we cannot do more than + exiting the emulation with the suitable exception and error + code. So update EIP for INT 0x80 and EXCP_SYSCALL. */ + if (is_int || intno == EXCP_SYSCALL) { + env->eip = next_eip; + } +} + +void x86_cpu_do_interrupt(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + /* if user mode only, we simulate a fake exception + which will be handled outside the cpu execution + loop */ + do_interrupt_user(env, cs->exception_index, + env->exception_is_int, + env->error_code, + env->exception_next_eip); + /* successfully delivered */ + env->old_exception = -1; +} + +void cpu_x86_load_seg(CPUX86State *env, X86Seg seg_reg, int selector) +{ + if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { + int dpl = (env->eflags & VM_MASK) ? 3 : 0; + selector &= 0xffff; + cpu_x86_load_seg_cache(env, seg_reg, selector, + (selector << 4), 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK | (dpl << DESC_DPL_SHIFT)); + } else { + helper_load_seg(env, seg_reg, selector); + } +} diff --git a/target/lm32/README b/target/lm32/README deleted file mode 100644 index ba3508a711..0000000000 --- a/target/lm32/README +++ /dev/null @@ -1,45 +0,0 @@ -LatticeMico32 target --------------------- - -General -------- -All opcodes including the JUART CSRs are supported. - - -JTAG UART ---------- -JTAG UART is routed to a serial console device. For the current boards it -is the second one. Ie to enable it in the qemu virtual console window use -the following command line parameters: - -serial vc -serial vc -This will make serial0 (the lm32_uart) and serial1 (the JTAG UART) -available as virtual consoles. - - -Semihosting ------------ -Semihosting on this target is supported. 
Some system calls like read, write -and exit are executed on the host if semihosting is enabled. See -target/lm32-semi.c for all supported system calls. Emulation aware programs -can use this mechanism to shut down the virtual machine and print to the -host console. See the tcg tests for an example. - - -Special instructions --------------------- -The translation recognizes one special instruction to halt the cpu: - and r0, r0, r0 -On real hardware this instruction is a nop. It is not used by GCC and -should (hopefully) not be used within hand-crafted assembly. -Insert this instruction in your idle loop to reduce the cpu load on the -host. - - -Ignoring the MSB of the address bus ------------------------------------ -Some SoC ignores the MSB on the address bus. Thus creating a shadow memory -area. As a general rule, 0x00000000-0x7fffffff is cached, whereas -0x80000000-0xffffffff is not cached and used to access IO devices. This -behaviour can be enabled with: - cpu_lm32_set_phys_msb_ignore(env, 1); - diff --git a/target/lm32/TODO b/target/lm32/TODO deleted file mode 100644 index e163c42ebe..0000000000 --- a/target/lm32/TODO +++ /dev/null @@ -1 +0,0 @@ -* linux-user emulation diff --git a/target/lm32/cpu-param.h b/target/lm32/cpu-param.h deleted file mode 100644 index d89574ad19..0000000000 --- a/target/lm32/cpu-param.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * LatticeMico32 cpu parameters for qemu. - * - * Copyright (c) 2010 Michael Walle <michael@walle.cc> - * SPDX-License-Identifier: LGPL-2.0+ - */ - -#ifndef LM32_CPU_PARAM_H -#define LM32_CPU_PARAM_H 1 - -#define TARGET_LONG_BITS 32 -#define TARGET_PAGE_BITS 12 -#define TARGET_PHYS_ADDR_SPACE_BITS 32 -#define TARGET_VIRT_ADDR_SPACE_BITS 32 -#define NB_MMU_MODES 1 - -#endif diff --git a/target/lm32/cpu-qom.h b/target/lm32/cpu-qom.h deleted file mode 100644 index 245b35cd1d..0000000000 --- a/target/lm32/cpu-qom.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * QEMU LatticeMico32 CPU - * - * Copyright (c) 2012 SUSE LINUX Products GmbH - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - * <http://www.gnu.org/licenses/lgpl-2.1.html> - */ -#ifndef QEMU_LM32_CPU_QOM_H -#define QEMU_LM32_CPU_QOM_H - -#include "hw/core/cpu.h" -#include "qom/object.h" - -#define TYPE_LM32_CPU "lm32-cpu" - -OBJECT_DECLARE_TYPE(LM32CPU, LM32CPUClass, - LM32_CPU) - -/** - * LM32CPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. - * - * A LatticeMico32 CPU model. 
- */ -struct LM32CPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - DeviceReset parent_reset; -}; - - -#endif diff --git a/target/lm32/cpu.c b/target/lm32/cpu.c deleted file mode 100644 index c23d72874c..0000000000 --- a/target/lm32/cpu.c +++ /dev/null @@ -1,274 +0,0 @@ -/* - * QEMU LatticeMico32 CPU - * - * Copyright (c) 2012 SUSE LINUX Products GmbH - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - * <http://www.gnu.org/licenses/lgpl-2.1.html> - */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "qemu/qemu-print.h" -#include "cpu.h" - - -static void lm32_cpu_set_pc(CPUState *cs, vaddr value) -{ - LM32CPU *cpu = LM32_CPU(cs); - - cpu->env.pc = value; -} - -static void lm32_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - const char *typename = object_class_get_name(oc); - char *name; - - name = g_strndup(typename, strlen(typename) - strlen(LM32_CPU_TYPE_SUFFIX)); - qemu_printf(" %s\n", name); - g_free(name); -} - - -void lm32_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list_sorted(TYPE_LM32_CPU, false); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, lm32_cpu_list_entry, NULL); - g_slist_free(list); -} - -static void lm32_cpu_init_cfg_reg(LM32CPU *cpu) -{ - CPULM32State *env = &cpu->env; - uint32_t cfg = 0; - - if (cpu->features & LM32_FEATURE_MULTIPLY) { - cfg |= CFG_M; - } - - if (cpu->features & LM32_FEATURE_DIVIDE) { - cfg |= CFG_D; - } - - if (cpu->features & LM32_FEATURE_SHIFT) { - cfg |= CFG_S; - } - - if (cpu->features & LM32_FEATURE_SIGN_EXTEND) { - cfg |= CFG_X; - } - - if (cpu->features & LM32_FEATURE_I_CACHE) { - cfg |= CFG_IC; - } - - if (cpu->features & LM32_FEATURE_D_CACHE) { - cfg |= CFG_DC; - } - - if (cpu->features & LM32_FEATURE_CYCLE_COUNT) { - cfg |= CFG_CC; - } - - cfg |= (cpu->num_interrupts << CFG_INT_SHIFT); - cfg |= (cpu->num_breakpoints << CFG_BP_SHIFT); - cfg |= (cpu->num_watchpoints << CFG_WP_SHIFT); - cfg |= (cpu->revision << CFG_REV_SHIFT); - - env->cfg = cfg; -} - -static bool lm32_cpu_has_work(CPUState *cs) -{ - return cs->interrupt_request & CPU_INTERRUPT_HARD; -} - -static void lm32_cpu_reset(DeviceState *dev) -{ - CPUState *s = CPU(dev); - LM32CPU *cpu = LM32_CPU(s); - LM32CPUClass *lcc = LM32_CPU_GET_CLASS(cpu); - CPULM32State *env = &cpu->env; - - lcc->parent_reset(dev); - - /* reset cpu state */ - memset(env, 0, offsetof(CPULM32State, end_reset_fields)); - - lm32_cpu_init_cfg_reg(cpu); -} - -static void lm32_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) -{ - info->mach = bfd_mach_lm32; - info->print_insn = print_insn_lm32; -} - -static void lm32_cpu_realizefn(DeviceState *dev, Error **errp) -{ - CPUState *cs = CPU(dev); - LM32CPUClass *lcc = LM32_CPU_GET_CLASS(dev); - Error *local_err = NULL; - - cpu_exec_realizefn(cs, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return; - } - - 
cpu_reset(cs); - - qemu_init_vcpu(cs); - - lcc->parent_realize(dev, errp); -} - -static void lm32_cpu_initfn(Object *obj) -{ - LM32CPU *cpu = LM32_CPU(obj); - CPULM32State *env = &cpu->env; - - cpu_set_cpustate_pointers(cpu); - - env->flags = 0; -} - -static void lm32_basic_cpu_initfn(Object *obj) -{ - LM32CPU *cpu = LM32_CPU(obj); - - cpu->revision = 3; - cpu->num_interrupts = 32; - cpu->num_breakpoints = 4; - cpu->num_watchpoints = 4; - cpu->features = LM32_FEATURE_SHIFT - | LM32_FEATURE_SIGN_EXTEND - | LM32_FEATURE_CYCLE_COUNT; -} - -static void lm32_standard_cpu_initfn(Object *obj) -{ - LM32CPU *cpu = LM32_CPU(obj); - - cpu->revision = 3; - cpu->num_interrupts = 32; - cpu->num_breakpoints = 4; - cpu->num_watchpoints = 4; - cpu->features = LM32_FEATURE_MULTIPLY - | LM32_FEATURE_DIVIDE - | LM32_FEATURE_SHIFT - | LM32_FEATURE_SIGN_EXTEND - | LM32_FEATURE_I_CACHE - | LM32_FEATURE_CYCLE_COUNT; -} - -static void lm32_full_cpu_initfn(Object *obj) -{ - LM32CPU *cpu = LM32_CPU(obj); - - cpu->revision = 3; - cpu->num_interrupts = 32; - cpu->num_breakpoints = 4; - cpu->num_watchpoints = 4; - cpu->features = LM32_FEATURE_MULTIPLY - | LM32_FEATURE_DIVIDE - | LM32_FEATURE_SHIFT - | LM32_FEATURE_SIGN_EXTEND - | LM32_FEATURE_I_CACHE - | LM32_FEATURE_D_CACHE - | LM32_FEATURE_CYCLE_COUNT; -} - -static ObjectClass *lm32_cpu_class_by_name(const char *cpu_model) -{ - ObjectClass *oc; - char *typename; - - typename = g_strdup_printf(LM32_CPU_TYPE_NAME("%s"), cpu_model); - oc = object_class_by_name(typename); - g_free(typename); - if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_LM32_CPU) || - object_class_is_abstract(oc))) { - oc = NULL; - } - return oc; -} - -#include "hw/core/tcg-cpu-ops.h" - -static struct TCGCPUOps lm32_tcg_ops = { - .initialize = lm32_translate_init, - .cpu_exec_interrupt = lm32_cpu_exec_interrupt, - .tlb_fill = lm32_cpu_tlb_fill, - .debug_excp_handler = lm32_debug_excp_handler, - -#ifndef CONFIG_USER_ONLY - .do_interrupt = lm32_cpu_do_interrupt, -#endif /* !CONFIG_USER_ONLY */ -}; - -static void lm32_cpu_class_init(ObjectClass *oc, void *data) -{ - LM32CPUClass *lcc = LM32_CPU_CLASS(oc); - CPUClass *cc = CPU_CLASS(oc); - DeviceClass *dc = DEVICE_CLASS(oc); - - device_class_set_parent_realize(dc, lm32_cpu_realizefn, - &lcc->parent_realize); - device_class_set_parent_reset(dc, lm32_cpu_reset, &lcc->parent_reset); - - cc->class_by_name = lm32_cpu_class_by_name; - cc->has_work = lm32_cpu_has_work; - cc->dump_state = lm32_cpu_dump_state; - cc->set_pc = lm32_cpu_set_pc; - cc->gdb_read_register = lm32_cpu_gdb_read_register; - cc->gdb_write_register = lm32_cpu_gdb_write_register; -#ifndef CONFIG_USER_ONLY - cc->get_phys_page_debug = lm32_cpu_get_phys_page_debug; - cc->vmsd = &vmstate_lm32_cpu; -#endif - cc->gdb_num_core_regs = 32 + 7; - cc->gdb_stop_before_watchpoint = true; - cc->disas_set_info = lm32_cpu_disas_set_info; - cc->tcg_ops = &lm32_tcg_ops; -} - -#define DEFINE_LM32_CPU_TYPE(cpu_model, initfn) \ - { \ - .parent = TYPE_LM32_CPU, \ - .name = LM32_CPU_TYPE_NAME(cpu_model), \ - .instance_init = initfn, \ - } - -static const TypeInfo lm32_cpus_type_infos[] = { - { /* base class should be registered first */ - .name = TYPE_LM32_CPU, - .parent = TYPE_CPU, - .instance_size = sizeof(LM32CPU), - .instance_init = lm32_cpu_initfn, - .abstract = true, - .class_size = sizeof(LM32CPUClass), - .class_init = lm32_cpu_class_init, - }, - DEFINE_LM32_CPU_TYPE("lm32-basic", lm32_basic_cpu_initfn), - DEFINE_LM32_CPU_TYPE("lm32-standard", lm32_standard_cpu_initfn), - 
DEFINE_LM32_CPU_TYPE("lm32-full", lm32_full_cpu_initfn), -}; - -DEFINE_TYPES(lm32_cpus_type_infos) diff --git a/target/lm32/cpu.h b/target/lm32/cpu.h deleted file mode 100644 index ea7c01ca8b..0000000000 --- a/target/lm32/cpu.h +++ /dev/null @@ -1,262 +0,0 @@ -/* - * LatticeMico32 virtual CPU header. - * - * Copyright (c) 2010 Michael Walle <michael@walle.cc> - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef LM32_CPU_H -#define LM32_CPU_H - -#include "cpu-qom.h" -#include "exec/cpu-defs.h" - -typedef struct CPULM32State CPULM32State; - -static inline int cpu_mmu_index(CPULM32State *env, bool ifetch) -{ - return 0; -} - -/* Exceptions indices */ -enum { - EXCP_RESET = 0, - EXCP_BREAKPOINT, - EXCP_INSN_BUS_ERROR, - EXCP_WATCHPOINT, - EXCP_DATA_BUS_ERROR, - EXCP_DIVIDE_BY_ZERO, - EXCP_IRQ, - EXCP_SYSTEMCALL -}; - -/* Registers */ -enum { - R_R0 = 0, R_R1, R_R2, R_R3, R_R4, R_R5, R_R6, R_R7, R_R8, R_R9, R_R10, - R_R11, R_R12, R_R13, R_R14, R_R15, R_R16, R_R17, R_R18, R_R19, R_R20, - R_R21, R_R22, R_R23, R_R24, R_R25, R_R26, R_R27, R_R28, R_R29, R_R30, - R_R31 -}; - -/* Register aliases */ -enum { - R_GP = R_R26, - R_FP = R_R27, - R_SP = R_R28, - R_RA = R_R29, - R_EA = R_R30, - R_BA = R_R31 -}; - -/* IE flags */ -enum { - IE_IE = (1<<0), - IE_EIE = (1<<1), - IE_BIE = (1<<2), -}; - -/* DC flags */ -enum { - DC_SS = (1<<0), - DC_RE = (1<<1), - DC_C0 = (1<<2), - DC_C1 = (1<<3), - DC_C2 = (1<<4), - DC_C3 = (1<<5), -}; - -/* CFG mask */ -enum { - CFG_M = (1<<0), - CFG_D = (1<<1), - CFG_S = (1<<2), - CFG_U = (1<<3), - CFG_X = (1<<4), - CFG_CC = (1<<5), - CFG_IC = (1<<6), - CFG_DC = (1<<7), - CFG_G = (1<<8), - CFG_H = (1<<9), - CFG_R = (1<<10), - CFG_J = (1<<11), - CFG_INT_SHIFT = 12, - CFG_BP_SHIFT = 18, - CFG_WP_SHIFT = 22, - CFG_REV_SHIFT = 26, -}; - -/* CSRs */ -enum { - CSR_IE = 0x00, - CSR_IM = 0x01, - CSR_IP = 0x02, - CSR_ICC = 0x03, - CSR_DCC = 0x04, - CSR_CC = 0x05, - CSR_CFG = 0x06, - CSR_EBA = 0x07, - CSR_DC = 0x08, - CSR_DEBA = 0x09, - CSR_JTX = 0x0e, - CSR_JRX = 0x0f, - CSR_BP0 = 0x10, - CSR_BP1 = 0x11, - CSR_BP2 = 0x12, - CSR_BP3 = 0x13, - CSR_WP0 = 0x18, - CSR_WP1 = 0x19, - CSR_WP2 = 0x1a, - CSR_WP3 = 0x1b, -}; - -enum { - LM32_FEATURE_MULTIPLY = 1, - LM32_FEATURE_DIVIDE = 2, - LM32_FEATURE_SHIFT = 4, - LM32_FEATURE_SIGN_EXTEND = 8, - LM32_FEATURE_I_CACHE = 16, - LM32_FEATURE_D_CACHE = 32, - LM32_FEATURE_CYCLE_COUNT = 64, -}; - -enum { - LM32_FLAG_IGNORE_MSB = 1, -}; - -struct CPULM32State { - /* general registers */ - uint32_t regs[32]; - - /* special registers */ - uint32_t pc; /* program counter */ - uint32_t ie; /* interrupt enable */ - uint32_t icc; /* instruction cache control */ - uint32_t dcc; /* data cache control */ - uint32_t cc; /* cycle counter */ - uint32_t cfg; /* configuration */ - - /* debug registers */ - uint32_t dc; /* debug control */ - uint32_t bp[4]; /* breakpoints */ - uint32_t wp[4]; /* watchpoints */ - - struct CPUBreakpoint 
*cpu_breakpoint[4]; - struct CPUWatchpoint *cpu_watchpoint[4]; - - /* Fields up to this point are cleared by a CPU reset */ - struct {} end_reset_fields; - - /* Fields from here on are preserved across CPU reset. */ - uint32_t eba; /* exception base address */ - uint32_t deba; /* debug exception base address */ - - /* interrupt controller handle for callbacks */ - DeviceState *pic_state; - /* JTAG UART handle for callbacks */ - DeviceState *juart_state; - - /* processor core features */ - uint32_t flags; - -}; - -/** - * LM32CPU: - * @env: #CPULM32State - * - * A LatticeMico32 CPU. - */ -struct LM32CPU { - /*< private >*/ - CPUState parent_obj; - /*< public >*/ - - CPUNegativeOffsetState neg; - CPULM32State env; - - uint32_t revision; - uint8_t num_interrupts; - uint8_t num_breakpoints; - uint8_t num_watchpoints; - uint32_t features; -}; - - -#ifndef CONFIG_USER_ONLY -extern const VMStateDescription vmstate_lm32_cpu; -#endif - -void lm32_cpu_do_interrupt(CPUState *cpu); -bool lm32_cpu_exec_interrupt(CPUState *cs, int int_req); -void lm32_cpu_dump_state(CPUState *cpu, FILE *f, int flags); -hwaddr lm32_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); -int lm32_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); -int lm32_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); - -typedef enum { - LM32_WP_DISABLED = 0, - LM32_WP_READ, - LM32_WP_WRITE, - LM32_WP_READ_WRITE, -} lm32_wp_t; - -static inline lm32_wp_t lm32_wp_type(uint32_t dc, int idx) -{ - assert(idx < 4); - return (dc >> (idx+1)*2) & 0x3; -} - -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. */ -int cpu_lm32_signal_handler(int host_signum, void *pinfo, - void *puc); -void lm32_cpu_list(void); -void lm32_translate_init(void); -void cpu_lm32_set_phys_msb_ignore(CPULM32State *env, int value); -void QEMU_NORETURN raise_exception(CPULM32State *env, int index); -void lm32_debug_excp_handler(CPUState *cs); -void lm32_breakpoint_insert(CPULM32State *env, int index, target_ulong address); -void lm32_breakpoint_remove(CPULM32State *env, int index); -void lm32_watchpoint_insert(CPULM32State *env, int index, target_ulong address, - lm32_wp_t wp_type); -void lm32_watchpoint_remove(CPULM32State *env, int index); -bool lm32_cpu_do_semihosting(CPUState *cs); - -#define LM32_CPU_TYPE_SUFFIX "-" TYPE_LM32_CPU -#define LM32_CPU_TYPE_NAME(model) model LM32_CPU_TYPE_SUFFIX -#define CPU_RESOLVING_TYPE TYPE_LM32_CPU - -#define cpu_list lm32_cpu_list -#define cpu_signal_handler cpu_lm32_signal_handler - -bool lm32_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); - -typedef CPULM32State CPUArchState; -typedef LM32CPU ArchCPU; - -#include "exec/cpu-all.h" - -static inline void cpu_get_tb_cpu_state(CPULM32State *env, target_ulong *pc, - target_ulong *cs_base, uint32_t *flags) -{ - *pc = env->pc; - *cs_base = 0; - *flags = 0; -} - -#endif diff --git a/target/lm32/gdbstub.c b/target/lm32/gdbstub.c deleted file mode 100644 index 56f508a5b6..0000000000 --- a/target/lm32/gdbstub.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * LM32 gdb server stub - * - * Copyright (c) 2003-2005 Fabrice Bellard - * Copyright (c) 2013 SUSE LINUX Products GmbH - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; 
either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>. - */ -#include "qemu/osdep.h" -#include "cpu.h" -#include "exec/gdbstub.h" -#include "hw/lm32/lm32_pic.h" - -int lm32_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) -{ - LM32CPU *cpu = LM32_CPU(cs); - CPULM32State *env = &cpu->env; - - if (n < 32) { - return gdb_get_reg32(mem_buf, env->regs[n]); - } else { - switch (n) { - case 32: - return gdb_get_reg32(mem_buf, env->pc); - /* FIXME: put in right exception ID */ - case 33: - return gdb_get_reg32(mem_buf, 0); - case 34: - return gdb_get_reg32(mem_buf, env->eba); - case 35: - return gdb_get_reg32(mem_buf, env->deba); - case 36: - return gdb_get_reg32(mem_buf, env->ie); - case 37: - return gdb_get_reg32(mem_buf, lm32_pic_get_im(env->pic_state)); - case 38: - return gdb_get_reg32(mem_buf, lm32_pic_get_ip(env->pic_state)); - } - } - return 0; -} - -int lm32_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) -{ - LM32CPU *cpu = LM32_CPU(cs); - CPUClass *cc = CPU_GET_CLASS(cs); - CPULM32State *env = &cpu->env; - uint32_t tmp; - - if (n > cc->gdb_num_core_regs) { - return 0; - } - - tmp = ldl_p(mem_buf); - - if (n < 32) { - env->regs[n] = tmp; - } else { - switch (n) { - case 32: - env->pc = tmp; - break; - case 34: - env->eba = tmp; - break; - case 35: - env->deba = tmp; - break; - case 36: - env->ie = tmp; - break; - case 37: - lm32_pic_set_im(env->pic_state, tmp); - break; - case 38: - lm32_pic_set_ip(env->pic_state, tmp); - break; - } - } - return 4; -} diff --git a/target/lm32/helper.c b/target/lm32/helper.c deleted file mode 100644 index 01cc3c53a5..0000000000 --- a/target/lm32/helper.c +++ /dev/null @@ -1,224 +0,0 @@ -/* - * LatticeMico32 helper routines. - * - * Copyright (c) 2010-2014 Michael Walle <michael@walle.cc> - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
- */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "exec/exec-all.h" -#include "qemu/host-utils.h" -#include "semihosting/semihost.h" -#include "exec/log.h" - -bool lm32_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - LM32CPU *cpu = LM32_CPU(cs); - CPULM32State *env = &cpu->env; - int prot; - - address &= TARGET_PAGE_MASK; - prot = PAGE_BITS; - if (env->flags & LM32_FLAG_IGNORE_MSB) { - tlb_set_page(cs, address, address & 0x7fffffff, prot, mmu_idx, - TARGET_PAGE_SIZE); - } else { - tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE); - } - return true; -} - -hwaddr lm32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) -{ - LM32CPU *cpu = LM32_CPU(cs); - - addr &= TARGET_PAGE_MASK; - if (cpu->env.flags & LM32_FLAG_IGNORE_MSB) { - return addr & 0x7fffffff; - } else { - return addr; - } -} - -void lm32_breakpoint_insert(CPULM32State *env, int idx, target_ulong address) -{ - cpu_breakpoint_insert(env_cpu(env), address, BP_CPU, - &env->cpu_breakpoint[idx]); -} - -void lm32_breakpoint_remove(CPULM32State *env, int idx) -{ - if (!env->cpu_breakpoint[idx]) { - return; - } - - cpu_breakpoint_remove_by_ref(env_cpu(env), env->cpu_breakpoint[idx]); - env->cpu_breakpoint[idx] = NULL; -} - -void lm32_watchpoint_insert(CPULM32State *env, int idx, target_ulong address, - lm32_wp_t wp_type) -{ - int flags = 0; - - switch (wp_type) { - case LM32_WP_DISABLED: - /* nothing to do */ - break; - case LM32_WP_READ: - flags = BP_CPU | BP_STOP_BEFORE_ACCESS | BP_MEM_READ; - break; - case LM32_WP_WRITE: - flags = BP_CPU | BP_STOP_BEFORE_ACCESS | BP_MEM_WRITE; - break; - case LM32_WP_READ_WRITE: - flags = BP_CPU | BP_STOP_BEFORE_ACCESS | BP_MEM_ACCESS; - break; - } - - if (flags != 0) { - cpu_watchpoint_insert(env_cpu(env), address, 1, flags, - &env->cpu_watchpoint[idx]); - } -} - -void lm32_watchpoint_remove(CPULM32State *env, int idx) -{ - if (!env->cpu_watchpoint[idx]) { - return; - } - - cpu_watchpoint_remove_by_ref(env_cpu(env), env->cpu_watchpoint[idx]); - env->cpu_watchpoint[idx] = NULL; -} - -static bool check_watchpoints(CPULM32State *env) -{ - LM32CPU *cpu = env_archcpu(env); - int i; - - for (i = 0; i < cpu->num_watchpoints; i++) { - if (env->cpu_watchpoint[i] && - env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) { - return true; - } - } - return false; -} - -void lm32_debug_excp_handler(CPUState *cs) -{ - LM32CPU *cpu = LM32_CPU(cs); - CPULM32State *env = &cpu->env; - CPUBreakpoint *bp; - - if (cs->watchpoint_hit) { - if (cs->watchpoint_hit->flags & BP_CPU) { - cs->watchpoint_hit = NULL; - if (check_watchpoints(env)) { - raise_exception(env, EXCP_WATCHPOINT); - } else { - cpu_loop_exit_noexc(cs); - } - } - } else { - QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { - if (bp->pc == env->pc) { - if (bp->flags & BP_CPU) { - raise_exception(env, EXCP_BREAKPOINT); - } - break; - } - } - } -} - -void lm32_cpu_do_interrupt(CPUState *cs) -{ - LM32CPU *cpu = LM32_CPU(cs); - CPULM32State *env = &cpu->env; - - qemu_log_mask(CPU_LOG_INT, - "exception at pc=%x type=%x\n", env->pc, cs->exception_index); - - switch (cs->exception_index) { - case EXCP_SYSTEMCALL: - if (unlikely(semihosting_enabled())) { - /* lm32_cpu_do_semihosting() returns true if the call was handled. Otherwise - * do the normal exception handling.
*/ - if (lm32_cpu_do_semihosting(cs)) { - env->pc += 4; - break; - } - } - /* fall through */ - case EXCP_INSN_BUS_ERROR: - case EXCP_DATA_BUS_ERROR: - case EXCP_DIVIDE_BY_ZERO: - case EXCP_IRQ: - /* non-debug exceptions */ - env->regs[R_EA] = env->pc; - env->ie |= (env->ie & IE_IE) ? IE_EIE : 0; - env->ie &= ~IE_IE; - if (env->dc & DC_RE) { - env->pc = env->deba + (cs->exception_index * 32); - } else { - env->pc = env->eba + (cs->exception_index * 32); - } - log_cpu_state_mask(CPU_LOG_INT, cs, 0); - break; - case EXCP_BREAKPOINT: - case EXCP_WATCHPOINT: - /* debug exceptions */ - env->regs[R_BA] = env->pc; - env->ie |= (env->ie & IE_IE) ? IE_BIE : 0; - env->ie &= ~IE_IE; - env->pc = env->deba + (cs->exception_index * 32); - log_cpu_state_mask(CPU_LOG_INT, cs, 0); - break; - default: - cpu_abort(cs, "unhandled exception type=%d\n", - cs->exception_index); - break; - } -} - -bool lm32_cpu_exec_interrupt(CPUState *cs, int interrupt_request) -{ - LM32CPU *cpu = LM32_CPU(cs); - CPULM32State *env = &cpu->env; - - if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->ie & IE_IE)) { - cs->exception_index = EXCP_IRQ; - lm32_cpu_do_interrupt(cs); - return true; - } - return false; -} - -/* Some soc ignores the MSB on the address bus. Thus creating a shadow memory - * area. As a general rule, 0x00000000-0x7fffffff is cached, whereas - * 0x80000000-0xffffffff is not cached and used to access IO devices. */ -void cpu_lm32_set_phys_msb_ignore(CPULM32State *env, int value) -{ - if (value) { - env->flags |= LM32_FLAG_IGNORE_MSB; - } else { - env->flags &= ~LM32_FLAG_IGNORE_MSB; - } -} diff --git a/target/lm32/helper.h b/target/lm32/helper.h deleted file mode 100644 index 445578c439..0000000000 --- a/target/lm32/helper.h +++ /dev/null @@ -1,14 +0,0 @@ -DEF_HELPER_2(raise_exception, void, env, i32) -DEF_HELPER_1(hlt, void, env) -DEF_HELPER_3(wcsr_bp, void, env, i32, i32) -DEF_HELPER_3(wcsr_wp, void, env, i32, i32) -DEF_HELPER_2(wcsr_dc, void, env, i32) -DEF_HELPER_2(wcsr_im, void, env, i32) -DEF_HELPER_2(wcsr_ip, void, env, i32) -DEF_HELPER_2(wcsr_jtx, void, env, i32) -DEF_HELPER_2(wcsr_jrx, void, env, i32) -DEF_HELPER_1(rcsr_im, i32, env) -DEF_HELPER_1(rcsr_ip, i32, env) -DEF_HELPER_1(rcsr_jtx, i32, env) -DEF_HELPER_1(rcsr_jrx, i32, env) -DEF_HELPER_1(ill, void, env) diff --git a/target/lm32/lm32-semi.c b/target/lm32/lm32-semi.c deleted file mode 100644 index 6a11a6299a..0000000000 --- a/target/lm32/lm32-semi.c +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Lattice Mico32 semihosting syscall interface - * - * Copyright (c) 2014 Michael Walle <michael@walle.cc> - * - * Based on target/m68k/m68k-semi.c, which is - * Copyright (c) 2005-2007 CodeSourcery. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "exec/helper-proto.h" -#include "qemu/log.h" -#include "exec/softmmu-semi.h" - -enum { - TARGET_SYS_exit = 1, - TARGET_SYS_open = 2, - TARGET_SYS_close = 3, - TARGET_SYS_read = 4, - TARGET_SYS_write = 5, - TARGET_SYS_lseek = 6, - TARGET_SYS_fstat = 10, - TARGET_SYS_stat = 15, -}; - -enum { - NEWLIB_O_RDONLY = 0x0, - NEWLIB_O_WRONLY = 0x1, - NEWLIB_O_RDWR = 0x2, - NEWLIB_O_APPEND = 0x8, - NEWLIB_O_CREAT = 0x200, - NEWLIB_O_TRUNC = 0x400, - NEWLIB_O_EXCL = 0x800, -}; - -static int translate_openflags(int flags) -{ - int hf; - - if (flags & NEWLIB_O_WRONLY) { - hf = O_WRONLY; - } else if (flags & NEWLIB_O_RDWR) { - hf = O_RDWR; - } else { - hf = O_RDONLY; - } - - if (flags & NEWLIB_O_APPEND) { - hf |= O_APPEND; - } - - if (flags & NEWLIB_O_CREAT) { - hf |= O_CREAT; - } - - if (flags & NEWLIB_O_TRUNC) { - hf |= O_TRUNC; - } - - if (flags & NEWLIB_O_EXCL) { - hf |= O_EXCL; - } - - return hf; -} - -struct newlib_stat { - int16_t newlib_st_dev; /* device */ - uint16_t newlib_st_ino; /* inode */ - uint16_t newlib_st_mode; /* protection */ - uint16_t newlib_st_nlink; /* number of hard links */ - uint16_t newlib_st_uid; /* user ID of owner */ - uint16_t newlib_st_gid; /* group ID of owner */ - int16_t newlib_st_rdev; /* device type (if inode device) */ - int32_t newlib_st_size; /* total size, in bytes */ - int32_t newlib_st_atime; /* time of last access */ - uint32_t newlib_st_spare1; - int32_t newlib_st_mtime; /* time of last modification */ - uint32_t newlib_st_spare2; - int32_t newlib_st_ctime; /* time of last change */ - uint32_t newlib_st_spare3; -} QEMU_PACKED; - -static int translate_stat(CPULM32State *env, target_ulong addr, - struct stat *s) -{ - struct newlib_stat *p; - - p = lock_user(VERIFY_WRITE, addr, sizeof(struct newlib_stat), 0); - if (!p) { - return 0; - } - p->newlib_st_dev = cpu_to_be16(s->st_dev); - p->newlib_st_ino = cpu_to_be16(s->st_ino); - p->newlib_st_mode = cpu_to_be16(s->st_mode); - p->newlib_st_nlink = cpu_to_be16(s->st_nlink); - p->newlib_st_uid = cpu_to_be16(s->st_uid); - p->newlib_st_gid = cpu_to_be16(s->st_gid); - p->newlib_st_rdev = cpu_to_be16(s->st_rdev); - p->newlib_st_size = cpu_to_be32(s->st_size); - p->newlib_st_atime = cpu_to_be32(s->st_atime); - p->newlib_st_mtime = cpu_to_be32(s->st_mtime); - p->newlib_st_ctime = cpu_to_be32(s->st_ctime); - unlock_user(p, addr, sizeof(struct newlib_stat)); - - return 1; -} - -bool lm32_cpu_do_semihosting(CPUState *cs) -{ - LM32CPU *cpu = LM32_CPU(cs); - CPULM32State *env = &cpu->env; - - int ret = -1; - target_ulong nr, arg0, arg1, arg2; - void *p; - struct stat s; - - nr = env->regs[R_R8]; - arg0 = env->regs[R_R1]; - arg1 = env->regs[R_R2]; - arg2 = env->regs[R_R3]; - - switch (nr) { - case TARGET_SYS_exit: - /* void _exit(int rc) */ - exit(arg0); - - case TARGET_SYS_open: - /* int open(const char *pathname, int flags) */ - p = lock_user_string(arg0); - if (!p) { - ret = -1; - } else { - ret = open(p, translate_openflags(arg2)); - unlock_user(p, arg0, 0); - } - break; - - case TARGET_SYS_read: - /* ssize_t read(int fd, const void *buf, size_t count) */ - p = lock_user(VERIFY_WRITE, arg1, arg2, 0); - if (!p) { - ret = -1; - } else { - ret = read(arg0, p, arg2); - unlock_user(p, arg1, arg2); - } - break; - - case TARGET_SYS_write: - /* ssize_t write(int fd, const void *buf, size_t count) */ - p = lock_user(VERIFY_READ, arg1, arg2, 1); - if (!p) { - ret = -1; - } else { - ret = write(arg0, p, arg2); - unlock_user(p, arg1, 0); - } - break; - - case TARGET_SYS_close: - 
/* int close(int fd) */ - /* don't close stdin/stdout/stderr */ - if (arg0 > 2) { - ret = close(arg0); - } else { - ret = 0; - } - break; - - case TARGET_SYS_lseek: - /* off_t lseek(int fd, off_t offset, int whence) */ - ret = lseek(arg0, arg1, arg2); - break; - - case TARGET_SYS_stat: - /* int stat(const char *path, struct stat *buf) */ - p = lock_user_string(arg0); - if (!p) { - ret = -1; - } else { - ret = stat(p, &s); - unlock_user(p, arg0, 0); - if (translate_stat(env, arg1, &s) == 0) { - ret = -1; - } - } - break; - - case TARGET_SYS_fstat: - /* int fstat(int fd, struct stat *buf) */ - ret = fstat(arg0, &s); - if (ret == 0) { - if (translate_stat(env, arg1, &s) == 0) { - ret = -1; - } - } - break; - - default: - /* unhandled */ - return false; - } - - env->regs[R_R1] = ret; - return true; -} diff --git a/target/lm32/machine.c b/target/lm32/machine.c deleted file mode 100644 index 365eaa2e47..0000000000 --- a/target/lm32/machine.c +++ /dev/null @@ -1,33 +0,0 @@ -#include "qemu/osdep.h" -#include "cpu.h" -#include "migration/cpu.h" - -static const VMStateDescription vmstate_env = { - .name = "env", - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { - VMSTATE_UINT32_ARRAY(regs, CPULM32State, 32), - VMSTATE_UINT32(pc, CPULM32State), - VMSTATE_UINT32(ie, CPULM32State), - VMSTATE_UINT32(icc, CPULM32State), - VMSTATE_UINT32(dcc, CPULM32State), - VMSTATE_UINT32(cc, CPULM32State), - VMSTATE_UINT32(eba, CPULM32State), - VMSTATE_UINT32(dc, CPULM32State), - VMSTATE_UINT32(deba, CPULM32State), - VMSTATE_UINT32_ARRAY(bp, CPULM32State, 4), - VMSTATE_UINT32_ARRAY(wp, CPULM32State, 4), - VMSTATE_END_OF_LIST() - } -}; - -const VMStateDescription vmstate_lm32_cpu = { - .name = "cpu", - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { - VMSTATE_STRUCT(env, LM32CPU, 1, vmstate_env, CPULM32State), - VMSTATE_END_OF_LIST() - } -}; diff --git a/target/lm32/meson.build b/target/lm32/meson.build deleted file mode 100644 index ef0eef07f1..0000000000 --- a/target/lm32/meson.build +++ /dev/null @@ -1,15 +0,0 @@ -lm32_ss = ss.source_set() -lm32_ss.add(files( - 'cpu.c', - 'gdbstub.c', - 'helper.c', - 'lm32-semi.c', - 'op_helper.c', - 'translate.c', -)) - -lm32_softmmu_ss = ss.source_set() -lm32_softmmu_ss.add(files('machine.c')) - -target_arch += {'lm32': lm32_ss} -target_softmmu_arch += {'lm32': lm32_softmmu_ss} diff --git a/target/lm32/op_helper.c b/target/lm32/op_helper.c deleted file mode 100644 index e39fcd5647..0000000000 --- a/target/lm32/op_helper.c +++ /dev/null @@ -1,148 +0,0 @@ -#include "qemu/osdep.h" -#include "cpu.h" -#include "exec/helper-proto.h" -#include "qemu/host-utils.h" -#include "qemu/main-loop.h" -#include "sysemu/runstate.h" - -#include "hw/lm32/lm32_pic.h" -#include "hw/char/lm32_juart.h" - -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" - -#ifndef CONFIG_USER_ONLY -#endif - -#if !defined(CONFIG_USER_ONLY) -void raise_exception(CPULM32State *env, int index) -{ - CPUState *cs = env_cpu(env); - - cs->exception_index = index; - cpu_loop_exit(cs); -} - -void HELPER(raise_exception)(CPULM32State *env, uint32_t index) -{ - raise_exception(env, index); -} - -void HELPER(hlt)(CPULM32State *env) -{ - CPUState *cs = env_cpu(env); - - cs->halted = 1; - cs->exception_index = EXCP_HLT; - cpu_loop_exit(cs); -} - -void HELPER(ill)(CPULM32State *env) -{ -#ifndef CONFIG_USER_ONLY - CPUState *cs = env_cpu(env); - fprintf(stderr, "VM paused due to illegal instruction. 
" - "Connect a debugger or switch to the monitor console " - "to find out more.\n"); - vm_stop(RUN_STATE_PAUSED); - cs->halted = 1; - raise_exception(env, EXCP_HALTED); -#endif -} - -void HELPER(wcsr_bp)(CPULM32State *env, uint32_t bp, uint32_t idx) -{ - uint32_t addr = bp & ~1; - - assert(idx < 4); - - env->bp[idx] = bp; - lm32_breakpoint_remove(env, idx); - if (bp & 1) { - lm32_breakpoint_insert(env, idx, addr); - } -} - -void HELPER(wcsr_wp)(CPULM32State *env, uint32_t wp, uint32_t idx) -{ - lm32_wp_t wp_type; - - assert(idx < 4); - - env->wp[idx] = wp; - - wp_type = lm32_wp_type(env->dc, idx); - lm32_watchpoint_remove(env, idx); - if (wp_type != LM32_WP_DISABLED) { - lm32_watchpoint_insert(env, idx, wp, wp_type); - } -} - -void HELPER(wcsr_dc)(CPULM32State *env, uint32_t dc) -{ - uint32_t old_dc; - int i; - lm32_wp_t old_type; - lm32_wp_t new_type; - - old_dc = env->dc; - env->dc = dc; - - for (i = 0; i < 4; i++) { - old_type = lm32_wp_type(old_dc, i); - new_type = lm32_wp_type(dc, i); - - if (old_type != new_type) { - lm32_watchpoint_remove(env, i); - if (new_type != LM32_WP_DISABLED) { - lm32_watchpoint_insert(env, i, env->wp[i], new_type); - } - } - } -} - -void HELPER(wcsr_im)(CPULM32State *env, uint32_t im) -{ - qemu_mutex_lock_iothread(); - lm32_pic_set_im(env->pic_state, im); - qemu_mutex_unlock_iothread(); -} - -void HELPER(wcsr_ip)(CPULM32State *env, uint32_t im) -{ - qemu_mutex_lock_iothread(); - lm32_pic_set_ip(env->pic_state, im); - qemu_mutex_unlock_iothread(); -} - -void HELPER(wcsr_jtx)(CPULM32State *env, uint32_t jtx) -{ - lm32_juart_set_jtx(env->juart_state, jtx); -} - -void HELPER(wcsr_jrx)(CPULM32State *env, uint32_t jrx) -{ - lm32_juart_set_jrx(env->juart_state, jrx); -} - -uint32_t HELPER(rcsr_im)(CPULM32State *env) -{ - return lm32_pic_get_im(env->pic_state); -} - -uint32_t HELPER(rcsr_ip)(CPULM32State *env) -{ - return lm32_pic_get_ip(env->pic_state); -} - -uint32_t HELPER(rcsr_jtx)(CPULM32State *env) -{ - return lm32_juart_get_jtx(env->juart_state); -} - -uint32_t HELPER(rcsr_jrx)(CPULM32State *env) -{ - return lm32_juart_get_jrx(env->juart_state); -} -#endif - diff --git a/target/lm32/translate.c b/target/lm32/translate.c deleted file mode 100644 index 20c70d03f1..0000000000 --- a/target/lm32/translate.c +++ /dev/null @@ -1,1237 +0,0 @@ -/* - * LatticeMico32 main translation routines. - * - * Copyright (c) 2010 Michael Walle <michael@walle.cc> - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>. - */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "disas/disas.h" -#include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/translator.h" -#include "tcg/tcg-op.h" -#include "qemu/qemu-print.h" - -#include "exec/cpu_ldst.h" -#include "hw/lm32/lm32_pic.h" - -#include "exec/helper-gen.h" - -#include "trace-tcg.h" -#include "exec/log.h" - - -#define DISAS_LM32 0 - -#define LOG_DIS(...) 
\ - do { \ - if (DISAS_LM32) { \ - qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__); \ - } \ - } while (0) - -#define EXTRACT_FIELD(src, start, end) \ - (((src) >> start) & ((1 << (end - start + 1)) - 1)) - -#define MEM_INDEX 0 - -/* is_jmp field values */ -#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ -#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */ -#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */ - -static TCGv cpu_R[32]; -static TCGv cpu_pc; -static TCGv cpu_ie; -static TCGv cpu_icc; -static TCGv cpu_dcc; -static TCGv cpu_cc; -static TCGv cpu_cfg; -static TCGv cpu_eba; -static TCGv cpu_dc; -static TCGv cpu_deba; -static TCGv cpu_bp[4]; -static TCGv cpu_wp[4]; - -#include "exec/gen-icount.h" - -enum { - OP_FMT_RI, - OP_FMT_RR, - OP_FMT_CR, - OP_FMT_I -}; - -/* This is the state at translation time. */ -typedef struct DisasContext { - target_ulong pc; - - /* Decoder. */ - int format; - uint32_t ir; - uint8_t opcode; - uint8_t r0, r1, r2, csr; - uint16_t imm5; - uint16_t imm16; - uint32_t imm26; - - unsigned int delayed_branch; - unsigned int tb_flags, synced_flags; /* tb dependent flags. */ - int is_jmp; - - TranslationBlock *tb; - int singlestep_enabled; - - uint32_t features; - uint8_t num_breakpoints; - uint8_t num_watchpoints; -} DisasContext; - -static const char *regnames[] = { - "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", - "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", - "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", - "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra", - "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0", - "wp1", "wp2", "wp3" -}; - -static inline int zero_extend(unsigned int val, int width) -{ - return val & ((1 << width) - 1); -} - -static inline int sign_extend(unsigned int val, int width) -{ - int sval; - - /* LSL. */ - val <<= 32 - width; - sval = val; - /* ASR. 
*/ - sval >>= 32 - width; - - return sval; -} - -static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index) -{ - TCGv_i32 tmp = tcg_const_i32(index); - - gen_helper_raise_exception(cpu_env, tmp); - tcg_temp_free_i32(tmp); -} - -static inline void t_gen_illegal_insn(DisasContext *dc) -{ - tcg_gen_movi_tl(cpu_pc, dc->pc); - gen_helper_ill(cpu_env); -} - -static inline bool use_goto_tb(DisasContext *dc, target_ulong dest) -{ - if (unlikely(dc->singlestep_enabled)) { - return false; - } - -#ifndef CONFIG_USER_ONLY - return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); -#else - return true; -#endif -} - -static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) -{ - if (use_goto_tb(dc, dest)) { - tcg_gen_goto_tb(n); - tcg_gen_movi_tl(cpu_pc, dest); - tcg_gen_exit_tb(dc->tb, n); - } else { - tcg_gen_movi_tl(cpu_pc, dest); - if (dc->singlestep_enabled) { - t_gen_raise_exception(dc, EXCP_DEBUG); - } - tcg_gen_exit_tb(NULL, 0); - } -} - -static void dec_add(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - if (dc->r0 == R_R0) { - if (dc->r1 == R_R0 && dc->imm16 == 0) { - LOG_DIS("nop\n"); - } else { - LOG_DIS("mvi r%d, %d\n", dc->r1, sign_extend(dc->imm16, 16)); - } - } else { - LOG_DIS("addi r%d, r%d, %d\n", dc->r1, dc->r0, - sign_extend(dc->imm16, 16)); - } - } else { - LOG_DIS("add r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - if (dc->format == OP_FMT_RI) { - tcg_gen_addi_tl(cpu_R[dc->r1], cpu_R[dc->r0], - sign_extend(dc->imm16, 16)); - } else { - tcg_gen_add_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); - } -} - -static void dec_and(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("andi r%d, r%d, %d\n", dc->r1, dc->r0, - zero_extend(dc->imm16, 16)); - } else { - LOG_DIS("and r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - if (dc->format == OP_FMT_RI) { - tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0], - zero_extend(dc->imm16, 16)); - } else { - if (dc->r0 == 0 && dc->r1 == 0 && dc->r2 == 0) { - tcg_gen_movi_tl(cpu_pc, dc->pc + 4); - gen_helper_hlt(cpu_env); - } else { - tcg_gen_and_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); - } - } -} - -static void dec_andhi(DisasContext *dc) -{ - LOG_DIS("andhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16); - - tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16)); -} - -static void dec_b(DisasContext *dc) -{ - if (dc->r0 == R_RA) { - LOG_DIS("ret\n"); - } else if (dc->r0 == R_EA) { - LOG_DIS("eret\n"); - } else if (dc->r0 == R_BA) { - LOG_DIS("bret\n"); - } else { - LOG_DIS("b r%d\n", dc->r0); - } - - /* restore IE.IE in case of an eret */ - if (dc->r0 == R_EA) { - TCGv t0 = tcg_temp_new(); - TCGLabel *l1 = gen_new_label(); - tcg_gen_andi_tl(t0, cpu_ie, IE_EIE); - tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE); - tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1); - tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE); - gen_set_label(l1); - tcg_temp_free(t0); - } else if (dc->r0 == R_BA) { - TCGv t0 = tcg_temp_new(); - TCGLabel *l1 = gen_new_label(); - tcg_gen_andi_tl(t0, cpu_ie, IE_BIE); - tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE); - tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1); - tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE); - gen_set_label(l1); - tcg_temp_free(t0); - } - tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]); - - dc->is_jmp = DISAS_JUMP; -} - -static void dec_bi(DisasContext *dc) -{ - LOG_DIS("bi %d\n", sign_extend(dc->imm26 << 2, 26)); - - gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26))); - - dc->is_jmp = DISAS_TB_JUMP; -} - -static inline void gen_cond_branch(DisasContext *dc, int 
cond) -{ - TCGLabel *l1 = gen_new_label(); - tcg_gen_brcond_tl(cond, cpu_R[dc->r0], cpu_R[dc->r1], l1); - gen_goto_tb(dc, 0, dc->pc + 4); - gen_set_label(l1); - gen_goto_tb(dc, 1, dc->pc + (sign_extend(dc->imm16 << 2, 16))); - dc->is_jmp = DISAS_TB_JUMP; -} - -static void dec_be(DisasContext *dc) -{ - LOG_DIS("be r%d, r%d, %d\n", dc->r1, dc->r0, - sign_extend(dc->imm16, 16) * 4); - - gen_cond_branch(dc, TCG_COND_EQ); -} - -static void dec_bg(DisasContext *dc) -{ - LOG_DIS("bg r%d, r%d, %d\n", dc->r1, dc->r0, - sign_extend(dc->imm16, 16 * 4)); - - gen_cond_branch(dc, TCG_COND_GT); -} - -static void dec_bge(DisasContext *dc) -{ - LOG_DIS("bge r%d, r%d, %d\n", dc->r1, dc->r0, - sign_extend(dc->imm16, 16) * 4); - - gen_cond_branch(dc, TCG_COND_GE); -} - -static void dec_bgeu(DisasContext *dc) -{ - LOG_DIS("bgeu r%d, r%d, %d\n", dc->r1, dc->r0, - sign_extend(dc->imm16, 16) * 4); - - gen_cond_branch(dc, TCG_COND_GEU); -} - -static void dec_bgu(DisasContext *dc) -{ - LOG_DIS("bgu r%d, r%d, %d\n", dc->r1, dc->r0, - sign_extend(dc->imm16, 16) * 4); - - gen_cond_branch(dc, TCG_COND_GTU); -} - -static void dec_bne(DisasContext *dc) -{ - LOG_DIS("bne r%d, r%d, %d\n", dc->r1, dc->r0, - sign_extend(dc->imm16, 16) * 4); - - gen_cond_branch(dc, TCG_COND_NE); -} - -static void dec_call(DisasContext *dc) -{ - LOG_DIS("call r%d\n", dc->r0); - - tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4); - tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]); - - dc->is_jmp = DISAS_JUMP; -} - -static void dec_calli(DisasContext *dc) -{ - LOG_DIS("calli %d\n", sign_extend(dc->imm26, 26) * 4); - - tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4); - gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26))); - - dc->is_jmp = DISAS_TB_JUMP; -} - -static inline void gen_compare(DisasContext *dc, int cond) -{ - int i; - - if (dc->format == OP_FMT_RI) { - switch (cond) { - case TCG_COND_GEU: - case TCG_COND_GTU: - i = zero_extend(dc->imm16, 16); - break; - default: - i = sign_extend(dc->imm16, 16); - break; - } - - tcg_gen_setcondi_tl(cond, cpu_R[dc->r1], cpu_R[dc->r0], i); - } else { - tcg_gen_setcond_tl(cond, cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); - } -} - -static void dec_cmpe(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("cmpei r%d, r%d, %d\n", dc->r1, dc->r0, - sign_extend(dc->imm16, 16)); - } else { - LOG_DIS("cmpe r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - gen_compare(dc, TCG_COND_EQ); -} - -static void dec_cmpg(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("cmpgi r%d, r%d, %d\n", dc->r1, dc->r0, - sign_extend(dc->imm16, 16)); - } else { - LOG_DIS("cmpg r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - gen_compare(dc, TCG_COND_GT); -} - -static void dec_cmpge(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("cmpgei r%d, r%d, %d\n", dc->r1, dc->r0, - sign_extend(dc->imm16, 16)); - } else { - LOG_DIS("cmpge r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - gen_compare(dc, TCG_COND_GE); -} - -static void dec_cmpgeu(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("cmpgeui r%d, r%d, %d\n", dc->r1, dc->r0, - zero_extend(dc->imm16, 16)); - } else { - LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - gen_compare(dc, TCG_COND_GEU); -} - -static void dec_cmpgu(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("cmpgui r%d, r%d, %d\n", dc->r1, dc->r0, - zero_extend(dc->imm16, 16)); - } else { - LOG_DIS("cmpgu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - gen_compare(dc, TCG_COND_GTU); -} - -static void dec_cmpne(DisasContext *dc) -{ - if 
(dc->format == OP_FMT_RI) { - LOG_DIS("cmpnei r%d, r%d, %d\n", dc->r1, dc->r0, - sign_extend(dc->imm16, 16)); - } else { - LOG_DIS("cmpne r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - gen_compare(dc, TCG_COND_NE); -} - -static void dec_divu(DisasContext *dc) -{ - TCGLabel *l1; - - LOG_DIS("divu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - - if (!(dc->features & LM32_FEATURE_DIVIDE)) { - qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n"); - t_gen_illegal_insn(dc); - return; - } - - l1 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1); - tcg_gen_movi_tl(cpu_pc, dc->pc); - t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO); - gen_set_label(l1); - tcg_gen_divu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); -} - -static void dec_lb(DisasContext *dc) -{ - TCGv t0; - - LOG_DIS("lb r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16); - - t0 = tcg_temp_new(); - tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16)); - tcg_gen_qemu_ld8s(cpu_R[dc->r1], t0, MEM_INDEX); - tcg_temp_free(t0); -} - -static void dec_lbu(DisasContext *dc) -{ - TCGv t0; - - LOG_DIS("lbu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16); - - t0 = tcg_temp_new(); - tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16)); - tcg_gen_qemu_ld8u(cpu_R[dc->r1], t0, MEM_INDEX); - tcg_temp_free(t0); -} - -static void dec_lh(DisasContext *dc) -{ - TCGv t0; - - LOG_DIS("lh r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16); - - t0 = tcg_temp_new(); - tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16)); - tcg_gen_qemu_ld16s(cpu_R[dc->r1], t0, MEM_INDEX); - tcg_temp_free(t0); -} - -static void dec_lhu(DisasContext *dc) -{ - TCGv t0; - - LOG_DIS("lhu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16); - - t0 = tcg_temp_new(); - tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16)); - tcg_gen_qemu_ld16u(cpu_R[dc->r1], t0, MEM_INDEX); - tcg_temp_free(t0); -} - -static void dec_lw(DisasContext *dc) -{ - TCGv t0; - - LOG_DIS("lw r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16)); - - t0 = tcg_temp_new(); - tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16)); - tcg_gen_qemu_ld32s(cpu_R[dc->r1], t0, MEM_INDEX); - tcg_temp_free(t0); -} - -static void dec_modu(DisasContext *dc) -{ - TCGLabel *l1; - - LOG_DIS("modu r%d, r%d, %d\n", dc->r2, dc->r0, dc->r1); - - if (!(dc->features & LM32_FEATURE_DIVIDE)) { - qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n"); - t_gen_illegal_insn(dc); - return; - } - - l1 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1); - tcg_gen_movi_tl(cpu_pc, dc->pc); - t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO); - gen_set_label(l1); - tcg_gen_remu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); -} - -static void dec_mul(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("muli r%d, r%d, %d\n", dc->r1, dc->r0, - sign_extend(dc->imm16, 16)); - } else { - LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - if (!(dc->features & LM32_FEATURE_MULTIPLY)) { - qemu_log_mask(LOG_GUEST_ERROR, - "hardware multiplier is not available\n"); - t_gen_illegal_insn(dc); - return; - } - - if (dc->format == OP_FMT_RI) { - tcg_gen_muli_tl(cpu_R[dc->r1], cpu_R[dc->r0], - sign_extend(dc->imm16, 16)); - } else { - tcg_gen_mul_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); - } -} - -static void dec_nor(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("nori r%d, r%d, %d\n", dc->r1, dc->r0, - zero_extend(dc->imm16, 16)); - } else { - LOG_DIS("nor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); 
- } - - if (dc->format == OP_FMT_RI) { - TCGv t0 = tcg_temp_new(); - tcg_gen_movi_tl(t0, zero_extend(dc->imm16, 16)); - tcg_gen_nor_tl(cpu_R[dc->r1], cpu_R[dc->r0], t0); - tcg_temp_free(t0); - } else { - tcg_gen_nor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); - } -} - -static void dec_or(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("ori r%d, r%d, %d\n", dc->r1, dc->r0, - zero_extend(dc->imm16, 16)); - } else { - if (dc->r1 == R_R0) { - LOG_DIS("mv r%d, r%d\n", dc->r2, dc->r0); - } else { - LOG_DIS("or r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - } - - if (dc->format == OP_FMT_RI) { - tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0], - zero_extend(dc->imm16, 16)); - } else { - tcg_gen_or_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); - } -} - -static void dec_orhi(DisasContext *dc) -{ - if (dc->r0 == R_R0) { - LOG_DIS("mvhi r%d, %d\n", dc->r1, dc->imm16); - } else { - LOG_DIS("orhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16); - } - - tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16)); -} - -static void dec_scall(DisasContext *dc) -{ - switch (dc->imm5) { - case 2: - LOG_DIS("break\n"); - tcg_gen_movi_tl(cpu_pc, dc->pc); - t_gen_raise_exception(dc, EXCP_BREAKPOINT); - break; - case 7: - LOG_DIS("scall\n"); - tcg_gen_movi_tl(cpu_pc, dc->pc); - t_gen_raise_exception(dc, EXCP_SYSTEMCALL); - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode @0x%x", dc->pc); - t_gen_illegal_insn(dc); - break; - } -} - -static void dec_rcsr(DisasContext *dc) -{ - LOG_DIS("rcsr r%d, %d\n", dc->r2, dc->csr); - - switch (dc->csr) { - case CSR_IE: - tcg_gen_mov_tl(cpu_R[dc->r2], cpu_ie); - break; - case CSR_IM: - gen_helper_rcsr_im(cpu_R[dc->r2], cpu_env); - break; - case CSR_IP: - gen_helper_rcsr_ip(cpu_R[dc->r2], cpu_env); - break; - case CSR_CC: - tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cc); - break; - case CSR_CFG: - tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cfg); - break; - case CSR_EBA: - tcg_gen_mov_tl(cpu_R[dc->r2], cpu_eba); - break; - case CSR_DC: - tcg_gen_mov_tl(cpu_R[dc->r2], cpu_dc); - break; - case CSR_DEBA: - tcg_gen_mov_tl(cpu_R[dc->r2], cpu_deba); - break; - case CSR_JTX: - gen_helper_rcsr_jtx(cpu_R[dc->r2], cpu_env); - break; - case CSR_JRX: - gen_helper_rcsr_jrx(cpu_R[dc->r2], cpu_env); - break; - case CSR_ICC: - case CSR_DCC: - case CSR_BP0: - case CSR_BP1: - case CSR_BP2: - case CSR_BP3: - case CSR_WP0: - case CSR_WP1: - case CSR_WP2: - case CSR_WP3: - qemu_log_mask(LOG_GUEST_ERROR, "invalid read access csr=%x\n", dc->csr); - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, "read_csr: unknown csr=%x\n", dc->csr); - break; - } -} - -static void dec_sb(DisasContext *dc) -{ - TCGv t0; - - LOG_DIS("sb (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1); - - t0 = tcg_temp_new(); - tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16)); - tcg_gen_qemu_st8(cpu_R[dc->r1], t0, MEM_INDEX); - tcg_temp_free(t0); -} - -static void dec_sextb(DisasContext *dc) -{ - LOG_DIS("sextb r%d, r%d\n", dc->r2, dc->r0); - - if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) { - qemu_log_mask(LOG_GUEST_ERROR, - "hardware sign extender is not available\n"); - t_gen_illegal_insn(dc); - return; - } - - tcg_gen_ext8s_tl(cpu_R[dc->r2], cpu_R[dc->r0]); -} - -static void dec_sexth(DisasContext *dc) -{ - LOG_DIS("sexth r%d, r%d\n", dc->r2, dc->r0); - - if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) { - qemu_log_mask(LOG_GUEST_ERROR, - "hardware sign extender is not available\n"); - t_gen_illegal_insn(dc); - return; - } - - tcg_gen_ext16s_tl(cpu_R[dc->r2], cpu_R[dc->r0]); -} - -static void 
dec_sh(DisasContext *dc) -{ - TCGv t0; - - LOG_DIS("sh (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1); - - t0 = tcg_temp_new(); - tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16)); - tcg_gen_qemu_st16(cpu_R[dc->r1], t0, MEM_INDEX); - tcg_temp_free(t0); -} - -static void dec_sl(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("sli r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5); - } else { - LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - if (!(dc->features & LM32_FEATURE_SHIFT)) { - qemu_log_mask(LOG_GUEST_ERROR, "hardware shifter is not available\n"); - t_gen_illegal_insn(dc); - return; - } - - if (dc->format == OP_FMT_RI) { - tcg_gen_shli_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5); - } else { - TCGv t0 = tcg_temp_new(); - tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f); - tcg_gen_shl_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0); - tcg_temp_free(t0); - } -} - -static void dec_sr(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5); - } else { - LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - /* The real CPU (w/o hardware shifter) only supports right shift by exactly - * one bit */ - if (dc->format == OP_FMT_RI) { - if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) { - qemu_log_mask(LOG_GUEST_ERROR, - "hardware shifter is not available\n"); - t_gen_illegal_insn(dc); - return; - } - tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5); - } else { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGv t0 = tcg_temp_local_new(); - tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f); - - if (!(dc->features & LM32_FEATURE_SHIFT)) { - tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1); - t_gen_illegal_insn(dc); - tcg_gen_br(l2); - } - - gen_set_label(l1); - tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0); - gen_set_label(l2); - - tcg_temp_free(t0); - } -} - -static void dec_sru(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("srui r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5); - } else { - LOG_DIS("sru r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - if (dc->format == OP_FMT_RI) { - if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) { - qemu_log_mask(LOG_GUEST_ERROR, - "hardware shifter is not available\n"); - t_gen_illegal_insn(dc); - return; - } - tcg_gen_shri_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5); - } else { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGv t0 = tcg_temp_local_new(); - tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f); - - if (!(dc->features & LM32_FEATURE_SHIFT)) { - tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1); - t_gen_illegal_insn(dc); - tcg_gen_br(l2); - } - - gen_set_label(l1); - tcg_gen_shr_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0); - gen_set_label(l2); - - tcg_temp_free(t0); - } -} - -static void dec_sub(DisasContext *dc) -{ - LOG_DIS("sub r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - - tcg_gen_sub_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); -} - -static void dec_sw(DisasContext *dc) -{ - TCGv t0; - - LOG_DIS("sw (r%d+%d), r%d\n", dc->r0, sign_extend(dc->imm16, 16), dc->r1); - - t0 = tcg_temp_new(); - tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16)); - tcg_gen_qemu_st32(cpu_R[dc->r1], t0, MEM_INDEX); - tcg_temp_free(t0); -} - -static void dec_user(DisasContext *dc) -{ - LOG_DIS("user"); - - qemu_log_mask(LOG_GUEST_ERROR, "user instruction undefined\n"); - t_gen_illegal_insn(dc); -} - -static void dec_wcsr(DisasContext *dc) -{ - int no; - - LOG_DIS("wcsr %d, r%d\n", dc->csr, dc->r1); - - switch (dc->csr) 
{ - case CSR_IE: - tcg_gen_mov_tl(cpu_ie, cpu_R[dc->r1]); - tcg_gen_movi_tl(cpu_pc, dc->pc + 4); - dc->is_jmp = DISAS_UPDATE; - break; - case CSR_IM: - /* mark as an io operation because it could cause an interrupt */ - if (tb_cflags(dc->tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]); - tcg_gen_movi_tl(cpu_pc, dc->pc + 4); - dc->is_jmp = DISAS_UPDATE; - break; - case CSR_IP: - /* mark as an io operation because it could cause an interrupt */ - if (tb_cflags(dc->tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]); - tcg_gen_movi_tl(cpu_pc, dc->pc + 4); - dc->is_jmp = DISAS_UPDATE; - break; - case CSR_ICC: - /* TODO */ - break; - case CSR_DCC: - /* TODO */ - break; - case CSR_EBA: - tcg_gen_mov_tl(cpu_eba, cpu_R[dc->r1]); - break; - case CSR_DEBA: - tcg_gen_mov_tl(cpu_deba, cpu_R[dc->r1]); - break; - case CSR_JTX: - gen_helper_wcsr_jtx(cpu_env, cpu_R[dc->r1]); - break; - case CSR_JRX: - gen_helper_wcsr_jrx(cpu_env, cpu_R[dc->r1]); - break; - case CSR_DC: - gen_helper_wcsr_dc(cpu_env, cpu_R[dc->r1]); - break; - case CSR_BP0: - case CSR_BP1: - case CSR_BP2: - case CSR_BP3: - no = dc->csr - CSR_BP0; - if (dc->num_breakpoints <= no) { - qemu_log_mask(LOG_GUEST_ERROR, - "breakpoint #%i is not available\n", no); - t_gen_illegal_insn(dc); - break; - } - gen_helper_wcsr_bp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no)); - break; - case CSR_WP0: - case CSR_WP1: - case CSR_WP2: - case CSR_WP3: - no = dc->csr - CSR_WP0; - if (dc->num_watchpoints <= no) { - qemu_log_mask(LOG_GUEST_ERROR, - "watchpoint #%i is not available\n", no); - t_gen_illegal_insn(dc); - break; - } - gen_helper_wcsr_wp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no)); - break; - case CSR_CC: - case CSR_CFG: - qemu_log_mask(LOG_GUEST_ERROR, "invalid write access csr=%x\n", - dc->csr); - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, "write_csr: unknown csr=%x\n", - dc->csr); - break; - } -} - -static void dec_xnor(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("xnori r%d, r%d, %d\n", dc->r1, dc->r0, - zero_extend(dc->imm16, 16)); - } else { - if (dc->r1 == R_R0) { - LOG_DIS("not r%d, r%d\n", dc->r2, dc->r0); - } else { - LOG_DIS("xnor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - } - - if (dc->format == OP_FMT_RI) { - tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0], - zero_extend(dc->imm16, 16)); - tcg_gen_not_tl(cpu_R[dc->r1], cpu_R[dc->r1]); - } else { - tcg_gen_eqv_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); - } -} - -static void dec_xor(DisasContext *dc) -{ - if (dc->format == OP_FMT_RI) { - LOG_DIS("xori r%d, r%d, %d\n", dc->r1, dc->r0, - zero_extend(dc->imm16, 16)); - } else { - LOG_DIS("xor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); - } - - if (dc->format == OP_FMT_RI) { - tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0], - zero_extend(dc->imm16, 16)); - } else { - tcg_gen_xor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); - } -} - -static void dec_ill(DisasContext *dc) -{ - qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode 0x%02x\n", dc->opcode); - t_gen_illegal_insn(dc); -} - -typedef void (*DecoderInfo)(DisasContext *dc); -static const DecoderInfo decinfo[] = { - dec_sru, dec_nor, dec_mul, dec_sh, dec_lb, dec_sr, dec_xor, dec_lh, - dec_and, dec_xnor, dec_lw, dec_lhu, dec_sb, dec_add, dec_or, dec_sl, - dec_lbu, dec_be, dec_bg, dec_bge, dec_bgeu, dec_bgu, dec_sw, dec_bne, - dec_andhi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_orhi, - dec_cmpne, - dec_sru, dec_nor, dec_mul, dec_divu, dec_rcsr, dec_sr, dec_xor, dec_ill, - 
dec_and, dec_xnor, dec_ill, dec_scall, dec_sextb, dec_add, dec_or, dec_sl, - dec_b, dec_modu, dec_sub, dec_user, dec_wcsr, dec_ill, dec_call, dec_sexth, - dec_bi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_calli, - dec_cmpne -}; - -static inline void decode(DisasContext *dc, uint32_t ir) -{ - dc->ir = ir; - LOG_DIS("%8.8x\t", dc->ir); - - dc->opcode = EXTRACT_FIELD(ir, 26, 31); - - dc->imm5 = EXTRACT_FIELD(ir, 0, 4); - dc->imm16 = EXTRACT_FIELD(ir, 0, 15); - dc->imm26 = EXTRACT_FIELD(ir, 0, 25); - - dc->csr = EXTRACT_FIELD(ir, 21, 25); - dc->r0 = EXTRACT_FIELD(ir, 21, 25); - dc->r1 = EXTRACT_FIELD(ir, 16, 20); - dc->r2 = EXTRACT_FIELD(ir, 11, 15); - - /* bit 31 seems to indicate insn type. */ - if (ir & (1 << 31)) { - dc->format = OP_FMT_RR; - } else { - dc->format = OP_FMT_RI; - } - - assert(ARRAY_SIZE(decinfo) == 64); - assert(dc->opcode < 64); - - decinfo[dc->opcode](dc); -} - -/* generate intermediate code for basic block 'tb'. */ -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) -{ - CPULM32State *env = cs->env_ptr; - LM32CPU *cpu = env_archcpu(env); - struct DisasContext ctx, *dc = &ctx; - uint32_t pc_start; - uint32_t page_start; - int num_insns; - - pc_start = tb->pc; - dc->features = cpu->features; - dc->num_breakpoints = cpu->num_breakpoints; - dc->num_watchpoints = cpu->num_watchpoints; - dc->tb = tb; - - dc->is_jmp = DISAS_NEXT; - dc->pc = pc_start; - dc->singlestep_enabled = cs->singlestep_enabled; - - if (pc_start & 3) { - qemu_log_mask(LOG_GUEST_ERROR, - "unaligned PC=%x. Ignoring lowest bits.\n", pc_start); - pc_start &= ~3; - } - - page_start = pc_start & TARGET_PAGE_MASK; - num_insns = 0; - - gen_tb_start(tb); - do { - tcg_gen_insn_start(dc->pc); - num_insns++; - - if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) { - tcg_gen_movi_tl(cpu_pc, dc->pc); - t_gen_raise_exception(dc, EXCP_DEBUG); - dc->is_jmp = DISAS_UPDATE; - /* The address covered by the breakpoint must be included in - [tb->pc, tb->pc + tb->size) in order to for it to be - properly cleared -- thus we increment the PC here so that - the logic setting tb->size below does the right thing. */ - dc->pc += 4; - break; - } - - /* Pretty disas. 
*/ - LOG_DIS("%8.8x:\t", dc->pc); - - if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) { - gen_io_start(); - } - - decode(dc, cpu_ldl_code(env, dc->pc)); - dc->pc += 4; - } while (!dc->is_jmp - && !tcg_op_buf_full() - && !cs->singlestep_enabled - && !singlestep - && (dc->pc - page_start < TARGET_PAGE_SIZE) - && num_insns < max_insns); - - - if (unlikely(cs->singlestep_enabled)) { - if (dc->is_jmp == DISAS_NEXT) { - tcg_gen_movi_tl(cpu_pc, dc->pc); - } - t_gen_raise_exception(dc, EXCP_DEBUG); - } else { - switch (dc->is_jmp) { - case DISAS_NEXT: - gen_goto_tb(dc, 1, dc->pc); - break; - default: - case DISAS_JUMP: - case DISAS_UPDATE: - /* indicate that the hash table must be used - to find the next TB */ - tcg_gen_exit_tb(NULL, 0); - break; - case DISAS_TB_JUMP: - /* nothing more to generate */ - break; - } - } - - gen_tb_end(tb, num_insns); - - tb->size = dc->pc - pc_start; - tb->icount = num_insns; - -#ifdef DEBUG_DISAS - if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) - && qemu_log_in_addr_range(pc_start)) { - FILE *logfile = qemu_log_lock(); - qemu_log("\n"); - log_target_disas(cs, pc_start, dc->pc - pc_start); - qemu_log_unlock(logfile); - } -#endif -} - -void lm32_cpu_dump_state(CPUState *cs, FILE *f, int flags) -{ - LM32CPU *cpu = LM32_CPU(cs); - CPULM32State *env = &cpu->env; - int i; - - if (!env) { - return; - } - - qemu_fprintf(f, "IN: PC=%x %s\n", - env->pc, lookup_symbol(env->pc)); - - qemu_fprintf(f, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n", - env->ie, - (env->ie & IE_IE) ? 1 : 0, - (env->ie & IE_EIE) ? 1 : 0, - (env->ie & IE_BIE) ? 1 : 0, - lm32_pic_get_im(env->pic_state), - lm32_pic_get_ip(env->pic_state)); - qemu_fprintf(f, "eba=%8.8x deba=%8.8x\n", - env->eba, - env->deba); - - for (i = 0; i < 32; i++) { - qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]); - if ((i + 1) % 4 == 0) { - qemu_fprintf(f, "\n"); - } - } - qemu_fprintf(f, "\n\n"); -} - -void restore_state_to_opc(CPULM32State *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; -} - -void lm32_translate_init(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(cpu_R); i++) { - cpu_R[i] = tcg_global_mem_new(cpu_env, - offsetof(CPULM32State, regs[i]), - regnames[i]); - } - - for (i = 0; i < ARRAY_SIZE(cpu_bp); i++) { - cpu_bp[i] = tcg_global_mem_new(cpu_env, - offsetof(CPULM32State, bp[i]), - regnames[32+i]); - } - - for (i = 0; i < ARRAY_SIZE(cpu_wp); i++) { - cpu_wp[i] = tcg_global_mem_new(cpu_env, - offsetof(CPULM32State, wp[i]), - regnames[36+i]); - } - - cpu_pc = tcg_global_mem_new(cpu_env, - offsetof(CPULM32State, pc), - "pc"); - cpu_ie = tcg_global_mem_new(cpu_env, - offsetof(CPULM32State, ie), - "ie"); - cpu_icc = tcg_global_mem_new(cpu_env, - offsetof(CPULM32State, icc), - "icc"); - cpu_dcc = tcg_global_mem_new(cpu_env, - offsetof(CPULM32State, dcc), - "dcc"); - cpu_cc = tcg_global_mem_new(cpu_env, - offsetof(CPULM32State, cc), - "cc"); - cpu_cfg = tcg_global_mem_new(cpu_env, - offsetof(CPULM32State, cfg), - "cfg"); - cpu_eba = tcg_global_mem_new(cpu_env, - offsetof(CPULM32State, eba), - "eba"); - cpu_dc = tcg_global_mem_new(cpu_env, - offsetof(CPULM32State, dc), - "dc"); - cpu_deba = tcg_global_mem_new(cpu_env, - offsetof(CPULM32State, deba), - "deba"); -} - diff --git a/target/meson.build b/target/meson.build index 0e2c4b69cb..2f6940255e 100644 --- a/target/meson.build +++ b/target/meson.build @@ -5,11 +5,9 @@ subdir('cris') subdir('hexagon') subdir('hppa') subdir('i386') -subdir('lm32') subdir('m68k') subdir('microblaze') subdir('mips') -subdir('moxie') subdir('nios2') 
subdir('openrisc') subdir('ppc') @@ -19,5 +17,4 @@ subdir('s390x') subdir('sh4') subdir('sparc') subdir('tricore') -subdir('unicore32') subdir('xtensa') diff --git a/target/mips/cpu.c b/target/mips/cpu.c index dce1e166bd..1ad2fe4aa3 100644 --- a/target/mips/cpu.c +++ b/target/mips/cpu.c @@ -35,155 +35,84 @@ #include "qapi/qapi-commands-machine-target.h" #include "fpu_helper.h" -#if !defined(CONFIG_USER_ONLY) +const char regnames[32][4] = { + "r0", "at", "v0", "v1", "a0", "a1", "a2", "a3", + "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", + "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", + "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra", +}; -/* Called for updates to CP0_Status. */ -void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc) +static void fpu_dump_fpr(fpr_t *fpr, FILE *f, bool is_fpu64) { - int32_t tcstatus, *tcst; - uint32_t v = cpu->CP0_Status; - uint32_t cu, mx, asid, ksu; - uint32_t mask = ((1 << CP0TCSt_TCU3) - | (1 << CP0TCSt_TCU2) - | (1 << CP0TCSt_TCU1) - | (1 << CP0TCSt_TCU0) - | (1 << CP0TCSt_TMX) - | (3 << CP0TCSt_TKSU) - | (0xff << CP0TCSt_TASID)); - - cu = (v >> CP0St_CU0) & 0xf; - mx = (v >> CP0St_MX) & 0x1; - ksu = (v >> CP0St_KSU) & 0x3; - asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; - - tcstatus = cu << CP0TCSt_TCU0; - tcstatus |= mx << CP0TCSt_TMX; - tcstatus |= ksu << CP0TCSt_TKSU; - tcstatus |= asid; - - if (tc == cpu->current_tc) { - tcst = &cpu->active_tc.CP0_TCStatus; + if (is_fpu64) { + qemu_fprintf(f, "w:%08x d:%016" PRIx64 " fd:%13g fs:%13g psu: %13g\n", + fpr->w[FP_ENDIAN_IDX], fpr->d, + (double)fpr->fd, + (double)fpr->fs[FP_ENDIAN_IDX], + (double)fpr->fs[!FP_ENDIAN_IDX]); } else { - tcst = &cpu->tcs[tc].CP0_TCStatus; - } + fpr_t tmp; - *tcst &= ~mask; - *tcst |= tcstatus; - compute_hflags(cpu); + tmp.w[FP_ENDIAN_IDX] = fpr->w[FP_ENDIAN_IDX]; + tmp.w[!FP_ENDIAN_IDX] = (fpr + 1)->w[FP_ENDIAN_IDX]; + qemu_fprintf(f, "w:%08x d:%016" PRIx64 " fd:%13g fs:%13g psu:%13g\n", + tmp.w[FP_ENDIAN_IDX], tmp.d, + (double)tmp.fd, + (double)tmp.fs[FP_ENDIAN_IDX], + (double)tmp.fs[!FP_ENDIAN_IDX]); + } } -void cpu_mips_store_status(CPUMIPSState *env, target_ulong val) +static void fpu_dump_state(CPUMIPSState *env, FILE *f, int flags) { - uint32_t mask = env->CP0_Status_rw_bitmask; - target_ulong old = env->CP0_Status; - - if (env->insn_flags & ISA_MIPS_R6) { - bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3; -#if defined(TARGET_MIPS64) - uint32_t ksux = (1 << CP0St_KX) & val; - ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */ - ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */ - val = (val & ~(7 << CP0St_UX)) | ksux; -#endif - if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) { - mask &= ~(3 << CP0St_KSU); - } - mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val); - } + int i; + bool is_fpu64 = !!(env->hflags & MIPS_HFLAG_F64); - env->CP0_Status = (old & ~mask) | (val & mask); -#if defined(TARGET_MIPS64) - if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) { - /* Access to at least one of the 64-bit segments has been disabled */ - tlb_flush(env_cpu(env)); - } -#endif - if (ase_mt_available(env)) { - sync_c0_status(env, env, env->current_tc); - } else { - compute_hflags(env); + qemu_fprintf(f, + "CP1 FCR0 0x%08x FCR31 0x%08x SR.FR %d fp_status 0x%02x\n", + env->active_fpu.fcr0, env->active_fpu.fcr31, is_fpu64, + get_float_exception_flags(&env->active_fpu.fp_status)); + for (i = 0; i < 32; (is_fpu64) ? 
i++ : (i += 2)) { + qemu_fprintf(f, "%3s: ", fregnames[i]); + fpu_dump_fpr(&env->active_fpu.fpr[i], f, is_fpu64); } } -void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val) +static void mips_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - uint32_t mask = 0x00C00300; - uint32_t old = env->CP0_Cause; + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; int i; - if (env->insn_flags & ISA_MIPS_R2) { - mask |= 1 << CP0Ca_DC; - } - if (env->insn_flags & ISA_MIPS_R6) { - mask &= ~((1 << CP0Ca_WP) & val); - } - - env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask); - - if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) { - if (env->CP0_Cause & (1 << CP0Ca_DC)) { - cpu_mips_stop_count(env); - } else { - cpu_mips_start_count(env); + qemu_fprintf(f, "pc=0x" TARGET_FMT_lx " HI=0x" TARGET_FMT_lx + " LO=0x" TARGET_FMT_lx " ds %04x " + TARGET_FMT_lx " " TARGET_FMT_ld "\n", + env->active_tc.PC, env->active_tc.HI[0], env->active_tc.LO[0], + env->hflags, env->btarget, env->bcond); + for (i = 0; i < 32; i++) { + if ((i & 3) == 0) { + qemu_fprintf(f, "GPR%02d:", i); } - } - - /* Set/reset software interrupts */ - for (i = 0 ; i < 2 ; i++) { - if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) { - cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i))); + qemu_fprintf(f, " %s " TARGET_FMT_lx, + regnames[i], env->active_tc.gpr[i]); + if ((i & 3) == 3) { + qemu_fprintf(f, "\n"); } } -} -#endif /* !CONFIG_USER_ONLY */ - -static const char * const excp_names[EXCP_LAST + 1] = { - [EXCP_RESET] = "reset", - [EXCP_SRESET] = "soft reset", - [EXCP_DSS] = "debug single step", - [EXCP_DINT] = "debug interrupt", - [EXCP_NMI] = "non-maskable interrupt", - [EXCP_MCHECK] = "machine check", - [EXCP_EXT_INTERRUPT] = "interrupt", - [EXCP_DFWATCH] = "deferred watchpoint", - [EXCP_DIB] = "debug instruction breakpoint", - [EXCP_IWATCH] = "instruction fetch watchpoint", - [EXCP_AdEL] = "address error load", - [EXCP_AdES] = "address error store", - [EXCP_TLBF] = "TLB refill", - [EXCP_IBE] = "instruction bus error", - [EXCP_DBp] = "debug breakpoint", - [EXCP_SYSCALL] = "syscall", - [EXCP_BREAK] = "break", - [EXCP_CpU] = "coprocessor unusable", - [EXCP_RI] = "reserved instruction", - [EXCP_OVERFLOW] = "arithmetic overflow", - [EXCP_TRAP] = "trap", - [EXCP_FPE] = "floating point", - [EXCP_DDBS] = "debug data break store", - [EXCP_DWATCH] = "data watchpoint", - [EXCP_LTLBL] = "TLB modify", - [EXCP_TLBL] = "TLB load", - [EXCP_TLBS] = "TLB store", - [EXCP_DBE] = "data bus error", - [EXCP_DDBL] = "debug data break load", - [EXCP_THREAD] = "thread", - [EXCP_MDMX] = "MDMX", - [EXCP_C2E] = "precise coprocessor 2", - [EXCP_CACHE] = "cache error", - [EXCP_TLBXI] = "TLB execute-inhibit", - [EXCP_TLBRI] = "TLB read-inhibit", - [EXCP_MSADIS] = "MSA disabled", - [EXCP_MSAFPE] = "MSA floating point", -}; - -const char *mips_exception_name(int32_t exception) -{ - if (exception < 0 || exception > EXCP_LAST) { - return "unknown"; + qemu_fprintf(f, "CP0 Status 0x%08x Cause 0x%08x EPC 0x" + TARGET_FMT_lx "\n", + env->CP0_Status, env->CP0_Cause, env->CP0_EPC); + qemu_fprintf(f, " Config0 0x%08x Config1 0x%08x LLAddr 0x%016" + PRIx64 "\n", + env->CP0_Config0, env->CP0_Config1, env->CP0_LLAddr); + qemu_fprintf(f, " Config2 0x%08x Config3 0x%08x\n", + env->CP0_Config2, env->CP0_Config3); + qemu_fprintf(f, " Config4 0x%08x Config5 0x%08x\n", + env->CP0_Config4, env->CP0_Config5); + if ((flags & CPU_DUMP_FPU) && (env->hflags & MIPS_HFLAG_FPU)) { + fpu_dump_state(env, f, flags); } - return excp_names[exception]; } void 
cpu_set_exception_base(int vp_index, target_ulong address) @@ -192,101 +121,13 @@ void cpu_set_exception_base(int vp_index, target_ulong address) vp->env.exception_base = address; } -target_ulong exception_resume_pc(CPUMIPSState *env) -{ - target_ulong bad_pc; - target_ulong isa_mode; - - isa_mode = !!(env->hflags & MIPS_HFLAG_M16); - bad_pc = env->active_tc.PC | isa_mode; - if (env->hflags & MIPS_HFLAG_BMASK) { - /* - * If the exception was raised from a delay slot, come back to - * the jump. - */ - bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); - } - - return bad_pc; -} - -bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request) -{ - if (interrupt_request & CPU_INTERRUPT_HARD) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; - - if (cpu_mips_hw_interrupts_enabled(env) && - cpu_mips_hw_interrupts_pending(env)) { - /* Raise it */ - cs->exception_index = EXCP_EXT_INTERRUPT; - env->error_code = 0; - mips_cpu_do_interrupt(cs); - return true; - } - } - return false; -} - -void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, - uint32_t exception, - int error_code, - uintptr_t pc) -{ - CPUState *cs = env_cpu(env); - - qemu_log_mask(CPU_LOG_INT, "%s: %d (%s) %d\n", - __func__, exception, mips_exception_name(exception), - error_code); - cs->exception_index = exception; - env->error_code = error_code; - - cpu_loop_exit_restore(cs, pc); -} - static void mips_cpu_set_pc(CPUState *cs, vaddr value) { MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; - env->active_tc.PC = value & ~(target_ulong)1; - if (value & 1) { - env->hflags |= MIPS_HFLAG_M16; - } else { - env->hflags &= ~(MIPS_HFLAG_M16); - } + mips_env_set_pc(&cpu->env, value); } -#ifdef CONFIG_TCG -static void mips_cpu_synchronize_from_tb(CPUState *cs, - const TranslationBlock *tb) -{ - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; - - env->active_tc.PC = tb->pc; - env->hflags &= ~MIPS_HFLAG_BMASK; - env->hflags |= tb->flags & MIPS_HFLAG_BMASK; -} - -# ifndef CONFIG_USER_ONLY -static bool mips_io_recompile_replay_branch(CPUState *cs, - const TranslationBlock *tb) -{ - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; - - if ((env->hflags & MIPS_HFLAG_BMASK) != 0 - && env->active_tc.PC != tb->pc) { - env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); - env->hflags &= ~MIPS_HFLAG_BMASK; - return true; - } - return false; -} -# endif /* !CONFIG_USER_ONLY */ -#endif /* CONFIG_TCG */ - static bool mips_cpu_has_work(CPUState *cs) { MIPSCPU *cpu = MIPS_CPU(cs); @@ -634,7 +475,7 @@ static void mips_cpu_realizefn(DeviceState *dev, Error **errp) env->exception_base = (int32_t)0xBFC00000; -#ifndef CONFIG_USER_ONLY +#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) mmu_init(env, env->cpu_model); #endif fpu_init(env, env->cpu_model); diff --git a/target/mips/fpu.c b/target/mips/fpu.c new file mode 100644 index 0000000000..c7c487c1f9 --- /dev/null +++ b/target/mips/fpu.c @@ -0,0 +1,25 @@ +/* + * Helpers for emulation of FPU-related MIPS instructions. 
+ * + * Copyright (C) 2004-2005 Jocelyn Mayer + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ +#include "qemu/osdep.h" +#include "fpu/softfloat-helpers.h" +#include "fpu_helper.h" + +/* convert MIPS rounding mode in FCR31 to IEEE library */ +const FloatRoundMode ieee_rm[4] = { + float_round_nearest_even, + float_round_to_zero, + float_round_up, + float_round_down +}; + +const char fregnames[32][4] = { + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", + "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", + "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", + "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", +}; diff --git a/target/mips/fpu_helper.h b/target/mips/fpu_helper.h index 1c2d6d35a7..ad1116e8c1 100644 --- a/target/mips/fpu_helper.h +++ b/target/mips/fpu_helper.h @@ -27,8 +27,14 @@ static inline void restore_flush_mode(CPUMIPSState *env) static inline void restore_snan_bit_mode(CPUMIPSState *env) { - set_snan_bit_is_one((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) == 0, - &env->active_fpu.fp_status); + bool nan2008 = env->active_fpu.fcr31 & (1 << FCR31_NAN2008); + + /* + * With nan2008, SNaNs are silenced in the usual way. + * Before that, SNaNs are not silenced; default nans are produced. + */ + set_snan_bit_is_one(!nan2008, &env->active_fpu.fp_status); + set_default_nan_mode(!nan2008, &env->active_fpu.fp_status); } static inline void restore_fp_status(CPUMIPSState *env) diff --git a/target/mips/helper.h b/target/mips/helper.h index 709494445d..a9c6c7d1a3 100644 --- a/target/mips/helper.h +++ b/target/mips/helper.h @@ -2,10 +2,6 @@ DEF_HELPER_3(raise_exception_err, noreturn, env, i32, int) DEF_HELPER_2(raise_exception, noreturn, env, i32) DEF_HELPER_1(raise_exception_debug, noreturn, env) -#ifndef CONFIG_USER_ONLY -DEF_HELPER_1(do_semihosting, void, env) -#endif - #ifdef TARGET_MIPS64 DEF_HELPER_4(sdl, void, env, tl, tl, int) DEF_HELPER_4(sdr, void, env, tl, tl, int) @@ -42,164 +38,6 @@ DEF_HELPER_FLAGS_1(dbitswap, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_4(rotx, TCG_CALL_NO_RWG_SE, tl, tl, i32, i32, i32) -#ifndef CONFIG_USER_ONLY -/* CP0 helpers */ -DEF_HELPER_1(mfc0_mvpcontrol, tl, env) -DEF_HELPER_1(mfc0_mvpconf0, tl, env) -DEF_HELPER_1(mfc0_mvpconf1, tl, env) -DEF_HELPER_1(mftc0_vpecontrol, tl, env) -DEF_HELPER_1(mftc0_vpeconf0, tl, env) -DEF_HELPER_1(mfc0_random, tl, env) -DEF_HELPER_1(mfc0_tcstatus, tl, env) -DEF_HELPER_1(mftc0_tcstatus, tl, env) -DEF_HELPER_1(mfc0_tcbind, tl, env) -DEF_HELPER_1(mftc0_tcbind, tl, env) -DEF_HELPER_1(mfc0_tcrestart, tl, env) -DEF_HELPER_1(mftc0_tcrestart, tl, env) -DEF_HELPER_1(mfc0_tchalt, tl, env) -DEF_HELPER_1(mftc0_tchalt, tl, env) -DEF_HELPER_1(mfc0_tccontext, tl, env) -DEF_HELPER_1(mftc0_tccontext, tl, env) -DEF_HELPER_1(mfc0_tcschedule, tl, env) -DEF_HELPER_1(mftc0_tcschedule, tl, env) -DEF_HELPER_1(mfc0_tcschefback, tl, env) -DEF_HELPER_1(mftc0_tcschefback, tl, env) -DEF_HELPER_1(mfc0_count, tl, env) -DEF_HELPER_1(mfc0_saar, tl, env) -DEF_HELPER_1(mfhc0_saar, tl, env) -DEF_HELPER_1(mftc0_entryhi, tl, env) -DEF_HELPER_1(mftc0_status, tl, env) -DEF_HELPER_1(mftc0_cause, tl, env) -DEF_HELPER_1(mftc0_epc, tl, env) -DEF_HELPER_1(mftc0_ebase, tl, env) -DEF_HELPER_2(mftc0_configx, tl, env, tl) -DEF_HELPER_1(mfc0_lladdr, tl, env) -DEF_HELPER_1(mfc0_maar, tl, env) -DEF_HELPER_1(mfhc0_maar, tl, env) -DEF_HELPER_2(mfc0_watchlo, tl, env, i32) -DEF_HELPER_2(mfc0_watchhi, tl, env, i32) -DEF_HELPER_2(mfhc0_watchhi, tl, env, i32) -DEF_HELPER_1(mfc0_debug, tl, env) -DEF_HELPER_1(mftc0_debug, tl, env) -#ifdef TARGET_MIPS64 
-DEF_HELPER_1(dmfc0_tcrestart, tl, env) -DEF_HELPER_1(dmfc0_tchalt, tl, env) -DEF_HELPER_1(dmfc0_tccontext, tl, env) -DEF_HELPER_1(dmfc0_tcschedule, tl, env) -DEF_HELPER_1(dmfc0_tcschefback, tl, env) -DEF_HELPER_1(dmfc0_lladdr, tl, env) -DEF_HELPER_1(dmfc0_maar, tl, env) -DEF_HELPER_2(dmfc0_watchlo, tl, env, i32) -DEF_HELPER_2(dmfc0_watchhi, tl, env, i32) -DEF_HELPER_1(dmfc0_saar, tl, env) -#endif /* TARGET_MIPS64 */ - -DEF_HELPER_2(mtc0_index, void, env, tl) -DEF_HELPER_2(mtc0_mvpcontrol, void, env, tl) -DEF_HELPER_2(mtc0_vpecontrol, void, env, tl) -DEF_HELPER_2(mttc0_vpecontrol, void, env, tl) -DEF_HELPER_2(mtc0_vpeconf0, void, env, tl) -DEF_HELPER_2(mttc0_vpeconf0, void, env, tl) -DEF_HELPER_2(mtc0_vpeconf1, void, env, tl) -DEF_HELPER_2(mtc0_yqmask, void, env, tl) -DEF_HELPER_2(mtc0_vpeopt, void, env, tl) -DEF_HELPER_2(mtc0_entrylo0, void, env, tl) -DEF_HELPER_2(mtc0_tcstatus, void, env, tl) -DEF_HELPER_2(mttc0_tcstatus, void, env, tl) -DEF_HELPER_2(mtc0_tcbind, void, env, tl) -DEF_HELPER_2(mttc0_tcbind, void, env, tl) -DEF_HELPER_2(mtc0_tcrestart, void, env, tl) -DEF_HELPER_2(mttc0_tcrestart, void, env, tl) -DEF_HELPER_2(mtc0_tchalt, void, env, tl) -DEF_HELPER_2(mttc0_tchalt, void, env, tl) -DEF_HELPER_2(mtc0_tccontext, void, env, tl) -DEF_HELPER_2(mttc0_tccontext, void, env, tl) -DEF_HELPER_2(mtc0_tcschedule, void, env, tl) -DEF_HELPER_2(mttc0_tcschedule, void, env, tl) -DEF_HELPER_2(mtc0_tcschefback, void, env, tl) -DEF_HELPER_2(mttc0_tcschefback, void, env, tl) -DEF_HELPER_2(mtc0_entrylo1, void, env, tl) -DEF_HELPER_2(mtc0_context, void, env, tl) -DEF_HELPER_2(mtc0_memorymapid, void, env, tl) -DEF_HELPER_2(mtc0_pagemask, void, env, tl) -DEF_HELPER_2(mtc0_pagegrain, void, env, tl) -DEF_HELPER_2(mtc0_segctl0, void, env, tl) -DEF_HELPER_2(mtc0_segctl1, void, env, tl) -DEF_HELPER_2(mtc0_segctl2, void, env, tl) -DEF_HELPER_2(mtc0_pwfield, void, env, tl) -DEF_HELPER_2(mtc0_pwsize, void, env, tl) -DEF_HELPER_2(mtc0_wired, void, env, tl) -DEF_HELPER_2(mtc0_srsconf0, void, env, tl) -DEF_HELPER_2(mtc0_srsconf1, void, env, tl) -DEF_HELPER_2(mtc0_srsconf2, void, env, tl) -DEF_HELPER_2(mtc0_srsconf3, void, env, tl) -DEF_HELPER_2(mtc0_srsconf4, void, env, tl) -DEF_HELPER_2(mtc0_hwrena, void, env, tl) -DEF_HELPER_2(mtc0_pwctl, void, env, tl) -DEF_HELPER_2(mtc0_count, void, env, tl) -DEF_HELPER_2(mtc0_saari, void, env, tl) -DEF_HELPER_2(mtc0_saar, void, env, tl) -DEF_HELPER_2(mthc0_saar, void, env, tl) -DEF_HELPER_2(mtc0_entryhi, void, env, tl) -DEF_HELPER_2(mttc0_entryhi, void, env, tl) -DEF_HELPER_2(mtc0_compare, void, env, tl) -DEF_HELPER_2(mtc0_status, void, env, tl) -DEF_HELPER_2(mttc0_status, void, env, tl) -DEF_HELPER_2(mtc0_intctl, void, env, tl) -DEF_HELPER_2(mtc0_srsctl, void, env, tl) -DEF_HELPER_2(mtc0_cause, void, env, tl) -DEF_HELPER_2(mttc0_cause, void, env, tl) -DEF_HELPER_2(mtc0_ebase, void, env, tl) -DEF_HELPER_2(mttc0_ebase, void, env, tl) -DEF_HELPER_2(mtc0_config0, void, env, tl) -DEF_HELPER_2(mtc0_config2, void, env, tl) -DEF_HELPER_2(mtc0_config3, void, env, tl) -DEF_HELPER_2(mtc0_config4, void, env, tl) -DEF_HELPER_2(mtc0_config5, void, env, tl) -DEF_HELPER_2(mtc0_lladdr, void, env, tl) -DEF_HELPER_2(mtc0_maar, void, env, tl) -DEF_HELPER_2(mthc0_maar, void, env, tl) -DEF_HELPER_2(mtc0_maari, void, env, tl) -DEF_HELPER_3(mtc0_watchlo, void, env, tl, i32) -DEF_HELPER_3(mtc0_watchhi, void, env, tl, i32) -DEF_HELPER_3(mthc0_watchhi, void, env, tl, i32) -DEF_HELPER_2(mtc0_xcontext, void, env, tl) -DEF_HELPER_2(mtc0_framemask, void, env, tl) -DEF_HELPER_2(mtc0_debug, void, env, 
tl) -DEF_HELPER_2(mttc0_debug, void, env, tl) -DEF_HELPER_2(mtc0_performance0, void, env, tl) -DEF_HELPER_2(mtc0_errctl, void, env, tl) -DEF_HELPER_2(mtc0_taglo, void, env, tl) -DEF_HELPER_2(mtc0_datalo, void, env, tl) -DEF_HELPER_2(mtc0_taghi, void, env, tl) -DEF_HELPER_2(mtc0_datahi, void, env, tl) - -#if defined(TARGET_MIPS64) -DEF_HELPER_2(dmtc0_entrylo0, void, env, i64) -DEF_HELPER_2(dmtc0_entrylo1, void, env, i64) -#endif - -/* MIPS MT functions */ -DEF_HELPER_2(mftgpr, tl, env, i32) -DEF_HELPER_2(mftlo, tl, env, i32) -DEF_HELPER_2(mfthi, tl, env, i32) -DEF_HELPER_2(mftacx, tl, env, i32) -DEF_HELPER_1(mftdsp, tl, env) -DEF_HELPER_3(mttgpr, void, env, tl, i32) -DEF_HELPER_3(mttlo, void, env, tl, i32) -DEF_HELPER_3(mtthi, void, env, tl, i32) -DEF_HELPER_3(mttacx, void, env, tl, i32) -DEF_HELPER_2(mttdsp, void, env, tl) -DEF_HELPER_0(dmt, tl) -DEF_HELPER_0(emt, tl) -DEF_HELPER_1(dvpe, tl, env) -DEF_HELPER_1(evpe, tl, env) - -/* R6 Multi-threading */ -DEF_HELPER_1(dvp, tl, env) -DEF_HELPER_1(evp, tl, env) -#endif /* !CONFIG_USER_ONLY */ - /* microMIPS functions */ DEF_HELPER_4(lwm, void, env, tl, tl, i32) DEF_HELPER_4(swm, void, env, tl, tl, i32) @@ -364,21 +202,6 @@ FOP_PROTO(sune) FOP_PROTO(sne) #undef FOP_PROTO -/* Special functions */ -#ifndef CONFIG_USER_ONLY -DEF_HELPER_1(tlbwi, void, env) -DEF_HELPER_1(tlbwr, void, env) -DEF_HELPER_1(tlbp, void, env) -DEF_HELPER_1(tlbr, void, env) -DEF_HELPER_1(tlbinv, void, env) -DEF_HELPER_1(tlbinvf, void, env) -DEF_HELPER_1(di, tl, env) -DEF_HELPER_1(ei, tl, env) -DEF_HELPER_1(eret, void, env) -DEF_HELPER_1(eretnc, void, env) -DEF_HELPER_1(deret, void, env) -DEF_HELPER_3(ginvt, void, env, tl, i32) -#endif /* !CONFIG_USER_ONLY */ DEF_HELPER_1(rdhwr_cpunum, tl, env) DEF_HELPER_1(rdhwr_synci_step, tl, env) DEF_HELPER_1(rdhwr_cc, tl, env) @@ -781,6 +604,8 @@ DEF_HELPER_FLAGS_3(dmthlip, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(wrdsp, 0, void, tl, tl, env) DEF_HELPER_FLAGS_2(rddsp, 0, tl, tl, env) -DEF_HELPER_3(cache, void, env, tl, i32) +#ifndef CONFIG_USER_ONLY +#include "tcg/sysemu_helper.h.inc" +#endif /* !CONFIG_USER_ONLY */ -#include "msa_helper.h.inc" +#include "tcg/msa_helper.h.inc" diff --git a/target/mips/internal.h b/target/mips/internal.h index 99264b8bf6..18d5da64a5 100644 --- a/target/mips/internal.h +++ b/target/mips/internal.h @@ -9,6 +9,9 @@ #define MIPS_INTERNAL_H #include "exec/memattrs.h" +#ifdef CONFIG_TCG +#include "tcg/tcg-internal.h" +#endif /* * MMU types, the first four entries have the same layout as the @@ -71,21 +74,41 @@ struct mips_def_t { int32_t SAARP; }; +extern const char regnames[32][4]; +extern const char fregnames[32][4]; + extern const struct mips_def_t mips_defs[]; extern const int mips_defs_number; -void mips_cpu_do_interrupt(CPUState *cpu); -bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req); -void mips_cpu_dump_state(CPUState *cpu, FILE *f, int flags); -hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int mips_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); + +#define USEG_LIMIT ((target_ulong)(int32_t)0x7FFFFFFFUL) +#define KSEG0_BASE ((target_ulong)(int32_t)0x80000000UL) +#define KSEG1_BASE ((target_ulong)(int32_t)0xA0000000UL) +#define KSEG2_BASE ((target_ulong)(int32_t)0xC0000000UL) +#define KSEG3_BASE ((target_ulong)(int32_t)0xE0000000UL) + +#define KVM_KSEG0_BASE 
((target_ulong)(int32_t)0x40000000UL) +#define KVM_KSEG2_BASE ((target_ulong)(int32_t)0x60000000UL) #if !defined(CONFIG_USER_ONLY) +enum { + TLBRET_XI = -6, + TLBRET_RI = -5, + TLBRET_DIRTY = -4, + TLBRET_INVALID = -3, + TLBRET_NOMATCH = -2, + TLBRET_BADADDR = -1, + TLBRET_MATCH = 0 +}; + +int get_physical_address(CPUMIPSState *env, hwaddr *physical, + int *prot, target_ulong real_address, + MMUAccessType access_type, int mmu_idx); +hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); + typedef struct r4k_tlb_t r4k_tlb_t; struct r4k_tlb_t { target_ulong VPN; @@ -125,35 +148,15 @@ struct CPUMIPSTLBContext { } mmu; }; -int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, - target_ulong address, MMUAccessType access_type); -int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, - target_ulong address, MMUAccessType access_type); -int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, - target_ulong address, MMUAccessType access_type); -void r4k_helper_tlbwi(CPUMIPSState *env); -void r4k_helper_tlbwr(CPUMIPSState *env); -void r4k_helper_tlbp(CPUMIPSState *env); -void r4k_helper_tlbr(CPUMIPSState *env); -void r4k_helper_tlbinv(CPUMIPSState *env); -void r4k_helper_tlbinvf(CPUMIPSState *env); -void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra); -uint32_t cpu_mips_get_random(CPUMIPSState *env); - -void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, - vaddr addr, unsigned size, - MMUAccessType access_type, - int mmu_idx, MemTxAttrs attrs, - MemTxResult response, uintptr_t retaddr); -hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, - MMUAccessType access_type); -#endif - -#define cpu_signal_handler cpu_mips_signal_handler +void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc); +void cpu_mips_store_status(CPUMIPSState *env, target_ulong val); +void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val); -#ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_mips_cpu; -#endif + +#endif /* !CONFIG_USER_ONLY */ + +#define cpu_signal_handler cpu_mips_signal_handler static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env) { @@ -197,8 +200,6 @@ static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env) return r; } -void mips_tcg_init(void); - void msa_reset(CPUMIPSState *env); /* cp0_timer.c */ @@ -208,14 +209,15 @@ void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value); void cpu_mips_start_count(CPUMIPSState *env); void cpu_mips_stop_count(CPUMIPSState *env); -/* helper.c */ -void mmu_init(CPUMIPSState *env, const mips_def_t *def); -bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); - -/* op_helper.c */ -void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask); +static inline void mips_env_set_pc(CPUMIPSState *env, target_ulong value) +{ + env->active_tc.PC = value & ~(target_ulong)1; + if (value & 1) { + env->hflags |= MIPS_HFLAG_M16; + } else { + env->hflags &= ~(MIPS_HFLAG_M16); + } +} static inline void restore_pamask(CPUMIPSState *env) { @@ -397,21 +399,4 @@ static inline void compute_hflags(CPUMIPSState *env) } } -void cpu_mips_tlb_flush(CPUMIPSState *env); -void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc); -void cpu_mips_store_status(CPUMIPSState *env, target_ulong val); -void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val); - -const char *mips_exception_name(int32_t exception); - -void 
QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception, - int error_code, uintptr_t pc); - -static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env, - uint32_t exception, - uintptr_t pc) -{ - do_raise_exception_err(env, exception, 0, pc); -} - #endif diff --git a/target/mips/meson.build b/target/mips/meson.build index 3b131c4a7f..2407a05d4c 100644 --- a/target/mips/meson.build +++ b/target/mips/meson.build @@ -1,52 +1,23 @@ -gen = [ - decodetree.process('mips32r6.decode', extra_args: '--static-decode=decode_mips32r6'), - decodetree.process('mips64r6.decode', extra_args: '--static-decode=decode_mips64r6'), - decodetree.process('msa32.decode', extra_args: '--static-decode=decode_msa32'), - decodetree.process('msa64.decode', extra_args: '--static-decode=decode_msa64'), - decodetree.process('tx79.decode', extra_args: '--static-decode=decode_tx79'), -] - +mips_user_ss = ss.source_set() +mips_softmmu_ss = ss.source_set() mips_ss = ss.source_set() mips_ss.add(files( 'cpu.c', + 'fpu.c', 'gdbstub.c', -)) -mips_tcg_ss = ss.source_set() -mips_tcg_ss.add(gen) -mips_tcg_ss.add(files( - 'dsp_helper.c', - 'fpu_helper.c', - 'lmmi_helper.c', - 'msa_helper.c', - 'msa_translate.c', - 'op_helper.c', - 'rel6_translate.c', - 'tlb_helper.c', - 'translate.c', - 'translate_addr_const.c', - 'txx9_translate.c', -)) -mips_ss.add(when: ['CONFIG_TCG', 'TARGET_MIPS64'], if_true: files( - 'tx79_translate.c', -)) -mips_tcg_ss.add(when: 'TARGET_MIPS64', if_false: files( - 'mxu_translate.c', + 'msa.c', )) -mips_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c')) +if have_system + subdir('sysemu') +endif -mips_softmmu_ss = ss.source_set() -mips_softmmu_ss.add(files( - 'addr.c', - 'cp0_timer.c', - 'machine.c', -)) -mips_softmmu_ss.add(when: 'CONFIG_TCG', if_true: files( - 'cp0_helper.c', - 'mips-semi.c', -)) +if 'CONFIG_TCG' in config_all + subdir('tcg') +endif -mips_ss.add_all(when: 'CONFIG_TCG', if_true: [mips_tcg_ss]) +mips_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c')) target_arch += {'mips': mips_ss} target_softmmu_arch += {'mips': mips_softmmu_ss} +target_user_arch += {'mips': mips_user_ss} diff --git a/target/mips/msa.c b/target/mips/msa.c new file mode 100644 index 0000000000..61f1a9a593 --- /dev/null +++ b/target/mips/msa.c @@ -0,0 +1,60 @@ +/* + * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU. + * + * Copyright (c) 2014 Imagination Technologies + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "fpu/softfloat.h"
+#include "fpu_helper.h"
+
+void msa_reset(CPUMIPSState *env)
+{
+    if (!ase_msa_available(env)) {
+        return;
+    }
+
+#ifdef CONFIG_USER_ONLY
+    /* MSA access enabled */
+    env->CP0_Config5 |= 1 << CP0C5_MSAEn;
+    env->CP0_Status |= (1 << CP0St_CU1) | (1 << CP0St_FR);
+#endif
+
+    /*
+     * MSA CSR:
+     * - non-signaling floating point exception mode off (NX bit is 0)
+     * - Cause, Enables, and Flags are all 0
+     * - round to nearest / ties to even (RM bits are 0)
+     */
+    env->active_tc.msacsr = 0;
+
+    restore_msa_fp_status(env);
+
+    /* tininess detected after rounding. */
+    set_float_detect_tininess(float_tininess_after_rounding,
+                              &env->active_tc.msa_fp_status);
+
+    /* clear float_status exception flags */
+    set_float_exception_flags(0, &env->active_tc.msa_fp_status);
+
+    /* clear float_status nan mode */
+    set_default_nan_mode(0, &env->active_tc.msa_fp_status);
+
+    /* set proper signaling bit meaning ("1" means "quiet") */
+    set_snan_bit_is_one(0, &env->active_tc.msa_fp_status);
+}
diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c
deleted file mode 100644
index b80e8f7540..0000000000
--- a/target/mips/op_helper.c
+++ /dev/null
@@ -1,1210 +0,0 @@
-/*
- * MIPS emulation helpers for qemu.
- *
- * Copyright (c) 2004-2005 Jocelyn Mayer
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "internal.h"
-#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/memop.h"
-#include "fpu_helper.h"
-
-/*****************************************************************************/
-/* Exceptions processing helpers */
-
-void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
-                                int error_code)
-{
-    do_raise_exception_err(env, exception, error_code, 0);
-}
-
-void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
-{
-    do_raise_exception(env, exception, GETPC());
-}
-
-void helper_raise_exception_debug(CPUMIPSState *env)
-{
-    do_raise_exception(env, EXCP_DEBUG, 0);
-}
-
-static void raise_exception(CPUMIPSState *env, uint32_t exception)
-{
-    do_raise_exception(env, exception, 0);
-}
-
-/* 64 bits arithmetic for 32 bits hosts */
-static inline uint64_t get_HILO(CPUMIPSState *env)
-{
-    return ((uint64_t)(env->active_tc.HI[0]) << 32) |
-           (uint32_t)env->active_tc.LO[0];
-}
-
-static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
-{
-    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
-    return env->active_tc.HI[0] = (int32_t)(HILO >> 32);
-}
-
-static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
-{
-    target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
-    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
-    return tmp;
-}
-
-/* Multiplication variants of the vr54xx.
*/ -target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2)); -} - -target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 * - (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, (uint64_t)get_HILO(env) + - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (uint64_t)get_HILO(env) + - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, (uint64_t)get_HILO(env) - - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (uint64_t)get_HILO(env) - - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); -} - -target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 * - (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 * - (uint64_t)(uint32_t)arg2); -} - -static inline target_ulong bitswap(target_ulong v) -{ - v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) | - ((v & (target_ulong)0x5555555555555555ULL) << 1); - v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) | - ((v & (target_ulong)0x3333333333333333ULL) << 2); - v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) | - ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4); - return v; -} - -#ifdef TARGET_MIPS64 -target_ulong helper_dbitswap(target_ulong rt) -{ - return bitswap(rt); -} -#endif - -target_ulong helper_bitswap(target_ulong rt) -{ - return (int32_t)bitswap(rt); -} - -target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx, - uint32_t stripe) -{ - int i; - uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff); - uint64_t tmp1 = tmp0; - for (i = 0; i <= 46; i++) { - int s; - if (i & 0x8) { - s = shift; - } else { - s = shiftx; - 
} - - if (stripe != 0 && !(i & 0x4)) { - s = ~s; - } - if (s & 0x10) { - if (tmp0 & (1LL << (i + 16))) { - tmp1 |= 1LL << i; - } else { - tmp1 &= ~(1LL << i); - } - } - } - - uint64_t tmp2 = tmp1; - for (i = 0; i <= 38; i++) { - int s; - if (i & 0x4) { - s = shift; - } else { - s = shiftx; - } - - if (s & 0x8) { - if (tmp1 & (1LL << (i + 8))) { - tmp2 |= 1LL << i; - } else { - tmp2 &= ~(1LL << i); - } - } - } - - uint64_t tmp3 = tmp2; - for (i = 0; i <= 34; i++) { - int s; - if (i & 0x2) { - s = shift; - } else { - s = shiftx; - } - if (s & 0x4) { - if (tmp2 & (1LL << (i + 4))) { - tmp3 |= 1LL << i; - } else { - tmp3 &= ~(1LL << i); - } - } - } - - uint64_t tmp4 = tmp3; - for (i = 0; i <= 32; i++) { - int s; - if (i & 0x1) { - s = shift; - } else { - s = shiftx; - } - if (s & 0x2) { - if (tmp3 & (1LL << (i + 2))) { - tmp4 |= 1LL << i; - } else { - tmp4 &= ~(1LL << i); - } - } - } - - uint64_t tmp5 = tmp4; - for (i = 0; i <= 31; i++) { - int s; - s = shift; - if (s & 0x1) { - if (tmp4 & (1LL << (i + 1))) { - tmp5 |= 1LL << i; - } else { - tmp5 &= ~(1LL << i); - } - } - } - - return (int64_t)(int32_t)(uint32_t)tmp5; -} - -#ifndef CONFIG_USER_ONLY - -static inline hwaddr do_translate_address(CPUMIPSState *env, - target_ulong address, - MMUAccessType access_type, - uintptr_t retaddr) -{ - hwaddr paddr; - CPUState *cs = env_cpu(env); - - paddr = cpu_mips_translate_address(env, address, access_type); - - if (paddr == -1LL) { - cpu_loop_exit_restore(cs, retaddr); - } else { - return paddr; - } -} - -#define HELPER_LD_ATOMIC(name, insn, almask, do_cast) \ -target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \ -{ \ - if (arg & almask) { \ - if (!(env->hflags & MIPS_HFLAG_DM)) { \ - env->CP0_BadVAddr = arg; \ - } \ - do_raise_exception(env, EXCP_AdEL, GETPC()); \ - } \ - env->CP0_LLAddr = do_translate_address(env, arg, MMU_DATA_LOAD, GETPC()); \ - env->lladdr = arg; \ - env->llval = do_cast cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC()); \ - return env->llval; \ -} -HELPER_LD_ATOMIC(ll, ldl, 0x3, (target_long)(int32_t)) -#ifdef TARGET_MIPS64 -HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong)) -#endif -#undef HELPER_LD_ATOMIC -#endif - -#ifdef TARGET_WORDS_BIGENDIAN -#define GET_LMASK(v) ((v) & 3) -#define GET_OFFSET(addr, offset) (addr + (offset)) -#else -#define GET_LMASK(v) (((v) & 3) ^ 3) -#define GET_OFFSET(addr, offset) (addr - (offset)) -#endif - -void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, - int mem_idx) -{ - cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC()); - - if (GET_LMASK(arg2) <= 2) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), - mem_idx, GETPC()); - } - - if (GET_LMASK(arg2) <= 1) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), - mem_idx, GETPC()); - } - - if (GET_LMASK(arg2) == 0) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, - mem_idx, GETPC()); - } -} - -void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, - int mem_idx) -{ - cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); - - if (GET_LMASK(arg2) >= 1) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), - mem_idx, GETPC()); - } - - if (GET_LMASK(arg2) >= 2) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), - mem_idx, GETPC()); - } - - if (GET_LMASK(arg2) == 3) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), - mem_idx, GETPC()); - } -} - -#if defined(TARGET_MIPS64) -/* - * "half" load and stores. 
We must do the memory access inline, - * or fault handling won't work. - */ -#ifdef TARGET_WORDS_BIGENDIAN -#define GET_LMASK64(v) ((v) & 7) -#else -#define GET_LMASK64(v) (((v) & 7) ^ 7) -#endif - -void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, - int mem_idx) -{ - cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC()); - - if (GET_LMASK64(arg2) <= 6) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), - mem_idx, GETPC()); - } - - if (GET_LMASK64(arg2) <= 5) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), - mem_idx, GETPC()); - } - - if (GET_LMASK64(arg2) <= 4) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), - mem_idx, GETPC()); - } - - if (GET_LMASK64(arg2) <= 3) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), - mem_idx, GETPC()); - } - - if (GET_LMASK64(arg2) <= 2) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), - mem_idx, GETPC()); - } - - if (GET_LMASK64(arg2) <= 1) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), - mem_idx, GETPC()); - } - - if (GET_LMASK64(arg2) <= 0) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, - mem_idx, GETPC()); - } -} - -void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, - int mem_idx) -{ - cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); - - if (GET_LMASK64(arg2) >= 1) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), - mem_idx, GETPC()); - } - - if (GET_LMASK64(arg2) >= 2) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), - mem_idx, GETPC()); - } - - if (GET_LMASK64(arg2) >= 3) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), - mem_idx, GETPC()); - } - - if (GET_LMASK64(arg2) >= 4) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), - mem_idx, GETPC()); - } - - if (GET_LMASK64(arg2) >= 5) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), - mem_idx, GETPC()); - } - - if (GET_LMASK64(arg2) >= 6) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), - mem_idx, GETPC()); - } - - if (GET_LMASK64(arg2) == 7) { - cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), - mem_idx, GETPC()); - } -} -#endif /* TARGET_MIPS64 */ - -static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 }; - -void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, - uint32_t mem_idx) -{ - target_ulong base_reglist = reglist & 0xf; - target_ulong do_r31 = reglist & 0x10; - - if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { - target_ulong i; - - for (i = 0; i < base_reglist; i++) { - env->active_tc.gpr[multiple_regs[i]] = - (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC()); - addr += 4; - } - } - - if (do_r31) { - env->active_tc.gpr[31] = - (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC()); - } -} - -void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, - uint32_t mem_idx) -{ - target_ulong base_reglist = reglist & 0xf; - target_ulong do_r31 = reglist & 0x10; - - if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { - target_ulong i; - - for (i = 0; i < base_reglist; i++) { - cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]], - mem_idx, GETPC()); - addr += 4; - } - } - - if (do_r31) { - cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC()); - } -} - -#if defined(TARGET_MIPS64) -void 
helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, - uint32_t mem_idx) -{ - target_ulong base_reglist = reglist & 0xf; - target_ulong do_r31 = reglist & 0x10; - - if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { - target_ulong i; - - for (i = 0; i < base_reglist; i++) { - env->active_tc.gpr[multiple_regs[i]] = - cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC()); - addr += 8; - } - } - - if (do_r31) { - env->active_tc.gpr[31] = - cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC()); - } -} - -void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, - uint32_t mem_idx) -{ - target_ulong base_reglist = reglist & 0xf; - target_ulong do_r31 = reglist & 0x10; - - if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { - target_ulong i; - - for (i = 0; i < base_reglist; i++) { - cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]], - mem_idx, GETPC()); - addr += 8; - } - } - - if (do_r31) { - cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC()); - } -} -#endif - - -void helper_fork(target_ulong arg1, target_ulong arg2) -{ - /* - * arg1 = rt, arg2 = rs - * TODO: store to TC register - */ -} - -target_ulong helper_yield(CPUMIPSState *env, target_ulong arg) -{ - target_long arg1 = arg; - - if (arg1 < 0) { - /* No scheduling policy implemented. */ - if (arg1 != -2) { - if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) && - env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) { - env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); - env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT; - do_raise_exception(env, EXCP_THREAD, GETPC()); - } - } - } else if (arg1 == 0) { - if (0) { - /* TODO: TC underflow */ - env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); - do_raise_exception(env, EXCP_THREAD, GETPC()); - } else { - /* TODO: Deallocate TC */ - } - } else if (arg1 > 0) { - /* Yield qualifier inputs not implemented. */ - env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); - env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT; - do_raise_exception(env, EXCP_THREAD, GETPC()); - } - return env->CP0_YQMask; -} - -#ifndef CONFIG_USER_ONLY -/* TLB management */ -static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first) -{ - /* Discard entries from env->tlb[first] onwards. 
*/ - while (env->tlb->tlb_in_use > first) { - r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0); - } -} - -static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo) -{ -#if defined(TARGET_MIPS64) - return extract64(entrylo, 6, 54); -#else - return extract64(entrylo, 6, 24) | /* PFN */ - (extract64(entrylo, 32, 32) << 24); /* PFNX */ -#endif -} - -static void r4k_fill_tlb(CPUMIPSState *env, int idx) -{ - r4k_tlb_t *tlb; - uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1); - - /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */ - tlb = &env->tlb->mmu.r4k.tlb[idx]; - if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) { - tlb->EHINV = 1; - return; - } - tlb->EHINV = 0; - tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1); -#if defined(TARGET_MIPS64) - tlb->VPN &= env->SEGMask; -#endif - tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; - tlb->MMID = env->CP0_MemoryMapID; - tlb->PageMask = env->CP0_PageMask; - tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; - tlb->V0 = (env->CP0_EntryLo0 & 2) != 0; - tlb->D0 = (env->CP0_EntryLo0 & 4) != 0; - tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7; - tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1; - tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1; - tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12; - tlb->V1 = (env->CP0_EntryLo1 & 2) != 0; - tlb->D1 = (env->CP0_EntryLo1 & 4) != 0; - tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7; - tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1; - tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1; - tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12; -} - -void r4k_helper_tlbinv(CPUMIPSState *env) -{ - bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); - uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; - uint32_t MMID = env->CP0_MemoryMapID; - uint32_t tlb_mmid; - r4k_tlb_t *tlb; - int idx; - - MMID = mi ? MMID : (uint32_t) ASID; - for (idx = 0; idx < env->tlb->nb_tlb; idx++) { - tlb = &env->tlb->mmu.r4k.tlb[idx]; - tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; - if (!tlb->G && tlb_mmid == MMID) { - tlb->EHINV = 1; - } - } - cpu_mips_tlb_flush(env); -} - -void r4k_helper_tlbinvf(CPUMIPSState *env) -{ - int idx; - - for (idx = 0; idx < env->tlb->nb_tlb; idx++) { - env->tlb->mmu.r4k.tlb[idx].EHINV = 1; - } - cpu_mips_tlb_flush(env); -} - -void r4k_helper_tlbwi(CPUMIPSState *env) -{ - bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); - target_ulong VPN; - uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; - uint32_t MMID = env->CP0_MemoryMapID; - uint32_t tlb_mmid; - bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1; - r4k_tlb_t *tlb; - int idx; - - MMID = mi ? MMID : (uint32_t) ASID; - - idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; - tlb = &env->tlb->mmu.r4k.tlb[idx]; - VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1); -#if defined(TARGET_MIPS64) - VPN &= env->SEGMask; -#endif - EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0; - G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; - V0 = (env->CP0_EntryLo0 & 2) != 0; - D0 = (env->CP0_EntryLo0 & 4) != 0; - XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) &1; - RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) &1; - V1 = (env->CP0_EntryLo1 & 2) != 0; - D1 = (env->CP0_EntryLo1 & 4) != 0; - XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) &1; - RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) &1; - - tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; - /* - * Discard cached TLB entries, unless tlbwi is just upgrading access - * permissions on the current entry. 
- */ - if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G || - (!tlb->EHINV && EHINV) || - (tlb->V0 && !V0) || (tlb->D0 && !D0) || - (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) || - (tlb->V1 && !V1) || (tlb->D1 && !D1) || - (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) { - r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); - } - - r4k_invalidate_tlb(env, idx, 0); - r4k_fill_tlb(env, idx); -} - -void r4k_helper_tlbwr(CPUMIPSState *env) -{ - int r = cpu_mips_get_random(env); - - r4k_invalidate_tlb(env, r, 1); - r4k_fill_tlb(env, r); -} - -void r4k_helper_tlbp(CPUMIPSState *env) -{ - bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); - r4k_tlb_t *tlb; - target_ulong mask; - target_ulong tag; - target_ulong VPN; - uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; - uint32_t MMID = env->CP0_MemoryMapID; - uint32_t tlb_mmid; - int i; - - MMID = mi ? MMID : (uint32_t) ASID; - for (i = 0; i < env->tlb->nb_tlb; i++) { - tlb = &env->tlb->mmu.r4k.tlb[i]; - /* 1k pages are not supported. */ - mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); - tag = env->CP0_EntryHi & ~mask; - VPN = tlb->VPN & ~mask; -#if defined(TARGET_MIPS64) - tag &= env->SEGMask; -#endif - tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; - /* Check ASID/MMID, virtual page number & size */ - if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) { - /* TLB match */ - env->CP0_Index = i; - break; - } - } - if (i == env->tlb->nb_tlb) { - /* No match. Discard any shadow entries, if any of them match. */ - for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) { - tlb = &env->tlb->mmu.r4k.tlb[i]; - /* 1k pages are not supported. */ - mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); - tag = env->CP0_EntryHi & ~mask; - VPN = tlb->VPN & ~mask; -#if defined(TARGET_MIPS64) - tag &= env->SEGMask; -#endif - tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; - /* Check ASID/MMID, virtual page number & size */ - if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) { - r4k_mips_tlb_flush_extra(env, i); - break; - } - } - - env->CP0_Index |= 0x80000000; - } -} - -static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn) -{ -#if defined(TARGET_MIPS64) - return tlb_pfn << 6; -#else - return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */ - (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */ -#endif -} - -void r4k_helper_tlbr(CPUMIPSState *env) -{ - bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); - uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; - uint32_t MMID = env->CP0_MemoryMapID; - uint32_t tlb_mmid; - r4k_tlb_t *tlb; - int idx; - - MMID = mi ? MMID : (uint32_t) ASID; - idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; - tlb = &env->tlb->mmu.r4k.tlb[idx]; - - tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; - /* If this will change the current ASID/MMID, flush qemu's TLB. */ - if (MMID != tlb_mmid) { - cpu_mips_tlb_flush(env); - } - - r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); - - if (tlb->EHINV) { - env->CP0_EntryHi = 1 << CP0EnHi_EHINV; - env->CP0_PageMask = 0; - env->CP0_EntryLo0 = 0; - env->CP0_EntryLo1 = 0; - } else { - env->CP0_EntryHi = mi ? 
tlb->VPN : tlb->VPN | tlb->ASID; - env->CP0_MemoryMapID = tlb->MMID; - env->CP0_PageMask = tlb->PageMask; - env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) | - ((uint64_t)tlb->RI0 << CP0EnLo_RI) | - ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) | - get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12); - env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) | - ((uint64_t)tlb->RI1 << CP0EnLo_RI) | - ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) | - get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12); - } -} - -void helper_tlbwi(CPUMIPSState *env) -{ - env->tlb->helper_tlbwi(env); -} - -void helper_tlbwr(CPUMIPSState *env) -{ - env->tlb->helper_tlbwr(env); -} - -void helper_tlbp(CPUMIPSState *env) -{ - env->tlb->helper_tlbp(env); -} - -void helper_tlbr(CPUMIPSState *env) -{ - env->tlb->helper_tlbr(env); -} - -void helper_tlbinv(CPUMIPSState *env) -{ - env->tlb->helper_tlbinv(env); -} - -void helper_tlbinvf(CPUMIPSState *env) -{ - env->tlb->helper_tlbinvf(env); -} - -static void global_invalidate_tlb(CPUMIPSState *env, - uint32_t invMsgVPN2, - uint8_t invMsgR, - uint32_t invMsgMMid, - bool invAll, - bool invVAMMid, - bool invMMid, - bool invVA) -{ - - int idx; - r4k_tlb_t *tlb; - bool VAMatch; - bool MMidMatch; - - for (idx = 0; idx < env->tlb->nb_tlb; idx++) { - tlb = &env->tlb->mmu.r4k.tlb[idx]; - VAMatch = - (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask)) -#ifdef TARGET_MIPS64 - && - (extract64(env->CP0_EntryHi, 62, 2) == invMsgR) -#endif - ); - MMidMatch = tlb->MMID == invMsgMMid; - if ((invAll && (idx > env->CP0_Wired)) || - (VAMatch && invVAMMid && (tlb->G || MMidMatch)) || - (VAMatch && invVA) || - (MMidMatch && !(tlb->G) && invMMid)) { - tlb->EHINV = 1; - } - } - cpu_mips_tlb_flush(env); -} - -void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type) -{ - bool invAll = type == 0; - bool invVA = type == 1; - bool invMMid = type == 2; - bool invVAMMid = type == 3; - uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1); - uint8_t invMsgR = 0; - uint32_t invMsgMMid = env->CP0_MemoryMapID; - CPUState *other_cs = first_cpu; - -#ifdef TARGET_MIPS64 - invMsgR = extract64(arg, 62, 2); -#endif - - CPU_FOREACH(other_cs) { - MIPSCPU *other_cpu = MIPS_CPU(other_cs); - global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid, - invAll, invVAMMid, invMMid, invVA); - } -} - -/* Specials */ -target_ulong helper_di(CPUMIPSState *env) -{ - target_ulong t0 = env->CP0_Status; - - env->CP0_Status = t0 & ~(1 << CP0St_IE); - return t0; -} - -target_ulong helper_ei(CPUMIPSState *env) -{ - target_ulong t0 = env->CP0_Status; - - env->CP0_Status = t0 | (1 << CP0St_IE); - return t0; -} - -static void debug_pre_eret(CPUMIPSState *env) -{ - if (qemu_loglevel_mask(CPU_LOG_EXEC)) { - qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx, - env->active_tc.PC, env->CP0_EPC); - if (env->CP0_Status & (1 << CP0St_ERL)) { - qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC); - } - if (env->hflags & MIPS_HFLAG_DM) { - qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC); - } - qemu_log("\n"); - } -} - -static void debug_post_eret(CPUMIPSState *env) -{ - if (qemu_loglevel_mask(CPU_LOG_EXEC)) { - qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx, - env->active_tc.PC, env->CP0_EPC); - if (env->CP0_Status & (1 << CP0St_ERL)) { - qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC); - } - if (env->hflags & MIPS_HFLAG_DM) { - qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC); - } - switch (cpu_mmu_index(env, false)) { - case 3: - qemu_log(", ERL\n"); - break; - case 
MIPS_HFLAG_UM: - qemu_log(", UM\n"); - break; - case MIPS_HFLAG_SM: - qemu_log(", SM\n"); - break; - case MIPS_HFLAG_KM: - qemu_log("\n"); - break; - default: - cpu_abort(env_cpu(env), "Invalid MMU mode!\n"); - break; - } - } -} - -static void set_pc(CPUMIPSState *env, target_ulong error_pc) -{ - env->active_tc.PC = error_pc & ~(target_ulong)1; - if (error_pc & 1) { - env->hflags |= MIPS_HFLAG_M16; - } else { - env->hflags &= ~(MIPS_HFLAG_M16); - } -} - -static inline void exception_return(CPUMIPSState *env) -{ - debug_pre_eret(env); - if (env->CP0_Status & (1 << CP0St_ERL)) { - set_pc(env, env->CP0_ErrorEPC); - env->CP0_Status &= ~(1 << CP0St_ERL); - } else { - set_pc(env, env->CP0_EPC); - env->CP0_Status &= ~(1 << CP0St_EXL); - } - compute_hflags(env); - debug_post_eret(env); -} - -void helper_eret(CPUMIPSState *env) -{ - exception_return(env); - env->CP0_LLAddr = 1; - env->lladdr = 1; -} - -void helper_eretnc(CPUMIPSState *env) -{ - exception_return(env); -} - -void helper_deret(CPUMIPSState *env) -{ - debug_pre_eret(env); - - env->hflags &= ~MIPS_HFLAG_DM; - compute_hflags(env); - - set_pc(env, env->CP0_DEPC); - - debug_post_eret(env); -} -#endif /* !CONFIG_USER_ONLY */ - -static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc) -{ - if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) { - return; - } - do_raise_exception(env, EXCP_RI, pc); -} - -target_ulong helper_rdhwr_cpunum(CPUMIPSState *env) -{ - check_hwrena(env, 0, GETPC()); - return env->CP0_EBase & 0x3ff; -} - -target_ulong helper_rdhwr_synci_step(CPUMIPSState *env) -{ - check_hwrena(env, 1, GETPC()); - return env->SYNCI_Step; -} - -target_ulong helper_rdhwr_cc(CPUMIPSState *env) -{ - check_hwrena(env, 2, GETPC()); -#ifdef CONFIG_USER_ONLY - return env->CP0_Count; -#else - return (int32_t)cpu_mips_get_count(env); -#endif -} - -target_ulong helper_rdhwr_ccres(CPUMIPSState *env) -{ - check_hwrena(env, 3, GETPC()); - return env->CCRes; -} - -target_ulong helper_rdhwr_performance(CPUMIPSState *env) -{ - check_hwrena(env, 4, GETPC()); - return env->CP0_Performance0; -} - -target_ulong helper_rdhwr_xnp(CPUMIPSState *env) -{ - check_hwrena(env, 5, GETPC()); - return (env->CP0_Config5 >> CP0C5_XNP) & 1; -} - -void helper_pmon(CPUMIPSState *env, int function) -{ - function /= 2; - switch (function) { - case 2: /* TODO: char inbyte(int waitflag); */ - if (env->active_tc.gpr[4] == 0) { - env->active_tc.gpr[2] = -1; - } - /* Fall through */ - case 11: /* TODO: char inbyte (void); */ - env->active_tc.gpr[2] = -1; - break; - case 3: - case 12: - printf("%c", (char)(env->active_tc.gpr[4] & 0xFF)); - break; - case 17: - break; - case 158: - { - unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4]; - printf("%s", fmt); - } - break; - } -} - -void helper_wait(CPUMIPSState *env) -{ - CPUState *cs = env_cpu(env); - - cs->halted = 1; - cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); - /* - * Last instruction in the block, PC was updated before - * - no need to recover PC and icount. 
- */ - raise_exception(env, EXCP_HLT); -} - -#if !defined(CONFIG_USER_ONLY) - -void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr) -{ - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; - int error_code = 0; - int excp; - - if (!(env->hflags & MIPS_HFLAG_DM)) { - env->CP0_BadVAddr = addr; - } - - if (access_type == MMU_DATA_STORE) { - excp = EXCP_AdES; - } else { - excp = EXCP_AdEL; - if (access_type == MMU_INST_FETCH) { - error_code |= EXCP_INST_NOTAVAIL; - } - } - - do_raise_exception_err(env, excp, error_code, retaddr); -} - -void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, - vaddr addr, unsigned size, - MMUAccessType access_type, - int mmu_idx, MemTxAttrs attrs, - MemTxResult response, uintptr_t retaddr) -{ - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; - - if (access_type == MMU_INST_FETCH) { - do_raise_exception(env, EXCP_IBE, retaddr); - } else { - do_raise_exception(env, EXCP_DBE, retaddr); - } -} -#endif /* !CONFIG_USER_ONLY */ - -void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op) -{ -#ifndef CONFIG_USER_ONLY - static const char *const type_name[] = { - "Primary Instruction", - "Primary Data or Unified Primary", - "Tertiary", - "Secondary" - }; - uint32_t cache_type = extract32(op, 0, 2); - uint32_t cache_operation = extract32(op, 2, 3); - target_ulong index = addr & 0x1fffffff; - - switch (cache_operation) { - case 0b010: /* Index Store Tag */ - memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo, - MO_64, MEMTXATTRS_UNSPECIFIED); - break; - case 0b001: /* Index Load Tag */ - memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo, - MO_64, MEMTXATTRS_UNSPECIFIED); - break; - case 0b000: /* Index Invalidate */ - case 0b100: /* Hit Invalidate */ - case 0b110: /* Hit Writeback */ - /* no-op */ - break; - default: - qemu_log_mask(LOG_UNIMP, "cache operation:%u (type: %s cache)\n", - cache_operation, type_name[cache_type]); - break; - } -#endif -} diff --git a/target/mips/addr.c b/target/mips/sysemu/addr.c index 86f1c129c9..86f1c129c9 100644 --- a/target/mips/addr.c +++ b/target/mips/sysemu/addr.c diff --git a/target/mips/sysemu/cp0.c b/target/mips/sysemu/cp0.c new file mode 100644 index 0000000000..bae37f515b --- /dev/null +++ b/target/mips/sysemu/cp0.c @@ -0,0 +1,123 @@ +/* + * QEMU MIPS CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "exec/exec-all.h" + +/* Called for updates to CP0_Status. 
*/ +void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc) +{ + int32_t tcstatus, *tcst; + uint32_t v = cpu->CP0_Status; + uint32_t cu, mx, asid, ksu; + uint32_t mask = ((1 << CP0TCSt_TCU3) + | (1 << CP0TCSt_TCU2) + | (1 << CP0TCSt_TCU1) + | (1 << CP0TCSt_TCU0) + | (1 << CP0TCSt_TMX) + | (3 << CP0TCSt_TKSU) + | (0xff << CP0TCSt_TASID)); + + cu = (v >> CP0St_CU0) & 0xf; + mx = (v >> CP0St_MX) & 0x1; + ksu = (v >> CP0St_KSU) & 0x3; + asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + + tcstatus = cu << CP0TCSt_TCU0; + tcstatus |= mx << CP0TCSt_TMX; + tcstatus |= ksu << CP0TCSt_TKSU; + tcstatus |= asid; + + if (tc == cpu->current_tc) { + tcst = &cpu->active_tc.CP0_TCStatus; + } else { + tcst = &cpu->tcs[tc].CP0_TCStatus; + } + + *tcst &= ~mask; + *tcst |= tcstatus; + compute_hflags(cpu); +} + +void cpu_mips_store_status(CPUMIPSState *env, target_ulong val) +{ + uint32_t mask = env->CP0_Status_rw_bitmask; + target_ulong old = env->CP0_Status; + + if (env->insn_flags & ISA_MIPS_R6) { + bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3; +#if defined(TARGET_MIPS64) + uint32_t ksux = (1 << CP0St_KX) & val; + ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */ + ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */ + val = (val & ~(7 << CP0St_UX)) | ksux; +#endif + if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) { + mask &= ~(3 << CP0St_KSU); + } + mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val); + } + + env->CP0_Status = (old & ~mask) | (val & mask); +#if defined(TARGET_MIPS64) + if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) { + /* Access to at least one of the 64-bit segments has been disabled */ + tlb_flush(env_cpu(env)); + } +#endif + if (ase_mt_available(env)) { + sync_c0_status(env, env, env->current_tc); + } else { + compute_hflags(env); + } +} + +void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val) +{ + uint32_t mask = 0x00C00300; + uint32_t old = env->CP0_Cause; + int i; + + if (env->insn_flags & ISA_MIPS_R2) { + mask |= 1 << CP0Ca_DC; + } + if (env->insn_flags & ISA_MIPS_R6) { + mask &= ~((1 << CP0Ca_WP) & val); + } + + env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask); + + if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) { + if (env->CP0_Cause & (1 << CP0Ca_DC)) { + cpu_mips_stop_count(env); + } else { + cpu_mips_start_count(env); + } + } + + /* Set/reset software interrupts */ + for (i = 0 ; i < 2 ; i++) { + if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) { + cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i))); + } + } +} diff --git a/target/mips/cp0_timer.c b/target/mips/sysemu/cp0_timer.c index 70de95d338..70de95d338 100644 --- a/target/mips/cp0_timer.c +++ b/target/mips/sysemu/cp0_timer.c diff --git a/target/mips/machine.c b/target/mips/sysemu/machine.c index b5fda6a278..80d37f9c2f 100644 --- a/target/mips/machine.c +++ b/target/mips/sysemu/machine.c @@ -81,6 +81,9 @@ const VMStateDescription vmstate_inactive_fpu = { static VMStateField vmstate_tc_fields[] = { VMSTATE_UINTTL_ARRAY(gpr, TCState, 32), +#if defined(TARGET_MIPS64) + VMSTATE_UINT64_ARRAY(gpr_hi, TCState, 32), +#endif /* TARGET_MIPS64 */ VMSTATE_UINTTL(PC, TCState), VMSTATE_UINTTL_ARRAY(HI, TCState, MIPS_DSP_ACC), VMSTATE_UINTTL_ARRAY(LO, TCState, MIPS_DSP_ACC), @@ -95,20 +98,22 @@ static VMStateField vmstate_tc_fields[] = { VMSTATE_INT32(CP0_Debug_tcstatus, TCState), VMSTATE_UINTTL(CP0_UserLocal, TCState), VMSTATE_INT32(msacsr, TCState), + VMSTATE_UINTTL_ARRAY(mxu_gpr, TCState, NUMBER_OF_MXU_REGISTERS - 1), + 
VMSTATE_UINTTL(mxu_cr, TCState), VMSTATE_END_OF_LIST() }; const VMStateDescription vmstate_tc = { .name = "cpu/tc", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .fields = vmstate_tc_fields }; const VMStateDescription vmstate_inactive_tc = { .name = "cpu/inactive_tc", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .fields = vmstate_tc_fields }; @@ -213,8 +218,8 @@ const VMStateDescription vmstate_tlb = { const VMStateDescription vmstate_mips_cpu = { .name = "cpu", - .version_id = 20, - .minimum_version_id = 20, + .version_id = 21, + .minimum_version_id = 21, .post_load = cpu_post_load, .fields = (VMStateField[]) { /* Active TC */ @@ -241,6 +246,7 @@ const VMStateDescription vmstate_mips_cpu = { /* Remaining CP0 registers */ VMSTATE_INT32(env.CP0_Index, MIPSCPU), + VMSTATE_INT32(env.CP0_VPControl, MIPSCPU), VMSTATE_INT32(env.CP0_Random, MIPSCPU), VMSTATE_INT32(env.CP0_VPEControl, MIPSCPU), VMSTATE_INT32(env.CP0_VPEConf0, MIPSCPU), @@ -251,6 +257,7 @@ const VMStateDescription vmstate_mips_cpu = { VMSTATE_INT32(env.CP0_VPEOpt, MIPSCPU), VMSTATE_UINT64(env.CP0_EntryLo0, MIPSCPU), VMSTATE_UINT64(env.CP0_EntryLo1, MIPSCPU), + VMSTATE_INT32(env.CP0_GlobalNumber, MIPSCPU), VMSTATE_UINTTL(env.CP0_Context, MIPSCPU), VMSTATE_INT32(env.CP0_MemoryMapID, MIPSCPU), VMSTATE_INT32(env.CP0_PageMask, MIPSCPU), @@ -286,6 +293,7 @@ const VMStateDescription vmstate_mips_cpu = { VMSTATE_UINTTL(env.CP0_EPC, MIPSCPU), VMSTATE_INT32(env.CP0_PRid, MIPSCPU), VMSTATE_UINTTL(env.CP0_EBase, MIPSCPU), + VMSTATE_UINTTL(env.CP0_CMGCRBase, MIPSCPU), VMSTATE_INT32(env.CP0_Config0, MIPSCPU), VMSTATE_INT32(env.CP0_Config1, MIPSCPU), VMSTATE_INT32(env.CP0_Config2, MIPSCPU), @@ -305,6 +313,7 @@ const VMStateDescription vmstate_mips_cpu = { VMSTATE_INT32(env.CP0_Debug, MIPSCPU), VMSTATE_UINTTL(env.CP0_DEPC, MIPSCPU), VMSTATE_INT32(env.CP0_Performance0, MIPSCPU), + VMSTATE_INT32(env.CP0_ErrCtl, MIPSCPU), VMSTATE_UINT64(env.CP0_TagLo, MIPSCPU), VMSTATE_INT32(env.CP0_DataLo, MIPSCPU), VMSTATE_INT32(env.CP0_TagHi, MIPSCPU), diff --git a/target/mips/sysemu/meson.build b/target/mips/sysemu/meson.build new file mode 100644 index 0000000000..cefc227582 --- /dev/null +++ b/target/mips/sysemu/meson.build @@ -0,0 +1,7 @@ +mips_softmmu_ss.add(files( + 'addr.c', + 'cp0.c', + 'cp0_timer.c', + 'machine.c', + 'physaddr.c', +)) diff --git a/target/mips/sysemu/physaddr.c b/target/mips/sysemu/physaddr.c new file mode 100644 index 0000000000..1918633aa1 --- /dev/null +++ b/target/mips/sysemu/physaddr.c @@ -0,0 +1,257 @@ +/* + * MIPS TLB (Translation lookaside buffer) helpers. + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "../internal.h" + +static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx) +{ + /* + * Interpret access control mode and mmu_idx. + * AdE? TLB? + * AM K S U E K S U E + * UK 0 0 1 1 0 0 - - 0 + * MK 1 0 1 1 0 1 - - !eu + * MSK 2 0 0 1 0 1 1 - !eu + * MUSK 3 0 0 0 0 1 1 1 !eu + * MUSUK 4 0 0 0 0 0 1 1 0 + * USK 5 0 0 1 0 0 0 - 0 + * - 6 - - - - - - - - + * UUSK 7 0 0 0 0 0 0 0 0 + */ + int32_t adetlb_mask; + + switch (mmu_idx) { + case 3: /* ERL */ + /* If EU is set, always unmapped */ + if (eu) { + return 0; + } + /* fall through */ + case MIPS_HFLAG_KM: + /* Never AdE, TLB mapped if AM={1,2,3} */ + adetlb_mask = 0x70000000; + goto check_tlb; + + case MIPS_HFLAG_SM: + /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */ + adetlb_mask = 0xc0380000; + goto check_ade; + + case MIPS_HFLAG_UM: + /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */ + adetlb_mask = 0xe4180000; + /* fall through */ + check_ade: + /* does this AM cause AdE in current execution mode */ + if ((adetlb_mask << am) < 0) { + return TLBRET_BADADDR; + } + adetlb_mask <<= 8; + /* fall through */ + check_tlb: + /* is this AM mapped in current execution mode */ + return ((adetlb_mask << am) < 0); + default: + assert(0); + return TLBRET_BADADDR; + }; +} + +static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical, + int *prot, target_ulong real_address, + MMUAccessType access_type, int mmu_idx, + unsigned int am, bool eu, + target_ulong segmask, + hwaddr physical_base) +{ + int mapped = is_seg_am_mapped(am, eu, mmu_idx); + + if (mapped < 0) { + /* is_seg_am_mapped can report TLBRET_BADADDR */ + return mapped; + } else if (mapped) { + /* The segment is TLB mapped */ + return env->tlb->map_address(env, physical, prot, real_address, + access_type); + } else { + /* The segment is unmapped */ + *physical = physical_base | (real_address & segmask); + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TLBRET_MATCH; + } +} + +static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical, + int *prot, target_ulong real_address, + MMUAccessType access_type, int mmu_idx, + uint16_t segctl, target_ulong segmask) +{ + unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM; + bool eu = (segctl >> CP0SC_EU) & 1; + hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20; + + return get_seg_physical_address(env, physical, prot, real_address, + access_type, mmu_idx, am, eu, segmask, + pa & ~(hwaddr)segmask); +} + +int get_physical_address(CPUMIPSState *env, hwaddr *physical, + int *prot, target_ulong real_address, + MMUAccessType access_type, int mmu_idx) +{ + /* User mode can only access useg/xuseg */ +#if defined(TARGET_MIPS64) + int user_mode = mmu_idx == MIPS_HFLAG_UM; + int supervisor_mode = mmu_idx == MIPS_HFLAG_SM; + int kernel_mode = !user_mode && !supervisor_mode; + int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; + int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; + int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; +#endif + int ret = TLBRET_MATCH; + /* effective address (modified for KVM T&E kernel segments) */ + target_ulong address = real_address; + + if (mips_um_ksegs_enabled()) { + /* KVM T&E adds guest kernel segments in useg */ + if (real_address >= KVM_KSEG0_BASE) { + if (real_address < KVM_KSEG2_BASE) { + /* kseg0 */ + address += KSEG0_BASE - KVM_KSEG0_BASE; + } else if (real_address <= USEG_LIMIT) { + /* kseg2/3 */ + address += KSEG2_BASE - KVM_KSEG2_BASE; + } + } + } + + if (address <= USEG_LIMIT) { + /* useg 
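: the 2 GB user segment. The two segment configurations packed into CP0.SegCtl2 cover its halves, split at 0x40000000.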
*/ + uint16_t segctl; + + if (address >= 0x40000000UL) { + segctl = env->CP0_SegCtl2; + } else { + segctl = env->CP0_SegCtl2 >> 16; + } + ret = get_segctl_physical_address(env, physical, prot, + real_address, access_type, + mmu_idx, segctl, 0x3FFFFFFF); +#if defined(TARGET_MIPS64) + } else if (address < 0x4000000000000000ULL) { + /* xuseg */ + if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) { + ret = env->tlb->map_address(env, physical, prot, + real_address, access_type); + } else { + ret = TLBRET_BADADDR; + } + } else if (address < 0x8000000000000000ULL) { + /* xsseg */ + if ((supervisor_mode || kernel_mode) && + SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) { + ret = env->tlb->map_address(env, physical, prot, + real_address, access_type); + } else { + ret = TLBRET_BADADDR; + } + } else if (address < 0xC000000000000000ULL) { + /* xkphys */ + if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) { + /* KX/SX/UX bit to check for each xkphys EVA access mode */ + static const uint8_t am_ksux[8] = { + [CP0SC_AM_UK] = (1u << CP0St_KX), + [CP0SC_AM_MK] = (1u << CP0St_KX), + [CP0SC_AM_MSK] = (1u << CP0St_SX), + [CP0SC_AM_MUSK] = (1u << CP0St_UX), + [CP0SC_AM_MUSUK] = (1u << CP0St_UX), + [CP0SC_AM_USK] = (1u << CP0St_SX), + [6] = (1u << CP0St_KX), + [CP0SC_AM_UUSK] = (1u << CP0St_UX), + }; + unsigned int am = CP0SC_AM_UK; + unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR; + + if (xr & (1 << ((address >> 59) & 0x7))) { + am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM; + } + /* Does CP0_Status.KX/SX/UX permit the access mode (am) */ + if (env->CP0_Status & am_ksux[am]) { + ret = get_seg_physical_address(env, physical, prot, + real_address, access_type, + mmu_idx, am, false, env->PAMask, + 0); + } else { + ret = TLBRET_BADADDR; + } + } else { + ret = TLBRET_BADADDR; + } + } else if (address < 0xFFFFFFFF80000000ULL) { + /* xkseg */ + if (kernel_mode && KX && + address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) { + ret = env->tlb->map_address(env, physical, prot, + real_address, access_type); + } else { + ret = TLBRET_BADADDR; + } +#endif + } else if (address < KSEG1_BASE) { + /* kseg0 */ + ret = get_segctl_physical_address(env, physical, prot, real_address, + access_type, mmu_idx, + env->CP0_SegCtl1 >> 16, 0x1FFFFFFF); + } else if (address < KSEG2_BASE) { + /* kseg1 */ + ret = get_segctl_physical_address(env, physical, prot, real_address, + access_type, mmu_idx, + env->CP0_SegCtl1, 0x1FFFFFFF); + } else if (address < KSEG3_BASE) { + /* sseg (kseg2) */ + ret = get_segctl_physical_address(env, physical, prot, real_address, + access_type, mmu_idx, + env->CP0_SegCtl0 >> 16, 0x1FFFFFFF); + } else { + /* + * kseg3 + * XXX: debug segment is not emulated + */ + ret = get_segctl_physical_address(env, physical, prot, real_address, + access_type, mmu_idx, + env->CP0_SegCtl0, 0x1FFFFFFF); + } + return ret; +} + +hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + hwaddr phys_addr; + int prot; + + if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD, + cpu_mmu_index(env, false)) != 0) { + return -1; + } + return phys_addr; +} diff --git a/target/mips/dsp_helper.c b/target/mips/tcg/dsp_helper.c index 09b6e5fb15..09b6e5fb15 100644 --- a/target/mips/dsp_helper.c +++ b/target/mips/tcg/dsp_helper.c diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c new file mode 100644 index 0000000000..4fb8b00711 --- /dev/null +++ b/target/mips/tcg/exception.c @@ -0,0 
+1,167 @@ +/* + * MIPS Exceptions processing helpers for QEMU. + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" + +target_ulong exception_resume_pc(CPUMIPSState *env) +{ + target_ulong bad_pc; + target_ulong isa_mode; + + isa_mode = !!(env->hflags & MIPS_HFLAG_M16); + bad_pc = env->active_tc.PC | isa_mode; + if (env->hflags & MIPS_HFLAG_BMASK) { + /* + * If the exception was raised from a delay slot, come back to + * the jump. + */ + bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); + } + + return bad_pc; +} + +void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception, + int error_code) +{ + do_raise_exception_err(env, exception, error_code, 0); +} + +void helper_raise_exception(CPUMIPSState *env, uint32_t exception) +{ + do_raise_exception(env, exception, GETPC()); +} + +void helper_raise_exception_debug(CPUMIPSState *env) +{ + do_raise_exception(env, EXCP_DEBUG, 0); +} + +static void raise_exception(CPUMIPSState *env, uint32_t exception) +{ + do_raise_exception(env, exception, 0); +} + +void helper_wait(CPUMIPSState *env) +{ + CPUState *cs = env_cpu(env); + + cs->halted = 1; + cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); + /* + * Last instruction in the block, PC was updated before + * - no need to recover PC and icount. 
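+ * - raise_exception() below passes a zero retaddr for the same reason,
+ *   so cpu_loop_exit_restore() has no state to unwind.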
+ */ + raise_exception(env, EXCP_HLT); +} + +void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + + env->active_tc.PC = tb->pc; + env->hflags &= ~MIPS_HFLAG_BMASK; + env->hflags |= tb->flags & MIPS_HFLAG_BMASK; +} + +bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + if (interrupt_request & CPU_INTERRUPT_HARD) { + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + + if (cpu_mips_hw_interrupts_enabled(env) && + cpu_mips_hw_interrupts_pending(env)) { + /* Raise it */ + cs->exception_index = EXCP_EXT_INTERRUPT; + env->error_code = 0; + mips_cpu_do_interrupt(cs); + return true; + } + } + return false; +} + +static const char * const excp_names[EXCP_LAST + 1] = { + [EXCP_RESET] = "reset", + [EXCP_SRESET] = "soft reset", + [EXCP_DSS] = "debug single step", + [EXCP_DINT] = "debug interrupt", + [EXCP_NMI] = "non-maskable interrupt", + [EXCP_MCHECK] = "machine check", + [EXCP_EXT_INTERRUPT] = "interrupt", + [EXCP_DFWATCH] = "deferred watchpoint", + [EXCP_DIB] = "debug instruction breakpoint", + [EXCP_IWATCH] = "instruction fetch watchpoint", + [EXCP_AdEL] = "address error load", + [EXCP_AdES] = "address error store", + [EXCP_TLBF] = "TLB refill", + [EXCP_IBE] = "instruction bus error", + [EXCP_DBp] = "debug breakpoint", + [EXCP_SYSCALL] = "syscall", + [EXCP_BREAK] = "break", + [EXCP_CpU] = "coprocessor unusable", + [EXCP_RI] = "reserved instruction", + [EXCP_OVERFLOW] = "arithmetic overflow", + [EXCP_TRAP] = "trap", + [EXCP_FPE] = "floating point", + [EXCP_DDBS] = "debug data break store", + [EXCP_DWATCH] = "data watchpoint", + [EXCP_LTLBL] = "TLB modify", + [EXCP_TLBL] = "TLB load", + [EXCP_TLBS] = "TLB store", + [EXCP_DBE] = "data bus error", + [EXCP_DDBL] = "debug data break load", + [EXCP_THREAD] = "thread", + [EXCP_MDMX] = "MDMX", + [EXCP_C2E] = "precise coprocessor 2", + [EXCP_CACHE] = "cache error", + [EXCP_TLBXI] = "TLB execute-inhibit", + [EXCP_TLBRI] = "TLB read-inhibit", + [EXCP_MSADIS] = "MSA disabled", + [EXCP_MSAFPE] = "MSA floating point", +}; + +const char *mips_exception_name(int32_t exception) +{ + if (exception < 0 || exception > EXCP_LAST) { + return "unknown"; + } + return excp_names[exception]; +} + +void do_raise_exception_err(CPUMIPSState *env, uint32_t exception, + int error_code, uintptr_t pc) +{ + CPUState *cs = env_cpu(env); + + qemu_log_mask(CPU_LOG_INT, "%s: %d (%s) %d\n", + __func__, exception, mips_exception_name(exception), + error_code); + cs->exception_index = exception; + env->error_code = error_code; + + cpu_loop_exit_restore(cs, pc); +} diff --git a/target/mips/fpu_helper.c b/target/mips/tcg/fpu_helper.c index 6dd853259e..8ce56ed7c8 100644 --- a/target/mips/fpu_helper.c +++ b/target/mips/tcg/fpu_helper.c @@ -38,14 +38,6 @@ #define FP_TO_INT32_OVERFLOW 0x7fffffff #define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL -/* convert MIPS rounding mode in FCR31 to IEEE library */ -const FloatRoundMode ieee_rm[4] = { - float_round_nearest_even, - float_round_to_zero, - float_round_up, - float_round_down -}; - target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg) { target_ulong arg1 = 0; diff --git a/target/mips/tcg/ldst_helper.c b/target/mips/tcg/ldst_helper.c new file mode 100644 index 0000000000..d42812b8a6 --- /dev/null +++ b/target/mips/tcg/ldst_helper.c @@ -0,0 +1,288 @@ +/* + * MIPS emulation load/store helpers for QEMU. 
+ * + * Copyright (c) 2004-2005 Jocelyn Mayer + * + * SPDX-License-Identifier: LGPL-2.1-or-later + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/memop.h" +#include "internal.h" + +#ifndef CONFIG_USER_ONLY + +#define HELPER_LD_ATOMIC(name, insn, almask, do_cast) \ +target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \ +{ \ + if (arg & almask) { \ + if (!(env->hflags & MIPS_HFLAG_DM)) { \ + env->CP0_BadVAddr = arg; \ + } \ + do_raise_exception(env, EXCP_AdEL, GETPC()); \ + } \ + env->CP0_LLAddr = cpu_mips_translate_address(env, arg, MMU_DATA_LOAD, \ + GETPC()); \ + env->lladdr = arg; \ + env->llval = do_cast cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC()); \ + return env->llval; \ +} +HELPER_LD_ATOMIC(ll, ldl, 0x3, (target_long)(int32_t)) +#ifdef TARGET_MIPS64 +HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong)) +#endif +#undef HELPER_LD_ATOMIC + +#endif /* !CONFIG_USER_ONLY */ + +#ifdef TARGET_WORDS_BIGENDIAN +#define GET_LMASK(v) ((v) & 3) +#define GET_OFFSET(addr, offset) (addr + (offset)) +#else +#define GET_LMASK(v) (((v) & 3) ^ 3) +#define GET_OFFSET(addr, offset) (addr - (offset)) +#endif + +void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, + int mem_idx) +{ + cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC()); + + if (GET_LMASK(arg2) <= 2) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), + mem_idx, GETPC()); + } + + if (GET_LMASK(arg2) <= 1) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), + mem_idx, GETPC()); + } + + if (GET_LMASK(arg2) == 0) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, + mem_idx, GETPC()); + } +} + +void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, + int mem_idx) +{ + cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); + + if (GET_LMASK(arg2) >= 1) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), + mem_idx, GETPC()); + } + + if (GET_LMASK(arg2) >= 2) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), + mem_idx, GETPC()); + } + + if (GET_LMASK(arg2) == 3) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), + mem_idx, GETPC()); + } +} + +#if defined(TARGET_MIPS64) +/* + * "half" load and stores. We must do the memory access inline, + * or fault handling won't work. 
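+ * SDL stores the most-significant bytes of the register and SDR the
+ * least-significant ones, a byte at a time. GET_LMASK64() is the byte
+ * offset of the address within its doubleword, XOR-flipped on
+ * little-endian targets so the same sequence covers both byte orders.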
+ */ +#ifdef TARGET_WORDS_BIGENDIAN +#define GET_LMASK64(v) ((v) & 7) +#else +#define GET_LMASK64(v) (((v) & 7) ^ 7) +#endif + +void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, + int mem_idx) +{ + cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC()); + + if (GET_LMASK64(arg2) <= 6) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) <= 5) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) <= 4) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) <= 3) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) <= 2) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) <= 1) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) <= 0) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, + mem_idx, GETPC()); + } +} + +void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, + int mem_idx) +{ + cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); + + if (GET_LMASK64(arg2) >= 1) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) >= 2) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) >= 3) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) >= 4) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) >= 5) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) >= 6) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) == 7) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), + mem_idx, GETPC()); + } +} +#endif /* TARGET_MIPS64 */ + +static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 }; + +void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, + uint32_t mem_idx) +{ + target_ulong base_reglist = reglist & 0xf; + target_ulong do_r31 = reglist & 0x10; + + if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { + target_ulong i; + + for (i = 0; i < base_reglist; i++) { + env->active_tc.gpr[multiple_regs[i]] = + (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC()); + addr += 4; + } + } + + if (do_r31) { + env->active_tc.gpr[31] = + (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC()); + } +} + +void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, + uint32_t mem_idx) +{ + target_ulong base_reglist = reglist & 0xf; + target_ulong do_r31 = reglist & 0x10; + + if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { + target_ulong i; + + for (i = 0; i < base_reglist; i++) { + cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]], + mem_idx, GETPC()); + addr += 4; + } + } + + if (do_r31) { + cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC()); + } +} + +#if defined(TARGET_MIPS64) +void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, + 
uint32_t mem_idx) +{ + target_ulong base_reglist = reglist & 0xf; + target_ulong do_r31 = reglist & 0x10; + + if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { + target_ulong i; + + for (i = 0; i < base_reglist; i++) { + env->active_tc.gpr[multiple_regs[i]] = + cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC()); + addr += 8; + } + } + + if (do_r31) { + env->active_tc.gpr[31] = + cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC()); + } +} + +void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, + uint32_t mem_idx) +{ + target_ulong base_reglist = reglist & 0xf; + target_ulong do_r31 = reglist & 0x10; + + if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { + target_ulong i; + + for (i = 0; i < base_reglist; i++) { + cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]], + mem_idx, GETPC()); + addr += 8; + } + } + + if (do_r31) { + cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC()); + } +} + +#endif /* TARGET_MIPS64 */ diff --git a/target/mips/lmmi_helper.c b/target/mips/tcg/lmmi_helper.c index abeb7736ae..abeb7736ae 100644 --- a/target/mips/lmmi_helper.c +++ b/target/mips/tcg/lmmi_helper.c diff --git a/target/mips/tcg/meson.build b/target/mips/tcg/meson.build new file mode 100644 index 0000000000..5d8acbaf0d --- /dev/null +++ b/target/mips/tcg/meson.build @@ -0,0 +1,35 @@ +gen = [ + decodetree.process('mips32r6.decode', extra_args: '--static-decode=decode_mips32r6'), + decodetree.process('mips64r6.decode', extra_args: '--static-decode=decode_mips64r6'), + decodetree.process('msa32.decode', extra_args: '--static-decode=decode_msa32'), + decodetree.process('msa64.decode', extra_args: '--static-decode=decode_msa64'), + decodetree.process('tx79.decode', extra_args: '--static-decode=decode_tx79'), +] + +mips_ss.add(gen) +mips_ss.add(files( + 'dsp_helper.c', + 'exception.c', + 'fpu_helper.c', + 'ldst_helper.c', + 'lmmi_helper.c', + 'msa_helper.c', + 'msa_translate.c', + 'op_helper.c', + 'rel6_translate.c', + 'translate.c', + 'translate_addr_const.c', + 'txx9_translate.c', +)) +mips_ss.add(when: 'TARGET_MIPS64', if_true: files( + 'tx79_translate.c', +), if_false: files( + 'mxu_translate.c', +)) + +if have_user + subdir('user') +endif +if have_system + subdir('sysemu') +endif diff --git a/target/mips/mips32r6.decode b/target/mips/tcg/mips32r6.decode index 837c991edc..837c991edc 100644 --- a/target/mips/mips32r6.decode +++ b/target/mips/tcg/mips32r6.decode diff --git a/target/mips/mips64r6.decode b/target/mips/tcg/mips64r6.decode index b58d8009cc..b58d8009cc 100644 --- a/target/mips/mips64r6.decode +++ b/target/mips/tcg/mips64r6.decode diff --git a/target/mips/msa32.decode b/target/mips/tcg/msa32.decode index ca200e373b..ca200e373b 100644 --- a/target/mips/msa32.decode +++ b/target/mips/tcg/msa32.decode diff --git a/target/mips/msa64.decode b/target/mips/tcg/msa64.decode index d2442474d0..d2442474d0 100644 --- a/target/mips/msa64.decode +++ b/target/mips/tcg/msa64.decode diff --git a/target/mips/msa_helper.c b/target/mips/tcg/msa_helper.c index 4caefe29ad..04af54f66d 100644 --- a/target/mips/msa_helper.c +++ b/target/mips/tcg/msa_helper.c @@ -8595,39 +8595,3 @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd, cpu_stq_data(env, addr + (1 << DF_DOUBLE), pwd->d[1]); #endif } - -void msa_reset(CPUMIPSState *env) -{ - if (!ase_msa_available(env)) { - return; - } - -#ifdef CONFIG_USER_ONLY - /* MSA access enabled */ - env->CP0_Config5 |= 1 << CP0C5_MSAEn; - env->CP0_Status |= (1 << CP0St_CU1) | (1 << CP0St_FR); -#endif - - 
/* - * MSA CSR: - * - non-signaling floating point exception mode off (NX bit is 0) - * - Cause, Enables, and Flags are all 0 - * - round to nearest / ties to even (RM bits are 0) - */ - env->active_tc.msacsr = 0; - - restore_msa_fp_status(env); - - /* tininess detected after rounding.*/ - set_float_detect_tininess(float_tininess_after_rounding, - &env->active_tc.msa_fp_status); - - /* clear float_status exception flags */ - set_float_exception_flags(0, &env->active_tc.msa_fp_status); - - /* clear float_status nan mode */ - set_default_nan_mode(0, &env->active_tc.msa_fp_status); - - /* set proper signanling bit meaning ("1" means "quiet") */ - set_snan_bit_is_one(0, &env->active_tc.msa_fp_status); -} diff --git a/target/mips/msa_helper.h.inc b/target/mips/tcg/msa_helper.h.inc index 4963d1553a..4963d1553a 100644 --- a/target/mips/msa_helper.h.inc +++ b/target/mips/tcg/msa_helper.h.inc diff --git a/target/mips/msa_translate.c b/target/mips/tcg/msa_translate.c index ae6587edf6..ae6587edf6 100644 --- a/target/mips/msa_translate.c +++ b/target/mips/tcg/msa_translate.c diff --git a/target/mips/mxu_translate.c b/target/mips/tcg/mxu_translate.c index fb0a811af6..fb0a811af6 100644 --- a/target/mips/mxu_translate.c +++ b/target/mips/tcg/mxu_translate.c diff --git a/target/mips/tcg/op_helper.c b/target/mips/tcg/op_helper.c new file mode 100644 index 0000000000..ce1549c985 --- /dev/null +++ b/target/mips/tcg/op_helper.c @@ -0,0 +1,420 @@ +/* + * MIPS emulation helpers for qemu. + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/memop.h" +#include "fpu_helper.h" + +/* 64 bits arithmetic for 32 bits hosts */ +static inline uint64_t get_HILO(CPUMIPSState *env) +{ + return ((uint64_t)(env->active_tc.HI[0]) << 32) | + (uint32_t)env->active_tc.LO[0]; +} + +static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO) +{ + env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); + return env->active_tc.HI[0] = (int32_t)(HILO >> 32); +} + +static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO) +{ + target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); + env->active_tc.HI[0] = (int32_t)(HILO >> 32); + return tmp; +} + +/* Multiplication variants of the vr54xx. 
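 Each helper forms the 64-bit product (optionally negated or accumulated) in HI/LO; callers of set_HI_LOT0() return the new LO value, while the *hi variants use set_HIT0_LO() and return the new HI value.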
*/ +target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2)); +} + +target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, (uint64_t)get_HILO(env) + + (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (uint64_t)get_HILO(env) + + (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, (uint64_t)get_HILO(env) - + (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (uint64_t)get_HILO(env) - + (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); +} + +target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +static inline target_ulong bitswap(target_ulong v) +{ + v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) | + ((v & (target_ulong)0x5555555555555555ULL) << 1); + v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) | + ((v & (target_ulong)0x3333333333333333ULL) << 2); + v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) | + ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4); + return v; +} + +#ifdef TARGET_MIPS64 +target_ulong helper_dbitswap(target_ulong rt) +{ + return bitswap(rt); +} +#endif + +target_ulong helper_bitswap(target_ulong rt) +{ + return (int32_t)bitswap(rt); +} + +target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx, + uint32_t stripe) +{ + int i; + uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff); + uint64_t tmp1 = tmp0; + for (i = 0; i <= 46; i++) { + int s; + if (i & 0x8) { + s = shift; + } else { + s = shiftx; + 
} + + if (stripe != 0 && !(i & 0x4)) { + s = ~s; + } + if (s & 0x10) { + if (tmp0 & (1LL << (i + 16))) { + tmp1 |= 1LL << i; + } else { + tmp1 &= ~(1LL << i); + } + } + } + + uint64_t tmp2 = tmp1; + for (i = 0; i <= 38; i++) { + int s; + if (i & 0x4) { + s = shift; + } else { + s = shiftx; + } + + if (s & 0x8) { + if (tmp1 & (1LL << (i + 8))) { + tmp2 |= 1LL << i; + } else { + tmp2 &= ~(1LL << i); + } + } + } + + uint64_t tmp3 = tmp2; + for (i = 0; i <= 34; i++) { + int s; + if (i & 0x2) { + s = shift; + } else { + s = shiftx; + } + if (s & 0x4) { + if (tmp2 & (1LL << (i + 4))) { + tmp3 |= 1LL << i; + } else { + tmp3 &= ~(1LL << i); + } + } + } + + uint64_t tmp4 = tmp3; + for (i = 0; i <= 32; i++) { + int s; + if (i & 0x1) { + s = shift; + } else { + s = shiftx; + } + if (s & 0x2) { + if (tmp3 & (1LL << (i + 2))) { + tmp4 |= 1LL << i; + } else { + tmp4 &= ~(1LL << i); + } + } + } + + uint64_t tmp5 = tmp4; + for (i = 0; i <= 31; i++) { + int s; + s = shift; + if (s & 0x1) { + if (tmp4 & (1LL << (i + 1))) { + tmp5 |= 1LL << i; + } else { + tmp5 &= ~(1LL << i); + } + } + } + + return (int64_t)(int32_t)(uint32_t)tmp5; +} + +void helper_fork(target_ulong arg1, target_ulong arg2) +{ + /* + * arg1 = rt, arg2 = rs + * TODO: store to TC register + */ +} + +target_ulong helper_yield(CPUMIPSState *env, target_ulong arg) +{ + target_long arg1 = arg; + + if (arg1 < 0) { + /* No scheduling policy implemented. */ + if (arg1 != -2) { + if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) && + env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) { + env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); + env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT; + do_raise_exception(env, EXCP_THREAD, GETPC()); + } + } + } else if (arg1 == 0) { + if (0) { + /* TODO: TC underflow */ + env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); + do_raise_exception(env, EXCP_THREAD, GETPC()); + } else { + /* TODO: Deallocate TC */ + } + } else if (arg1 > 0) { + /* Yield qualifier inputs not implemented. 
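 Report any positive qualifier to the guest as a Thread exception, with VPEControl.EXCPT set to 2.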
*/ + env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); + env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT; + do_raise_exception(env, EXCP_THREAD, GETPC()); + } + return env->CP0_YQMask; +} + +static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc) +{ + if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) { + return; + } + do_raise_exception(env, EXCP_RI, pc); +} + +target_ulong helper_rdhwr_cpunum(CPUMIPSState *env) +{ + check_hwrena(env, 0, GETPC()); + return env->CP0_EBase & 0x3ff; +} + +target_ulong helper_rdhwr_synci_step(CPUMIPSState *env) +{ + check_hwrena(env, 1, GETPC()); + return env->SYNCI_Step; +} + +target_ulong helper_rdhwr_cc(CPUMIPSState *env) +{ + check_hwrena(env, 2, GETPC()); +#ifdef CONFIG_USER_ONLY + return env->CP0_Count; +#else + return (int32_t)cpu_mips_get_count(env); +#endif +} + +target_ulong helper_rdhwr_ccres(CPUMIPSState *env) +{ + check_hwrena(env, 3, GETPC()); + return env->CCRes; +} + +target_ulong helper_rdhwr_performance(CPUMIPSState *env) +{ + check_hwrena(env, 4, GETPC()); + return env->CP0_Performance0; +} + +target_ulong helper_rdhwr_xnp(CPUMIPSState *env) +{ + check_hwrena(env, 5, GETPC()); + return (env->CP0_Config5 >> CP0C5_XNP) & 1; +} + +void helper_pmon(CPUMIPSState *env, int function) +{ + function /= 2; + switch (function) { + case 2: /* TODO: char inbyte(int waitflag); */ + if (env->active_tc.gpr[4] == 0) { + env->active_tc.gpr[2] = -1; + } + /* Fall through */ + case 11: /* TODO: char inbyte (void); */ + env->active_tc.gpr[2] = -1; + break; + case 3: + case 12: + printf("%c", (char)(env->active_tc.gpr[4] & 0xFF)); + break; + case 17: + break; + case 158: + { + unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4]; + printf("%s", fmt); + } + break; + } +} + +#if !defined(CONFIG_USER_ONLY) + +void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + int error_code = 0; + int excp; + + if (!(env->hflags & MIPS_HFLAG_DM)) { + env->CP0_BadVAddr = addr; + } + + if (access_type == MMU_DATA_STORE) { + excp = EXCP_AdES; + } else { + excp = EXCP_AdEL; + if (access_type == MMU_INST_FETCH) { + error_code |= EXCP_INST_NOTAVAIL; + } + } + + do_raise_exception_err(env, excp, error_code, retaddr); +} + +void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + + if (access_type == MMU_INST_FETCH) { + do_raise_exception(env, EXCP_IBE, retaddr); + } else { + do_raise_exception(env, EXCP_DBE, retaddr); + } +} +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/mips/rel6_translate.c b/target/mips/tcg/rel6_translate.c index 0354370927..0354370927 100644 --- a/target/mips/rel6_translate.c +++ b/target/mips/tcg/rel6_translate.c diff --git a/target/mips/cp0_helper.c b/target/mips/tcg/sysemu/cp0_helper.c index aae2af6ecc..aae2af6ecc 100644 --- a/target/mips/cp0_helper.c +++ b/target/mips/tcg/sysemu/cp0_helper.c diff --git a/target/mips/tcg/sysemu/meson.build b/target/mips/tcg/sysemu/meson.build new file mode 100644 index 0000000000..4da2c577b2 --- /dev/null +++ b/target/mips/tcg/sysemu/meson.build @@ -0,0 +1,6 @@ +mips_softmmu_ss.add(files( + 'cp0_helper.c', + 'mips-semi.c', + 'special_helper.c', + 'tlb_helper.c', +)) diff --git a/target/mips/mips-semi.c b/target/mips/tcg/sysemu/mips-semi.c 
index 6de60fa6dd..6de60fa6dd 100644 --- a/target/mips/mips-semi.c +++ b/target/mips/tcg/sysemu/mips-semi.c diff --git a/target/mips/tcg/sysemu/special_helper.c b/target/mips/tcg/sysemu/special_helper.c new file mode 100644 index 0000000000..2a2afb49e8 --- /dev/null +++ b/target/mips/tcg/sysemu/special_helper.c @@ -0,0 +1,173 @@ +/* + * QEMU MIPS emulation: Special opcode helpers + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "internal.h" + +/* Specials */ +target_ulong helper_di(CPUMIPSState *env) +{ + target_ulong t0 = env->CP0_Status; + + env->CP0_Status = t0 & ~(1 << CP0St_IE); + return t0; +} + +target_ulong helper_ei(CPUMIPSState *env) +{ + target_ulong t0 = env->CP0_Status; + + env->CP0_Status = t0 | (1 << CP0St_IE); + return t0; +} + +static void debug_pre_eret(CPUMIPSState *env) +{ + if (qemu_loglevel_mask(CPU_LOG_EXEC)) { + qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx, + env->active_tc.PC, env->CP0_EPC); + if (env->CP0_Status & (1 << CP0St_ERL)) { + qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC); + } + if (env->hflags & MIPS_HFLAG_DM) { + qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC); + } + qemu_log("\n"); + } +} + +static void debug_post_eret(CPUMIPSState *env) +{ + if (qemu_loglevel_mask(CPU_LOG_EXEC)) { + qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx, + env->active_tc.PC, env->CP0_EPC); + if (env->CP0_Status & (1 << CP0St_ERL)) { + qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC); + } + if (env->hflags & MIPS_HFLAG_DM) { + qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC); + } + switch (cpu_mmu_index(env, false)) { + case 3: + qemu_log(", ERL\n"); + break; + case MIPS_HFLAG_UM: + qemu_log(", UM\n"); + break; + case MIPS_HFLAG_SM: + qemu_log(", SM\n"); + break; + case MIPS_HFLAG_KM: + qemu_log("\n"); + break; + default: + cpu_abort(env_cpu(env), "Invalid MMU mode!\n"); + break; + } + } +} + +bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + + if ((env->hflags & MIPS_HFLAG_BMASK) != 0 + && env->active_tc.PC != tb->pc) { + env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 
2 : 4); + env->hflags &= ~MIPS_HFLAG_BMASK; + return true; + } + return false; +} + +static inline void exception_return(CPUMIPSState *env) +{ + debug_pre_eret(env); + if (env->CP0_Status & (1 << CP0St_ERL)) { + mips_env_set_pc(env, env->CP0_ErrorEPC); + env->CP0_Status &= ~(1 << CP0St_ERL); + } else { + mips_env_set_pc(env, env->CP0_EPC); + env->CP0_Status &= ~(1 << CP0St_EXL); + } + compute_hflags(env); + debug_post_eret(env); +} + +void helper_eret(CPUMIPSState *env) +{ + exception_return(env); + env->CP0_LLAddr = 1; + env->lladdr = 1; +} + +void helper_eretnc(CPUMIPSState *env) +{ + exception_return(env); +} + +void helper_deret(CPUMIPSState *env) +{ + debug_pre_eret(env); + + env->hflags &= ~MIPS_HFLAG_DM; + compute_hflags(env); + + mips_env_set_pc(env, env->CP0_DEPC); + + debug_post_eret(env); +} + +void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op) +{ + static const char *const type_name[] = { + "Primary Instruction", + "Primary Data or Unified Primary", + "Tertiary", + "Secondary" + }; + uint32_t cache_type = extract32(op, 0, 2); + uint32_t cache_operation = extract32(op, 2, 3); + target_ulong index = addr & 0x1fffffff; + + switch (cache_operation) { + case 0b010: /* Index Store Tag */ + memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo, + MO_64, MEMTXATTRS_UNSPECIFIED); + break; + case 0b001: /* Index Load Tag */ + memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo, + MO_64, MEMTXATTRS_UNSPECIFIED); + break; + case 0b000: /* Index Invalidate */ + case 0b100: /* Hit Invalidate */ + case 0b110: /* Hit Writeback */ + /* no-op */ + break; + default: + qemu_log_mask(LOG_UNIMP, "cache operation:%u (type: %s cache)\n", + cache_operation, type_name[cache_type]); + break; + } +} diff --git a/target/mips/tlb_helper.c b/target/mips/tcg/sysemu/tlb_helper.c index 8d3ea49780..259f780d19 100644 --- a/target/mips/tlb_helper.c +++ b/target/mips/tcg/sysemu/tlb_helper.c @@ -24,22 +24,341 @@ #include "exec/cpu_ldst.h" #include "exec/log.h" #include "hw/mips/cpudevs.h" +#include "exec/helper-proto.h" -enum { - TLBRET_XI = -6, - TLBRET_RI = -5, - TLBRET_DIRTY = -4, - TLBRET_INVALID = -3, - TLBRET_NOMATCH = -2, - TLBRET_BADADDR = -1, - TLBRET_MATCH = 0 -}; +/* TLB management */ +static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first) +{ + /* Discard entries from env->tlb[first] onwards. 
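 Slots at index nb_tlb and above hold shadow copies of guest entries that were previously overwritten; each one is torn down here in turn.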
*/ + while (env->tlb->tlb_in_use > first) { + r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0); + } +} + +static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo) +{ +#if defined(TARGET_MIPS64) + return extract64(entrylo, 6, 54); +#else + return extract64(entrylo, 6, 24) | /* PFN */ + (extract64(entrylo, 32, 32) << 24); /* PFNX */ +#endif +} + +static void r4k_fill_tlb(CPUMIPSState *env, int idx) +{ + r4k_tlb_t *tlb; + uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1); + + /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */ + tlb = &env->tlb->mmu.r4k.tlb[idx]; + if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) { + tlb->EHINV = 1; + return; + } + tlb->EHINV = 0; + tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1); +#if defined(TARGET_MIPS64) + tlb->VPN &= env->SEGMask; +#endif + tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + tlb->MMID = env->CP0_MemoryMapID; + tlb->PageMask = env->CP0_PageMask; + tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; + tlb->V0 = (env->CP0_EntryLo0 & 2) != 0; + tlb->D0 = (env->CP0_EntryLo0 & 4) != 0; + tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7; + tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1; + tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1; + tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12; + tlb->V1 = (env->CP0_EntryLo1 & 2) != 0; + tlb->D1 = (env->CP0_EntryLo1 & 4) != 0; + tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7; + tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1; + tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1; + tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12; +} + +static void r4k_helper_tlbinv(CPUMIPSState *env) +{ + bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); + uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + uint32_t MMID = env->CP0_MemoryMapID; + uint32_t tlb_mmid; + r4k_tlb_t *tlb; + int idx; + + MMID = mi ? MMID : (uint32_t) ASID; + for (idx = 0; idx < env->tlb->nb_tlb; idx++) { + tlb = &env->tlb->mmu.r4k.tlb[idx]; + tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; + if (!tlb->G && tlb_mmid == MMID) { + tlb->EHINV = 1; + } + } + cpu_mips_tlb_flush(env); +} + +static void r4k_helper_tlbinvf(CPUMIPSState *env) +{ + int idx; + + for (idx = 0; idx < env->tlb->nb_tlb; idx++) { + env->tlb->mmu.r4k.tlb[idx].EHINV = 1; + } + cpu_mips_tlb_flush(env); +} + +static void r4k_helper_tlbwi(CPUMIPSState *env) +{ + bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); + target_ulong VPN; + uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + uint32_t MMID = env->CP0_MemoryMapID; + uint32_t tlb_mmid; + bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1; + r4k_tlb_t *tlb; + int idx; + + MMID = mi ? MMID : (uint32_t) ASID; + + idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; + tlb = &env->tlb->mmu.r4k.tlb[idx]; + VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1); +#if defined(TARGET_MIPS64) + VPN &= env->SEGMask; +#endif + EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0; + G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; + V0 = (env->CP0_EntryLo0 & 2) != 0; + D0 = (env->CP0_EntryLo0 & 4) != 0; + XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) &1; + RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) &1; + V1 = (env->CP0_EntryLo1 & 2) != 0; + D1 = (env->CP0_EntryLo1 & 4) != 0; + XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) &1; + RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) &1; + + tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; + /* + * Discard cached TLB entries, unless tlbwi is just upgrading access + * permissions on the current entry. 
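+ * An upgrade means the VPN, ASID/MMID and G bit are unchanged, no V or D
+ * bit is being cleared, and no XI, RI or EHINV bit is being set; any other
+ * change flushes the shadow entries first.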
+ */ + if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G || + (!tlb->EHINV && EHINV) || + (tlb->V0 && !V0) || (tlb->D0 && !D0) || + (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) || + (tlb->V1 && !V1) || (tlb->D1 && !D1) || + (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) { + r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); + } + + r4k_invalidate_tlb(env, idx, 0); + r4k_fill_tlb(env, idx); +} + +static void r4k_helper_tlbwr(CPUMIPSState *env) +{ + int r = cpu_mips_get_random(env); + + r4k_invalidate_tlb(env, r, 1); + r4k_fill_tlb(env, r); +} + +static void r4k_helper_tlbp(CPUMIPSState *env) +{ + bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); + r4k_tlb_t *tlb; + target_ulong mask; + target_ulong tag; + target_ulong VPN; + uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + uint32_t MMID = env->CP0_MemoryMapID; + uint32_t tlb_mmid; + int i; + + MMID = mi ? MMID : (uint32_t) ASID; + for (i = 0; i < env->tlb->nb_tlb; i++) { + tlb = &env->tlb->mmu.r4k.tlb[i]; + /* 1k pages are not supported. */ + mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); + tag = env->CP0_EntryHi & ~mask; + VPN = tlb->VPN & ~mask; +#if defined(TARGET_MIPS64) + tag &= env->SEGMask; +#endif + tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; + /* Check ASID/MMID, virtual page number & size */ + if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) { + /* TLB match */ + env->CP0_Index = i; + break; + } + } + if (i == env->tlb->nb_tlb) { + /* No match. Discard any shadow entries, if any of them match. */ + for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) { + tlb = &env->tlb->mmu.r4k.tlb[i]; + /* 1k pages are not supported. */ + mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); + tag = env->CP0_EntryHi & ~mask; + VPN = tlb->VPN & ~mask; +#if defined(TARGET_MIPS64) + tag &= env->SEGMask; +#endif + tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; + /* Check ASID/MMID, virtual page number & size */ + if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) { + r4k_mips_tlb_flush_extra(env, i); + break; + } + } + + env->CP0_Index |= 0x80000000; + } +} + +static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn) +{ +#if defined(TARGET_MIPS64) + return tlb_pfn << 6; +#else + return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */ + (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */ +#endif +} + +static void r4k_helper_tlbr(CPUMIPSState *env) +{ + bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); + uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + uint32_t MMID = env->CP0_MemoryMapID; + uint32_t tlb_mmid; + r4k_tlb_t *tlb; + int idx; + + MMID = mi ? MMID : (uint32_t) ASID; + idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; + tlb = &env->tlb->mmu.r4k.tlb[idx]; + + tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; + /* If this will change the current ASID/MMID, flush qemu's TLB. */ + if (MMID != tlb_mmid) { + cpu_mips_tlb_flush(env); + } + + r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); + + if (tlb->EHINV) { + env->CP0_EntryHi = 1 << CP0EnHi_EHINV; + env->CP0_PageMask = 0; + env->CP0_EntryLo0 = 0; + env->CP0_EntryLo1 = 0; + } else { + env->CP0_EntryHi = mi ? 
tlb->VPN : tlb->VPN | tlb->ASID; + env->CP0_MemoryMapID = tlb->MMID; + env->CP0_PageMask = tlb->PageMask; + env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) | + ((uint64_t)tlb->RI0 << CP0EnLo_RI) | + ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) | + get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12); + env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) | + ((uint64_t)tlb->RI1 << CP0EnLo_RI) | + ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) | + get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12); + } +} -#if !defined(CONFIG_USER_ONLY) +void helper_tlbwi(CPUMIPSState *env) +{ + env->tlb->helper_tlbwi(env); +} + +void helper_tlbwr(CPUMIPSState *env) +{ + env->tlb->helper_tlbwr(env); +} + +void helper_tlbp(CPUMIPSState *env) +{ + env->tlb->helper_tlbp(env); +} + +void helper_tlbr(CPUMIPSState *env) +{ + env->tlb->helper_tlbr(env); +} + +void helper_tlbinv(CPUMIPSState *env) +{ + env->tlb->helper_tlbinv(env); +} + +void helper_tlbinvf(CPUMIPSState *env) +{ + env->tlb->helper_tlbinvf(env); +} + +static void global_invalidate_tlb(CPUMIPSState *env, + uint32_t invMsgVPN2, + uint8_t invMsgR, + uint32_t invMsgMMid, + bool invAll, + bool invVAMMid, + bool invMMid, + bool invVA) +{ + + int idx; + r4k_tlb_t *tlb; + bool VAMatch; + bool MMidMatch; + + for (idx = 0; idx < env->tlb->nb_tlb; idx++) { + tlb = &env->tlb->mmu.r4k.tlb[idx]; + VAMatch = + (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask)) +#ifdef TARGET_MIPS64 + && + (extract64(env->CP0_EntryHi, 62, 2) == invMsgR) +#endif + ); + MMidMatch = tlb->MMID == invMsgMMid; + if ((invAll && (idx > env->CP0_Wired)) || + (VAMatch && invVAMMid && (tlb->G || MMidMatch)) || + (VAMatch && invVA) || + (MMidMatch && !(tlb->G) && invMMid)) { + tlb->EHINV = 1; + } + } + cpu_mips_tlb_flush(env); +} + +void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type) +{ + bool invAll = type == 0; + bool invVA = type == 1; + bool invMMid = type == 2; + bool invVAMMid = type == 3; + uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1); + uint8_t invMsgR = 0; + uint32_t invMsgMMid = env->CP0_MemoryMapID; + CPUState *other_cs = first_cpu; + +#ifdef TARGET_MIPS64 + invMsgR = extract64(arg, 62, 2); +#endif + + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); + global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid, + invAll, invVAMMid, invMMid, invVA); + } +} /* no MMU emulation */ -int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, - target_ulong address, MMUAccessType access_type) +static int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, + target_ulong address, MMUAccessType access_type) { *physical = address; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; @@ -47,8 +366,9 @@ int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, } /* fixed mapping MMU emulation */ -int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, - target_ulong address, MMUAccessType access_type) +static int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, + int *prot, target_ulong address, + MMUAccessType access_type) { if (address <= (int32_t)0x7FFFFFFFUL) { if (!(env->CP0_Status & (1 << CP0St_ERL))) { @@ -67,8 +387,8 @@ int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, } /* MIPS32/MIPS64 R4000-style MMU emulation */ -int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, - target_ulong address, MMUAccessType access_type) +static int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, + 
target_ulong address, MMUAccessType access_type) { uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; uint32_t MMID = env->CP0_MemoryMapID; @@ -166,236 +486,6 @@ void mmu_init(CPUMIPSState *env, const mips_def_t *def) } } -static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx) -{ - /* - * Interpret access control mode and mmu_idx. - * AdE? TLB? - * AM K S U E K S U E - * UK 0 0 1 1 0 0 - - 0 - * MK 1 0 1 1 0 1 - - !eu - * MSK 2 0 0 1 0 1 1 - !eu - * MUSK 3 0 0 0 0 1 1 1 !eu - * MUSUK 4 0 0 0 0 0 1 1 0 - * USK 5 0 0 1 0 0 0 - 0 - * - 6 - - - - - - - - - * UUSK 7 0 0 0 0 0 0 0 0 - */ - int32_t adetlb_mask; - - switch (mmu_idx) { - case 3: /* ERL */ - /* If EU is set, always unmapped */ - if (eu) { - return 0; - } - /* fall through */ - case MIPS_HFLAG_KM: - /* Never AdE, TLB mapped if AM={1,2,3} */ - adetlb_mask = 0x70000000; - goto check_tlb; - - case MIPS_HFLAG_SM: - /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */ - adetlb_mask = 0xc0380000; - goto check_ade; - - case MIPS_HFLAG_UM: - /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */ - adetlb_mask = 0xe4180000; - /* fall through */ - check_ade: - /* does this AM cause AdE in current execution mode */ - if ((adetlb_mask << am) < 0) { - return TLBRET_BADADDR; - } - adetlb_mask <<= 8; - /* fall through */ - check_tlb: - /* is this AM mapped in current execution mode */ - return ((adetlb_mask << am) < 0); - default: - assert(0); - return TLBRET_BADADDR; - }; -} - -static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical, - int *prot, target_ulong real_address, - MMUAccessType access_type, int mmu_idx, - unsigned int am, bool eu, - target_ulong segmask, - hwaddr physical_base) -{ - int mapped = is_seg_am_mapped(am, eu, mmu_idx); - - if (mapped < 0) { - /* is_seg_am_mapped can report TLBRET_BADADDR */ - return mapped; - } else if (mapped) { - /* The segment is TLB mapped */ - return env->tlb->map_address(env, physical, prot, real_address, - access_type); - } else { - /* The segment is unmapped */ - *physical = physical_base | (real_address & segmask); - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - return TLBRET_MATCH; - } -} - -static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical, - int *prot, target_ulong real_address, - MMUAccessType access_type, int mmu_idx, - uint16_t segctl, target_ulong segmask) -{ - unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM; - bool eu = (segctl >> CP0SC_EU) & 1; - hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20; - - return get_seg_physical_address(env, physical, prot, real_address, - access_type, mmu_idx, am, eu, segmask, - pa & ~(hwaddr)segmask); -} - -static int get_physical_address(CPUMIPSState *env, hwaddr *physical, - int *prot, target_ulong real_address, - MMUAccessType access_type, int mmu_idx) -{ - /* User mode can only access useg/xuseg */ -#if defined(TARGET_MIPS64) - int user_mode = mmu_idx == MIPS_HFLAG_UM; - int supervisor_mode = mmu_idx == MIPS_HFLAG_SM; - int kernel_mode = !user_mode && !supervisor_mode; - int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; - int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; - int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; -#endif - int ret = TLBRET_MATCH; - /* effective address (modified for KVM T&E kernel segments) */ - target_ulong address = real_address; - -#define USEG_LIMIT ((target_ulong)(int32_t)0x7FFFFFFFUL) -#define KSEG0_BASE ((target_ulong)(int32_t)0x80000000UL) -#define KSEG1_BASE ((target_ulong)(int32_t)0xA0000000UL) -#define KSEG2_BASE ((target_ulong)(int32_t)0xC0000000UL) 
-#define KSEG3_BASE ((target_ulong)(int32_t)0xE0000000UL) - -#define KVM_KSEG0_BASE ((target_ulong)(int32_t)0x40000000UL) -#define KVM_KSEG2_BASE ((target_ulong)(int32_t)0x60000000UL) - - if (mips_um_ksegs_enabled()) { - /* KVM T&E adds guest kernel segments in useg */ - if (real_address >= KVM_KSEG0_BASE) { - if (real_address < KVM_KSEG2_BASE) { - /* kseg0 */ - address += KSEG0_BASE - KVM_KSEG0_BASE; - } else if (real_address <= USEG_LIMIT) { - /* kseg2/3 */ - address += KSEG2_BASE - KVM_KSEG2_BASE; - } - } - } - - if (address <= USEG_LIMIT) { - /* useg */ - uint16_t segctl; - - if (address >= 0x40000000UL) { - segctl = env->CP0_SegCtl2; - } else { - segctl = env->CP0_SegCtl2 >> 16; - } - ret = get_segctl_physical_address(env, physical, prot, - real_address, access_type, - mmu_idx, segctl, 0x3FFFFFFF); -#if defined(TARGET_MIPS64) - } else if (address < 0x4000000000000000ULL) { - /* xuseg */ - if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) { - ret = env->tlb->map_address(env, physical, prot, - real_address, access_type); - } else { - ret = TLBRET_BADADDR; - } - } else if (address < 0x8000000000000000ULL) { - /* xsseg */ - if ((supervisor_mode || kernel_mode) && - SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) { - ret = env->tlb->map_address(env, physical, prot, - real_address, access_type); - } else { - ret = TLBRET_BADADDR; - } - } else if (address < 0xC000000000000000ULL) { - /* xkphys */ - if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) { - /* KX/SX/UX bit to check for each xkphys EVA access mode */ - static const uint8_t am_ksux[8] = { - [CP0SC_AM_UK] = (1u << CP0St_KX), - [CP0SC_AM_MK] = (1u << CP0St_KX), - [CP0SC_AM_MSK] = (1u << CP0St_SX), - [CP0SC_AM_MUSK] = (1u << CP0St_UX), - [CP0SC_AM_MUSUK] = (1u << CP0St_UX), - [CP0SC_AM_USK] = (1u << CP0St_SX), - [6] = (1u << CP0St_KX), - [CP0SC_AM_UUSK] = (1u << CP0St_UX), - }; - unsigned int am = CP0SC_AM_UK; - unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR; - - if (xr & (1 << ((address >> 59) & 0x7))) { - am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM; - } - /* Does CP0_Status.KX/SX/UX permit the access mode (am) */ - if (env->CP0_Status & am_ksux[am]) { - ret = get_seg_physical_address(env, physical, prot, - real_address, access_type, - mmu_idx, am, false, env->PAMask, - 0); - } else { - ret = TLBRET_BADADDR; - } - } else { - ret = TLBRET_BADADDR; - } - } else if (address < 0xFFFFFFFF80000000ULL) { - /* xkseg */ - if (kernel_mode && KX && - address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) { - ret = env->tlb->map_address(env, physical, prot, - real_address, access_type); - } else { - ret = TLBRET_BADADDR; - } -#endif - } else if (address < KSEG1_BASE) { - /* kseg0 */ - ret = get_segctl_physical_address(env, physical, prot, real_address, - access_type, mmu_idx, - env->CP0_SegCtl1 >> 16, 0x1FFFFFFF); - } else if (address < KSEG2_BASE) { - /* kseg1 */ - ret = get_segctl_physical_address(env, physical, prot, real_address, - access_type, mmu_idx, - env->CP0_SegCtl1, 0x1FFFFFFF); - } else if (address < KSEG3_BASE) { - /* sseg (kseg2) */ - ret = get_segctl_physical_address(env, physical, prot, real_address, - access_type, mmu_idx, - env->CP0_SegCtl0 >> 16, 0x1FFFFFFF); - } else { - /* - * kseg3 - * XXX: debug segment is not emulated - */ - ret = get_segctl_physical_address(env, physical, prot, real_address, - access_type, mmu_idx, - env->CP0_SegCtl0, 0x1FFFFFFF); - } - return ret; -} - void cpu_mips_tlb_flush(CPUMIPSState *env) { /* Flush qemu's TLB and discard all shadowed entries. 
*/ @@ -403,8 +493,6 @@ void cpu_mips_tlb_flush(CPUMIPSState *env) env->tlb->tlb_in_use = env->tlb->nb_tlb; } -#endif /* !CONFIG_USER_ONLY */ - static void raise_mmu_exception(CPUMIPSState *env, target_ulong address, MMUAccessType access_type, int tlb_error) { @@ -484,22 +572,6 @@ static void raise_mmu_exception(CPUMIPSState *env, target_ulong address, env->error_code = error_code; } -#if !defined(CONFIG_USER_ONLY) - -hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) -{ - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; - hwaddr phys_addr; - int prot; - - if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD, - cpu_mmu_index(env, false)) != 0) { - return -1; - } - return phys_addr; -} - #if !defined(TARGET_MIPS64) /* @@ -833,7 +905,6 @@ refill: return true; } #endif -#endif /* !CONFIG_USER_ONLY */ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, @@ -841,14 +912,11 @@ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; -#if !defined(CONFIG_USER_ONLY) hwaddr physical; int prot; -#endif int ret = TLBRET_BADADDR; /* data access */ -#if !defined(CONFIG_USER_ONLY) /* XXX: put correct access by using cpu_restore_state() correctly */ ret = get_physical_address(env, &physical, &prot, address, access_type, mmu_idx); @@ -896,29 +964,28 @@ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, if (probe) { return false; } -#endif raise_mmu_exception(env, address, access_type, ret); do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr); } -#ifndef CONFIG_USER_ONLY hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, - MMUAccessType access_type) + MMUAccessType access_type, uintptr_t retaddr) { hwaddr physical; int prot; int ret = 0; + CPUState *cs = env_cpu(env); /* data access */ ret = get_physical_address(env, &physical, &prot, address, access_type, cpu_mmu_index(env, false)); - if (ret != TLBRET_MATCH) { - raise_mmu_exception(env, address, access_type, ret); - return -1LL; - } else { + if (ret == TLBRET_MATCH) { return physical; } + + raise_mmu_exception(env, address, access_type, ret); + cpu_loop_exit_restore(cs, retaddr); } static void set_hflags_for_handler(CPUMIPSState *env) @@ -964,11 +1031,8 @@ static inline void set_badinstr_registers(CPUMIPSState *env) } } -#endif /* !CONFIG_USER_ONLY */ - void mips_cpu_do_interrupt(CPUState *cs) { -#if !defined(CONFIG_USER_ONLY) MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; bool update_badinstr = 0; @@ -1271,11 +1335,9 @@ void mips_cpu_do_interrupt(CPUState *cs) env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr, env->CP0_DEPC); } -#endif cs->exception_index = EXCP_NONE; } -#if !defined(CONFIG_USER_ONLY) void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra) { CPUState *cs = env_cpu(env); @@ -1340,4 +1402,3 @@ void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra) } } } -#endif /* !CONFIG_USER_ONLY */ diff --git a/target/mips/tcg/sysemu_helper.h.inc b/target/mips/tcg/sysemu_helper.h.inc new file mode 100644 index 0000000000..4353a966f9 --- /dev/null +++ b/target/mips/tcg/sysemu_helper.h.inc @@ -0,0 +1,185 @@ +/* + * QEMU MIPS sysemu helpers + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * Copyright (c) 2006 Marius Groeger (FPU operations) + * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support) + * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support) + * + * SPDX-License-Identifier: LGPL-2.1-or-later 
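With the retaddr argument added above, cpu_mips_translate_address() no longer returns a -1 sentinel on failure: it raises the MMU exception and unwinds through cpu_loop_exit_restore(), so callers only ever see a valid physical address. The shape of that control flow can be pictured with plain setjmp/longjmp standing in for QEMU's cpu loop (illustration only; the names and the fault condition below are invented):

    #include <setjmp.h>
    #include <stdint.h>
    #include <stdio.h>

    static jmp_buf cpu_loop;   /* stands in for the cpu_loop_exit machinery */

    static uint32_t translate_or_raise(uint32_t va)
    {
        if (va >= 0x80000000u) {      /* pretend anything above 2 GiB faults */
            longjmp(cpu_loop, 1);     /* never returns to the caller */
        }
        return va;                    /* callers only ever get a good address */
    }

    int main(void)
    {
        if (setjmp(cpu_loop)) {
            puts("MMU exception delivered");
            return 0;
        }
        printf("phys=0x%08x\n", translate_or_raise(0x1000u));
        translate_or_raise(0xFFFF0000u);   /* unwinds back to setjmp above */
        return 0;
    }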
+ */ + +DEF_HELPER_1(do_semihosting, void, env) + +/* CP0 helpers */ +DEF_HELPER_1(mfc0_mvpcontrol, tl, env) +DEF_HELPER_1(mfc0_mvpconf0, tl, env) +DEF_HELPER_1(mfc0_mvpconf1, tl, env) +DEF_HELPER_1(mftc0_vpecontrol, tl, env) +DEF_HELPER_1(mftc0_vpeconf0, tl, env) +DEF_HELPER_1(mfc0_random, tl, env) +DEF_HELPER_1(mfc0_tcstatus, tl, env) +DEF_HELPER_1(mftc0_tcstatus, tl, env) +DEF_HELPER_1(mfc0_tcbind, tl, env) +DEF_HELPER_1(mftc0_tcbind, tl, env) +DEF_HELPER_1(mfc0_tcrestart, tl, env) +DEF_HELPER_1(mftc0_tcrestart, tl, env) +DEF_HELPER_1(mfc0_tchalt, tl, env) +DEF_HELPER_1(mftc0_tchalt, tl, env) +DEF_HELPER_1(mfc0_tccontext, tl, env) +DEF_HELPER_1(mftc0_tccontext, tl, env) +DEF_HELPER_1(mfc0_tcschedule, tl, env) +DEF_HELPER_1(mftc0_tcschedule, tl, env) +DEF_HELPER_1(mfc0_tcschefback, tl, env) +DEF_HELPER_1(mftc0_tcschefback, tl, env) +DEF_HELPER_1(mfc0_count, tl, env) +DEF_HELPER_1(mfc0_saar, tl, env) +DEF_HELPER_1(mfhc0_saar, tl, env) +DEF_HELPER_1(mftc0_entryhi, tl, env) +DEF_HELPER_1(mftc0_status, tl, env) +DEF_HELPER_1(mftc0_cause, tl, env) +DEF_HELPER_1(mftc0_epc, tl, env) +DEF_HELPER_1(mftc0_ebase, tl, env) +DEF_HELPER_2(mftc0_configx, tl, env, tl) +DEF_HELPER_1(mfc0_lladdr, tl, env) +DEF_HELPER_1(mfc0_maar, tl, env) +DEF_HELPER_1(mfhc0_maar, tl, env) +DEF_HELPER_2(mfc0_watchlo, tl, env, i32) +DEF_HELPER_2(mfc0_watchhi, tl, env, i32) +DEF_HELPER_2(mfhc0_watchhi, tl, env, i32) +DEF_HELPER_1(mfc0_debug, tl, env) +DEF_HELPER_1(mftc0_debug, tl, env) +#ifdef TARGET_MIPS64 +DEF_HELPER_1(dmfc0_tcrestart, tl, env) +DEF_HELPER_1(dmfc0_tchalt, tl, env) +DEF_HELPER_1(dmfc0_tccontext, tl, env) +DEF_HELPER_1(dmfc0_tcschedule, tl, env) +DEF_HELPER_1(dmfc0_tcschefback, tl, env) +DEF_HELPER_1(dmfc0_lladdr, tl, env) +DEF_HELPER_1(dmfc0_maar, tl, env) +DEF_HELPER_2(dmfc0_watchlo, tl, env, i32) +DEF_HELPER_2(dmfc0_watchhi, tl, env, i32) +DEF_HELPER_1(dmfc0_saar, tl, env) +#endif /* TARGET_MIPS64 */ + +DEF_HELPER_2(mtc0_index, void, env, tl) +DEF_HELPER_2(mtc0_mvpcontrol, void, env, tl) +DEF_HELPER_2(mtc0_vpecontrol, void, env, tl) +DEF_HELPER_2(mttc0_vpecontrol, void, env, tl) +DEF_HELPER_2(mtc0_vpeconf0, void, env, tl) +DEF_HELPER_2(mttc0_vpeconf0, void, env, tl) +DEF_HELPER_2(mtc0_vpeconf1, void, env, tl) +DEF_HELPER_2(mtc0_yqmask, void, env, tl) +DEF_HELPER_2(mtc0_vpeopt, void, env, tl) +DEF_HELPER_2(mtc0_entrylo0, void, env, tl) +DEF_HELPER_2(mtc0_tcstatus, void, env, tl) +DEF_HELPER_2(mttc0_tcstatus, void, env, tl) +DEF_HELPER_2(mtc0_tcbind, void, env, tl) +DEF_HELPER_2(mttc0_tcbind, void, env, tl) +DEF_HELPER_2(mtc0_tcrestart, void, env, tl) +DEF_HELPER_2(mttc0_tcrestart, void, env, tl) +DEF_HELPER_2(mtc0_tchalt, void, env, tl) +DEF_HELPER_2(mttc0_tchalt, void, env, tl) +DEF_HELPER_2(mtc0_tccontext, void, env, tl) +DEF_HELPER_2(mttc0_tccontext, void, env, tl) +DEF_HELPER_2(mtc0_tcschedule, void, env, tl) +DEF_HELPER_2(mttc0_tcschedule, void, env, tl) +DEF_HELPER_2(mtc0_tcschefback, void, env, tl) +DEF_HELPER_2(mttc0_tcschefback, void, env, tl) +DEF_HELPER_2(mtc0_entrylo1, void, env, tl) +DEF_HELPER_2(mtc0_context, void, env, tl) +DEF_HELPER_2(mtc0_memorymapid, void, env, tl) +DEF_HELPER_2(mtc0_pagemask, void, env, tl) +DEF_HELPER_2(mtc0_pagegrain, void, env, tl) +DEF_HELPER_2(mtc0_segctl0, void, env, tl) +DEF_HELPER_2(mtc0_segctl1, void, env, tl) +DEF_HELPER_2(mtc0_segctl2, void, env, tl) +DEF_HELPER_2(mtc0_pwfield, void, env, tl) +DEF_HELPER_2(mtc0_pwsize, void, env, tl) +DEF_HELPER_2(mtc0_wired, void, env, tl) +DEF_HELPER_2(mtc0_srsconf0, void, env, tl) +DEF_HELPER_2(mtc0_srsconf1, void, env, 
tl) +DEF_HELPER_2(mtc0_srsconf2, void, env, tl) +DEF_HELPER_2(mtc0_srsconf3, void, env, tl) +DEF_HELPER_2(mtc0_srsconf4, void, env, tl) +DEF_HELPER_2(mtc0_hwrena, void, env, tl) +DEF_HELPER_2(mtc0_pwctl, void, env, tl) +DEF_HELPER_2(mtc0_count, void, env, tl) +DEF_HELPER_2(mtc0_saari, void, env, tl) +DEF_HELPER_2(mtc0_saar, void, env, tl) +DEF_HELPER_2(mthc0_saar, void, env, tl) +DEF_HELPER_2(mtc0_entryhi, void, env, tl) +DEF_HELPER_2(mttc0_entryhi, void, env, tl) +DEF_HELPER_2(mtc0_compare, void, env, tl) +DEF_HELPER_2(mtc0_status, void, env, tl) +DEF_HELPER_2(mttc0_status, void, env, tl) +DEF_HELPER_2(mtc0_intctl, void, env, tl) +DEF_HELPER_2(mtc0_srsctl, void, env, tl) +DEF_HELPER_2(mtc0_cause, void, env, tl) +DEF_HELPER_2(mttc0_cause, void, env, tl) +DEF_HELPER_2(mtc0_ebase, void, env, tl) +DEF_HELPER_2(mttc0_ebase, void, env, tl) +DEF_HELPER_2(mtc0_config0, void, env, tl) +DEF_HELPER_2(mtc0_config2, void, env, tl) +DEF_HELPER_2(mtc0_config3, void, env, tl) +DEF_HELPER_2(mtc0_config4, void, env, tl) +DEF_HELPER_2(mtc0_config5, void, env, tl) +DEF_HELPER_2(mtc0_lladdr, void, env, tl) +DEF_HELPER_2(mtc0_maar, void, env, tl) +DEF_HELPER_2(mthc0_maar, void, env, tl) +DEF_HELPER_2(mtc0_maari, void, env, tl) +DEF_HELPER_3(mtc0_watchlo, void, env, tl, i32) +DEF_HELPER_3(mtc0_watchhi, void, env, tl, i32) +DEF_HELPER_3(mthc0_watchhi, void, env, tl, i32) +DEF_HELPER_2(mtc0_xcontext, void, env, tl) +DEF_HELPER_2(mtc0_framemask, void, env, tl) +DEF_HELPER_2(mtc0_debug, void, env, tl) +DEF_HELPER_2(mttc0_debug, void, env, tl) +DEF_HELPER_2(mtc0_performance0, void, env, tl) +DEF_HELPER_2(mtc0_errctl, void, env, tl) +DEF_HELPER_2(mtc0_taglo, void, env, tl) +DEF_HELPER_2(mtc0_datalo, void, env, tl) +DEF_HELPER_2(mtc0_taghi, void, env, tl) +DEF_HELPER_2(mtc0_datahi, void, env, tl) + +#if defined(TARGET_MIPS64) +DEF_HELPER_2(dmtc0_entrylo0, void, env, i64) +DEF_HELPER_2(dmtc0_entrylo1, void, env, i64) +#endif + +/* MIPS MT functions */ +DEF_HELPER_2(mftgpr, tl, env, i32) +DEF_HELPER_2(mftlo, tl, env, i32) +DEF_HELPER_2(mfthi, tl, env, i32) +DEF_HELPER_2(mftacx, tl, env, i32) +DEF_HELPER_1(mftdsp, tl, env) +DEF_HELPER_3(mttgpr, void, env, tl, i32) +DEF_HELPER_3(mttlo, void, env, tl, i32) +DEF_HELPER_3(mtthi, void, env, tl, i32) +DEF_HELPER_3(mttacx, void, env, tl, i32) +DEF_HELPER_2(mttdsp, void, env, tl) +DEF_HELPER_0(dmt, tl) +DEF_HELPER_0(emt, tl) +DEF_HELPER_1(dvpe, tl, env) +DEF_HELPER_1(evpe, tl, env) + +/* R6 Multi-threading */ +DEF_HELPER_1(dvp, tl, env) +DEF_HELPER_1(evp, tl, env) + +/* TLB */ +DEF_HELPER_1(tlbwi, void, env) +DEF_HELPER_1(tlbwr, void, env) +DEF_HELPER_1(tlbp, void, env) +DEF_HELPER_1(tlbr, void, env) +DEF_HELPER_1(tlbinv, void, env) +DEF_HELPER_1(tlbinvf, void, env) +DEF_HELPER_3(ginvt, void, env, tl, i32) + +/* Special */ +DEF_HELPER_1(di, tl, env) +DEF_HELPER_1(ei, tl, env) +DEF_HELPER_1(eret, void, env) +DEF_HELPER_1(eretnc, void, env) +DEF_HELPER_1(deret, void, env) +DEF_HELPER_3(cache, void, env, tl, i32) diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h new file mode 100644 index 0000000000..81b14eb219 --- /dev/null +++ b/target/mips/tcg/tcg-internal.h @@ -0,0 +1,64 @@ +/* + * MIPS internal definitions and helpers (TCG accelerator) + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
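Each DEF_HELPER_n() entry in the new sysemu_helper.h.inc is expanded by QEMU's helper-proto/helper-gen machinery into the prototype and TCG call glue for one helper_*() function: the suffix is the argument count, the first type token is the return type, and env/tl/i32 name the argument types. A hand-expanded illustration of two entries (the typedefs are stand-ins so the snippet compiles on its own; the real types come from cpu.h):

    #include <stdint.h>

    typedef struct CPUMIPSState CPUMIPSState;   /* opaque stand-in */
    typedef uint64_t target_ulong;              /* assumption: 64-bit target */

    /* DEF_HELPER_2(mtc0_index, void, env, tl) describes, in effect: */
    void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1);

    /* DEF_HELPER_3(cache, void, env, tl, i32) describes, in effect: */
    void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op);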
+ */ + +#ifndef MIPS_TCG_INTERNAL_H +#define MIPS_TCG_INTERNAL_H + +#include "tcg/tcg.h" +#include "exec/memattrs.h" +#include "hw/core/cpu.h" +#include "cpu.h" + +void mips_tcg_init(void); + +void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb); +void mips_cpu_do_interrupt(CPUState *cpu); +bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req); +bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); +void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); + +const char *mips_exception_name(int32_t exception); + +void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception, + int error_code, uintptr_t pc); + +static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env, + uint32_t exception, + uintptr_t pc) +{ + do_raise_exception_err(env, exception, 0, pc); +} + +#if !defined(CONFIG_USER_ONLY) + +void mmu_init(CPUMIPSState *env, const mips_def_t *def); + +void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask); + +void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra); +uint32_t cpu_mips_get_random(CPUMIPSState *env); + +bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb); + +hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, + MMUAccessType access_type, uintptr_t retaddr); +void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr); +void cpu_mips_tlb_flush(CPUMIPSState *env); + +#endif /* !CONFIG_USER_ONLY */ + +#endif diff --git a/target/mips/translate.c b/target/mips/tcg/translate.c index 71fa5ec197..c03a8ae1fe 100644 --- a/target/mips/translate.c +++ b/target/mips/tcg/translate.c @@ -39,6 +39,19 @@ #include "fpu_helper.h" #include "translate.h" +/* + * Many sysemu-only helpers are not reachable for user-only. + * Define stub generators here, so that we need not either sprinkle + * ifdefs through the translator, nor provide the helper function. + */ +#define STUB_HELPER(NAME, ...) \ + static inline void gen_helper_##NAME(__VA_ARGS__) \ + { g_assert_not_reached(); } + +#ifdef CONFIG_USER_ONLY +STUB_HELPER(cache, TCGv_env env, TCGv val, TCGv_i32 reg) +#endif + enum { /* indirect opcode tables */ OPC_SPECIAL = (0x00 << 26), @@ -1267,13 +1280,6 @@ TCGv_i64 fpu_f64[32]; #define DISAS_STOP DISAS_TARGET_0 #define DISAS_EXIT DISAS_TARGET_1 -static const char * const regnames[] = { - "r0", "at", "v0", "v1", "a0", "a1", "a2", "a3", - "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", - "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", - "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra", -}; - static const char * const regnames_HI[] = { "HI0", "HI1", "HI2", "HI3", }; @@ -1282,13 +1288,6 @@ static const char * const regnames_LO[] = { "LO0", "LO1", "LO2", "LO3", }; -static const char * const fregnames[] = { - "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", - "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", - "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", - "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", -}; - /* General purpose registers moves. 
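The STUB_HELPER() macro added at the top of translate.c lets the translator keep unconditional gen_helper_*() calls for sysemu-only helpers: in a user-only build the macro provides an inline stand-in that merely asserts it is never reached, so the real helper is not needed at link time and no #ifdef is needed at the call site. The one expansion requested in this hunk comes out roughly as below (paraphrased macro expansion; g_assert_not_reached() is GLib's fatal assertion):

    /* CONFIG_USER_ONLY: STUB_HELPER(cache, TCGv_env env, TCGv val, TCGv_i32 reg) */
    static inline void gen_helper_cache(TCGv_env env, TCGv val, TCGv_i32 reg)
    {
        g_assert_not_reached();   /* this path is unreachable in user-only mode */
    }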
*/ void gen_load_gpr(TCGv t, int reg) { @@ -1572,11 +1571,13 @@ void gen_move_high32(TCGv ret, TCGv_i64 arg) #endif } -void check_cp0_enabled(DisasContext *ctx) +bool check_cp0_enabled(DisasContext *ctx) { if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0))) { generate_exception_end(ctx, EXCP_CpU); + return false; } + return true; } void check_cp1_enabled(DisasContext *ctx) @@ -5945,6 +5946,7 @@ static void gen_mthc0(DisasContext *ctx, TCGv arg, int reg, int sel) goto cp0_unimplemented; } trace_mips_translate_c0("mthc0", register_name, reg, sel); + return; cp0_unimplemented: qemu_log_mask(LOG_UNIMP, "mthc0 %s (reg %d sel %d)\n", @@ -18969,9 +18971,11 @@ static void gen_pool32axf_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) } break; case NM_RDPGPR: + check_cp0_enabled(ctx); gen_load_srsgpr(rs, rt); break; case NM_WRPGPR: + check_cp0_enabled(ctx); gen_store_srsgpr(rs, rt); break; case NM_WAIT: @@ -20957,6 +20961,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) gen_ld(ctx, OPC_LHUE, rt, rs, s); break; case NM_CACHEE: + check_eva(ctx); + check_cp0_enabled(ctx); check_nms_dl_il_sl_tl_l2c(ctx); gen_cache_operation(ctx, rt, rs, s); break; @@ -24530,11 +24536,11 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) gen_st_cond(ctx, rt, rs, imm, MO_TESL, true); return; case OPC_CACHEE: + check_eva(ctx); check_cp0_enabled(ctx); if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { gen_cache_operation(ctx, rt, rs, imm); } - /* Treat as NOP. */ return; case OPC_PREFE: check_cp0_enabled(ctx); @@ -25593,83 +25599,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) translator_loop(&mips_tr_ops, &ctx.base, cs, tb, max_insns); } -static void fpu_dump_state(CPUMIPSState *env, FILE * f, int flags) -{ - int i; - int is_fpu64 = !!(env->hflags & MIPS_HFLAG_F64); - -#define printfpr(fp) \ - do { \ - if (is_fpu64) \ - qemu_fprintf(f, "w:%08x d:%016" PRIx64 \ - " fd:%13g fs:%13g psu: %13g\n", \ - (fp)->w[FP_ENDIAN_IDX], (fp)->d, \ - (double)(fp)->fd, \ - (double)(fp)->fs[FP_ENDIAN_IDX], \ - (double)(fp)->fs[!FP_ENDIAN_IDX]); \ - else { \ - fpr_t tmp; \ - tmp.w[FP_ENDIAN_IDX] = (fp)->w[FP_ENDIAN_IDX]; \ - tmp.w[!FP_ENDIAN_IDX] = ((fp) + 1)->w[FP_ENDIAN_IDX]; \ - qemu_fprintf(f, "w:%08x d:%016" PRIx64 \ - " fd:%13g fs:%13g psu:%13g\n", \ - tmp.w[FP_ENDIAN_IDX], tmp.d, \ - (double)tmp.fd, \ - (double)tmp.fs[FP_ENDIAN_IDX], \ - (double)tmp.fs[!FP_ENDIAN_IDX]); \ - } \ - } while (0) - - - qemu_fprintf(f, - "CP1 FCR0 0x%08x FCR31 0x%08x SR.FR %d fp_status 0x%02x\n", - env->active_fpu.fcr0, env->active_fpu.fcr31, is_fpu64, - get_float_exception_flags(&env->active_fpu.fp_status)); - for (i = 0; i < 32; (is_fpu64) ? 
i++ : (i += 2)) { - qemu_fprintf(f, "%3s: ", fregnames[i]); - printfpr(&env->active_fpu.fpr[i]); - } - -#undef printfpr -} - -void mips_cpu_dump_state(CPUState *cs, FILE *f, int flags) -{ - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; - int i; - - qemu_fprintf(f, "pc=0x" TARGET_FMT_lx " HI=0x" TARGET_FMT_lx - " LO=0x" TARGET_FMT_lx " ds %04x " - TARGET_FMT_lx " " TARGET_FMT_ld "\n", - env->active_tc.PC, env->active_tc.HI[0], env->active_tc.LO[0], - env->hflags, env->btarget, env->bcond); - for (i = 0; i < 32; i++) { - if ((i & 3) == 0) { - qemu_fprintf(f, "GPR%02d:", i); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx, - regnames[i], env->active_tc.gpr[i]); - if ((i & 3) == 3) { - qemu_fprintf(f, "\n"); - } - } - - qemu_fprintf(f, "CP0 Status 0x%08x Cause 0x%08x EPC 0x" - TARGET_FMT_lx "\n", - env->CP0_Status, env->CP0_Cause, env->CP0_EPC); - qemu_fprintf(f, " Config0 0x%08x Config1 0x%08x LLAddr 0x%016" - PRIx64 "\n", - env->CP0_Config0, env->CP0_Config1, env->CP0_LLAddr); - qemu_fprintf(f, " Config2 0x%08x Config3 0x%08x\n", - env->CP0_Config2, env->CP0_Config3); - qemu_fprintf(f, " Config4 0x%08x Config5 0x%08x\n", - env->CP0_Config4, env->CP0_Config5); - if ((flags & CPU_DUMP_FPU) && (env->hflags & MIPS_HFLAG_FPU)) { - fpu_dump_state(env, f, flags); - } -} - void mips_tcg_init(void) { int i; diff --git a/target/mips/translate_addr_const.c b/target/mips/tcg/translate_addr_const.c index 96f483418e..96f483418e 100644 --- a/target/mips/translate_addr_const.c +++ b/target/mips/tcg/translate_addr_const.c diff --git a/target/mips/tx79.decode b/target/mips/tcg/tx79.decode index 0f748b53a6..0f748b53a6 100644 --- a/target/mips/tx79.decode +++ b/target/mips/tcg/tx79.decode diff --git a/target/mips/tx79_translate.c b/target/mips/tcg/tx79_translate.c index ad83774b97..ad83774b97 100644 --- a/target/mips/tx79_translate.c +++ b/target/mips/tcg/tx79_translate.c diff --git a/target/mips/txx9_translate.c b/target/mips/tcg/txx9_translate.c index 8a2c0b766b..8a2c0b766b 100644 --- a/target/mips/txx9_translate.c +++ b/target/mips/tcg/txx9_translate.c diff --git a/target/mips/tcg/user/meson.build b/target/mips/tcg/user/meson.build new file mode 100644 index 0000000000..79badcd321 --- /dev/null +++ b/target/mips/tcg/user/meson.build @@ -0,0 +1,3 @@ +mips_user_ss.add(files( + 'tlb_helper.c', +)) diff --git a/target/mips/tcg/user/tlb_helper.c b/target/mips/tcg/user/tlb_helper.c new file mode 100644 index 0000000000..b835144b82 --- /dev/null +++ b/target/mips/tcg/user/tlb_helper.c @@ -0,0 +1,64 @@ +/* + * MIPS TLB (Translation lookaside buffer) helpers. + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ +#include "qemu/osdep.h" + +#include "cpu.h" +#include "exec/exec-all.h" +#include "internal.h" + +static void raise_mmu_exception(CPUMIPSState *env, target_ulong address, + MMUAccessType access_type) +{ + CPUState *cs = env_cpu(env); + + env->error_code = 0; + if (access_type == MMU_INST_FETCH) { + env->error_code |= EXCP_INST_NOTAVAIL; + } + + /* Reference to kernel address from user mode or supervisor mode */ + /* Reference to supervisor address from user mode */ + if (access_type == MMU_DATA_STORE) { + cs->exception_index = EXCP_AdES; + } else { + cs->exception_index = EXCP_AdEL; + } + + /* Raise exception */ + if (!(env->hflags & MIPS_HFLAG_DM)) { + env->CP0_BadVAddr = address; + } +} + +bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + + /* data access */ + raise_mmu_exception(env, address, access_type); + do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr); +} + +void mips_cpu_do_interrupt(CPUState *cs) +{ + cs->exception_index = EXCP_NONE; +} diff --git a/target/mips/translate.h b/target/mips/translate.h index 2b3c7a69ec..6144259034 100644 --- a/target/mips/translate.h +++ b/target/mips/translate.h @@ -120,7 +120,12 @@ void gen_reserved_instruction(DisasContext *ctx); void check_insn(DisasContext *ctx, uint64_t flags); void check_mips_64(DisasContext *ctx); -void check_cp0_enabled(DisasContext *ctx); +/** + * check_cp0_enabled: + * Return %true if CP0 is enabled, otherwise return %false + * and emit a 'coprocessor unusable' exception. + */ +bool check_cp0_enabled(DisasContext *ctx); void check_cp1_enabled(DisasContext *ctx); void check_cp1_64bitmode(DisasContext *ctx); void check_cp1_registers(DisasContext *ctx, int regs); diff --git a/target/moxie/cpu-param.h b/target/moxie/cpu-param.h deleted file mode 100644 index 9a40ef525c..0000000000 --- a/target/moxie/cpu-param.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Moxie cpu parameters for qemu. - * - * Copyright (c) 2008, 2010, 2013 Anthony Green - * SPDX-License-Identifier: LGPL-2.1+ - */ - -#ifndef MOXIE_CPU_PARAM_H -#define MOXIE_CPU_PARAM_H 1 - -#define TARGET_LONG_BITS 32 -#define TARGET_PAGE_BITS 12 /* 4k */ -#define TARGET_PHYS_ADDR_SPACE_BITS 32 -#define TARGET_VIRT_ADDR_SPACE_BITS 32 -#define NB_MMU_MODES 1 - -#endif diff --git a/target/moxie/cpu.c b/target/moxie/cpu.c deleted file mode 100644 index 83bec34d36..0000000000 --- a/target/moxie/cpu.c +++ /dev/null @@ -1,161 +0,0 @@ -/* - * QEMU Moxie CPU - * - * Copyright (c) 2013 Anthony Green - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
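Because check_cp0_enabled() now reports whether CP0 is actually usable, a call site can stop emitting code as soon as the 'coprocessor unusable' exception has been generated instead of translating dead instructions behind it; the call sites touched in this patch still ignore the result, so the snippet below only sketches the intended pattern (hypothetical call site, QEMU's DisasContext assumed):

    /* Hypothetical caller of the new bool-returning check_cp0_enabled(). */
    static void gen_some_cp0_op(DisasContext *ctx)
    {
        if (!check_cp0_enabled(ctx)) {
            return;   /* EXCP_CpU already generated; emit nothing further */
        }
        /* ... translate the privileged operation ... */
    }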
- */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "cpu.h" -#include "migration/vmstate.h" -#include "machine.h" - -static void moxie_cpu_set_pc(CPUState *cs, vaddr value) -{ - MoxieCPU *cpu = MOXIE_CPU(cs); - - cpu->env.pc = value; -} - -static bool moxie_cpu_has_work(CPUState *cs) -{ - return cs->interrupt_request & CPU_INTERRUPT_HARD; -} - -static void moxie_cpu_reset(DeviceState *dev) -{ - CPUState *s = CPU(dev); - MoxieCPU *cpu = MOXIE_CPU(s); - MoxieCPUClass *mcc = MOXIE_CPU_GET_CLASS(cpu); - CPUMoxieState *env = &cpu->env; - - mcc->parent_reset(dev); - - memset(env, 0, offsetof(CPUMoxieState, end_reset_fields)); - env->pc = 0x1000; -} - -static void moxie_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) -{ - info->mach = bfd_arch_moxie; - info->print_insn = print_insn_moxie; -} - -static void moxie_cpu_realizefn(DeviceState *dev, Error **errp) -{ - CPUState *cs = CPU(dev); - MoxieCPUClass *mcc = MOXIE_CPU_GET_CLASS(dev); - Error *local_err = NULL; - - cpu_exec_realizefn(cs, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return; - } - - qemu_init_vcpu(cs); - cpu_reset(cs); - - mcc->parent_realize(dev, errp); -} - -static void moxie_cpu_initfn(Object *obj) -{ - MoxieCPU *cpu = MOXIE_CPU(obj); - - cpu_set_cpustate_pointers(cpu); -} - -static ObjectClass *moxie_cpu_class_by_name(const char *cpu_model) -{ - ObjectClass *oc; - char *typename; - - typename = g_strdup_printf(MOXIE_CPU_TYPE_NAME("%s"), cpu_model); - oc = object_class_by_name(typename); - g_free(typename); - if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_MOXIE_CPU) || - object_class_is_abstract(oc))) { - return NULL; - } - return oc; -} - -#include "hw/core/tcg-cpu-ops.h" - -static struct TCGCPUOps moxie_tcg_ops = { - .initialize = moxie_translate_init, - .tlb_fill = moxie_cpu_tlb_fill, - -#ifndef CONFIG_USER_ONLY - .do_interrupt = moxie_cpu_do_interrupt, -#endif /* !CONFIG_USER_ONLY */ -}; - -static void moxie_cpu_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - CPUClass *cc = CPU_CLASS(oc); - MoxieCPUClass *mcc = MOXIE_CPU_CLASS(oc); - - device_class_set_parent_realize(dc, moxie_cpu_realizefn, - &mcc->parent_realize); - device_class_set_parent_reset(dc, moxie_cpu_reset, &mcc->parent_reset); - - cc->class_by_name = moxie_cpu_class_by_name; - - cc->has_work = moxie_cpu_has_work; - cc->dump_state = moxie_cpu_dump_state; - cc->set_pc = moxie_cpu_set_pc; -#ifndef CONFIG_USER_ONLY - cc->get_phys_page_debug = moxie_cpu_get_phys_page_debug; - cc->vmsd = &vmstate_moxie_cpu; -#endif - cc->disas_set_info = moxie_cpu_disas_set_info; - cc->tcg_ops = &moxie_tcg_ops; -} - -static void moxielite_initfn(Object *obj) -{ - /* Set cpu feature flags */ -} - -static void moxie_any_initfn(Object *obj) -{ - /* Set cpu feature flags */ -} - -#define DEFINE_MOXIE_CPU_TYPE(cpu_model, initfn) \ - { \ - .parent = TYPE_MOXIE_CPU, \ - .instance_init = initfn, \ - .name = MOXIE_CPU_TYPE_NAME(cpu_model), \ - } - -static const TypeInfo moxie_cpus_type_infos[] = { - { /* base class should be registered first */ - .name = TYPE_MOXIE_CPU, - .parent = TYPE_CPU, - .instance_size = sizeof(MoxieCPU), - .instance_init = moxie_cpu_initfn, - .class_size = sizeof(MoxieCPUClass), - .class_init = moxie_cpu_class_init, - }, - DEFINE_MOXIE_CPU_TYPE("MoxieLite", moxielite_initfn), - DEFINE_MOXIE_CPU_TYPE("any", moxie_any_initfn), -}; - -DEFINE_TYPES(moxie_cpus_type_infos) diff --git a/target/moxie/cpu.h b/target/moxie/cpu.h deleted file mode 100644 index bd6ab66084..0000000000 
--- a/target/moxie/cpu.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Moxie emulation - * - * Copyright (c) 2008, 2010, 2013 Anthony Green - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef MOXIE_CPU_H -#define MOXIE_CPU_H - -#include "exec/cpu-defs.h" -#include "qom/object.h" - -#define MOXIE_EX_DIV0 0 -#define MOXIE_EX_BAD 1 -#define MOXIE_EX_IRQ 2 -#define MOXIE_EX_SWI 3 -#define MOXIE_EX_MMU_MISS 4 -#define MOXIE_EX_BREAK 16 - -typedef struct CPUMoxieState { - - uint32_t flags; /* general execution flags */ - uint32_t gregs[16]; /* general registers */ - uint32_t sregs[256]; /* special registers */ - uint32_t pc; /* program counter */ - /* Instead of saving the cc value, we save the cmp arguments - and compute cc on demand. */ - uint32_t cc_a; /* reg a for condition code calculation */ - uint32_t cc_b; /* reg b for condition code calculation */ - - void *irq[8]; - - /* Fields up to this point are cleared by a CPU reset */ - struct {} end_reset_fields; -} CPUMoxieState; - -#include "hw/core/cpu.h" - -#define TYPE_MOXIE_CPU "moxie-cpu" - -OBJECT_DECLARE_TYPE(MoxieCPU, MoxieCPUClass, - MOXIE_CPU) - -/** - * MoxieCPUClass: - * @parent_reset: The parent class' reset handler. - * - * A Moxie CPU model. - */ -struct MoxieCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - DeviceReset parent_reset; -}; - -/** - * MoxieCPU: - * @env: #CPUMoxieState - * - * A Moxie CPU. - */ -struct MoxieCPU { - /*< private >*/ - CPUState parent_obj; - /*< public >*/ - - CPUNegativeOffsetState neg; - CPUMoxieState env; -}; - - -void moxie_cpu_do_interrupt(CPUState *cs); -void moxie_cpu_dump_state(CPUState *cpu, FILE *f, int flags); -hwaddr moxie_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); -void moxie_translate_init(void); -int cpu_moxie_signal_handler(int host_signum, void *pinfo, - void *puc); - -#define MOXIE_CPU_TYPE_SUFFIX "-" TYPE_MOXIE_CPU -#define MOXIE_CPU_TYPE_NAME(model) model MOXIE_CPU_TYPE_SUFFIX -#define CPU_RESOLVING_TYPE TYPE_MOXIE_CPU - -#define cpu_signal_handler cpu_moxie_signal_handler - -static inline int cpu_mmu_index(CPUMoxieState *env, bool ifetch) -{ - return 0; -} - -typedef CPUMoxieState CPUArchState; -typedef MoxieCPU ArchCPU; - -#include "exec/cpu-all.h" - -static inline void cpu_get_tb_cpu_state(CPUMoxieState *env, target_ulong *pc, - target_ulong *cs_base, uint32_t *flags) -{ - *pc = env->pc; - *cs_base = 0; - *flags = 0; -} - -bool moxie_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); - -#endif /* MOXIE_CPU_H */ diff --git a/target/moxie/helper.c b/target/moxie/helper.c deleted file mode 100644 index b1919f62b3..0000000000 --- a/target/moxie/helper.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Moxie helper routines. 
- * - * Copyright (c) 2008, 2009, 2010, 2013 Anthony Green - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include "qemu/osdep.h" - -#include "cpu.h" -#include "mmu.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" -#include "qemu/host-utils.h" -#include "exec/helper-proto.h" - -void helper_raise_exception(CPUMoxieState *env, int ex) -{ - CPUState *cs = env_cpu(env); - - cs->exception_index = ex; - /* Stash the exception type. */ - env->sregs[2] = ex; - /* Stash the address where the exception occurred. */ - cpu_restore_state(cs, GETPC(), true); - env->sregs[5] = env->pc; - /* Jump to the exception handline routine. */ - env->pc = env->sregs[1]; - cpu_loop_exit(cs); -} - -uint32_t helper_div(CPUMoxieState *env, uint32_t a, uint32_t b) -{ - if (unlikely(b == 0)) { - helper_raise_exception(env, MOXIE_EX_DIV0); - return 0; - } - if (unlikely(a == INT_MIN && b == -1)) { - return INT_MIN; - } - - return (int32_t)a / (int32_t)b; -} - -uint32_t helper_udiv(CPUMoxieState *env, uint32_t a, uint32_t b) -{ - if (unlikely(b == 0)) { - helper_raise_exception(env, MOXIE_EX_DIV0); - return 0; - } - return a / b; -} - -void helper_debug(CPUMoxieState *env) -{ - CPUState *cs = env_cpu(env); - - cs->exception_index = EXCP_DEBUG; - cpu_loop_exit(cs); -} - -bool moxie_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - MoxieCPU *cpu = MOXIE_CPU(cs); - CPUMoxieState *env = &cpu->env; - MoxieMMUResult res; - int prot, miss; - - address &= TARGET_PAGE_MASK; - prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - miss = moxie_mmu_translate(&res, env, address, access_type, mmu_idx); - if (likely(!miss)) { - tlb_set_page(cs, address, res.phy, prot, mmu_idx, TARGET_PAGE_SIZE); - return true; - } - if (probe) { - return false; - } - - cs->exception_index = MOXIE_EX_MMU_MISS; - cpu_loop_exit_restore(cs, retaddr); -} - -void moxie_cpu_do_interrupt(CPUState *cs) -{ - switch (cs->exception_index) { - case MOXIE_EX_BREAK: - break; - default: - break; - } -} - -hwaddr moxie_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) -{ - MoxieCPU *cpu = MOXIE_CPU(cs); - uint32_t phy = addr; - MoxieMMUResult res; - int miss; - - miss = moxie_mmu_translate(&res, &cpu->env, addr, 0, 0); - if (!miss) { - phy = res.phy; - } - return phy; -} diff --git a/target/moxie/helper.h b/target/moxie/helper.h deleted file mode 100644 index d94ef7a17e..0000000000 --- a/target/moxie/helper.h +++ /dev/null @@ -1,5 +0,0 @@ -DEF_HELPER_2(raise_exception, void, env, int) -DEF_HELPER_1(debug, void, env) - -DEF_HELPER_FLAGS_3(div, TCG_CALL_NO_WG, i32, env, i32, i32) -DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_WG, i32, env, i32, i32) diff --git a/target/moxie/machine.c b/target/moxie/machine.c deleted file mode 100644 index d0f177048c..0000000000 --- a/target/moxie/machine.c +++ /dev/null @@ -1,19 +0,0 @@ -#include "qemu/osdep.h" -#include "cpu.h" 
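The deleted moxie helper_div()/helper_udiv() show the two checks a guest-level signed division needs before using the host '/' operator: a zero divisor (raised as MOXIE_EX_DIV0) and INT_MIN / -1, which the helper pins to INT_MIN instead of letting it overflow. A standalone restatement of just that guard logic (plain C, no QEMU types; the trap is modelled as an out-parameter):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    static int32_t checked_div(int32_t a, int32_t b, int *trapped)
    {
        *trapped = 0;
        if (b == 0) {             /* the helper raised MOXIE_EX_DIV0 here */
            *trapped = 1;
            return 0;
        }
        if (a == INT32_MIN && b == -1) {
            return INT32_MIN;     /* the only case where a / b would overflow */
        }
        return a / b;
    }

    int main(void)
    {
        int t;
        printf("%d\n", checked_div(INT32_MIN, -1, &t));   /* -2147483648 */
        printf("%d (trapped=%d)\n", checked_div(7, 0, &t), t);
        return 0;
    }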
-#include "machine.h" -#include "migration/cpu.h" - -const VMStateDescription vmstate_moxie_cpu = { - .name = "cpu", - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { - VMSTATE_UINT32(flags, CPUMoxieState), - VMSTATE_UINT32_ARRAY(gregs, CPUMoxieState, 16), - VMSTATE_UINT32_ARRAY(sregs, CPUMoxieState, 256), - VMSTATE_UINT32(pc, CPUMoxieState), - VMSTATE_UINT32(cc_a, CPUMoxieState), - VMSTATE_UINT32(cc_b, CPUMoxieState), - VMSTATE_END_OF_LIST() - } -}; diff --git a/target/moxie/machine.h b/target/moxie/machine.h deleted file mode 100644 index a1b72907ae..0000000000 --- a/target/moxie/machine.h +++ /dev/null @@ -1 +0,0 @@ -extern const VMStateDescription vmstate_moxie_cpu; diff --git a/target/moxie/meson.build b/target/moxie/meson.build deleted file mode 100644 index b4beb528cc..0000000000 --- a/target/moxie/meson.build +++ /dev/null @@ -1,14 +0,0 @@ -moxie_ss = ss.source_set() -moxie_ss.add(files( - 'cpu.c', - 'helper.c', - 'machine.c', - 'machine.c', - 'translate.c', -)) - -moxie_softmmu_ss = ss.source_set() -moxie_softmmu_ss.add(files('mmu.c')) - -target_arch += {'moxie': moxie_ss} -target_softmmu_arch += {'moxie': moxie_softmmu_ss} diff --git a/target/moxie/mmu.c b/target/moxie/mmu.c deleted file mode 100644 index 87783a36f8..0000000000 --- a/target/moxie/mmu.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Moxie mmu emulation. - * - * Copyright (c) 2008, 2013 Anthony Green - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include "qemu/osdep.h" - -#include "cpu.h" -#include "mmu.h" - -int moxie_mmu_translate(MoxieMMUResult *res, - CPUMoxieState *env, uint32_t vaddr, - int rw, int mmu_idx) -{ - /* Perform no translation yet. */ - res->phy = vaddr; - return 0; -} diff --git a/target/moxie/mmu.h b/target/moxie/mmu.h deleted file mode 100644 index d80690f4d2..0000000000 --- a/target/moxie/mmu.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef TARGET_MOXIE_MMU_H -#define TARGET_MOXIE_MMU_H - -#define MOXIE_MMU_ERR_EXEC 0 -#define MOXIE_MMU_ERR_READ 1 -#define MOXIE_MMU_ERR_WRITE 2 -#define MOXIE_MMU_ERR_FLUSH 3 - -typedef struct { - uint32_t phy; - uint32_t pfn; - int cause_op; -} MoxieMMUResult; - -int moxie_mmu_translate(MoxieMMUResult *res, - CPUMoxieState *env, uint32_t vaddr, - int rw, int mmu_idx); - -#endif diff --git a/target/moxie/translate.c b/target/moxie/translate.c deleted file mode 100644 index 24a742b25e..0000000000 --- a/target/moxie/translate.c +++ /dev/null @@ -1,892 +0,0 @@ -/* - * Moxie emulation for qemu: main translation routines. - * - * Copyright (c) 2009, 2013 Anthony Green - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public License - * as published by the Free Software Foundation; either version 2.1 of - * the License, or (at your option) any later version. 
- * - * This library is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -/* For information on the Moxie architecture, see - * http://moxielogic.org/wiki - */ - -#include "qemu/osdep.h" - -#include "cpu.h" -#include "exec/exec-all.h" -#include "disas/disas.h" -#include "tcg/tcg-op.h" -#include "exec/cpu_ldst.h" -#include "qemu/qemu-print.h" - -#include "exec/helper-proto.h" -#include "exec/helper-gen.h" -#include "exec/log.h" - -/* This is the state at translation time. */ -typedef struct DisasContext { - TranslationBlock *tb; - target_ulong pc, saved_pc; - uint32_t opcode; - uint32_t fp_status; - /* Routine used to access memory */ - int memidx; - int bstate; - target_ulong btarget; - int singlestep_enabled; -} DisasContext; - -enum { - BS_NONE = 0, /* We go out of the TB without reaching a branch or an - * exception condition */ - BS_STOP = 1, /* We want to stop translation for any reason */ - BS_BRANCH = 2, /* We reached a branch condition */ - BS_EXCP = 3, /* We reached an exception condition */ -}; - -static TCGv cpu_pc; -static TCGv cpu_gregs[16]; -static TCGv cc_a, cc_b; - -#include "exec/gen-icount.h" - -#define REG(x) (cpu_gregs[x]) - -/* Extract the signed 10-bit offset from a 16-bit branch - instruction. */ -static int extract_branch_offset(int opcode) -{ - return (((signed short)((opcode & ((1 << 10) - 1)) << 6)) >> 6) << 1; -} - -void moxie_cpu_dump_state(CPUState *cs, FILE *f, int flags) -{ - MoxieCPU *cpu = MOXIE_CPU(cs); - CPUMoxieState *env = &cpu->env; - int i; - qemu_fprintf(f, "pc=0x%08x\n", env->pc); - qemu_fprintf(f, "$fp=0x%08x $sp=0x%08x $r0=0x%08x $r1=0x%08x\n", - env->gregs[0], env->gregs[1], env->gregs[2], env->gregs[3]); - for (i = 4; i < 16; i += 4) { - qemu_fprintf(f, "$r%d=0x%08x $r%d=0x%08x $r%d=0x%08x $r%d=0x%08x\n", - i - 2, env->gregs[i], i - 1, env->gregs[i + 1], - i, env->gregs[i + 2], i + 1, env->gregs[i + 3]); - } - for (i = 4; i < 16; i += 4) { - qemu_fprintf(f, "sr%d=0x%08x sr%d=0x%08x sr%d=0x%08x sr%d=0x%08x\n", - i - 2, env->sregs[i], i - 1, env->sregs[i + 1], - i, env->sregs[i + 2], i + 1, env->sregs[i + 3]); - } -} - -void moxie_translate_init(void) -{ - int i; - static const char * const gregnames[16] = { - "$fp", "$sp", "$r0", "$r1", - "$r2", "$r3", "$r4", "$r5", - "$r6", "$r7", "$r8", "$r9", - "$r10", "$r11", "$r12", "$r13" - }; - - cpu_pc = tcg_global_mem_new_i32(cpu_env, - offsetof(CPUMoxieState, pc), "$pc"); - for (i = 0; i < 16; i++) - cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env, - offsetof(CPUMoxieState, gregs[i]), - gregnames[i]); - - cc_a = tcg_global_mem_new_i32(cpu_env, - offsetof(CPUMoxieState, cc_a), "cc_a"); - cc_b = tcg_global_mem_new_i32(cpu_env, - offsetof(CPUMoxieState, cc_b), "cc_b"); -} - -static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) -{ - if (unlikely(ctx->singlestep_enabled)) { - return false; - } - -#ifndef CONFIG_USER_ONLY - return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); -#else - return true; -#endif -} - -static inline void gen_goto_tb(CPUMoxieState *env, DisasContext *ctx, - int n, target_ulong dest) -{ - if (use_goto_tb(ctx, dest)) { - tcg_gen_goto_tb(n); - tcg_gen_movi_i32(cpu_pc, dest); - tcg_gen_exit_tb(ctx->tb, n); - } else 
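use_goto_tb() above encodes the usual TCG rule: a translation block may be chained directly to its successor only when single-stepping is off and, for system emulation, both addresses lie in the same guest page; otherwise gen_goto_tb() falls back to tcg_gen_exit_tb(NULL, 0) and lets the main loop look the destination up. The page check in isolation (4 KiB pages assumed, matching the deleted cpu-param.h; not QEMU code):

    #include <stdbool.h>
    #include <stdint.h>

    #define TARGET_PAGE_MASK (~(uint32_t)0xFFFu)   /* TARGET_PAGE_BITS == 12 */

    static bool may_chain_directly(uint32_t tb_pc, uint32_t dest)
    {
        /* Direct tcg_gen_goto_tb() chaining is only valid within one page. */
        return (tb_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
    }

    int main(void)
    {
        return (may_chain_directly(0x1000u, 0x1FFEu) &&
                !may_chain_directly(0x1FFEu, 0x2004u)) ? 0 : 1;
    }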
{ - tcg_gen_movi_i32(cpu_pc, dest); - if (ctx->singlestep_enabled) { - gen_helper_debug(cpu_env); - } - tcg_gen_exit_tb(NULL, 0); - } -} - -static int decode_opc(MoxieCPU *cpu, DisasContext *ctx) -{ - CPUMoxieState *env = &cpu->env; - - /* Local cache for the instruction opcode. */ - int opcode; - /* Set the default instruction length. */ - int length = 2; - - /* Examine the 16-bit opcode. */ - opcode = ctx->opcode; - - /* Decode instruction. */ - if (opcode & (1 << 15)) { - if (opcode & (1 << 14)) { - /* This is a Form 3 instruction. */ - int inst = (opcode >> 10 & 0xf); - -#define BRANCH(cond) \ - do { \ - TCGLabel *l1 = gen_new_label(); \ - tcg_gen_brcond_i32(cond, cc_a, cc_b, l1); \ - gen_goto_tb(env, ctx, 1, ctx->pc+2); \ - gen_set_label(l1); \ - gen_goto_tb(env, ctx, 0, extract_branch_offset(opcode) + ctx->pc+2); \ - ctx->bstate = BS_BRANCH; \ - } while (0) - - switch (inst) { - case 0x00: /* beq */ - BRANCH(TCG_COND_EQ); - break; - case 0x01: /* bne */ - BRANCH(TCG_COND_NE); - break; - case 0x02: /* blt */ - BRANCH(TCG_COND_LT); - break; - case 0x03: /* bgt */ - BRANCH(TCG_COND_GT); - break; - case 0x04: /* bltu */ - BRANCH(TCG_COND_LTU); - break; - case 0x05: /* bgtu */ - BRANCH(TCG_COND_GTU); - break; - case 0x06: /* bge */ - BRANCH(TCG_COND_GE); - break; - case 0x07: /* ble */ - BRANCH(TCG_COND_LE); - break; - case 0x08: /* bgeu */ - BRANCH(TCG_COND_GEU); - break; - case 0x09: /* bleu */ - BRANCH(TCG_COND_LEU); - break; - default: - { - TCGv temp = tcg_temp_new_i32(); - tcg_gen_movi_i32(cpu_pc, ctx->pc); - tcg_gen_movi_i32(temp, MOXIE_EX_BAD); - gen_helper_raise_exception(cpu_env, temp); - tcg_temp_free_i32(temp); - } - break; - } - } else { - /* This is a Form 2 instruction. */ - int inst = (opcode >> 12 & 0x3); - switch (inst) { - case 0x00: /* inc */ - { - int a = (opcode >> 8) & 0xf; - unsigned int v = (opcode & 0xff); - tcg_gen_addi_i32(REG(a), REG(a), v); - } - break; - case 0x01: /* dec */ - { - int a = (opcode >> 8) & 0xf; - unsigned int v = (opcode & 0xff); - tcg_gen_subi_i32(REG(a), REG(a), v); - } - break; - case 0x02: /* gsr */ - { - int a = (opcode >> 8) & 0xf; - unsigned v = (opcode & 0xff); - tcg_gen_ld_i32(REG(a), cpu_env, - offsetof(CPUMoxieState, sregs[v])); - } - break; - case 0x03: /* ssr */ - { - int a = (opcode >> 8) & 0xf; - unsigned v = (opcode & 0xff); - tcg_gen_st_i32(REG(a), cpu_env, - offsetof(CPUMoxieState, sregs[v])); - } - break; - default: - { - TCGv temp = tcg_temp_new_i32(); - tcg_gen_movi_i32(cpu_pc, ctx->pc); - tcg_gen_movi_i32(temp, MOXIE_EX_BAD); - gen_helper_raise_exception(cpu_env, temp); - tcg_temp_free_i32(temp); - } - break; - } - } - } else { - /* This is a Form 1 instruction. */ - int inst = opcode >> 8; - switch (inst) { - case 0x00: /* nop */ - break; - case 0x01: /* ldi.l (immediate) */ - { - int reg = (opcode >> 4) & 0xf; - int val = cpu_ldl_code(env, ctx->pc+2); - tcg_gen_movi_i32(REG(reg), val); - length = 6; - } - break; - case 0x02: /* mov (register-to-register) */ - { - int dest = (opcode >> 4) & 0xf; - int src = opcode & 0xf; - tcg_gen_mov_i32(REG(dest), REG(src)); - } - break; - case 0x03: /* jsra */ - { - TCGv t1 = tcg_temp_new_i32(); - TCGv t2 = tcg_temp_new_i32(); - - tcg_gen_movi_i32(t1, ctx->pc + 6); - - /* Make space for the static chain and return address. */ - tcg_gen_subi_i32(t2, REG(1), 8); - tcg_gen_mov_i32(REG(1), t2); - tcg_gen_qemu_st32(t1, REG(1), ctx->memidx); - - /* Push the current frame pointer. 
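decode_opc() above sorts every 16-bit moxie opcode into one of three forms using only its top two bits: both set selects a Form 3 conditional branch, bit 15 alone selects a Form 2 register-plus-immediate operation, and a clear bit 15 selects a Form 1 operation chosen by the whole upper byte. A tiny standalone decoder showing just that dispatch (illustrative; the example encodings follow the deleted switch statements):

    #include <stdint.h>
    #include <stdio.h>

    static const char *moxie_form(uint16_t opcode)
    {
        if (opcode & (1u << 15)) {
            return (opcode & (1u << 14)) ? "Form 3 (branch)"
                                         : "Form 2 (reg + imm8)";
        }
        return "Form 1";
    }

    int main(void)
    {
        printf("%s\n", moxie_form(0xC000));   /* inst 0x0: beq */
        printf("%s\n", moxie_form(0x8000));   /* inst 0x0: inc */
        printf("%s\n", moxie_form(0x0500));   /* inst 0x05: add.l */
        return 0;
    }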
*/ - tcg_gen_subi_i32(t2, REG(1), 4); - tcg_gen_mov_i32(REG(1), t2); - tcg_gen_qemu_st32(REG(0), REG(1), ctx->memidx); - - /* Set the pc and $fp. */ - tcg_gen_mov_i32(REG(0), REG(1)); - - gen_goto_tb(env, ctx, 0, cpu_ldl_code(env, ctx->pc+2)); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - - ctx->bstate = BS_BRANCH; - length = 6; - } - break; - case 0x04: /* ret */ - { - TCGv t1 = tcg_temp_new_i32(); - - /* The new $sp is the old $fp. */ - tcg_gen_mov_i32(REG(1), REG(0)); - - /* Pop the frame pointer. */ - tcg_gen_qemu_ld32u(REG(0), REG(1), ctx->memidx); - tcg_gen_addi_i32(t1, REG(1), 4); - tcg_gen_mov_i32(REG(1), t1); - - - /* Pop the return address and skip over the static chain - slot. */ - tcg_gen_qemu_ld32u(cpu_pc, REG(1), ctx->memidx); - tcg_gen_addi_i32(t1, REG(1), 8); - tcg_gen_mov_i32(REG(1), t1); - - tcg_temp_free_i32(t1); - - /* Jump... */ - tcg_gen_exit_tb(NULL, 0); - - ctx->bstate = BS_BRANCH; - } - break; - case 0x05: /* add.l */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - tcg_gen_add_i32(REG(a), REG(a), REG(b)); - } - break; - case 0x06: /* push */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - TCGv t1 = tcg_temp_new_i32(); - tcg_gen_subi_i32(t1, REG(a), 4); - tcg_gen_mov_i32(REG(a), t1); - tcg_gen_qemu_st32(REG(b), REG(a), ctx->memidx); - tcg_temp_free_i32(t1); - } - break; - case 0x07: /* pop */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - TCGv t1 = tcg_temp_new_i32(); - - tcg_gen_qemu_ld32u(REG(b), REG(a), ctx->memidx); - tcg_gen_addi_i32(t1, REG(a), 4); - tcg_gen_mov_i32(REG(a), t1); - tcg_temp_free_i32(t1); - } - break; - case 0x08: /* lda.l */ - { - int reg = (opcode >> 4) & 0xf; - - TCGv ptr = tcg_temp_new_i32(); - tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_qemu_ld32u(REG(reg), ptr, ctx->memidx); - tcg_temp_free_i32(ptr); - - length = 6; - } - break; - case 0x09: /* sta.l */ - { - int val = (opcode >> 4) & 0xf; - - TCGv ptr = tcg_temp_new_i32(); - tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_qemu_st32(REG(val), ptr, ctx->memidx); - tcg_temp_free_i32(ptr); - - length = 6; - } - break; - case 0x0a: /* ld.l (register indirect) */ - { - int src = opcode & 0xf; - int dest = (opcode >> 4) & 0xf; - - tcg_gen_qemu_ld32u(REG(dest), REG(src), ctx->memidx); - } - break; - case 0x0b: /* st.l */ - { - int dest = (opcode >> 4) & 0xf; - int val = opcode & 0xf; - - tcg_gen_qemu_st32(REG(val), REG(dest), ctx->memidx); - } - break; - case 0x0c: /* ldo.l */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - TCGv t1 = tcg_temp_new_i32(); - TCGv t2 = tcg_temp_new_i32(); - tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_qemu_ld32u(t2, t1, ctx->memidx); - tcg_gen_mov_i32(REG(a), t2); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - - length = 6; - } - break; - case 0x0d: /* sto.l */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - TCGv t1 = tcg_temp_new_i32(); - TCGv t2 = tcg_temp_new_i32(); - tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_qemu_st32(REG(b), t1, ctx->memidx); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - - length = 6; - } - break; - case 0x0e: /* cmp */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - tcg_gen_mov_i32(cc_a, REG(a)); - tcg_gen_mov_i32(cc_b, REG(b)); - } - break; - case 0x19: /* jsr */ - { - int fnreg = (opcode >> 4) & 0xf; - - /* Load the stack pointer into T0. 
*/ - TCGv t1 = tcg_temp_new_i32(); - TCGv t2 = tcg_temp_new_i32(); - - tcg_gen_movi_i32(t1, ctx->pc+2); - - /* Make space for the static chain and return address. */ - tcg_gen_subi_i32(t2, REG(1), 8); - tcg_gen_mov_i32(REG(1), t2); - tcg_gen_qemu_st32(t1, REG(1), ctx->memidx); - - /* Push the current frame pointer. */ - tcg_gen_subi_i32(t2, REG(1), 4); - tcg_gen_mov_i32(REG(1), t2); - tcg_gen_qemu_st32(REG(0), REG(1), ctx->memidx); - - /* Set the pc and $fp. */ - tcg_gen_mov_i32(REG(0), REG(1)); - tcg_gen_mov_i32(cpu_pc, REG(fnreg)); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - tcg_gen_exit_tb(NULL, 0); - ctx->bstate = BS_BRANCH; - } - break; - case 0x1a: /* jmpa */ - { - tcg_gen_movi_i32(cpu_pc, cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_exit_tb(NULL, 0); - ctx->bstate = BS_BRANCH; - length = 6; - } - break; - case 0x1b: /* ldi.b (immediate) */ - { - int reg = (opcode >> 4) & 0xf; - int val = cpu_ldl_code(env, ctx->pc+2); - tcg_gen_movi_i32(REG(reg), val); - length = 6; - } - break; - case 0x1c: /* ld.b (register indirect) */ - { - int src = opcode & 0xf; - int dest = (opcode >> 4) & 0xf; - - tcg_gen_qemu_ld8u(REG(dest), REG(src), ctx->memidx); - } - break; - case 0x1d: /* lda.b */ - { - int reg = (opcode >> 4) & 0xf; - - TCGv ptr = tcg_temp_new_i32(); - tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_qemu_ld8u(REG(reg), ptr, ctx->memidx); - tcg_temp_free_i32(ptr); - - length = 6; - } - break; - case 0x1e: /* st.b */ - { - int dest = (opcode >> 4) & 0xf; - int val = opcode & 0xf; - - tcg_gen_qemu_st8(REG(val), REG(dest), ctx->memidx); - } - break; - case 0x1f: /* sta.b */ - { - int val = (opcode >> 4) & 0xf; - - TCGv ptr = tcg_temp_new_i32(); - tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_qemu_st8(REG(val), ptr, ctx->memidx); - tcg_temp_free_i32(ptr); - - length = 6; - } - break; - case 0x20: /* ldi.s (immediate) */ - { - int reg = (opcode >> 4) & 0xf; - int val = cpu_ldl_code(env, ctx->pc+2); - tcg_gen_movi_i32(REG(reg), val); - length = 6; - } - break; - case 0x21: /* ld.s (register indirect) */ - { - int src = opcode & 0xf; - int dest = (opcode >> 4) & 0xf; - - tcg_gen_qemu_ld16u(REG(dest), REG(src), ctx->memidx); - } - break; - case 0x22: /* lda.s */ - { - int reg = (opcode >> 4) & 0xf; - - TCGv ptr = tcg_temp_new_i32(); - tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_qemu_ld16u(REG(reg), ptr, ctx->memidx); - tcg_temp_free_i32(ptr); - - length = 6; - } - break; - case 0x23: /* st.s */ - { - int dest = (opcode >> 4) & 0xf; - int val = opcode & 0xf; - - tcg_gen_qemu_st16(REG(val), REG(dest), ctx->memidx); - } - break; - case 0x24: /* sta.s */ - { - int val = (opcode >> 4) & 0xf; - - TCGv ptr = tcg_temp_new_i32(); - tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_qemu_st16(REG(val), ptr, ctx->memidx); - tcg_temp_free_i32(ptr); - - length = 6; - } - break; - case 0x25: /* jmp */ - { - int reg = (opcode >> 4) & 0xf; - tcg_gen_mov_i32(cpu_pc, REG(reg)); - tcg_gen_exit_tb(NULL, 0); - ctx->bstate = BS_BRANCH; - } - break; - case 0x26: /* and */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - tcg_gen_and_i32(REG(a), REG(a), REG(b)); - } - break; - case 0x27: /* lshr */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - TCGv sv = tcg_temp_new_i32(); - tcg_gen_andi_i32(sv, REG(b), 0x1f); - tcg_gen_shr_i32(REG(a), REG(a), sv); - tcg_temp_free_i32(sv); - } - break; - case 0x28: /* ashl */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - TCGv sv = tcg_temp_new_i32(); - tcg_gen_andi_i32(sv, 
REG(b), 0x1f); - tcg_gen_shl_i32(REG(a), REG(a), sv); - tcg_temp_free_i32(sv); - } - break; - case 0x29: /* sub.l */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - tcg_gen_sub_i32(REG(a), REG(a), REG(b)); - } - break; - case 0x2a: /* neg */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - tcg_gen_neg_i32(REG(a), REG(b)); - } - break; - case 0x2b: /* or */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - tcg_gen_or_i32(REG(a), REG(a), REG(b)); - } - break; - case 0x2c: /* not */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - tcg_gen_not_i32(REG(a), REG(b)); - } - break; - case 0x2d: /* ashr */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - TCGv sv = tcg_temp_new_i32(); - tcg_gen_andi_i32(sv, REG(b), 0x1f); - tcg_gen_sar_i32(REG(a), REG(a), sv); - tcg_temp_free_i32(sv); - } - break; - case 0x2e: /* xor */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - tcg_gen_xor_i32(REG(a), REG(a), REG(b)); - } - break; - case 0x2f: /* mul.l */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - tcg_gen_mul_i32(REG(a), REG(a), REG(b)); - } - break; - case 0x30: /* swi */ - { - int val = cpu_ldl_code(env, ctx->pc+2); - - TCGv temp = tcg_temp_new_i32(); - tcg_gen_movi_i32(temp, val); - tcg_gen_st_i32(temp, cpu_env, - offsetof(CPUMoxieState, sregs[3])); - tcg_gen_movi_i32(cpu_pc, ctx->pc); - tcg_gen_movi_i32(temp, MOXIE_EX_SWI); - gen_helper_raise_exception(cpu_env, temp); - tcg_temp_free_i32(temp); - - length = 6; - } - break; - case 0x31: /* div.l */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - tcg_gen_movi_i32(cpu_pc, ctx->pc); - gen_helper_div(REG(a), cpu_env, REG(a), REG(b)); - } - break; - case 0x32: /* udiv.l */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - tcg_gen_movi_i32(cpu_pc, ctx->pc); - gen_helper_udiv(REG(a), cpu_env, REG(a), REG(b)); - } - break; - case 0x33: /* mod.l */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - tcg_gen_rem_i32(REG(a), REG(a), REG(b)); - } - break; - case 0x34: /* umod.l */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - tcg_gen_remu_i32(REG(a), REG(a), REG(b)); - } - break; - case 0x35: /* brk */ - { - TCGv temp = tcg_temp_new_i32(); - tcg_gen_movi_i32(cpu_pc, ctx->pc); - tcg_gen_movi_i32(temp, MOXIE_EX_BREAK); - gen_helper_raise_exception(cpu_env, temp); - tcg_temp_free_i32(temp); - } - break; - case 0x36: /* ldo.b */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - TCGv t1 = tcg_temp_new_i32(); - TCGv t2 = tcg_temp_new_i32(); - tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_qemu_ld8u(t2, t1, ctx->memidx); - tcg_gen_mov_i32(REG(a), t2); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - - length = 6; - } - break; - case 0x37: /* sto.b */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - TCGv t1 = tcg_temp_new_i32(); - TCGv t2 = tcg_temp_new_i32(); - tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_qemu_st8(REG(b), t1, ctx->memidx); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - - length = 6; - } - break; - case 0x38: /* ldo.s */ - { - int a = (opcode >> 4) & 0xf; - int b = opcode & 0xf; - - TCGv t1 = tcg_temp_new_i32(); - TCGv t2 = tcg_temp_new_i32(); - tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_qemu_ld16u(t2, t1, ctx->memidx); - tcg_gen_mov_i32(REG(a), t2); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - - length = 6; - } - break; - case 0x39: /* sto.s */ - { - int a = (opcode >> 4) & 0xf; - int b = 
opcode & 0xf; - - TCGv t1 = tcg_temp_new_i32(); - TCGv t2 = tcg_temp_new_i32(); - tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2)); - tcg_gen_qemu_st16(REG(b), t1, ctx->memidx); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - - length = 6; - } - break; - default: - { - TCGv temp = tcg_temp_new_i32(); - tcg_gen_movi_i32(cpu_pc, ctx->pc); - tcg_gen_movi_i32(temp, MOXIE_EX_BAD); - gen_helper_raise_exception(cpu_env, temp); - tcg_temp_free_i32(temp); - } - break; - } - } - - return length; -} - -/* generate intermediate code for basic block 'tb'. */ -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) -{ - CPUMoxieState *env = cs->env_ptr; - MoxieCPU *cpu = env_archcpu(env); - DisasContext ctx; - target_ulong pc_start; - int num_insns; - - pc_start = tb->pc; - ctx.pc = pc_start; - ctx.saved_pc = -1; - ctx.tb = tb; - ctx.memidx = 0; - ctx.singlestep_enabled = 0; - ctx.bstate = BS_NONE; - num_insns = 0; - - gen_tb_start(tb); - do { - tcg_gen_insn_start(ctx.pc); - num_insns++; - - if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) { - tcg_gen_movi_i32(cpu_pc, ctx.pc); - gen_helper_debug(cpu_env); - ctx.bstate = BS_EXCP; - /* The address covered by the breakpoint must be included in - [tb->pc, tb->pc + tb->size) in order to for it to be - properly cleared -- thus we increment the PC here so that - the logic setting tb->size below does the right thing. */ - ctx.pc += 2; - goto done_generating; - } - - ctx.opcode = cpu_lduw_code(env, ctx.pc); - ctx.pc += decode_opc(cpu, &ctx); - - if (num_insns >= max_insns) { - break; - } - if (cs->singlestep_enabled) { - break; - } - if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) { - break; - } - } while (ctx.bstate == BS_NONE && !tcg_op_buf_full()); - - if (cs->singlestep_enabled) { - tcg_gen_movi_tl(cpu_pc, ctx.pc); - gen_helper_debug(cpu_env); - } else { - switch (ctx.bstate) { - case BS_STOP: - case BS_NONE: - gen_goto_tb(env, &ctx, 0, ctx.pc); - break; - case BS_EXCP: - tcg_gen_exit_tb(NULL, 0); - break; - case BS_BRANCH: - default: - break; - } - } - done_generating: - gen_tb_end(tb, num_insns); - - tb->size = ctx.pc - pc_start; - tb->icount = num_insns; -} - -void restore_state_to_opc(CPUMoxieState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; -} diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c index 41390d046f..48674231e7 100644 --- a/target/openrisc/sys_helper.c +++ b/target/openrisc/sys_helper.c @@ -23,7 +23,6 @@ #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "exception.h" -#include "sysemu/sysemu.h" #ifndef CONFIG_USER_ONLY #include "hw/boards.h" #endif diff --git a/target/ppc/arch_dump.c b/target/ppc/arch_dump.c index 9ab04b2c38..9210e61ef4 100644 --- a/target/ppc/arch_dump.c +++ b/target/ppc/arch_dump.c @@ -17,7 +17,6 @@ #include "elf.h" #include "sysemu/dump.h" #include "sysemu/kvm.h" -#include "exec/helper-proto.h" #ifdef TARGET_PPC64 #define ELFCLASS ELFCLASS64 @@ -176,7 +175,7 @@ static void ppc_write_elf_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu) vmxregset->avr[i].u64[1] = avr->u64[1]; } } - vmxregset->vscr.u32[3] = cpu_to_dump32(s, helper_mfvscr(&cpu->env)); + vmxregset->vscr.u32[3] = cpu_to_dump32(s, ppc_get_vscr(&cpu->env)); } static void ppc_write_elf_vsxregset(NoteFuncArg *arg, PowerPCCPU *cpu) diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h index 118baf8d41..06b6571bc9 100644 --- a/target/ppc/cpu-qom.h +++ b/target/ppc/cpu-qom.h @@ -116,6 +116,8 @@ enum powerpc_excp_t { POWERPC_EXCP_POWER8, /* POWER9 exception model */ 
POWERPC_EXCP_POWER9, + /* POWER10 exception model */ + POWERPC_EXCP_POWER10, }; /*****************************************************************************/ diff --git a/target/ppc/cpu.c b/target/ppc/cpu.c index e501a7ff6f..d957d1a687 100644 --- a/target/ppc/cpu.c +++ b/target/ppc/cpu.c @@ -20,6 +20,10 @@ #include "qemu/osdep.h" #include "cpu.h" #include "cpu-models.h" +#include "cpu-qom.h" +#include "exec/log.h" +#include "fpu/softfloat-helpers.h" +#include "mmu-hash64.h" target_ulong cpu_read_xer(CPUPPCState *env) { @@ -45,3 +49,46 @@ void cpu_write_xer(CPUPPCState *env, target_ulong xer) (1ul << XER_OV) | (1ul << XER_CA) | (1ul << XER_OV32) | (1ul << XER_CA32)); } + +void ppc_store_vscr(CPUPPCState *env, uint32_t vscr) +{ + env->vscr = vscr & ~(1u << VSCR_SAT); + /* Which bit we set is completely arbitrary, but clear the rest. */ + env->vscr_sat.u64[0] = vscr & (1u << VSCR_SAT); + env->vscr_sat.u64[1] = 0; + set_flush_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status); +} + +uint32_t ppc_get_vscr(CPUPPCState *env) +{ + uint32_t sat = (env->vscr_sat.u64[0] | env->vscr_sat.u64[1]) != 0; + return env->vscr | (sat << VSCR_SAT); +} + +#ifdef CONFIG_SOFTMMU +void ppc_store_sdr1(CPUPPCState *env, target_ulong value) +{ + PowerPCCPU *cpu = env_archcpu(env); + qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value); + assert(!cpu->vhyp); +#if defined(TARGET_PPC64) + if (mmu_is_64bit(env->mmu_model)) { + target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE; + target_ulong htabsize = value & SDR_64_HTABSIZE; + + if (value & ~sdr_mask) { + error_report("Invalid bits 0x"TARGET_FMT_lx" set in SDR1", + value & ~sdr_mask); + value &= sdr_mask; + } + if (htabsize > 28) { + error_report("Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1", + htabsize); + return; + } + } +#endif /* defined(TARGET_PPC64) */ + /* FIXME: Should check for valid HTABMASK values in 32-bit case */ + env->spr[SPR_SDR1] = value; +} +#endif /* CONFIG_SOFTMMU */ diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index e73416da68..cab33a3680 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -131,11 +131,7 @@ enum { POWERPC_EXCP_SYSCALL_VECTORED = 102, /* scv exception */ /* EOL */ POWERPC_EXCP_NB = 103, - /* QEMU exceptions: used internally during code translation */ - POWERPC_EXCP_STOP = 0x200, /* stop translation */ - POWERPC_EXCP_BRANCH = 0x201, /* branch instruction */ /* QEMU exceptions: special cases we want to stop translation */ - POWERPC_EXCP_SYNC = 0x202, /* context synchronizing instruction */ POWERPC_EXCP_SYSCALL_USER = 0x203, /* System call in user mode only */ }; @@ -192,17 +188,21 @@ typedef struct ppc_hash_pte64 ppc_hash_pte64_t; /* SPR access micro-ops generations callbacks */ struct ppc_spr_t { + const char *name; + target_ulong default_value; +#ifndef CONFIG_USER_ONLY + unsigned int gdb_id; +#endif +#ifdef CONFIG_TCG void (*uea_read)(DisasContext *ctx, int gpr_num, int spr_num); void (*uea_write)(DisasContext *ctx, int spr_num, int gpr_num); -#if !defined(CONFIG_USER_ONLY) +# ifndef CONFIG_USER_ONLY void (*oea_read)(DisasContext *ctx, int gpr_num, int spr_num); void (*oea_write)(DisasContext *ctx, int spr_num, int gpr_num); void (*hea_read)(DisasContext *ctx, int gpr_num, int spr_num); void (*hea_write)(DisasContext *ctx, int spr_num, int gpr_num); - unsigned int gdb_id; +# endif #endif - const char *name; - target_ulong default_value; #ifdef CONFIG_KVM /* * We (ab)use the fact that all the SPRs will have ids for the @@ -322,13 +322,13 @@ typedef struct ppc_v3_pate_t { #define MSR_PR 14 /* 
Problem state hflags */ #define MSR_FP 13 /* Floating point available hflags */ #define MSR_ME 12 /* Machine check interrupt enable */ -#define MSR_FE0 11 /* Floating point exception mode 0 hflags */ +#define MSR_FE0 11 /* Floating point exception mode 0 */ #define MSR_SE 10 /* Single-step trace enable x hflags */ #define MSR_DWE 10 /* Debug wait enable on 405 x */ #define MSR_UBLE 10 /* User BTB lock enable on e500 x */ #define MSR_BE 9 /* Branch trace enable x hflags */ #define MSR_DE 9 /* Debug interrupts enable on embedded PowerPC x */ -#define MSR_FE1 8 /* Floating point exception mode 1 hflags */ +#define MSR_FE1 8 /* Floating point exception mode 1 */ #define MSR_AL 7 /* AL bit on POWER */ #define MSR_EP 6 /* Exception prefix on 601 */ #define MSR_IR 5 /* Instruction relocate */ @@ -354,10 +354,11 @@ typedef struct ppc_v3_pate_t { #define LPCR_PECE_U_SHIFT (63 - 19) #define LPCR_PECE_U_MASK (0x7ull << LPCR_PECE_U_SHIFT) #define LPCR_HVEE PPC_BIT(17) /* Hypervisor Virt Exit Enable */ -#define LPCR_RMLS_SHIFT (63 - 37) +#define LPCR_RMLS_SHIFT (63 - 37) /* RMLS (removed in ISA v3.0) */ #define LPCR_RMLS (0xfull << LPCR_RMLS_SHIFT) +#define LPCR_HAIL PPC_BIT(37) /* ISA v3.1 HV AIL=3 equivalent */ #define LPCR_ILE PPC_BIT(38) -#define LPCR_AIL_SHIFT (63 - 40) /* Alternate interrupt location */ +#define LPCR_AIL_SHIFT (63 - 40) /* Alternate interrupt location */ #define LPCR_AIL (3ull << LPCR_AIL_SHIFT) #define LPCR_UPRT PPC_BIT(41) /* Use Process Table */ #define LPCR_EVIRT PPC_BIT(42) /* Enhanced Virtualisation */ @@ -581,6 +582,34 @@ enum { POWERPC_FLAG_TM = 0x00100000, /* Has SCV (ISA 3.00) */ POWERPC_FLAG_SCV = 0x00200000, + /* Has HID0 for LE bit (601) */ + POWERPC_FLAG_HID0_LE = 0x00400000, +}; + +/* + * Bits for env->hflags. + * + * Most of these bits overlap with corresponding bits in MSR, + * but some come from other sources. Those that do come from + * the MSR are validated in hreg_compute_hflags. 
+ */ +enum { + HFLAGS_LE = 0, /* MSR_LE -- comes from elsewhere on 601 */ + HFLAGS_HV = 1, /* computed from MSR_HV and other state */ + HFLAGS_64 = 2, /* computed from MSR_CE and MSR_SF */ + HFLAGS_GTSE = 3, /* computed from SPR_LPCR[GTSE] */ + HFLAGS_DR = 4, /* MSR_DR */ + HFLAGS_SPE = 6, /* from MSR_SPE if cpu has SPE; avoid overlap w/ MSR_VR */ + HFLAGS_TM = 8, /* computed from MSR_TM */ + HFLAGS_BE = 9, /* MSR_BE -- from elsewhere on embedded ppc */ + HFLAGS_SE = 10, /* MSR_SE -- from elsewhere on embedded ppc */ + HFLAGS_FP = 13, /* MSR_FP */ + HFLAGS_PR = 14, /* MSR_PR */ + HFLAGS_VSX = 23, /* MSR_VSX if cpu has VSX */ + HFLAGS_VR = 25, /* MSR_VR if cpu has VRE */ + + HFLAGS_IMMU_IDX = 26, /* 26..28 -- the composite immu_idx */ + HFLAGS_DMMU_IDX = 29, /* 29..31 -- the composite dmmu_idx */ }; /*****************************************************************************/ @@ -1102,11 +1131,9 @@ struct CPUPPCState { bool resume_as_sreset; #endif - /* These resources are used only in QEMU core */ - target_ulong hflags; /* hflags is MSR & HFLAGS_MASK */ - target_ulong hflags_nmsr; /* specific hflags, not coming from MSR */ - int immu_idx; /* precomputed MMU index to speed up insn accesses */ - int dmmu_idx; /* precomputed MMU index to speed up data accesses */ + /* These resources are used only in TCG */ + uint32_t hflags; + target_ulong hflags_compat_nmsr; /* for migration compatibility */ /* Power management */ int (*check_pow)(CPUPPCState *env); @@ -1266,6 +1293,7 @@ void ppc_store_sdr1(CPUPPCState *env, target_ulong value); void ppc_store_ptcr(CPUPPCState *env, target_ulong value); #endif /* !defined(CONFIG_USER_ONLY) */ void ppc_store_msr(CPUPPCState *env, target_ulong value); +void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val); void ppc_cpu_list(void); @@ -1342,7 +1370,11 @@ int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val); #define MMU_USER_IDX 0 static inline int cpu_mmu_index(CPUPPCState *env, bool ifetch) { - return ifetch ? env->immu_idx : env->dmmu_idx; +#ifdef CONFIG_USER_ONLY + return MMU_USER_IDX; +#else + return (env->hflags >> (ifetch ? 
HFLAGS_IMMU_IDX : HFLAGS_DMMU_IDX)) & 7; +#endif } /* Compatibility modes */ @@ -1459,10 +1491,10 @@ typedef PowerPCCPU ArchCPU; #define SPR_MPC_BAR (0x09F) #define SPR_PSPB (0x09F) #define SPR_DPDES (0x0B0) -#define SPR_DAWR (0x0B4) +#define SPR_DAWR0 (0x0B4) #define SPR_RPR (0x0BA) #define SPR_CIABR (0x0BB) -#define SPR_DAWRX (0x0BC) +#define SPR_DAWRX0 (0x0BC) #define SPR_HFSCR (0x0BE) #define SPR_VRSAVE (0x100) #define SPR_USPRG0 (0x100) @@ -2375,14 +2407,6 @@ enum { HMER_XSCOM_STATUS_MASK = PPC_BITMASK(21, 23), }; -/* Alternate Interrupt Location (AIL) */ -enum { - AIL_NONE = 0, - AIL_RESERVED = 1, - AIL_0001_8000 = 2, - AIL_C000_0000_0000_4000 = 3, -}; - /*****************************************************************************/ #define is_isa300(ctx) (!!(ctx->insns_flags2 & PPC2_ISA300)) @@ -2395,6 +2419,10 @@ void cpu_write_xer(CPUPPCState *env, target_ulong xer); */ #define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B)) +#ifdef CONFIG_DEBUG_TCG +void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *flags); +#else static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) { @@ -2402,6 +2430,7 @@ static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc, *cs_base = 0; *flags = env->hflags; } +#endif void QEMU_NORETURN raise_exception(CPUPPCState *env, uint32_t exception); void QEMU_NORETURN raise_exception_ra(CPUPPCState *env, uint32_t exception, @@ -2609,7 +2638,15 @@ static inline ppc_avr_t *cpu_avr_ptr(CPUPPCState *env, int i) return (ppc_avr_t *)((uintptr_t)env + avr_full_offset(i)); } +static inline bool ppc_has_spr(PowerPCCPU *cpu, int spr) +{ + /* We can test whether the SPR is defined by checking for a valid name */ + return cpu->env.spr_cb[spr].name != NULL; +} + void dump_mmu(CPUPPCState *env); void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len); +void ppc_store_vscr(CPUPPCState *env, uint32_t vscr); +uint32_t ppc_get_vscr(CPUPPCState *env); #endif /* PPC_CPU_H */ diff --git a/target/ppc/translate_init.c.inc b/target/ppc/cpu_init.c index c03a7c4f52..22ecbccad8 100644 --- a/target/ppc/translate_init.c.inc +++ b/target/ppc/cpu_init.c @@ -18,6 +18,7 @@ * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ +#include "qemu/osdep.h" #include "disas/dis-asm.h" #include "exec/gdbstub.h" #include "kvm_ppc.h" @@ -42,785 +43,115 @@ #include "fpu/softfloat.h" #include "qapi/qapi-commands-machine-target.h" -/* #define PPC_DUMP_CPU */ +#include "exec/helper-proto.h" +#include "helper_regs.h" +#include "internal.h" +#include "spr_tcg.h" + /* #define PPC_DEBUG_SPR */ -/* #define PPC_DUMP_SPR_ACCESSES */ /* #define USE_APPLE_GDB */ -/* - * Generic callbacks: - * do nothing but store/retrieve spr value - */ -static void spr_load_dump_spr(int sprn) -{ -#ifdef PPC_DUMP_SPR_ACCESSES - TCGv_i32 t0 = tcg_const_i32(sprn); - gen_helper_load_dump_spr(cpu_env, t0); - tcg_temp_free_i32(t0); -#endif -} - -static void spr_read_generic(DisasContext *ctx, int gprn, int sprn) -{ - gen_load_spr(cpu_gpr[gprn], sprn); - spr_load_dump_spr(sprn); -} - -static void spr_store_dump_spr(int sprn) -{ -#ifdef PPC_DUMP_SPR_ACCESSES - TCGv_i32 t0 = tcg_const_i32(sprn); - gen_helper_store_dump_spr(cpu_env, t0); - tcg_temp_free_i32(t0); -#endif -} - -static void spr_write_generic(DisasContext *ctx, int sprn, int gprn) -{ - gen_store_spr(sprn, cpu_gpr[gprn]); - spr_store_dump_spr(sprn); -} - -#if !defined(CONFIG_USER_ONLY) -static void spr_write_generic32(DisasContext *ctx, int sprn, int gprn) -{ -#ifdef TARGET_PPC64 - TCGv t0 = tcg_temp_new(); - tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]); - gen_store_spr(sprn, t0); - tcg_temp_free(t0); - spr_store_dump_spr(sprn); -#else - spr_write_generic(ctx, sprn, gprn); -#endif -} - -static void spr_write_clear(DisasContext *ctx, int sprn, int gprn) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - gen_load_spr(t0, sprn); - tcg_gen_neg_tl(t1, cpu_gpr[gprn]); - tcg_gen_and_tl(t0, t0, t1); - gen_store_spr(sprn, t0); - tcg_temp_free(t0); - tcg_temp_free(t1); -} - -static void spr_access_nop(DisasContext *ctx, int sprn, int gprn) -{ -} - -#endif - -/* SPR common to all PowerPC */ -/* XER */ -static void spr_read_xer(DisasContext *ctx, int gprn, int sprn) -{ - gen_read_xer(ctx, cpu_gpr[gprn]); -} - -static void spr_write_xer(DisasContext *ctx, int sprn, int gprn) -{ - gen_write_xer(cpu_gpr[gprn]); -} - -/* LR */ -static void spr_read_lr(DisasContext *ctx, int gprn, int sprn) -{ - tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr); -} - -static void spr_write_lr(DisasContext *ctx, int sprn, int gprn) -{ - tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]); -} - -/* CFAR */ -#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) -static void spr_read_cfar(DisasContext *ctx, int gprn, int sprn) -{ - tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar); -} - -static void spr_write_cfar(DisasContext *ctx, int sprn, int gprn) -{ - tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]); -} -#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */ - -/* CTR */ -static void spr_read_ctr(DisasContext *ctx, int gprn, int sprn) -{ - tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr); -} - -static void spr_write_ctr(DisasContext *ctx, int sprn, int gprn) -{ - tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]); -} - -/* User read access to SPR */ -/* USPRx */ -/* UMMCRx */ -/* UPMCx */ -/* USIA */ -/* UDECR */ -static void spr_read_ureg(DisasContext *ctx, int gprn, int sprn) -{ - gen_load_spr(cpu_gpr[gprn], sprn + 0x10); -} - -#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) -static void spr_write_ureg(DisasContext *ctx, int sprn, int gprn) -{ - gen_store_spr(sprn + 0x10, cpu_gpr[gprn]); -} -#endif - -/* SPR common to all non-embedded PowerPC */ -/* DECR */ -#if !defined(CONFIG_USER_ONLY) -static void spr_read_decr(DisasContext *ctx, int gprn, int sprn) -{ - if 
(tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_load_decr(cpu_gpr[gprn], cpu_env); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} - -static void spr_write_decr(DisasContext *ctx, int sprn, int gprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_store_decr(cpu_env, cpu_gpr[gprn]); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} -#endif - -/* SPR common to all non-embedded PowerPC, except 601 */ -/* Time base */ -static void spr_read_tbl(DisasContext *ctx, int gprn, int sprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_load_tbl(cpu_gpr[gprn], cpu_env); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - gen_stop_exception(ctx); - } -} - -static void spr_read_tbu(DisasContext *ctx, int gprn, int sprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_load_tbu(cpu_gpr[gprn], cpu_env); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - gen_stop_exception(ctx); - } -} - -ATTRIBUTE_UNUSED -static void spr_read_atbl(DisasContext *ctx, int gprn, int sprn) -{ - gen_helper_load_atbl(cpu_gpr[gprn], cpu_env); -} - -ATTRIBUTE_UNUSED -static void spr_read_atbu(DisasContext *ctx, int gprn, int sprn) -{ - gen_helper_load_atbu(cpu_gpr[gprn], cpu_env); -} - -#if !defined(CONFIG_USER_ONLY) -static void spr_write_tbl(DisasContext *ctx, int sprn, int gprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - gen_stop_exception(ctx); - } -} - -static void spr_write_tbu(DisasContext *ctx, int sprn, int gprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - gen_stop_exception(ctx); - } -} - -ATTRIBUTE_UNUSED -static void spr_write_atbl(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]); -} - -ATTRIBUTE_UNUSED -static void spr_write_atbu(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]); -} - -#if defined(TARGET_PPC64) -ATTRIBUTE_UNUSED -static void spr_read_purr(DisasContext *ctx, int gprn, int sprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_load_purr(cpu_gpr[gprn], cpu_env); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} - -static void spr_write_purr(DisasContext *ctx, int sprn, int gprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_store_purr(cpu_env, cpu_gpr[gprn]); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} - -/* HDECR */ -static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - gen_stop_exception(ctx); - } -} - -static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - gen_stop_exception(ctx); - } -} - -static void spr_read_vtb(DisasContext *ctx, int gprn, int 
sprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_load_vtb(cpu_gpr[gprn], cpu_env); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} - -static void spr_write_vtb(DisasContext *ctx, int sprn, int gprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} - -static void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} - -#endif -#endif - -#if !defined(CONFIG_USER_ONLY) -/* IBAT0U...IBAT0U */ -/* IBAT0L...IBAT7L */ -static void spr_read_ibat(DisasContext *ctx, int gprn, int sprn) -{ - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, - offsetof(CPUPPCState, - IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); -} - -static void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn) -{ - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, - offsetof(CPUPPCState, - IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4])); -} - -static void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); - gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} - -static void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4); - gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} - -static void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2); - gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} - -static void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4); - gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} - -/* DBAT0U...DBAT7U */ -/* DBAT0L...DBAT7L */ -static void spr_read_dbat(DisasContext *ctx, int gprn, int sprn) -{ - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, - offsetof(CPUPPCState, - DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2])); -} - -static void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn) -{ - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, - offsetof(CPUPPCState, - DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4])); -} - -static void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2); - gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} - -static void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4); - gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} - -static void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2); - gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} - -static void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4); - gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} - -/* SDR1 */ -static void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]); -} - -#if defined(TARGET_PPC64) -/* 64 bits PowerPC 
specific SPRs */ -/* PIDR */ -static void spr_write_pidr(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]); -} - -static void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]); -} - -static void spr_read_hior(DisasContext *ctx, int gprn, int sprn) -{ - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix)); -} - -static void spr_write_hior(DisasContext *ctx, int sprn, int gprn) -{ - TCGv t0 = tcg_temp_new(); - tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL); - tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix)); - tcg_temp_free(t0); -} -static void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]); -} - -static void spr_write_pcr(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]); -} - -/* DPDES */ -static void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn) -{ - gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env); -} - -static void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]); -} -#endif -#endif - -/* PowerPC 601 specific registers */ -/* RTC */ -static void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn) -{ - gen_helper_load_601_rtcl(cpu_gpr[gprn], cpu_env); -} - -static void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn) -{ - gen_helper_load_601_rtcu(cpu_gpr[gprn], cpu_env); -} - -#if !defined(CONFIG_USER_ONLY) -static void spr_write_601_rtcu(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_601_rtcu(cpu_env, cpu_gpr[gprn]); -} - -static void spr_write_601_rtcl(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_601_rtcl(cpu_env, cpu_gpr[gprn]); -} - -static void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_hid0_601(cpu_env, cpu_gpr[gprn]); - /* Must stop the translation as endianness may have changed */ - gen_stop_exception(ctx); -} -#endif - -/* Unified bats */ -#if !defined(CONFIG_USER_ONLY) -static void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn) -{ - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, - offsetof(CPUPPCState, - IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); -} - -static void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); - gen_helper_store_601_batl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} - -static void spr_write_601_ubatl(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); - gen_helper_store_601_batu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} -#endif - -/* PowerPC 40x specific registers */ -#if !defined(CONFIG_USER_ONLY) -static void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} - -static void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} - -static void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_store_spr(sprn, cpu_gpr[gprn]); - 
gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]); - /* We must stop translation as we may have rebooted */ - gen_stop_exception(ctx); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} - -static void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} - -static void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} - -static void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn) -{ - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } -} -#endif - -/* PowerPC 403 specific registers */ -/* PBL1 / PBU1 / PBL2 / PBU2 */ -#if !defined(CONFIG_USER_ONLY) -static void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn) -{ - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, - offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1])); -} - -static void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32(sprn - SPR_403_PBL1); - gen_helper_store_403_pbr(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} - -static void spr_write_pir(DisasContext *ctx, int sprn, int gprn) -{ - TCGv t0 = tcg_temp_new(); - tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF); - gen_store_spr(SPR_PIR, t0); - tcg_temp_free(t0); -} -#endif - -/* SPE specific registers */ -static void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn) -{ - TCGv_i32 t0 = tcg_temp_new_i32(); - tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr)); - tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0); - tcg_temp_free_i32(t0); -} - -static void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_temp_new_i32(); - tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]); - tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr)); - tcg_temp_free_i32(t0); -} - -#if !defined(CONFIG_USER_ONLY) -/* Callback used to write the exception vector base */ -static void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn) -{ - TCGv t0 = tcg_temp_new(); - tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask)); - tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]); - tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix)); - gen_store_spr(sprn, t0); - tcg_temp_free(t0); -} - -static void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn) -{ - int sprn_offs; - - if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) { - sprn_offs = sprn - SPR_BOOKE_IVOR0; - } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) { - sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32; - } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) { - sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38; - } else { - printf("Trying to write an unknown exception vector %d %03x\n", - sprn, sprn); - gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); - return; - } - - TCGv t0 = tcg_temp_new(); - tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask)); - tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]); - tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs])); - gen_store_spr(sprn, t0); - tcg_temp_free(t0); -} -#endif 
- static inline void vscr_init(CPUPPCState *env, uint32_t val) { /* Altivec always uses round-to-nearest */ set_float_rounding_mode(float_round_nearest_even, &env->vec_status); - helper_mtvscr(env, val); + ppc_store_vscr(env, val); } -#ifdef CONFIG_USER_ONLY -#define spr_register_kvm(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, one_reg_id, initial_value) \ - _spr_register(env, num, name, uea_read, uea_write, initial_value) -#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, hea_read, hea_write, \ - one_reg_id, initial_value) \ - _spr_register(env, num, name, uea_read, uea_write, initial_value) -#else -#if !defined(CONFIG_KVM) -#define spr_register_kvm(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, one_reg_id, initial_value) \ - _spr_register(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, oea_read, oea_write, initial_value) -#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, hea_read, hea_write, \ - one_reg_id, initial_value) \ - _spr_register(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, hea_read, hea_write, initial_value) +/** + * _spr_register + * + * Register an SPR with all the callbacks required for tcg, + * and the ID number for KVM. + * + * The reason for the conditional compilation is that the tcg functions + * may be compiled out, and the system kvm header may not be available + * for supplying the ID numbers. This is ugly, but the best we can do. + */ + +#ifdef CONFIG_TCG +# define USR_ARG(X) X, +# ifdef CONFIG_USER_ONLY +# define SYS_ARG(X) +# else +# define SYS_ARG(X) X, +# endif #else -#define spr_register_kvm(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, one_reg_id, initial_value) \ - _spr_register(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, oea_read, oea_write, \ - one_reg_id, initial_value) -#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, hea_read, hea_write, \ - one_reg_id, initial_value) \ - _spr_register(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, hea_read, hea_write, \ - one_reg_id, initial_value) +# define USR_ARG(X) +# define SYS_ARG(X) #endif +#ifdef CONFIG_KVM +# define KVM_ARG(X) X, +#else +# define KVM_ARG(X) #endif -#define spr_register(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, initial_value) \ - spr_register_kvm(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, 0, initial_value) - -#define spr_register_hv(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, hea_read, hea_write, \ - initial_value) \ - spr_register_kvm_hv(env, num, name, uea_read, uea_write, \ - oea_read, oea_write, hea_read, hea_write, \ - 0, initial_value) - -static inline void _spr_register(CPUPPCState *env, int num, - const char *name, - void (*uea_read)(DisasContext *ctx, - int gprn, int sprn), - void (*uea_write)(DisasContext *ctx, - int sprn, int gprn), -#if !defined(CONFIG_USER_ONLY) +typedef void spr_callback(DisasContext *, int, int); - void (*oea_read)(DisasContext *ctx, - int gprn, int sprn), - void (*oea_write)(DisasContext *ctx, - int sprn, int gprn), - void (*hea_read)(DisasContext *opaque, - int gprn, int sprn), - void (*hea_write)(DisasContext *opaque, - int sprn, int gprn), -#endif -#if defined(CONFIG_KVM) - uint64_t one_reg_id, -#endif - target_ulong initial_value) +static void _spr_register(CPUPPCState *env, int num, const char *name, + USR_ARG(spr_callback *uea_read) + USR_ARG(spr_callback *uea_write) + 
SYS_ARG(spr_callback *oea_read) + SYS_ARG(spr_callback *oea_write) + SYS_ARG(spr_callback *hea_read) + SYS_ARG(spr_callback *hea_write) + KVM_ARG(uint64_t one_reg_id) + target_ulong initial_value) { - ppc_spr_t *spr; + ppc_spr_t *spr = &env->spr_cb[num]; + + /* No SPR should be registered twice. */ + assert(spr->name == NULL); + assert(name != NULL); - spr = &env->spr_cb[num]; - if (spr->name != NULL || env->spr[num] != 0x00000000 || -#if !defined(CONFIG_USER_ONLY) - spr->oea_read != NULL || spr->oea_write != NULL || -#endif - spr->uea_read != NULL || spr->uea_write != NULL) { - printf("Error: Trying to register SPR %d (%03x) twice !\n", num, num); - exit(1); - } -#if defined(PPC_DEBUG_SPR) - printf("*** register spr %d (%03x) %s val " TARGET_FMT_lx "\n", num, num, - name, initial_value); -#endif spr->name = name; + spr->default_value = initial_value; + env->spr[num] = initial_value; + +#ifdef CONFIG_TCG spr->uea_read = uea_read; spr->uea_write = uea_write; -#if !defined(CONFIG_USER_ONLY) +# ifndef CONFIG_USER_ONLY spr->oea_read = oea_read; spr->oea_write = oea_write; spr->hea_read = hea_read; spr->hea_write = hea_write; +# endif #endif -#if defined(CONFIG_KVM) - spr->one_reg_id = one_reg_id, +#ifdef CONFIG_KVM + spr->one_reg_id = one_reg_id; #endif - env->spr[num] = spr->default_value = initial_value; } +/* spr_register_kvm_hv passes all required arguments. */ +#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, hea_read, hea_write, \ + one_reg_id, initial_value) \ + _spr_register(env, num, name, \ + USR_ARG(uea_read) USR_ARG(uea_write) \ + SYS_ARG(oea_read) SYS_ARG(oea_write) \ + SYS_ARG(hea_read) SYS_ARG(hea_write) \ + KVM_ARG(one_reg_id) initial_value) + +/* spr_register_kvm duplicates the oea callbacks to the hea callbacks. */ +#define spr_register_kvm(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, one_reg_id, ival) \ + spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read, \ + oea_write, oea_read, oea_write, one_reg_id, ival) + +/* spr_register_hv and spr_register are similar, except there is no kvm id. 
*/ +#define spr_register_hv(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, hea_read, hea_write, ival) \ + spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read, \ + oea_write, hea_read, hea_write, 0, ival) + +#define spr_register(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, ival) \ + spr_register_kvm(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, 0, ival) + /* Generic PowerPC SPRs */ -static void gen_spr_generic(CPUPPCState *env) +static void register_generic_sprs(CPUPPCState *env) { /* Integer processing */ spr_register(env, SPR_XER, "XER", @@ -865,7 +196,7 @@ static void gen_spr_generic(CPUPPCState *env) } /* SPR common to all non-embedded PowerPC, including 601 */ -static void gen_spr_ne_601(CPUPPCState *env) +static void register_ne_601_sprs(CPUPPCState *env) { /* Exception processing */ spr_register_kvm(env, SPR_DSISR, "DSISR", @@ -884,7 +215,7 @@ static void gen_spr_ne_601(CPUPPCState *env) } /* Storage Description Register 1 */ -static void gen_spr_sdr1(CPUPPCState *env) +static void register_sdr1_sprs(CPUPPCState *env) { #ifndef CONFIG_USER_ONLY if (env->has_hv_mode) { @@ -907,7 +238,7 @@ static void gen_spr_sdr1(CPUPPCState *env) } /* BATs 0-3 */ -static void gen_low_BATs(CPUPPCState *env) +static void register_low_BATs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) spr_register(env, SPR_IBAT0U, "IBAT0U", @@ -979,7 +310,7 @@ static void gen_low_BATs(CPUPPCState *env) } /* BATs 4-7 */ -static void gen_high_BATs(CPUPPCState *env) +static void register_high_BATs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) spr_register(env, SPR_IBAT4U, "IBAT4U", @@ -1051,7 +382,7 @@ static void gen_high_BATs(CPUPPCState *env) } /* Generic PowerPC time base */ -static void gen_tbl(CPUPPCState *env) +static void register_tbl(CPUPPCState *env) { spr_register(env, SPR_VTBL, "TBL", &spr_read_tbl, SPR_NOACCESS, @@ -1072,7 +403,7 @@ static void gen_tbl(CPUPPCState *env) } /* Software table search registers */ -static void gen_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) +static void register_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) { #if !defined(CONFIG_USER_ONLY) env->nb_tlb = nb_tlbs; @@ -1111,7 +442,7 @@ static void gen_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) } /* SPR common to MPC755 and G2 */ -static void gen_spr_G2_755(CPUPPCState *env) +static void register_G2_755_sprs(CPUPPCState *env) { /* SGPRs */ spr_register(env, SPR_SPRG4, "SPRG4", @@ -1133,7 +464,7 @@ static void gen_spr_G2_755(CPUPPCState *env) } /* SPR common to all 7xx PowerPC implementations */ -static void gen_spr_7xx(CPUPPCState *env) +static void register_7xx_sprs(CPUPPCState *env) { /* Breakpoints */ /* XXX : not implemented */ @@ -1232,106 +563,7 @@ static void gen_spr_7xx(CPUPPCState *env) } #ifdef TARGET_PPC64 -#ifndef CONFIG_USER_ONLY -static void spr_write_amr(DisasContext *ctx, int sprn, int gprn) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - - /* - * Note, the HV=1 PR=0 case is handled earlier by simply using - * spr_write_generic for HV mode in the SPR table - */ - - /* Build insertion mask into t1 based on context */ - if (ctx->pr) { - gen_load_spr(t1, SPR_UAMOR); - } else { - gen_load_spr(t1, SPR_AMOR); - } - - /* Mask new bits into t2 */ - tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]); - - /* Load AMR and clear new bits in t0 */ - gen_load_spr(t0, SPR_AMR); - tcg_gen_andc_tl(t0, t0, t1); - - /* Or'in new bits and write it out */ - tcg_gen_or_tl(t0, t0, t2); - 
gen_store_spr(SPR_AMR, t0); - spr_store_dump_spr(SPR_AMR); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); -} - -static void spr_write_uamor(DisasContext *ctx, int sprn, int gprn) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - - /* - * Note, the HV=1 case is handled earlier by simply using - * spr_write_generic for HV mode in the SPR table - */ - - /* Build insertion mask into t1 based on context */ - gen_load_spr(t1, SPR_AMOR); - - /* Mask new bits into t2 */ - tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]); - - /* Load AMR and clear new bits in t0 */ - gen_load_spr(t0, SPR_UAMOR); - tcg_gen_andc_tl(t0, t0, t1); - - /* Or'in new bits and write it out */ - tcg_gen_or_tl(t0, t0, t2); - gen_store_spr(SPR_UAMOR, t0); - spr_store_dump_spr(SPR_UAMOR); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); -} - -static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - - /* - * Note, the HV=1 case is handled earlier by simply using - * spr_write_generic for HV mode in the SPR table - */ - - /* Build insertion mask into t1 based on context */ - gen_load_spr(t1, SPR_AMOR); - - /* Mask new bits into t2 */ - tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]); - - /* Load AMR and clear new bits in t0 */ - gen_load_spr(t0, SPR_IAMR); - tcg_gen_andc_tl(t0, t0, t1); - - /* Or'in new bits and write it out */ - tcg_gen_or_tl(t0, t0, t2); - gen_store_spr(SPR_IAMR, t0); - spr_store_dump_spr(SPR_IAMR); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); -} -#endif /* CONFIG_USER_ONLY */ - -static void gen_spr_amr(CPUPPCState *env) +static void register_amr_sprs(CPUPPCState *env) { #ifndef CONFIG_USER_ONLY /* @@ -1363,7 +595,7 @@ static void gen_spr_amr(CPUPPCState *env) #endif /* !CONFIG_USER_ONLY */ } -static void gen_spr_iamr(CPUPPCState *env) +static void register_iamr_sprs(CPUPPCState *env) { #ifndef CONFIG_USER_ONLY spr_register_kvm_hv(env, SPR_IAMR, "IAMR", @@ -1375,16 +607,7 @@ static void gen_spr_iamr(CPUPPCState *env) } #endif /* TARGET_PPC64 */ -#ifndef CONFIG_USER_ONLY -static void spr_read_thrm(DisasContext *ctx, int gprn, int sprn) -{ - gen_helper_fixup_thrm(cpu_env); - gen_load_spr(cpu_gpr[gprn], sprn); - spr_load_dump_spr(sprn); -} -#endif /* !CONFIG_USER_ONLY */ - -static void gen_spr_thrm(CPUPPCState *env) +static void register_thrm_sprs(CPUPPCState *env) { /* Thermal management */ /* XXX : not implemented */ @@ -1405,7 +628,7 @@ static void gen_spr_thrm(CPUPPCState *env) } /* SPR specific to PowerPC 604 implementation */ -static void gen_spr_604(CPUPPCState *env) +static void register_604_sprs(CPUPPCState *env) { /* Processor identification */ spr_register(env, SPR_PIR, "PIR", @@ -1458,7 +681,7 @@ static void gen_spr_604(CPUPPCState *env) } /* SPR specific to PowerPC 603 implementation */ -static void gen_spr_603(CPUPPCState *env) +static void register_603_sprs(CPUPPCState *env) { /* External access control */ /* XXX : not implemented */ @@ -1476,7 +699,7 @@ static void gen_spr_603(CPUPPCState *env) } /* SPR specific to PowerPC G2 implementation */ -static void gen_spr_G2(CPUPPCState *env) +static void register_G2_sprs(CPUPPCState *env) { /* Memory base address */ /* MBAR */ @@ -1528,7 +751,7 @@ static void gen_spr_G2(CPUPPCState *env) } /* SPR specific to PowerPC 602 implementation */ -static void gen_spr_602(CPUPPCState *env) +static void register_602_sprs(CPUPPCState *env) { /* ESA registers */ /* XXX : not implemented */ @@ -1576,7 
+799,7 @@ static void gen_spr_602(CPUPPCState *env) } /* SPR specific to PowerPC 601 implementation */ -static void gen_spr_601(CPUPPCState *env) +static void register_601_sprs(CPUPPCState *env) { /* Multiplication/division register */ /* MQ */ @@ -1652,7 +875,7 @@ static void gen_spr_601(CPUPPCState *env) #endif } -static void gen_spr_74xx(CPUPPCState *env) +static void register_74xx_sprs(CPUPPCState *env) { /* Processor identification */ spr_register(env, SPR_PIR, "PIR", @@ -1700,11 +923,9 @@ static void gen_spr_74xx(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); - /* Not strictly an SPR */ - vscr_init(env, 0x00010000); } -static void gen_l3_ctrl(CPUPPCState *env) +static void register_l3_ctrl(CPUPPCState *env) { /* L3CR */ /* XXX : not implemented */ @@ -1726,7 +947,7 @@ static void gen_l3_ctrl(CPUPPCState *env) 0x00000000); } -static void gen_74xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) +static void register_74xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) { #if !defined(CONFIG_USER_ONLY) env->nb_tlb = nb_tlbs; @@ -1751,58 +972,7 @@ static void gen_74xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) #endif } -#if !defined(CONFIG_USER_ONLY) -static void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn) -{ - TCGv t0 = tcg_temp_new(); - - tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE); - gen_store_spr(sprn, t0); - tcg_temp_free(t0); -} - -static void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn) -{ - TCGv t0 = tcg_temp_new(); - - tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE); - gen_store_spr(sprn, t0); - tcg_temp_free(t0); -} - -static void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn) -{ - TCGv t0 = tcg_temp_new(); - - tcg_gen_andi_tl(t0, cpu_gpr[gprn], - ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC)); - gen_store_spr(sprn, t0); - tcg_temp_free(t0); -} - -static void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]); -} - -static void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn) -{ - TCGv_i32 t0 = tcg_const_i32(sprn); - gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); -} -static void spr_write_eplc(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]); -} -static void spr_write_epsc(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]); -} - -#endif - -static void gen_spr_usprg3(CPUPPCState *env) +static void register_usprg3_sprs(CPUPPCState *env) { spr_register(env, SPR_USPRG3, "USPRG3", &spr_read_ureg, SPR_NOACCESS, @@ -1810,7 +980,7 @@ static void gen_spr_usprg3(CPUPPCState *env) 0x00000000); } -static void gen_spr_usprgh(CPUPPCState *env) +static void register_usprgh_sprs(CPUPPCState *env) { spr_register(env, SPR_USPRG4, "USPRG4", &spr_read_ureg, SPR_NOACCESS, @@ -1831,7 +1001,7 @@ static void gen_spr_usprgh(CPUPPCState *env) } /* PowerPC BookE SPR */ -static void gen_spr_BookE(CPUPPCState *env, uint64_t ivor_mask) +static void register_BookE_sprs(CPUPPCState *env, uint64_t ivor_mask) { const char *ivor_names[64] = { "IVOR0", "IVOR1", "IVOR2", "IVOR3", @@ -2007,7 +1177,8 @@ static void gen_spr_BookE(CPUPPCState *env, uint64_t ivor_mask) 0x00000000); } -static inline uint32_t gen_tlbncfg(uint32_t assoc, uint32_t minsize, +#if !defined(CONFIG_USER_ONLY) +static inline uint32_t register_tlbncfg(uint32_t assoc, uint32_t minsize, uint32_t 
maxsize, uint32_t flags, uint32_t nentries) { @@ -2016,9 +1187,10 @@ static inline uint32_t gen_tlbncfg(uint32_t assoc, uint32_t minsize, (maxsize << TLBnCFG_MAXSIZE_SHIFT) | flags | nentries; } +#endif /* !CONFIG_USER_ONLY */ /* BookE 2.06 storage control registers */ -static void gen_spr_BookE206(CPUPPCState *env, uint32_t mas_mask, +static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask, uint32_t *tlbncfg, uint32_t mmucfg) { #if !defined(CONFIG_USER_ONLY) @@ -2106,11 +1278,11 @@ static void gen_spr_BookE206(CPUPPCState *env, uint32_t mas_mask, } #endif - gen_spr_usprgh(env); + register_usprgh_sprs(env); } /* SPR specific to PowerPC 440 implementation */ -static void gen_spr_440(CPUPPCState *env) +static void register_440_sprs(CPUPPCState *env) { /* Cache control */ /* XXX : not implemented */ @@ -2251,7 +1423,7 @@ } /* SPR shared between PowerPC 40x implementations */ -static void gen_spr_40x(CPUPPCState *env) +static void register_40x_sprs(CPUPPCState *env) { /* Cache */ /* not emulated, as QEMU does not emulate caches */ @@ -2306,7 +1478,7 @@ static void gen_spr_40x(CPUPPCState *env) } /* SPR specific to PowerPC 405 implementation */ -static void gen_spr_405(CPUPPCState *env) +static void register_405_sprs(CPUPPCState *env) { /* MMU */ spr_register(env, SPR_40x_PID, "PID", @@ -2408,11 +1580,11 @@ static void gen_spr_405(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, spr_read_generic, &spr_write_generic, 0x00000000); - gen_spr_usprgh(env); + register_usprgh_sprs(env); } /* SPR shared between PowerPC 401 & 403 implementations */ -static void gen_spr_401_403(CPUPPCState *env) +static void register_401_403_sprs(CPUPPCState *env) { /* Time base */ spr_register(env, SPR_403_VTBL, "TBL", @@ -2440,7 +1612,7 @@ static void gen_spr_401_403(CPUPPCState *env) } /* SPR specific to PowerPC 401 implementation */ -static void gen_spr_401(CPUPPCState *env) +static void register_401_sprs(CPUPPCState *env) { /* Debug interface */ /* XXX : not implemented */ @@ -2482,9 +1654,9 @@ static void gen_spr_401(CPUPPCState *env) 0x00000000); } -static void gen_spr_401x2(CPUPPCState *env) +static void register_401x2_sprs(CPUPPCState *env) { - gen_spr_401(env); + register_401_sprs(env); spr_register(env, SPR_40x_PID, "PID", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -2496,7 +1668,7 @@ static void gen_spr_401x2(CPUPPCState *env) } /* SPR specific to PowerPC 403 implementation */ -static void gen_spr_403(CPUPPCState *env) +static void register_403_sprs(CPUPPCState *env) { /* Debug interface */ /* XXX : not implemented */ @@ -2532,7 +1704,7 @@ static void gen_spr_403(CPUPPCState *env) 0x00000000); } -static void gen_spr_403_real(CPUPPCState *env) +static void register_403_real_sprs(CPUPPCState *env) { spr_register(env, SPR_403_PBL1, "PBL1", SPR_NOACCESS, SPR_NOACCESS, @@ -2552,7 +1724,7 @@ static void gen_spr_403_real(CPUPPCState *env) 0x00000000); } -static void gen_spr_403_mmu(CPUPPCState *env) +static void register_403_mmu_sprs(CPUPPCState *env) { /* MMU */ spr_register(env, SPR_40x_PID, "PID", @@ -2566,7 +1738,7 @@ static void gen_spr_403_mmu(CPUPPCState *env) } /* SPR specific to PowerPC compression coprocessor extension */ -static void gen_spr_compress(CPUPPCState *env) +static void register_compress_sprs(CPUPPCState *env) { /* XXX : not implemented */ spr_register(env, SPR_401_SKR, "SKR", @@ -2575,7 +1747,7 @@ static void gen_spr_compress(CPUPPCState *env) 0x00000000); } -static void gen_spr_5xx_8xx(CPUPPCState *env) +static void 
register_5xx_8xx_sprs(CPUPPCState *env) { /* Exception processing */ spr_register_kvm(env, SPR_DSISR, "DSISR", @@ -2693,7 +1865,7 @@ static void gen_spr_5xx_8xx(CPUPPCState *env) 0x00000000); } -static void gen_spr_5xx(CPUPPCState *env) +static void register_5xx_sprs(CPUPPCState *env) { /* XXX : not implemented */ spr_register(env, SPR_RCPU_MI_GRA, "MI_GRA", @@ -2802,7 +1974,7 @@ static void gen_spr_5xx(CPUPPCState *env) 0x00000000); } -static void gen_spr_8xx(CPUPPCState *env) +static void register_8xx_sprs(CPUPPCState *env) { /* XXX : not implemented */ spr_register(env, SPR_MPC_IC_CST, "IC_CST", @@ -3457,7 +2629,7 @@ static void init_excp_POWER9(CPUPPCState *env) #if !defined(CONFIG_USER_ONLY) env->excp_vectors[POWERPC_EXCP_HVIRT] = 0x00000EA0; - env->excp_vectors[POWERPC_EXCP_SYSCALL_VECTORED] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_SYSCALL_VECTORED] = 0x00017000; #endif } @@ -3537,9 +2709,9 @@ static bool ppc_cpu_interrupts_big_endian_lpcr(PowerPCCPU *cpu) static void init_proc_401(CPUPPCState *env) { - gen_spr_40x(env); - gen_spr_401_403(env); - gen_spr_401(env); + register_40x_sprs(env); + register_401_403_sprs(env); + register_401_sprs(env); init_excp_4xx_real(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -3583,10 +2755,10 @@ POWERPC_FAMILY(401)(ObjectClass *oc, void *data) static void init_proc_401x2(CPUPPCState *env) { - gen_spr_40x(env); - gen_spr_401_403(env); - gen_spr_401x2(env); - gen_spr_compress(env); + register_40x_sprs(env); + register_401_403_sprs(env); + register_401x2_sprs(env); + register_compress_sprs(env); /* Memory management */ #if !defined(CONFIG_USER_ONLY) env->nb_tlb = 64; @@ -3641,11 +2813,11 @@ POWERPC_FAMILY(401x2)(ObjectClass *oc, void *data) static void init_proc_401x3(CPUPPCState *env) { - gen_spr_40x(env); - gen_spr_401_403(env); - gen_spr_401(env); - gen_spr_401x2(env); - gen_spr_compress(env); + register_40x_sprs(env); + register_401_403_sprs(env); + register_401_sprs(env); + register_401x2_sprs(env); + register_compress_sprs(env); init_excp_4xx_softmmu(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -3694,10 +2866,10 @@ POWERPC_FAMILY(401x3)(ObjectClass *oc, void *data) static void init_proc_IOP480(CPUPPCState *env) { - gen_spr_40x(env); - gen_spr_401_403(env); - gen_spr_401x2(env); - gen_spr_compress(env); + register_40x_sprs(env); + register_401_403_sprs(env); + register_401x2_sprs(env); + register_compress_sprs(env); /* Memory management */ #if !defined(CONFIG_USER_ONLY) env->nb_tlb = 64; @@ -3752,10 +2924,10 @@ POWERPC_FAMILY(IOP480)(ObjectClass *oc, void *data) static void init_proc_403(CPUPPCState *env) { - gen_spr_40x(env); - gen_spr_401_403(env); - gen_spr_403(env); - gen_spr_403_real(env); + register_40x_sprs(env); + register_401_403_sprs(env); + register_403_sprs(env); + register_403_real_sprs(env); init_excp_4xx_real(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -3799,11 +2971,11 @@ POWERPC_FAMILY(403)(ObjectClass *oc, void *data) static void init_proc_403GCX(CPUPPCState *env) { - gen_spr_40x(env); - gen_spr_401_403(env); - gen_spr_403(env); - gen_spr_403_real(env); - gen_spr_403_mmu(env); + register_40x_sprs(env); + register_401_403_sprs(env); + register_403_sprs(env); + register_403_real_sprs(env); + register_403_mmu_sprs(env); /* Bus access control */ /* not emulated, as QEMU never does speculative access */ spr_register(env, SPR_40x_SGR, "SGR", @@ -3867,9 +3039,9 @@ POWERPC_FAMILY(403GCX)(ObjectClass *oc, void *data) static void init_proc_405(CPUPPCState *env) { /* Time base 
*/ - gen_tbl(env); - gen_spr_40x(env); - gen_spr_405(env); + register_tbl(env); + register_40x_sprs(env); + register_405_sprs(env); /* Bus access control */ /* not emulated, as QEMU never does speculative access */ spr_register(env, SPR_40x_SGR, "SGR", @@ -3933,10 +3105,10 @@ POWERPC_FAMILY(405)(ObjectClass *oc, void *data) static void init_proc_440EP(CPUPPCState *env) { /* Time base */ - gen_tbl(env); - gen_spr_BookE(env, 0x000000000000FFFFULL); - gen_spr_440(env); - gen_spr_usprgh(env); + register_tbl(env); + register_BookE_sprs(env, 0x000000000000FFFFULL); + register_440_sprs(env); + register_usprgh_sprs(env); /* Processor identification */ spr_register(env, SPR_BOOKE_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, @@ -4075,10 +3247,10 @@ POWERPC_FAMILY(460EX)(ObjectClass *oc, void *data) static void init_proc_440GP(CPUPPCState *env) { /* Time base */ - gen_tbl(env); - gen_spr_BookE(env, 0x000000000000FFFFULL); - gen_spr_440(env); - gen_spr_usprgh(env); + register_tbl(env); + register_BookE_sprs(env, 0x000000000000FFFFULL); + register_440_sprs(env); + register_usprgh_sprs(env); /* Processor identification */ spr_register(env, SPR_BOOKE_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, @@ -4158,10 +3330,10 @@ POWERPC_FAMILY(440GP)(ObjectClass *oc, void *data) static void init_proc_440x4(CPUPPCState *env) { /* Time base */ - gen_tbl(env); - gen_spr_BookE(env, 0x000000000000FFFFULL); - gen_spr_440(env); - gen_spr_usprgh(env); + register_tbl(env); + register_BookE_sprs(env, 0x000000000000FFFFULL); + register_440_sprs(env); + register_usprgh_sprs(env); /* Processor identification */ spr_register(env, SPR_BOOKE_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, @@ -4241,10 +3413,10 @@ POWERPC_FAMILY(440x4)(ObjectClass *oc, void *data) static void init_proc_440x5(CPUPPCState *env) { /* Time base */ - gen_tbl(env); - gen_spr_BookE(env, 0x000000000000FFFFULL); - gen_spr_440(env); - gen_spr_usprgh(env); + register_tbl(env); + register_BookE_sprs(env, 0x000000000000FFFFULL); + register_440_sprs(env); + register_usprgh_sprs(env); /* Processor identification */ spr_register(env, SPR_BOOKE_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, @@ -4380,9 +3552,9 @@ POWERPC_FAMILY(440x5wDFPU)(ObjectClass *oc, void *data) static void init_proc_MPC5xx(CPUPPCState *env) { /* Time base */ - gen_tbl(env); - gen_spr_5xx_8xx(env); - gen_spr_5xx(env); + register_tbl(env); + register_5xx_8xx_sprs(env); + register_5xx_sprs(env); init_excp_MPC5xx(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -4424,9 +3596,9 @@ POWERPC_FAMILY(MPC5xx)(ObjectClass *oc, void *data) static void init_proc_MPC8xx(CPUPPCState *env) { /* Time base */ - gen_tbl(env); - gen_spr_5xx_8xx(env); - gen_spr_8xx(env); + register_tbl(env); + register_5xx_8xx_sprs(env); + register_8xx_sprs(env); init_excp_MPC8xx(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -4468,12 +3640,12 @@ POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, void *data) static void init_proc_G2(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_G2_755(env); - gen_spr_G2(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_G2_755_sprs(env); + register_G2_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* External access control */ /* XXX : not implemented */ spr_register(env, SPR_EAR, "EAR", @@ -4497,9 +3669,9 @@ static void init_proc_G2(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_high_BATs(env); - gen_6xx_7xx_soft_tlb(env, 64, 2); + register_low_BATs(env); + 
register_high_BATs(env); + register_6xx_7xx_soft_tlb(env, 64, 2); init_excp_G2(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -4547,12 +3719,12 @@ POWERPC_FAMILY(G2)(ObjectClass *oc, void *data) static void init_proc_G2LE(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_G2_755(env); - gen_spr_G2(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_G2_755_sprs(env); + register_G2_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* External access control */ /* XXX : not implemented */ spr_register(env, SPR_EAR, "EAR", @@ -4577,9 +3749,9 @@ static void init_proc_G2LE(CPUPPCState *env) 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_high_BATs(env); - gen_6xx_7xx_soft_tlb(env, 64, 2); + register_low_BATs(env); + register_high_BATs(env); + register_6xx_7xx_soft_tlb(env, 64, 2); init_excp_G2(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -4630,15 +3802,15 @@ POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data) static void init_proc_e200(CPUPPCState *env) { /* Time base */ - gen_tbl(env); - gen_spr_BookE(env, 0x000000070000FFFFULL); + register_tbl(env); + register_BookE_sprs(env, 0x000000070000FFFFULL); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR", &spr_read_spefscr, &spr_write_spefscr, &spr_read_spefscr, &spr_write_spefscr, 0x00000000); /* Memory management */ - gen_spr_BookE206(env, 0x0000005D, NULL, 0); + register_BookE206_sprs(env, 0x0000005D, NULL, 0); /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, @@ -4784,11 +3956,11 @@ POWERPC_FAMILY(e200)(ObjectClass *oc, void *data) static void init_proc_e300(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_603(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_603_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", @@ -4832,9 +4004,9 @@ static void init_proc_e300(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_high_BATs(env); - gen_6xx_7xx_soft_tlb(env, 64, 2); + register_low_BATs(env); + register_high_BATs(env); + register_6xx_7xx_soft_tlb(env, 64, 2); init_excp_603(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -4882,31 +4054,6 @@ POWERPC_FAMILY(e300)(ObjectClass *oc, void *data) POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; } -#if !defined(CONFIG_USER_ONLY) -static void spr_write_mas73(DisasContext *ctx, int sprn, int gprn) -{ - TCGv val = tcg_temp_new(); - tcg_gen_ext32u_tl(val, cpu_gpr[gprn]); - gen_store_spr(SPR_BOOKE_MAS3, val); - tcg_gen_shri_tl(val, cpu_gpr[gprn], 32); - gen_store_spr(SPR_BOOKE_MAS7, val); - tcg_temp_free(val); -} - -static void spr_read_mas73(DisasContext *ctx, int gprn, int sprn) -{ - TCGv mas7 = tcg_temp_new(); - TCGv mas3 = tcg_temp_new(); - gen_load_spr(mas7, SPR_BOOKE_MAS7); - tcg_gen_shli_tl(mas7, mas7, 32); - gen_load_spr(mas3, SPR_BOOKE_MAS3); - tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7); - tcg_temp_free(mas3); - tcg_temp_free(mas7); -} - -#endif - enum fsl_e500_version { fsl_e500v1, fsl_e500v2, @@ -4930,11 +4077,11 @@ static void init_proc_e500(CPUPPCState *env, int version) #endif /* Time base */ - gen_tbl(env); + register_tbl(env); /* * XXX The e500 doesn't implement IVOR7 and IVOR9, but doesn't * complain when accessing them. 
- * gen_spr_BookE(env, 0x0000000F0000FD7FULL); + * register_BookE_sprs(env, 0x0000000F0000FD7FULL); */ switch (version) { case fsl_e500v1: @@ -4950,8 +4097,8 @@ static void init_proc_e500(CPUPPCState *env, int version) ivor_mask = 0x000003FF0000FFFFULL; break; } - gen_spr_BookE(env, ivor_mask); - gen_spr_usprg3(env); + register_BookE_sprs(env, ivor_mask); + register_usprg3_sprs(env); /* Processor identification */ spr_register(env, SPR_BOOKE_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, @@ -4969,17 +4116,17 @@ static void init_proc_e500(CPUPPCState *env, int version) env->id_tlbs = 0; switch (version) { case fsl_e500v1: - tlbncfg[0] = gen_tlbncfg(2, 1, 1, 0, 256); - tlbncfg[1] = gen_tlbncfg(16, 1, 9, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16); + tlbncfg[0] = register_tlbncfg(2, 1, 1, 0, 256); + tlbncfg[1] = register_tlbncfg(16, 1, 9, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16); break; case fsl_e500v2: - tlbncfg[0] = gen_tlbncfg(4, 1, 1, 0, 512); - tlbncfg[1] = gen_tlbncfg(16, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16); + tlbncfg[0] = register_tlbncfg(4, 1, 1, 0, 512); + tlbncfg[1] = register_tlbncfg(16, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16); break; case fsl_e500mc: case fsl_e5500: - tlbncfg[0] = gen_tlbncfg(4, 1, 1, 0, 512); - tlbncfg[1] = gen_tlbncfg(64, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 64); + tlbncfg[0] = register_tlbncfg(4, 1, 1, 0, 512); + tlbncfg[1] = register_tlbncfg(64, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 64); break; case fsl_e6500: mmucfg = 0x6510B45; @@ -5016,7 +4163,7 @@ static void init_proc_e500(CPUPPCState *env, int version) cpu_abort(env_cpu(env), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]); } - gen_spr_BookE206(env, 0x000000DF, tlbncfg, mmucfg); + register_BookE206_sprs(env, 0x000000DF, tlbncfg, mmucfg); /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, @@ -5374,9 +4521,9 @@ POWERPC_FAMILY(e6500)(ObjectClass *oc, void *data) static void init_proc_601(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_601(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_601_sprs(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", @@ -5441,7 +4588,7 @@ POWERPC_FAMILY(601)(ObjectClass *oc, void *data) pcc->excp_model = POWERPC_EXCP_601; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_601; - pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_HID0_LE; } #define POWERPC_MSRR_601v (0x0000000000001040ULL) @@ -5485,16 +4632,16 @@ POWERPC_FAMILY(601v)(ObjectClass *oc, void *data) #endif pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->bfd_mach = bfd_mach_ppc_601; - pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_HID0_LE; } static void init_proc_602(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_602(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_602_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", @@ -5507,8 +4654,8 @@ static void init_proc_602(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_6xx_7xx_soft_tlb(env, 64, 2); + register_low_BATs(env); + register_6xx_7xx_soft_tlb(env, 64, 2); init_excp_602(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ 
-5560,11 +4707,11 @@ POWERPC_FAMILY(602)(ObjectClass *oc, void *data) static void init_proc_603(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_603(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_603_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", @@ -5577,8 +4724,8 @@ static void init_proc_603(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_6xx_7xx_soft_tlb(env, 64, 2); + register_low_BATs(env); + register_6xx_7xx_soft_tlb(env, 64, 2); init_excp_603(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -5627,11 +4774,11 @@ POWERPC_FAMILY(603)(ObjectClass *oc, void *data) static void init_proc_603E(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_603(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_603_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", @@ -5644,8 +4791,8 @@ static void init_proc_603E(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_6xx_7xx_soft_tlb(env, 64, 2); + register_low_BATs(env); + register_6xx_7xx_soft_tlb(env, 64, 2); init_excp_603(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -5694,11 +4841,11 @@ POWERPC_FAMILY(603E)(ObjectClass *oc, void *data) static void init_proc_604(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_604(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_604_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", @@ -5706,7 +4853,7 @@ static void init_proc_604(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); + register_low_BATs(env); init_excp_604(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -5758,9 +4905,9 @@ POWERPC_FAMILY(604)(ObjectClass *oc, void *data) static void init_proc_604E(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_604(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_604_sprs(env); /* XXX : not implemented */ spr_register(env, SPR_7XX_MMCR1, "MMCR1", SPR_NOACCESS, SPR_NOACCESS, @@ -5777,7 +4924,7 @@ static void init_proc_604E(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Time base */ - gen_tbl(env); + register_tbl(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", @@ -5790,7 +4937,7 @@ static void init_proc_604E(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); + register_low_BATs(env); init_excp_604(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -5842,13 +4989,13 @@ POWERPC_FAMILY(604E)(ObjectClass *oc, void *data) static void init_proc_740(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* Thermal management */ - gen_spr_thrm(env); + register_thrm_sprs(env); /* Hardware implementation registers */ /* 
XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", @@ -5861,7 +5008,7 @@ static void init_proc_740(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); + register_low_BATs(env); init_excp_7x0(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -5913,18 +5060,18 @@ POWERPC_FAMILY(740)(ObjectClass *oc, void *data) static void init_proc_750(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); /* Time base */ - gen_tbl(env); + register_tbl(env); /* Thermal management */ - gen_spr_thrm(env); + register_thrm_sprs(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", @@ -5937,7 +5084,7 @@ static void init_proc_750(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); + register_low_BATs(env); /* * XXX: high BATs are also present but are known to be bugged on * die version 1.x @@ -5993,16 +5140,16 @@ POWERPC_FAMILY(750)(ObjectClass *oc, void *data) static void init_proc_750cl(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); /* Time base */ - gen_tbl(env); + register_tbl(env); /* Thermal management */ /* Those registers are fake on 750CL */ spr_register(env, SPR_THRM1, "THRM1", @@ -6103,9 +5250,9 @@ static void init_proc_750cl(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); + register_low_BATs(env); /* PowerPC 750cl has 8 DBATs and 8 IBATs */ - gen_high_BATs(env); + register_high_BATs(env); init_excp_750cl(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6196,18 +5343,18 @@ POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data) static void init_proc_750cx(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); /* Time base */ - gen_tbl(env); + register_tbl(env); /* Thermal management */ - gen_spr_thrm(env); + register_thrm_sprs(env); /* This register is not implemented but is present for compatibility */ spr_register(env, SPR_SDA, "SDA", SPR_NOACCESS, SPR_NOACCESS, @@ -6225,9 +5372,9 @@ static void init_proc_750cx(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); + register_low_BATs(env); /* PowerPC 750cx has 8 DBATs and 8 IBATs */ - gen_high_BATs(env); + register_high_BATs(env); init_excp_750cx(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6279,18 +5426,18 @@ POWERPC_FAMILY(750cx)(ObjectClass *oc, void *data) static void init_proc_750fx(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, 
&spr_read_generic, spr_access_nop, 0x00000000); /* Time base */ - gen_tbl(env); + register_tbl(env); /* Thermal management */ - gen_spr_thrm(env); + register_thrm_sprs(env); /* XXX : not implemented */ spr_register(env, SPR_750_THRM4, "THRM4", SPR_NOACCESS, SPR_NOACCESS, @@ -6313,9 +5460,9 @@ static void init_proc_750fx(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); + register_low_BATs(env); /* PowerPC 750fx & 750gx has 8 DBATs and 8 IBATs */ - gen_high_BATs(env); + register_high_BATs(env); init_excp_7x0(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6367,18 +5514,18 @@ POWERPC_FAMILY(750fx)(ObjectClass *oc, void *data) static void init_proc_750gx(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* XXX : not implemented (XXX: different from 750fx) */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); /* Time base */ - gen_tbl(env); + register_tbl(env); /* Thermal management */ - gen_spr_thrm(env); + register_thrm_sprs(env); /* XXX : not implemented */ spr_register(env, SPR_750_THRM4, "THRM4", SPR_NOACCESS, SPR_NOACCESS, @@ -6401,9 +5548,9 @@ static void init_proc_750gx(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); + register_low_BATs(env); /* PowerPC 750fx & 750gx has 8 DBATs and 8 IBATs */ - gen_high_BATs(env); + register_high_BATs(env); init_excp_7x0(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6455,14 +5602,14 @@ POWERPC_FAMILY(750gx)(ObjectClass *oc, void *data) static void init_proc_745(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); - gen_spr_G2_755(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); + register_G2_755_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* Thermal management */ - gen_spr_thrm(env); + register_thrm_sprs(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", @@ -6480,9 +5627,9 @@ static void init_proc_745(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_high_BATs(env); - gen_6xx_7xx_soft_tlb(env, 64, 2); + register_low_BATs(env); + register_high_BATs(env); + register_6xx_7xx_soft_tlb(env, 64, 2); init_excp_7x5(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6531,12 +5678,12 @@ POWERPC_FAMILY(745)(ObjectClass *oc, void *data) static void init_proc_755(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); - gen_spr_G2_755(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); + register_G2_755_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* L2 cache control */ /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", @@ -6549,7 +5696,7 @@ static void init_proc_755(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Thermal management */ - gen_spr_thrm(env); + register_thrm_sprs(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", @@ -6567,9 +5714,9 @@ static void init_proc_755(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_high_BATs(env); - 
gen_6xx_7xx_soft_tlb(env, 64, 2); + register_low_BATs(env); + register_high_BATs(env); + register_6xx_7xx_soft_tlb(env, 64, 2); init_excp_7x5(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6618,13 +5765,14 @@ POWERPC_FAMILY(755)(ObjectClass *oc, void *data) static void init_proc_7400(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* 74xx specific SPR */ - gen_spr_74xx(env); + register_74xx_sprs(env); + vscr_init(env, 0x00010000); /* XXX : not implemented */ spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, @@ -6637,9 +5785,9 @@ static void init_proc_7400(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Thermal management */ - gen_spr_thrm(env); + register_thrm_sprs(env); /* Memory management */ - gen_low_BATs(env); + register_low_BATs(env); init_excp_7400(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6697,20 +5845,21 @@ POWERPC_FAMILY(7400)(ObjectClass *oc, void *data) static void init_proc_7410(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* 74xx specific SPR */ - gen_spr_74xx(env); + register_74xx_sprs(env); + vscr_init(env, 0x00010000); /* XXX : not implemented */ spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Thermal management */ - gen_spr_thrm(env); + register_thrm_sprs(env); /* L2PMCR */ /* XXX : not implemented */ spr_register(env, SPR_L2PMCR, "L2PMCR", @@ -6724,7 +5873,7 @@ static void init_proc_7410(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ - gen_low_BATs(env); + register_low_BATs(env); init_excp_7400(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6782,13 +5931,14 @@ POWERPC_FAMILY(7410)(ObjectClass *oc, void *data) static void init_proc_7440(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* 74xx specific SPR */ - gen_spr_74xx(env); + register_74xx_sprs(env); + vscr_init(env, 0x00010000); /* XXX : not implemented */ spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, @@ -6834,8 +5984,8 @@ static void init_proc_7440(CPUPPCState *env) &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_74xx_soft_tlb(env, 128, 2); + register_low_BATs(env); + register_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -6890,15 +6040,16 @@ POWERPC_FAMILY(7440)(ObjectClass *oc, void *data) static void init_proc_7450(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* 74xx specific SPR */ - gen_spr_74xx(env); + register_74xx_sprs(env); + vscr_init(env, 0x00010000); /* Level 3 cache control */ - gen_l3_ctrl(env); + register_l3_ctrl(env); /* L3ITCR1 */ /* XXX : not implemented */ spr_register(env, SPR_L3ITCR1, "L3ITCR1", @@ -6968,8 +6119,8 @@ static void init_proc_7450(CPUPPCState *env) &spr_read_ureg, 
SPR_NOACCESS, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_74xx_soft_tlb(env, 128, 2); + register_low_BATs(env); + register_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -7024,13 +6175,14 @@ POWERPC_FAMILY(7450)(ObjectClass *oc, void *data) static void init_proc_7445(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* 74xx specific SPR */ - gen_spr_74xx(env); + register_74xx_sprs(env); + vscr_init(env, 0x00010000); /* LDSTCR */ /* XXX : not implemented */ spr_register(env, SPR_LDSTCR, "LDSTCR", @@ -7104,9 +6256,9 @@ static void init_proc_7445(CPUPPCState *env) &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_high_BATs(env); - gen_74xx_soft_tlb(env, 128, 2); + register_low_BATs(env); + register_high_BATs(env); + register_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -7161,15 +6313,16 @@ POWERPC_FAMILY(7445)(ObjectClass *oc, void *data) static void init_proc_7455(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* 74xx specific SPR */ - gen_spr_74xx(env); + register_74xx_sprs(env); + vscr_init(env, 0x00010000); /* Level 3 cache control */ - gen_l3_ctrl(env); + register_l3_ctrl(env); /* LDSTCR */ /* XXX : not implemented */ spr_register(env, SPR_LDSTCR, "LDSTCR", @@ -7243,9 +6396,9 @@ static void init_proc_7455(CPUPPCState *env) &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_high_BATs(env); - gen_74xx_soft_tlb(env, 128, 2); + register_low_BATs(env); + register_high_BATs(env); + register_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -7300,15 +6453,16 @@ POWERPC_FAMILY(7455)(ObjectClass *oc, void *data) static void init_proc_7457(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* 74xx specific SPR */ - gen_spr_74xx(env); + register_74xx_sprs(env); + vscr_init(env, 0x00010000); /* Level 3 cache control */ - gen_l3_ctrl(env); + register_l3_ctrl(env); /* L3ITCR1 */ /* XXX : not implemented */ spr_register(env, SPR_L3ITCR1, "L3ITCR1", @@ -7406,9 +6560,9 @@ static void init_proc_7457(CPUPPCState *env) &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_high_BATs(env); - gen_74xx_soft_tlb(env, 128, 2); + register_low_BATs(env); + register_high_BATs(env); + register_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -7463,13 +6617,14 @@ POWERPC_FAMILY(7457)(ObjectClass *oc, void *data) static void init_proc_e600(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_spr_sdr1(env); - gen_spr_7xx(env); + register_ne_601_sprs(env); + register_sdr1_sprs(env); + register_7xx_sprs(env); /* Time base */ - gen_tbl(env); + register_tbl(env); /* 74xx specific SPR */ - gen_spr_74xx(env); + register_74xx_sprs(env); + vscr_init(env, 0x00010000); /* XXX : not implemented */ spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, 
SPR_NOACCESS, @@ -7544,9 +6699,9 @@ static void init_proc_e600(CPUPPCState *env) &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ - gen_low_BATs(env); - gen_high_BATs(env); - gen_74xx_soft_tlb(env, 128, 2); + register_low_BATs(env); + register_high_BATs(env); + register_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -7610,58 +6765,6 @@ POWERPC_FAMILY(e600)(ObjectClass *oc, void *data) #define POWERPC970_HID5_INIT 0x00000000 #endif -static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn, - int bit, int sprn, int cause) -{ - TCGv_i32 t1 = tcg_const_i32(bit); - TCGv_i32 t2 = tcg_const_i32(sprn); - TCGv_i32 t3 = tcg_const_i32(cause); - - gen_helper_fscr_facility_check(cpu_env, t1, t2, t3); - - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); -} - -static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn, - int bit, int sprn, int cause) -{ - TCGv_i32 t1 = tcg_const_i32(bit); - TCGv_i32 t2 = tcg_const_i32(sprn); - TCGv_i32 t3 = tcg_const_i32(cause); - - gen_helper_msr_facility_check(cpu_env, t1, t2, t3); - - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); -} - -static void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn) -{ - TCGv spr_up = tcg_temp_new(); - TCGv spr = tcg_temp_new(); - - gen_load_spr(spr, sprn - 1); - tcg_gen_shri_tl(spr_up, spr, 32); - tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up); - - tcg_temp_free(spr); - tcg_temp_free(spr_up); -} - -static void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn) -{ - TCGv spr = tcg_temp_new(); - - gen_load_spr(spr, sprn - 1); - tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32); - gen_store_spr(sprn - 1, spr); - - tcg_temp_free(spr); -} - static int check_pow_970(CPUPPCState *env) { if (env->spr[SPR_HID0] & (HID0_DEEPNAP | HID0_DOZE | HID0_NAP)) { @@ -7671,7 +6774,7 @@ static int check_pow_970(CPUPPCState *env) return 0; } -static void gen_spr_970_hid(CPUPPCState *env) +static void register_970_hid_sprs(CPUPPCState *env) { /* Hardware implementation registers */ /* XXX : not implemented */ @@ -7689,7 +6792,7 @@ static void gen_spr_970_hid(CPUPPCState *env) POWERPC970_HID5_INIT); } -static void gen_spr_970_hior(CPUPPCState *env) +static void register_970_hior_sprs(CPUPPCState *env) { spr_register(env, SPR_HIOR, "SPR_HIOR", SPR_NOACCESS, SPR_NOACCESS, @@ -7697,7 +6800,7 @@ static void gen_spr_970_hior(CPUPPCState *env) 0x00000000); } -static void gen_spr_book3s_ctrl(CPUPPCState *env) +static void register_book3s_ctrl_sprs(CPUPPCState *env) { spr_register(env, SPR_CTRL, "SPR_CTRL", SPR_NOACCESS, SPR_NOACCESS, @@ -7709,7 +6812,7 @@ static void gen_spr_book3s_ctrl(CPUPPCState *env) 0x00000000); } -static void gen_spr_book3s_altivec(CPUPPCState *env) +static void register_book3s_altivec_sprs(CPUPPCState *env) { if (!(env->insns_flags & PPC_ALTIVEC)) { return; @@ -7720,14 +6823,9 @@ static void gen_spr_book3s_altivec(CPUPPCState *env) &spr_read_generic, &spr_write_generic, KVM_REG_PPC_VRSAVE, 0x00000000); - /* - * Can't find information on what this should be on reset. This - * value is the one used by 74xx processors. 
- */ - vscr_init(env, 0x00010000); } -static void gen_spr_book3s_dbg(CPUPPCState *env) +static void register_book3s_dbg_sprs(CPUPPCState *env) { /* * TODO: different specs define different scopes for these, @@ -7746,14 +6844,14 @@ static void gen_spr_book3s_dbg(CPUPPCState *env) KVM_REG_PPC_DABRX, 0x00000000); } -static void gen_spr_book3s_207_dbg(CPUPPCState *env) +static void register_book3s_207_dbg_sprs(CPUPPCState *env) { - spr_register_kvm_hv(env, SPR_DAWR, "DAWR", + spr_register_kvm_hv(env, SPR_DAWR0, "DAWR0", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_DAWR, 0x00000000); - spr_register_kvm_hv(env, SPR_DAWRX, "DAWRX", + spr_register_kvm_hv(env, SPR_DAWRX0, "DAWRX0", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -7765,7 +6863,7 @@ static void gen_spr_book3s_207_dbg(CPUPPCState *env) KVM_REG_PPC_CIABR, 0x00000000); } -static void gen_spr_970_dbg(CPUPPCState *env) +static void register_970_dbg_sprs(CPUPPCState *env) { /* Breakpoints */ spr_register(env, SPR_IABR, "IABR", @@ -7774,7 +6872,7 @@ static void gen_spr_970_dbg(CPUPPCState *env) 0x00000000); } -static void gen_spr_book3s_pmu_sup(CPUPPCState *env) +static void register_book3s_pmu_sup_sprs(CPUPPCState *env) { spr_register_kvm(env, SPR_POWER_MMCR0, "MMCR0", SPR_NOACCESS, SPR_NOACCESS, @@ -7822,7 +6920,7 @@ static void gen_spr_book3s_pmu_sup(CPUPPCState *env) KVM_REG_PPC_SDAR, 0x00000000); } -static void gen_spr_book3s_pmu_user(CPUPPCState *env) +static void register_book3s_pmu_user_sprs(CPUPPCState *env) { spr_register(env, SPR_POWER_UMMCR0, "UMMCR0", &spr_read_ureg, SPR_NOACCESS, @@ -7870,7 +6968,7 @@ static void gen_spr_book3s_pmu_user(CPUPPCState *env) 0x00000000); } -static void gen_spr_970_pmu_sup(CPUPPCState *env) +static void register_970_pmu_sup_sprs(CPUPPCState *env) { spr_register_kvm(env, SPR_970_PMC7, "PMC7", SPR_NOACCESS, SPR_NOACCESS, @@ -7882,7 +6980,7 @@ static void gen_spr_970_pmu_sup(CPUPPCState *env) KVM_REG_PPC_PMC8, 0x00000000); } -static void gen_spr_970_pmu_user(CPUPPCState *env) +static void register_970_pmu_user_sprs(CPUPPCState *env) { spr_register(env, SPR_970_UPMC7, "UPMC7", &spr_read_ureg, SPR_NOACCESS, @@ -7894,7 +6992,7 @@ static void gen_spr_970_pmu_user(CPUPPCState *env) 0x00000000); } -static void gen_spr_power8_pmu_sup(CPUPPCState *env) +static void register_power8_pmu_sup_sprs(CPUPPCState *env) { spr_register_kvm(env, SPR_POWER_MMCR2, "MMCR2", SPR_NOACCESS, SPR_NOACCESS, @@ -7930,7 +7028,7 @@ static void gen_spr_power8_pmu_sup(CPUPPCState *env) KVM_REG_PPC_CSIGR, 0x00000000); } -static void gen_spr_power8_pmu_user(CPUPPCState *env) +static void register_power8_pmu_user_sprs(CPUPPCState *env) { spr_register(env, SPR_POWER_UMMCR2, "UMMCR2", &spr_read_ureg, SPR_NOACCESS, @@ -7942,7 +7040,7 @@ static void gen_spr_power8_pmu_user(CPUPPCState *env) 0x00000000); } -static void gen_spr_power5p_ear(CPUPPCState *env) +static void register_power5p_ear_sprs(CPUPPCState *env) { /* External access control */ spr_register(env, SPR_EAR, "EAR", @@ -7951,7 +7049,7 @@ static void gen_spr_power5p_ear(CPUPPCState *env) 0x00000000); } -static void gen_spr_power5p_tb(CPUPPCState *env) +static void register_power5p_tb_sprs(CPUPPCState *env) { /* TBU40 (High 40 bits of the Timebase register */ spr_register_hv(env, SPR_TBU40, "TBU40", @@ -7961,25 +7059,7 @@ static void gen_spr_power5p_tb(CPUPPCState *env) 0x00000000); } -#if !defined(CONFIG_USER_ONLY) -static void spr_write_hmer(DisasContext *ctx, int sprn, 
int gprn) -{ - TCGv hmer = tcg_temp_new(); - - gen_load_spr(hmer, sprn); - tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer); - gen_store_spr(sprn, hmer); - spr_store_dump_spr(sprn); - tcg_temp_free(hmer); -} - -static void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn) -{ - gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]); -} -#endif /* !defined(CONFIG_USER_ONLY) */ - -static void gen_spr_970_lpar(CPUPPCState *env) +static void register_970_lpar_sprs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) /* @@ -7996,7 +7076,7 @@ static void gen_spr_970_lpar(CPUPPCState *env) #endif } -static void gen_spr_power5p_lpar(CPUPPCState *env) +static void register_power5p_lpar_sprs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) /* Logical partitionning */ @@ -8012,7 +7092,7 @@ static void gen_spr_power5p_lpar(CPUPPCState *env) #endif } -static void gen_spr_book3s_ids(CPUPPCState *env) +static void register_book3s_ids_sprs(CPUPPCState *env) { /* FIXME: Will need to deal with thread vs core only SPRs */ @@ -8104,7 +7184,7 @@ static void gen_spr_book3s_ids(CPUPPCState *env) 0x00000000); } -static void gen_spr_rmor(CPUPPCState *env) +static void register_rmor_sprs(CPUPPCState *env) { spr_register_hv(env, SPR_RMOR, "RMOR", SPR_NOACCESS, SPR_NOACCESS, @@ -8113,7 +7193,7 @@ static void gen_spr_rmor(CPUPPCState *env) 0x00000000); } -static void gen_spr_power8_ids(CPUPPCState *env) +static void register_power8_ids_sprs(CPUPPCState *env) { /* Thread identification */ spr_register(env, SPR_TIR, "TIR", @@ -8122,7 +7202,7 @@ static void gen_spr_power8_ids(CPUPPCState *env) 0x00000000); } -static void gen_spr_book3s_purr(CPUPPCState *env) +static void register_book3s_purr_sprs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) /* PURR & SPURR: Hack - treat these as aliases for the TB for now */ @@ -8139,7 +7219,7 @@ static void gen_spr_book3s_purr(CPUPPCState *env) #endif } -static void gen_spr_power6_dbg(CPUPPCState *env) +static void register_power6_dbg_sprs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) spr_register(env, SPR_CFAR, "SPR_CFAR", @@ -8149,7 +7229,7 @@ static void gen_spr_power6_dbg(CPUPPCState *env) #endif } -static void gen_spr_power5p_common(CPUPPCState *env) +static void register_power5p_common_sprs(CPUPPCState *env) { spr_register_kvm(env, SPR_PPR, "PPR", &spr_read_generic, &spr_write_generic, @@ -8157,7 +7237,7 @@ static void gen_spr_power5p_common(CPUPPCState *env) KVM_REG_PPC_PPR, 0x00000000); } -static void gen_spr_power6_common(CPUPPCState *env) +static void register_power6_common_sprs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) spr_register_kvm(env, SPR_DSCR, "SPR_DSCR", @@ -8176,19 +7256,7 @@ static void gen_spr_power6_common(CPUPPCState *env) 0x00000000); } -static void spr_read_tar(DisasContext *ctx, int gprn, int sprn) -{ - gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR); - spr_read_generic(ctx, gprn, sprn); -} - -static void spr_write_tar(DisasContext *ctx, int sprn, int gprn) -{ - gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR); - spr_write_generic(ctx, sprn, gprn); -} - -static void gen_spr_power8_tce_address_control(CPUPPCState *env) +static void register_power8_tce_address_control_sprs(CPUPPCState *env) { spr_register_kvm(env, SPR_TAR, "TAR", &spr_read_tar, &spr_write_tar, @@ -8196,31 +7264,7 @@ static void gen_spr_power8_tce_address_control(CPUPPCState *env) KVM_REG_PPC_TAR, 0x00000000); } -static void spr_read_tm(DisasContext *ctx, int gprn, int sprn) -{ - gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); - 
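(Editorial sketch, not part of the patch.) The spr_write_hmer body removed above ANDs the value written by mtspr into the current HMER before storing it, so a write can only clear bits, never set them, from this path. A plain-C restatement of that rule, illustration only:

#include <stdint.h>
#include <stdio.h>

/* Software model of the removed spr_write_hmer(): the stored value is the
 * AND of the old contents and the written value, i.e. writing 0 to a bit
 * clears it and writing 1 leaves it unchanged. */
static uint64_t hmer_write(uint64_t current, uint64_t written)
{
    return current & written;
}

int main(void)
{
    uint64_t hmer = 0x00F0;               /* pending maintenance bits   */
    hmer = hmer_write(hmer, ~0x0030ULL);  /* acknowledge (clear) two    */
    printf("HMER after write: %#llx\n",   /* prints 0xc0                */
           (unsigned long long)hmer);
    return 0;
}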
spr_read_generic(ctx, gprn, sprn); -} - -static void spr_write_tm(DisasContext *ctx, int sprn, int gprn) -{ - gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); - spr_write_generic(ctx, sprn, gprn); -} - -static void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn) -{ - gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); - spr_read_prev_upper32(ctx, gprn, sprn); -} - -static void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn) -{ - gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); - spr_write_prev_upper32(ctx, sprn, gprn); -} - -static void gen_spr_power8_tm(CPUPPCState *env) +static void register_power8_tm_sprs(CPUPPCState *env) { spr_register_kvm(env, SPR_TFHAR, "TFHAR", &spr_read_tm, &spr_write_tm, @@ -8240,31 +7284,7 @@ static void gen_spr_power8_tm(CPUPPCState *env) 0x00000000); } -static void spr_read_ebb(DisasContext *ctx, int gprn, int sprn) -{ - gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); - spr_read_generic(ctx, gprn, sprn); -} - -static void spr_write_ebb(DisasContext *ctx, int sprn, int gprn) -{ - gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); - spr_write_generic(ctx, sprn, gprn); -} - -static void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn) -{ - gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); - spr_read_prev_upper32(ctx, gprn, sprn); -} - -static void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn) -{ - gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); - spr_write_prev_upper32(ctx, sprn, gprn); -} - -static void gen_spr_power8_ebb(CPUPPCState *env) +static void register_power8_ebb_sprs(CPUPPCState *env) { spr_register(env, SPR_BESCRS, "BESCRS", &spr_read_ebb, &spr_write_ebb, @@ -8297,7 +7317,7 @@ static void gen_spr_power8_ebb(CPUPPCState *env) } /* Virtual Time Base */ -static void gen_spr_vtb(CPUPPCState *env) +static void register_vtb_sprs(CPUPPCState *env) { spr_register_kvm_hv(env, SPR_VTB, "VTB", SPR_NOACCESS, SPR_NOACCESS, @@ -8306,7 +7326,7 @@ static void gen_spr_vtb(CPUPPCState *env) KVM_REG_PPC_VTB, 0x00000000); } -static void gen_spr_power8_fscr(CPUPPCState *env) +static void register_power8_fscr_sprs(CPUPPCState *env) { #if defined(CONFIG_USER_ONLY) target_ulong initval = 1ULL << FSCR_TAR; @@ -8319,7 +7339,7 @@ static void gen_spr_power8_fscr(CPUPPCState *env) KVM_REG_PPC_FSCR, initval); } -static void gen_spr_power8_pspb(CPUPPCState *env) +static void register_power8_pspb_sprs(CPUPPCState *env) { spr_register_kvm(env, SPR_PSPB, "PSPB", SPR_NOACCESS, SPR_NOACCESS, @@ -8327,7 +7347,7 @@ static void gen_spr_power8_pspb(CPUPPCState *env) KVM_REG_PPC_PSPB, 0); } -static void gen_spr_power8_dpdes(CPUPPCState *env) +static void register_power8_dpdes_sprs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) /* Directed Privileged Door-bell Exception State, used for IPI */ @@ -8339,7 +7359,7 @@ static void gen_spr_power8_dpdes(CPUPPCState *env) #endif } -static void gen_spr_power8_ic(CPUPPCState *env) +static void register_power8_ic_sprs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) spr_register_hv(env, SPR_IC, "IC", @@ -8350,7 +7370,7 @@ static void gen_spr_power8_ic(CPUPPCState *env) #endif } -static void gen_spr_power8_book4(CPUPPCState *env) +static void register_power8_book4_sprs(CPUPPCState *env) { /* Add a number of P8 book4 registers */ #if !defined(CONFIG_USER_ONLY) @@ -8369,7 +7389,7 @@ static void gen_spr_power8_book4(CPUPPCState *env) #endif } -static void 
gen_spr_power7_book4(CPUPPCState *env) +static void register_power7_book4_sprs(CPUPPCState *env) { /* Add a number of P7 book4 registers */ #if !defined(CONFIG_USER_ONLY) @@ -8384,7 +7404,7 @@ static void gen_spr_power7_book4(CPUPPCState *env) #endif } -static void gen_spr_power8_rpr(CPUPPCState *env) +static void register_power8_rpr_sprs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) spr_register_hv(env, SPR_RPR, "RPR", @@ -8395,7 +7415,7 @@ static void gen_spr_power8_rpr(CPUPPCState *env) #endif } -static void gen_spr_power9_mmu(CPUPPCState *env) +static void register_power9_mmu_sprs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) /* Partition Table Control */ @@ -8415,30 +7435,35 @@ static void gen_spr_power9_mmu(CPUPPCState *env) static void init_proc_book3s_common(CPUPPCState *env) { - gen_spr_ne_601(env); - gen_tbl(env); - gen_spr_usprg3(env); - gen_spr_book3s_altivec(env); - gen_spr_book3s_pmu_sup(env); - gen_spr_book3s_pmu_user(env); - gen_spr_book3s_ctrl(env); + register_ne_601_sprs(env); + register_tbl(env); + register_usprg3_sprs(env); + register_book3s_altivec_sprs(env); + register_book3s_pmu_sup_sprs(env); + register_book3s_pmu_user_sprs(env); + register_book3s_ctrl_sprs(env); + /* + * Can't find information on what this should be on reset. This + * value is the one used by 74xx processors. + */ + vscr_init(env, 0x00010000); } static void init_proc_970(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); - gen_spr_sdr1(env); - gen_spr_book3s_dbg(env); + register_sdr1_sprs(env); + register_book3s_dbg_sprs(env); /* 970 Specific Registers */ - gen_spr_970_hid(env); - gen_spr_970_hior(env); - gen_low_BATs(env); - gen_spr_970_pmu_sup(env); - gen_spr_970_pmu_user(env); - gen_spr_970_lpar(env); - gen_spr_970_dbg(env); + register_970_hid_sprs(env); + register_970_hior_sprs(env); + register_low_BATs(env); + register_970_pmu_sup_sprs(env); + register_970_pmu_user_sprs(env); + register_970_lpar_sprs(env); + register_970_dbg_sprs(env); /* env variables */ env->dcache_line_size = 128; @@ -8501,19 +7526,19 @@ static void init_proc_power5plus(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); - gen_spr_sdr1(env); - gen_spr_book3s_dbg(env); + register_sdr1_sprs(env); + register_book3s_dbg_sprs(env); /* POWER5+ Specific Registers */ - gen_spr_970_hid(env); - gen_spr_970_hior(env); - gen_low_BATs(env); - gen_spr_970_pmu_sup(env); - gen_spr_970_pmu_user(env); - gen_spr_power5p_common(env); - gen_spr_power5p_lpar(env); - gen_spr_power5p_ear(env); - gen_spr_power5p_tb(env); + register_970_hid_sprs(env); + register_970_hior_sprs(env); + register_low_BATs(env); + register_970_pmu_sup_sprs(env); + register_970_pmu_user_sprs(env); + register_power5p_common_sprs(env); + register_power5p_lpar_sprs(env); + register_power5p_ear_sprs(env); + register_power5p_tb_sprs(env); /* env variables */ env->dcache_line_size = 128; @@ -8580,21 +7605,21 @@ static void init_proc_POWER7(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); - gen_spr_sdr1(env); - gen_spr_book3s_dbg(env); + register_sdr1_sprs(env); + register_book3s_dbg_sprs(env); /* POWER7 Specific Registers */ - gen_spr_book3s_ids(env); - gen_spr_rmor(env); - gen_spr_amr(env); - gen_spr_book3s_purr(env); - gen_spr_power5p_common(env); - gen_spr_power5p_lpar(env); - gen_spr_power5p_ear(env); - gen_spr_power5p_tb(env); - gen_spr_power6_common(env); - gen_spr_power6_dbg(env); - gen_spr_power7_book4(env); + register_book3s_ids_sprs(env); + register_rmor_sprs(env); + register_amr_sprs(env); + 
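(Editorial sketch, not part of the patch.) init_proc_book3s_common() now carries the vscr_init(env, 0x00010000) call, and the 74xx init functions gained their own; the surviving comment only says the value is the one 74xx parts use. For reference, bit 16 of the 32-bit VSCR is documented as the Non-Java (NJ) mode bit and bit 0 as SAT, so this default enables NJ with SAT clear. A quick standalone check (helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define VSCR_NJ_BIT  16  /* Non-Java mode: denormals flushed to zero */
#define VSCR_SAT_BIT 0   /* saturation sticky bit */

static int vscr_bit(uint32_t vscr, unsigned bit)
{
    return (vscr >> bit) & 1;
}

int main(void)
{
    uint32_t vscr = 0x00010000;  /* the reset value passed to vscr_init() */
    printf("NJ=%d SAT=%d\n",
           vscr_bit(vscr, VSCR_NJ_BIT),
           vscr_bit(vscr, VSCR_SAT_BIT));  /* prints NJ=1 SAT=0 */
    return 0;
}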
register_book3s_purr_sprs(env); + register_power5p_common_sprs(env); + register_power5p_lpar_sprs(env); + register_power5p_ear_sprs(env); + register_power5p_tb_sprs(env); + register_power6_common_sprs(env); + register_power6_dbg_sprs(env); + register_power7_book4_sprs(env); /* env variables */ env->dcache_line_size = 128; @@ -8726,34 +7751,34 @@ static void init_proc_POWER8(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); - gen_spr_sdr1(env); - gen_spr_book3s_207_dbg(env); + register_sdr1_sprs(env); + register_book3s_207_dbg_sprs(env); /* POWER8 Specific Registers */ - gen_spr_book3s_ids(env); - gen_spr_rmor(env); - gen_spr_amr(env); - gen_spr_iamr(env); - gen_spr_book3s_purr(env); - gen_spr_power5p_common(env); - gen_spr_power5p_lpar(env); - gen_spr_power5p_ear(env); - gen_spr_power5p_tb(env); - gen_spr_power6_common(env); - gen_spr_power6_dbg(env); - gen_spr_power8_tce_address_control(env); - gen_spr_power8_ids(env); - gen_spr_power8_ebb(env); - gen_spr_power8_fscr(env); - gen_spr_power8_pmu_sup(env); - gen_spr_power8_pmu_user(env); - gen_spr_power8_tm(env); - gen_spr_power8_pspb(env); - gen_spr_power8_dpdes(env); - gen_spr_vtb(env); - gen_spr_power8_ic(env); - gen_spr_power8_book4(env); - gen_spr_power8_rpr(env); + register_book3s_ids_sprs(env); + register_rmor_sprs(env); + register_amr_sprs(env); + register_iamr_sprs(env); + register_book3s_purr_sprs(env); + register_power5p_common_sprs(env); + register_power5p_lpar_sprs(env); + register_power5p_ear_sprs(env); + register_power5p_tb_sprs(env); + register_power6_common_sprs(env); + register_power6_dbg_sprs(env); + register_power8_tce_address_control_sprs(env); + register_power8_ids_sprs(env); + register_power8_ebb_sprs(env); + register_power8_fscr_sprs(env); + register_power8_pmu_sup_sprs(env); + register_power8_pmu_user_sprs(env); + register_power8_tm_sprs(env); + register_power8_pspb_sprs(env); + register_power8_dpdes_sprs(env); + register_vtb_sprs(env); + register_power8_ic_sprs(env); + register_power8_book4_sprs(env); + register_power8_rpr_sprs(env); /* env variables */ env->dcache_line_size = 128; @@ -8923,33 +7948,33 @@ static void init_proc_POWER9(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); - gen_spr_book3s_207_dbg(env); + register_book3s_207_dbg_sprs(env); /* POWER8 Specific Registers */ - gen_spr_book3s_ids(env); - gen_spr_amr(env); - gen_spr_iamr(env); - gen_spr_book3s_purr(env); - gen_spr_power5p_common(env); - gen_spr_power5p_lpar(env); - gen_spr_power5p_ear(env); - gen_spr_power5p_tb(env); - gen_spr_power6_common(env); - gen_spr_power6_dbg(env); - gen_spr_power8_tce_address_control(env); - gen_spr_power8_ids(env); - gen_spr_power8_ebb(env); - gen_spr_power8_fscr(env); - gen_spr_power8_pmu_sup(env); - gen_spr_power8_pmu_user(env); - gen_spr_power8_tm(env); - gen_spr_power8_pspb(env); - gen_spr_power8_dpdes(env); - gen_spr_vtb(env); - gen_spr_power8_ic(env); - gen_spr_power8_book4(env); - gen_spr_power8_rpr(env); - gen_spr_power9_mmu(env); + register_book3s_ids_sprs(env); + register_amr_sprs(env); + register_iamr_sprs(env); + register_book3s_purr_sprs(env); + register_power5p_common_sprs(env); + register_power5p_lpar_sprs(env); + register_power5p_ear_sprs(env); + register_power5p_tb_sprs(env); + register_power6_common_sprs(env); + register_power6_dbg_sprs(env); + register_power8_tce_address_control_sprs(env); + register_power8_ids_sprs(env); + register_power8_ebb_sprs(env); + register_power8_fscr_sprs(env); + register_power8_pmu_sup_sprs(env); + 
register_power8_pmu_user_sprs(env); + register_power8_tm_sprs(env); + register_power8_pspb_sprs(env); + register_power8_dpdes_sprs(env); + register_vtb_sprs(env); + register_power8_ic_sprs(env); + register_power8_book4_sprs(env); + register_power8_rpr_sprs(env); + register_power9_mmu_sprs(env); /* POWER9 Specific registers */ spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL, @@ -9141,31 +8166,31 @@ static void init_proc_POWER10(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); - gen_spr_book3s_207_dbg(env); + register_book3s_207_dbg_sprs(env); /* POWER8 Specific Registers */ - gen_spr_book3s_ids(env); - gen_spr_amr(env); - gen_spr_iamr(env); - gen_spr_book3s_purr(env); - gen_spr_power5p_common(env); - gen_spr_power5p_lpar(env); - gen_spr_power5p_ear(env); - gen_spr_power6_common(env); - gen_spr_power6_dbg(env); - gen_spr_power8_tce_address_control(env); - gen_spr_power8_ids(env); - gen_spr_power8_ebb(env); - gen_spr_power8_fscr(env); - gen_spr_power8_pmu_sup(env); - gen_spr_power8_pmu_user(env); - gen_spr_power8_tm(env); - gen_spr_power8_pspb(env); - gen_spr_vtb(env); - gen_spr_power8_ic(env); - gen_spr_power8_book4(env); - gen_spr_power8_rpr(env); - gen_spr_power9_mmu(env); + register_book3s_ids_sprs(env); + register_amr_sprs(env); + register_iamr_sprs(env); + register_book3s_purr_sprs(env); + register_power5p_common_sprs(env); + register_power5p_lpar_sprs(env); + register_power5p_ear_sprs(env); + register_power6_common_sprs(env); + register_power6_dbg_sprs(env); + register_power8_tce_address_control_sprs(env); + register_power8_ids_sprs(env); + register_power8_ebb_sprs(env); + register_power8_fscr_sprs(env); + register_power8_pmu_sup_sprs(env); + register_power8_pmu_user_sprs(env); + register_power8_tm_sprs(env); + register_power8_pspb_sprs(env); + register_vtb_sprs(env); + register_power8_ic_sprs(env); + register_power8_book4_sprs(env); + register_power8_rpr_sprs(env); + register_power9_mmu_sprs(env); /* FIXME: Filter fields properly based on privilege level */ spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL, @@ -9317,13 +8342,13 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) pcc->radix_page_info = &POWER10_radix_page_info; pcc->lrg_decr_bits = 56; #endif - pcc->excp_model = POWERPC_EXCP_POWER9; + pcc->excp_model = POWERPC_EXCP_POWER10; pcc->bus_model = PPC_FLAGS_INPUT_POWER9; pcc->bfd_mach = bfd_mach_ppc64; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | - POWERPC_FLAG_VSX | POWERPC_FLAG_TM; + POWERPC_FLAG_VSX | POWERPC_FLAG_TM | POWERPC_FLAG_SCV; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr; @@ -9370,7 +8395,7 @@ static void init_ppc_proc(PowerPCCPU *cpu) env->tlb_type = TLB_NONE; #endif /* Register SPR common to all PowerPC implementations */ - gen_spr_generic(env); + register_generic_sprs(env); spr_register(env, SPR_PVR, "PVR", /* Linux permits userspace to read PVR */ #if defined(CONFIG_LINUX_USER) @@ -9560,590 +8585,6 @@ static void dump_ppc_sprs(CPUPPCState *env) } #endif -/*****************************************************************************/ - -/* Opcode types */ -enum { - PPC_DIRECT = 0, /* Opcode routine */ - PPC_INDIRECT = 1, /* Indirect opcode table */ -}; - -#define PPC_OPCODE_MASK 0x3 - -static inline int is_indirect_opcode(void *handler) -{ - return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT; -} - -static inline opc_handler_t 
**ind_table(void *handler) -{ - return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK); -} - -/* Instruction table creation */ -/* Opcodes tables creation */ -static void fill_new_table(opc_handler_t **table, int len) -{ - int i; - - for (i = 0; i < len; i++) { - table[i] = &invalid_handler; - } -} - -static int create_new_table(opc_handler_t **table, unsigned char idx) -{ - opc_handler_t **tmp; - - tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN); - fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN); - table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT); - - return 0; -} - -static int insert_in_table(opc_handler_t **table, unsigned char idx, - opc_handler_t *handler) -{ - if (table[idx] != &invalid_handler) { - return -1; - } - table[idx] = handler; - - return 0; -} - -static int register_direct_insn(opc_handler_t **ppc_opcodes, - unsigned char idx, opc_handler_t *handler) -{ - if (insert_in_table(ppc_opcodes, idx, handler) < 0) { - printf("*** ERROR: opcode %02x already assigned in main " - "opcode table\n", idx); -#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) - printf(" Registered handler '%s' - new handler '%s'\n", - ppc_opcodes[idx]->oname, handler->oname); -#endif - return -1; - } - - return 0; -} - -static int register_ind_in_table(opc_handler_t **table, - unsigned char idx1, unsigned char idx2, - opc_handler_t *handler) -{ - if (table[idx1] == &invalid_handler) { - if (create_new_table(table, idx1) < 0) { - printf("*** ERROR: unable to create indirect table " - "idx=%02x\n", idx1); - return -1; - } - } else { - if (!is_indirect_opcode(table[idx1])) { - printf("*** ERROR: idx %02x already assigned to a direct " - "opcode\n", idx1); -#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) - printf(" Registered handler '%s' - new handler '%s'\n", - ind_table(table[idx1])[idx2]->oname, handler->oname); -#endif - return -1; - } - } - if (handler != NULL && - insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) { - printf("*** ERROR: opcode %02x already assigned in " - "opcode table %02x\n", idx2, idx1); -#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) - printf(" Registered handler '%s' - new handler '%s'\n", - ind_table(table[idx1])[idx2]->oname, handler->oname); -#endif - return -1; - } - - return 0; -} - -static int register_ind_insn(opc_handler_t **ppc_opcodes, - unsigned char idx1, unsigned char idx2, - opc_handler_t *handler) -{ - return register_ind_in_table(ppc_opcodes, idx1, idx2, handler); -} - -static int register_dblind_insn(opc_handler_t **ppc_opcodes, - unsigned char idx1, unsigned char idx2, - unsigned char idx3, opc_handler_t *handler) -{ - if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) { - printf("*** ERROR: unable to join indirect table idx " - "[%02x-%02x]\n", idx1, idx2); - return -1; - } - if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3, - handler) < 0) { - printf("*** ERROR: unable to insert opcode " - "[%02x-%02x-%02x]\n", idx1, idx2, idx3); - return -1; - } - - return 0; -} - -static int register_trplind_insn(opc_handler_t **ppc_opcodes, - unsigned char idx1, unsigned char idx2, - unsigned char idx3, unsigned char idx4, - opc_handler_t *handler) -{ - opc_handler_t **table; - - if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) { - printf("*** ERROR: unable to join indirect table idx " - "[%02x-%02x]\n", idx1, idx2); - return -1; - } - table = ind_table(ppc_opcodes[idx1]); - if (register_ind_in_table(table, idx2, idx3, NULL) < 0) { - printf("*** ERROR: unable to join 
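(Editorial sketch, not part of the patch.) The opcode-table code being removed here (is_indirect_opcode, ind_table, create_new_table and friends; the unrealize hook now calls destroy_ppc_opcodes(), so the machinery presumably lives elsewhere after this change) relies on a tagged-pointer trick: handler structures are aligned, so the low bits of a table slot are free to mark whether the slot holds a direct handler or a pointer to a nested table. A self-contained sketch of the technique, with names that are not QEMU's:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_MASK     0x3u
#define TAG_INDIRECT 0x1u  /* low bit set => slot points at a sub-table */

typedef struct Handler { const char *name; } Handler;

static int slot_is_indirect(void *slot)
{
    return ((uintptr_t)slot & TAG_MASK) == TAG_INDIRECT;
}

static Handler **slot_table(void *slot)
{
    /* Strip the tag bits to recover the real sub-table pointer. */
    return (Handler **)((uintptr_t)slot & ~(uintptr_t)TAG_MASK);
}

int main(void)
{
    static Handler add = { "add" };

    /* A two-entry sub-table, reached through a tagged slot. */
    Handler **sub = calloc(2, sizeof(*sub));
    sub[1] = &add;

    void *slots[2];
    slots[0] = &add;                                     /* direct handler   */
    slots[1] = (void *)((uintptr_t)sub | TAG_INDIRECT);  /* tagged sub-table */

    printf("slot0 indirect=%d slot1 indirect=%d nested=%s\n",
           slot_is_indirect(slots[0]), slot_is_indirect(slots[1]),
           slot_table(slots[1])[1]->name);
    free(sub);
    return 0;
}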
2nd-level indirect table idx " - "[%02x-%02x-%02x]\n", idx1, idx2, idx3); - return -1; - } - table = ind_table(table[idx2]); - if (register_ind_in_table(table, idx3, idx4, handler) < 0) { - printf("*** ERROR: unable to insert opcode " - "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4); - return -1; - } - return 0; -} -static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn) -{ - if (insn->opc2 != 0xFF) { - if (insn->opc3 != 0xFF) { - if (insn->opc4 != 0xFF) { - if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2, - insn->opc3, insn->opc4, - &insn->handler) < 0) { - return -1; - } - } else { - if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2, - insn->opc3, &insn->handler) < 0) { - return -1; - } - } - } else { - if (register_ind_insn(ppc_opcodes, insn->opc1, - insn->opc2, &insn->handler) < 0) { - return -1; - } - } - } else { - if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) { - return -1; - } - } - - return 0; -} - -static int test_opcode_table(opc_handler_t **table, int len) -{ - int i, count, tmp; - - for (i = 0, count = 0; i < len; i++) { - /* Consistency fixup */ - if (table[i] == NULL) { - table[i] = &invalid_handler; - } - if (table[i] != &invalid_handler) { - if (is_indirect_opcode(table[i])) { - tmp = test_opcode_table(ind_table(table[i]), - PPC_CPU_INDIRECT_OPCODES_LEN); - if (tmp == 0) { - free(table[i]); - table[i] = &invalid_handler; - } else { - count++; - } - } else { - count++; - } - } - } - - return count; -} - -static void fix_opcode_tables(opc_handler_t **ppc_opcodes) -{ - if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) { - printf("*** WARNING: no opcode defined !\n"); - } -} - -/*****************************************************************************/ -static void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp) -{ - PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); - opcode_t *opc; - - fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN); - for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) { - if (((opc->handler.type & pcc->insns_flags) != 0) || - ((opc->handler.type2 & pcc->insns_flags2) != 0)) { - if (register_insn(cpu->opcodes, opc) < 0) { - error_setg(errp, "ERROR initializing PowerPC instruction " - "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2, - opc->opc3); - return; - } - } - } - fix_opcode_tables(cpu->opcodes); - fflush(stdout); - fflush(stderr); -} - -#if defined(PPC_DUMP_CPU) -static void dump_ppc_insns(CPUPPCState *env) -{ - opc_handler_t **table, *handler; - const char *p, *q; - uint8_t opc1, opc2, opc3, opc4; - - printf("Instructions set:\n"); - /* opc1 is 6 bits long */ - for (opc1 = 0x00; opc1 < PPC_CPU_OPCODES_LEN; opc1++) { - table = env->opcodes; - handler = table[opc1]; - if (is_indirect_opcode(handler)) { - /* opc2 is 5 bits long */ - for (opc2 = 0; opc2 < PPC_CPU_INDIRECT_OPCODES_LEN; opc2++) { - table = env->opcodes; - handler = env->opcodes[opc1]; - table = ind_table(handler); - handler = table[opc2]; - if (is_indirect_opcode(handler)) { - table = ind_table(handler); - /* opc3 is 5 bits long */ - for (opc3 = 0; opc3 < PPC_CPU_INDIRECT_OPCODES_LEN; - opc3++) { - handler = table[opc3]; - if (is_indirect_opcode(handler)) { - table = ind_table(handler); - /* opc4 is 5 bits long */ - for (opc4 = 0; opc4 < PPC_CPU_INDIRECT_OPCODES_LEN; - opc4++) { - handler = table[opc4]; - if (handler->handler != &gen_invalid) { - printf("INSN: %02x %02x %02x %02x -- " - "(%02d %04d %02d) : %s\n", - opc1, opc2, opc3, opc4, - opc1, (opc3 << 5) | opc2, opc4, - handler->oname); - } 
- } - } else { - if (handler->handler != &gen_invalid) { - /* Special hack to properly dump SPE insns */ - p = strchr(handler->oname, '_'); - if (p == NULL) { - printf("INSN: %02x %02x %02x (%02d %04d) : " - "%s\n", - opc1, opc2, opc3, opc1, - (opc3 << 5) | opc2, - handler->oname); - } else { - q = "speundef"; - if ((p - handler->oname) != strlen(q) - || (memcmp(handler->oname, q, strlen(q)) - != 0)) { - /* First instruction */ - printf("INSN: %02x %02x %02x" - "(%02d %04d) : %.*s\n", - opc1, opc2 << 1, opc3, opc1, - (opc3 << 6) | (opc2 << 1), - (int)(p - handler->oname), - handler->oname); - } - if (strcmp(p + 1, q) != 0) { - /* Second instruction */ - printf("INSN: %02x %02x %02x " - "(%02d %04d) : %s\n", opc1, - (opc2 << 1) | 1, opc3, opc1, - (opc3 << 6) | (opc2 << 1) | 1, - p + 1); - } - } - } - } - } - } else { - if (handler->handler != &gen_invalid) { - printf("INSN: %02x %02x -- (%02d %04d) : %s\n", - opc1, opc2, opc1, opc2, handler->oname); - } - } - } - } else { - if (handler->handler != &gen_invalid) { - printf("INSN: %02x -- -- (%02d ----) : %s\n", - opc1, opc1, handler->oname); - } - } - } -} -#endif - -static bool avr_need_swap(CPUPPCState *env) -{ -#ifdef HOST_WORDS_BIGENDIAN - return msr_le; -#else - return !msr_le; -#endif -} - -#if !defined(CONFIG_USER_ONLY) -static int gdb_find_spr_idx(CPUPPCState *env, int n) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) { - ppc_spr_t *spr = &env->spr_cb[i]; - - if (spr->name && spr->gdb_id == n) { - return i; - } - } - return -1; -} - -static int gdb_get_spr_reg(CPUPPCState *env, GByteArray *buf, int n) -{ - int reg; - int len; - - reg = gdb_find_spr_idx(env, n); - if (reg < 0) { - return 0; - } - - len = TARGET_LONG_SIZE; - gdb_get_regl(buf, env->spr[reg]); - ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, len), len); - return len; -} - -static int gdb_set_spr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) -{ - int reg; - int len; - - reg = gdb_find_spr_idx(env, n); - if (reg < 0) { - return 0; - } - - len = TARGET_LONG_SIZE; - ppc_maybe_bswap_register(env, mem_buf, len); - env->spr[reg] = ldn_p(mem_buf, len); - - return len; -} -#endif - -static int gdb_get_float_reg(CPUPPCState *env, GByteArray *buf, int n) -{ - uint8_t *mem_buf; - if (n < 32) { - gdb_get_reg64(buf, *cpu_fpr_ptr(env, n)); - mem_buf = gdb_get_reg_ptr(buf, 8); - ppc_maybe_bswap_register(env, mem_buf, 8); - return 8; - } - if (n == 32) { - gdb_get_reg32(buf, env->fpscr); - mem_buf = gdb_get_reg_ptr(buf, 4); - ppc_maybe_bswap_register(env, mem_buf, 4); - return 4; - } - return 0; -} - -static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n) -{ - if (n < 32) { - ppc_maybe_bswap_register(env, mem_buf, 8); - *cpu_fpr_ptr(env, n) = ldq_p(mem_buf); - return 8; - } - if (n == 32) { - ppc_maybe_bswap_register(env, mem_buf, 4); - helper_store_fpscr(env, ldl_p(mem_buf), 0xffffffff); - return 4; - } - return 0; -} - -static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n) -{ - uint8_t *mem_buf; - - if (n < 32) { - ppc_avr_t *avr = cpu_avr_ptr(env, n); - if (!avr_need_swap(env)) { - gdb_get_reg128(buf, avr->u64[0] , avr->u64[1]); - } else { - gdb_get_reg128(buf, avr->u64[1] , avr->u64[0]); - } - mem_buf = gdb_get_reg_ptr(buf, 16); - ppc_maybe_bswap_register(env, mem_buf, 8); - ppc_maybe_bswap_register(env, mem_buf + 8, 8); - return 16; - } - if (n == 32) { - gdb_get_reg32(buf, helper_mfvscr(env)); - mem_buf = gdb_get_reg_ptr(buf, 4); - ppc_maybe_bswap_register(env, mem_buf, 4); - return 4; - } - if (n == 33) { - gdb_get_reg32(buf, 
(uint32_t)env->spr[SPR_VRSAVE]); - mem_buf = gdb_get_reg_ptr(buf, 4); - ppc_maybe_bswap_register(env, mem_buf, 4); - return 4; - } - return 0; -} - -static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) -{ - if (n < 32) { - ppc_avr_t *avr = cpu_avr_ptr(env, n); - ppc_maybe_bswap_register(env, mem_buf, 8); - ppc_maybe_bswap_register(env, mem_buf + 8, 8); - if (!avr_need_swap(env)) { - avr->u64[0] = ldq_p(mem_buf); - avr->u64[1] = ldq_p(mem_buf + 8); - } else { - avr->u64[1] = ldq_p(mem_buf); - avr->u64[0] = ldq_p(mem_buf + 8); - } - return 16; - } - if (n == 32) { - ppc_maybe_bswap_register(env, mem_buf, 4); - helper_mtvscr(env, ldl_p(mem_buf)); - return 4; - } - if (n == 33) { - ppc_maybe_bswap_register(env, mem_buf, 4); - env->spr[SPR_VRSAVE] = (target_ulong)ldl_p(mem_buf); - return 4; - } - return 0; -} - -static int gdb_get_spe_reg(CPUPPCState *env, GByteArray *buf, int n) -{ - if (n < 32) { -#if defined(TARGET_PPC64) - gdb_get_reg32(buf, env->gpr[n] >> 32); - ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 4), 4); -#else - gdb_get_reg32(buf, env->gprh[n]); -#endif - return 4; - } - if (n == 32) { - gdb_get_reg64(buf, env->spe_acc); - ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 8), 8); - return 8; - } - if (n == 33) { - gdb_get_reg32(buf, env->spe_fscr); - ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 4), 4); - return 4; - } - return 0; -} - -static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n) -{ - if (n < 32) { -#if defined(TARGET_PPC64) - target_ulong lo = (uint32_t)env->gpr[n]; - target_ulong hi; - - ppc_maybe_bswap_register(env, mem_buf, 4); - - hi = (target_ulong)ldl_p(mem_buf) << 32; - env->gpr[n] = lo | hi; -#else - env->gprh[n] = ldl_p(mem_buf); -#endif - return 4; - } - if (n == 32) { - ppc_maybe_bswap_register(env, mem_buf, 8); - env->spe_acc = ldq_p(mem_buf); - return 8; - } - if (n == 33) { - ppc_maybe_bswap_register(env, mem_buf, 4); - env->spe_fscr = ldl_p(mem_buf); - return 4; - } - return 0; -} - -static int gdb_get_vsx_reg(CPUPPCState *env, GByteArray *buf, int n) -{ - if (n < 32) { - gdb_get_reg64(buf, *cpu_vsrl_ptr(env, n)); - ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 8), 8); - return 8; - } - return 0; -} - -static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n) -{ - if (n < 32) { - ppc_maybe_bswap_register(env, mem_buf, 8); - *cpu_vsrl_ptr(env, n) = ldq_p(mem_buf); - return 8; - } - return 0; -} - -static int ppc_fixup_cpu(PowerPCCPU *cpu) -{ - CPUPPCState *env = &cpu->env; - - /* - * TCG doesn't (yet) emulate some groups of instructions that are - * implemented on some otherwise supported CPUs (e.g. VSX and - * decimal floating point instructions on POWER7). We remove - * unsupported instruction groups from the cpu state's instruction - * masks and hope the guest can cope. For at least the pseries - * machine, the unavailability of these instructions can be - * advertised to the guest via the device tree. 
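(Editorial sketch, not part of the patch.) The gdb_get_avr_reg/avr_need_swap helpers removed above order the two 64-bit halves of a vector register according to whether host and guest byte orders agree (HOST_WORDS_BIGENDIAN versus MSR[LE]). A standalone restatement of that decision; the function names mirror the removed helpers but the code is only an illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The halves only need reordering when host and guest byte orders disagree:
 * big-endian host => swap iff the guest runs little-endian, and vice versa. */
static bool avr_need_swap(bool host_big_endian, bool guest_little_endian)
{
    return host_big_endian ? guest_little_endian : !guest_little_endian;
}

static void pack_avr(uint64_t hi, uint64_t lo, bool swap, uint64_t out[2])
{
    out[0] = swap ? lo : hi;
    out[1] = swap ? hi : lo;
}

int main(void)
{
    uint64_t buf[2];
    pack_avr(0x1111222233334444ULL, 0x5555666677778888ULL,
             avr_need_swap(false /* LE host */, false /* BE guest */), buf);
    printf("%016llx %016llx\n",
           (unsigned long long)buf[0], (unsigned long long)buf[1]);
    return 0;
}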
- */ - if ((env->insns_flags & ~PPC_TCG_INSNS) - || (env->insns_flags2 & ~PPC_TCG_INSNS2)) { - warn_report("Disabling some instructions which are not " - "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")", - env->insns_flags & ~PPC_TCG_INSNS, - env->insns_flags2 & ~PPC_TCG_INSNS2); - } - env->insns_flags &= PPC_TCG_INSNS; - env->insns_flags2 &= PPC_TCG_INSNS2; - return 0; -} - static void ppc_cpu_realize(DeviceState *dev, Error **errp) { CPUState *cs = CPU(dev); @@ -10174,26 +8615,7 @@ static void ppc_cpu_realize(DeviceState *dev, Error **errp) } init_ppc_proc(cpu); - if (pcc->insns_flags & PPC_FLOAT) { - gdb_register_coprocessor(cs, gdb_get_float_reg, gdb_set_float_reg, - 33, "power-fpu.xml", 0); - } - if (pcc->insns_flags & PPC_ALTIVEC) { - gdb_register_coprocessor(cs, gdb_get_avr_reg, gdb_set_avr_reg, - 34, "power-altivec.xml", 0); - } - if (pcc->insns_flags & PPC_SPE) { - gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg, - 34, "power-spe.xml", 0); - } - if (pcc->insns_flags2 & PPC2_VSX) { - gdb_register_coprocessor(cs, gdb_get_vsx_reg, gdb_set_vsx_reg, - 32, "power-vsx.xml", 0); - } -#ifndef CONFIG_USER_ONLY - gdb_register_coprocessor(cs, gdb_get_spr_reg, gdb_set_spr_reg, - pcc->gdb_num_sprs, "power-spr.xml", 0); -#endif + ppc_gdb_init(cs, pcc); qemu_init_vcpu(cs); pcc->parent_realize(dev, errp); @@ -10374,40 +8796,12 @@ static void ppc_cpu_unrealize(DeviceState *dev) { PowerPCCPU *cpu = POWERPC_CPU(dev); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); - opc_handler_t **table, **table_2; - int i, j, k; pcc->parent_unrealize(dev); cpu_remove_sync(CPU(cpu)); - for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) { - if (cpu->opcodes[i] == &invalid_handler) { - continue; - } - if (is_indirect_opcode(cpu->opcodes[i])) { - table = ind_table(cpu->opcodes[i]); - for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) { - if (table[j] == &invalid_handler) { - continue; - } - if (is_indirect_opcode(table[j])) { - table_2 = ind_table(table[j]); - for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) { - if (table_2[k] != &invalid_handler && - is_indirect_opcode(table_2[k])) { - g_free((opc_handler_t *)((uintptr_t)table_2[k] & - ~PPC_INDIRECT)); - } - } - g_free((opc_handler_t *)((uintptr_t)table[j] & - ~PPC_INDIRECT)); - } - } - g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] & - ~PPC_INDIRECT)); - } - } + destroy_ppc_opcodes(cpu); } static gint ppc_cpu_compare_class_pvr(gconstpointer a, gconstpointer b) @@ -10835,15 +9229,6 @@ static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr) return pcc->pvr == pvr; } -static gchar *ppc_gdb_arch_name(CPUState *cs) -{ -#if defined(TARGET_PPC64) - return g_strdup("powerpc:common64"); -#else - return g_strdup("powerpc:common"); -#endif -} - static void ppc_disas_set_info(CPUState *cs, disassemble_info *info) { PowerPCCPU *cpu = POWERPC_CPU(cs); @@ -10983,4 +9368,186 @@ static void ppc_cpu_register_types(void) #endif } +void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags) +{ +#define RGPL 4 +#define RFPL 4 + + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + int i; + + qemu_fprintf(f, "NIP " TARGET_FMT_lx " LR " TARGET_FMT_lx " CTR " + TARGET_FMT_lx " XER " TARGET_FMT_lx " CPU#%d\n", + env->nip, env->lr, env->ctr, cpu_read_xer(env), + cs->cpu_index); + qemu_fprintf(f, "MSR " TARGET_FMT_lx " HID0 " TARGET_FMT_lx " HF " + "%08x iidx %d didx %d\n", + env->msr, env->spr[SPR_HID0], env->hflags, + cpu_mmu_index(env, true), cpu_mmu_index(env, false)); +#if !defined(NO_TIMER_DUMP) + qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64 
+#if !defined(CONFIG_USER_ONLY) + " DECR " TARGET_FMT_lu +#endif + "\n", + cpu_ppc_load_tbu(env), cpu_ppc_load_tbl(env) +#if !defined(CONFIG_USER_ONLY) + , cpu_ppc_load_decr(env) +#endif + ); +#endif + for (i = 0; i < 32; i++) { + if ((i & (RGPL - 1)) == 0) { + qemu_fprintf(f, "GPR%02d", i); + } + qemu_fprintf(f, " %016" PRIx64, ppc_dump_gpr(env, i)); + if ((i & (RGPL - 1)) == (RGPL - 1)) { + qemu_fprintf(f, "\n"); + } + } + qemu_fprintf(f, "CR "); + for (i = 0; i < 8; i++) + qemu_fprintf(f, "%01x", env->crf[i]); + qemu_fprintf(f, " ["); + for (i = 0; i < 8; i++) { + char a = '-'; + if (env->crf[i] & 0x08) { + a = 'L'; + } else if (env->crf[i] & 0x04) { + a = 'G'; + } else if (env->crf[i] & 0x02) { + a = 'E'; + } + qemu_fprintf(f, " %c%c", a, env->crf[i] & 0x01 ? 'O' : ' '); + } + qemu_fprintf(f, " ] RES " TARGET_FMT_lx "\n", + env->reserve_addr); + + if (flags & CPU_DUMP_FPU) { + for (i = 0; i < 32; i++) { + if ((i & (RFPL - 1)) == 0) { + qemu_fprintf(f, "FPR%02d", i); + } + qemu_fprintf(f, " %016" PRIx64, *cpu_fpr_ptr(env, i)); + if ((i & (RFPL - 1)) == (RFPL - 1)) { + qemu_fprintf(f, "\n"); + } + } + qemu_fprintf(f, "FPSCR " TARGET_FMT_lx "\n", env->fpscr); + } + +#if !defined(CONFIG_USER_ONLY) + qemu_fprintf(f, " SRR0 " TARGET_FMT_lx " SRR1 " TARGET_FMT_lx + " PVR " TARGET_FMT_lx " VRSAVE " TARGET_FMT_lx "\n", + env->spr[SPR_SRR0], env->spr[SPR_SRR1], + env->spr[SPR_PVR], env->spr[SPR_VRSAVE]); + + qemu_fprintf(f, "SPRG0 " TARGET_FMT_lx " SPRG1 " TARGET_FMT_lx + " SPRG2 " TARGET_FMT_lx " SPRG3 " TARGET_FMT_lx "\n", + env->spr[SPR_SPRG0], env->spr[SPR_SPRG1], + env->spr[SPR_SPRG2], env->spr[SPR_SPRG3]); + + qemu_fprintf(f, "SPRG4 " TARGET_FMT_lx " SPRG5 " TARGET_FMT_lx + " SPRG6 " TARGET_FMT_lx " SPRG7 " TARGET_FMT_lx "\n", + env->spr[SPR_SPRG4], env->spr[SPR_SPRG5], + env->spr[SPR_SPRG6], env->spr[SPR_SPRG7]); + +#if defined(TARGET_PPC64) + if (env->excp_model == POWERPC_EXCP_POWER7 || + env->excp_model == POWERPC_EXCP_POWER8 || + env->excp_model == POWERPC_EXCP_POWER9 || + env->excp_model == POWERPC_EXCP_POWER10) { + qemu_fprintf(f, "HSRR0 " TARGET_FMT_lx " HSRR1 " TARGET_FMT_lx "\n", + env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]); + } +#endif + if (env->excp_model == POWERPC_EXCP_BOOKE) { + qemu_fprintf(f, "CSRR0 " TARGET_FMT_lx " CSRR1 " TARGET_FMT_lx + " MCSRR0 " TARGET_FMT_lx " MCSRR1 " TARGET_FMT_lx "\n", + env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1], + env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); + + qemu_fprintf(f, " TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx + " ESR " TARGET_FMT_lx " DEAR " TARGET_FMT_lx "\n", + env->spr[SPR_BOOKE_TCR], env->spr[SPR_BOOKE_TSR], + env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]); + + qemu_fprintf(f, " PIR " TARGET_FMT_lx " DECAR " TARGET_FMT_lx + " IVPR " TARGET_FMT_lx " EPCR " TARGET_FMT_lx "\n", + env->spr[SPR_BOOKE_PIR], env->spr[SPR_BOOKE_DECAR], + env->spr[SPR_BOOKE_IVPR], env->spr[SPR_BOOKE_EPCR]); + + qemu_fprintf(f, " MCSR " TARGET_FMT_lx " SPRG8 " TARGET_FMT_lx + " EPR " TARGET_FMT_lx "\n", + env->spr[SPR_BOOKE_MCSR], env->spr[SPR_BOOKE_SPRG8], + env->spr[SPR_BOOKE_EPR]); + + /* FSL-specific */ + qemu_fprintf(f, " MCAR " TARGET_FMT_lx " PID1 " TARGET_FMT_lx + " PID2 " TARGET_FMT_lx " SVR " TARGET_FMT_lx "\n", + env->spr[SPR_Exxx_MCAR], env->spr[SPR_BOOKE_PID1], + env->spr[SPR_BOOKE_PID2], env->spr[SPR_E500_SVR]); + + /* + * IVORs are left out as they are large and do not change often -- + * they can be read with "p $ivor0", "p $ivor1", etc. 
+ */ + } + +#if defined(TARGET_PPC64) + if (env->flags & POWERPC_FLAG_CFAR) { + qemu_fprintf(f, " CFAR " TARGET_FMT_lx"\n", env->cfar); + } +#endif + + if (env->spr_cb[SPR_LPCR].name) { + qemu_fprintf(f, " LPCR " TARGET_FMT_lx "\n", env->spr[SPR_LPCR]); + } + + switch (env->mmu_model) { + case POWERPC_MMU_32B: + case POWERPC_MMU_601: + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: +#if defined(TARGET_PPC64) + case POWERPC_MMU_64B: + case POWERPC_MMU_2_03: + case POWERPC_MMU_2_06: + case POWERPC_MMU_2_07: + case POWERPC_MMU_3_00: +#endif + if (env->spr_cb[SPR_SDR1].name) { /* SDR1 Exists */ + qemu_fprintf(f, " SDR1 " TARGET_FMT_lx " ", env->spr[SPR_SDR1]); + } + if (env->spr_cb[SPR_PTCR].name) { /* PTCR Exists */ + qemu_fprintf(f, " PTCR " TARGET_FMT_lx " ", env->spr[SPR_PTCR]); + } + qemu_fprintf(f, " DAR " TARGET_FMT_lx " DSISR " TARGET_FMT_lx "\n", + env->spr[SPR_DAR], env->spr[SPR_DSISR]); + break; + case POWERPC_MMU_BOOKE206: + qemu_fprintf(f, " MAS0 " TARGET_FMT_lx " MAS1 " TARGET_FMT_lx + " MAS2 " TARGET_FMT_lx " MAS3 " TARGET_FMT_lx "\n", + env->spr[SPR_BOOKE_MAS0], env->spr[SPR_BOOKE_MAS1], + env->spr[SPR_BOOKE_MAS2], env->spr[SPR_BOOKE_MAS3]); + + qemu_fprintf(f, " MAS4 " TARGET_FMT_lx " MAS6 " TARGET_FMT_lx + " MAS7 " TARGET_FMT_lx " PID " TARGET_FMT_lx "\n", + env->spr[SPR_BOOKE_MAS4], env->spr[SPR_BOOKE_MAS6], + env->spr[SPR_BOOKE_MAS7], env->spr[SPR_BOOKE_PID]); + + qemu_fprintf(f, "MMUCFG " TARGET_FMT_lx " TLB0CFG " TARGET_FMT_lx + " TLB1CFG " TARGET_FMT_lx "\n", + env->spr[SPR_MMUCFG], env->spr[SPR_BOOKE_TLB0CFG], + env->spr[SPR_BOOKE_TLB1CFG]); + break; + default: + break; + } +#endif + +#undef RGPL +#undef RFPL +} type_init(ppc_cpu_register_types) diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index 85de7e6c90..f4f15279eb 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -136,25 +136,157 @@ static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp, return POWERPC_EXCP_RESET; } -static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail) +/* + * AIL - Alternate Interrupt Location, a mode that allows interrupts to be + * taken with the MMU on, and which uses an alternate location (e.g., so the + * kernel/hv can map the vectors there with an effective address). + * + * An interrupt is considered to be taken "with AIL" or "AIL applies" if they + * are delivered in this way. AIL requires the LPCR to be set to enable this + * mode, and then a number of conditions have to be true for AIL to apply. + * + * First of all, SRESET, MCE, and HMI are always delivered without AIL, because + * they specifically want to be in real mode (e.g., the MCE might be signaling + * a SLB multi-hit which requires SLB flush before the MMU can be enabled). + * + * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV], + * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current + * radix mode (LPCR[HR]). 
+ * + * POWER8, POWER9 with LPCR[HR]=0 + * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL | + * +-----------+-------------+---------+-------------+-----+ + * | a | 00/01/10 | x | x | 0 | + * | a | 11 | 0 | 1 | 0 | + * | a | 11 | 1 | 1 | a | + * | a | 11 | 0 | 0 | a | + * +-------------------------------------------------------+ + * + * POWER9 with LPCR[HR]=1 + * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL | + * +-----------+-------------+---------+-------------+-----+ + * | a | 00/01/10 | x | x | 0 | + * | a | 11 | x | x | a | + * +-------------------------------------------------------+ + * + * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to + * the hypervisor in AIL mode if the guest is radix. This is good for + * performance but allows the guest to influence the AIL of hypervisor + * interrupts using its MSR, and also the hypervisor must disallow guest + * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to + * use AIL for its MSR[HV] 0->1 interrupts. + * + * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to + * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and + * MSR[HV] 1->1). + * + * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1. + * + * POWER10 behaviour is + * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL | + * +-----------+------------+-------------+---------+-------------+-----+ + * | a | h | 00/01/10 | 0 | 0 | 0 | + * | a | h | 11 | 0 | 0 | a | + * | a | h | x | 0 | 1 | h | + * | a | h | 00/01/10 | 1 | 1 | 0 | + * | a | h | 11 | 1 | 1 | h | + * +--------------------------------------------------------------------+ + */ +static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp, + target_ulong msr, + target_ulong *new_msr, + target_ulong *vector) { - uint64_t offset = 0; +#if defined(TARGET_PPC64) + CPUPPCState *env = &cpu->env; + bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1); + bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB); + int ail = 0; + + if (excp == POWERPC_EXCP_MCHECK || + excp == POWERPC_EXCP_RESET || + excp == POWERPC_EXCP_HV_MAINT) { + /* SRESET, MCE, HMI never apply AIL */ + return; + } - switch (ail) { - case AIL_NONE: - break; - case AIL_0001_8000: - offset = 0x18000; - break; - case AIL_C000_0000_0000_4000: - offset = 0xc000000000004000ull; - break; - default: - cpu_abort(cs, "Invalid AIL combination %d\n", ail); - break; + if (excp_model == POWERPC_EXCP_POWER8 || + excp_model == POWERPC_EXCP_POWER9) { + if (!mmu_all_on) { + /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */ + return; + } + if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) { + /* + * AIL does not work if there is a MSR[HV] 0->1 transition and the + * partition is in HPT mode. For radix guests, such interrupts are + * allowed to be delivered to the hypervisor in ail mode. + */ + return; + } + + ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; + if (ail == 0) { + return; + } + if (ail == 1) { + /* AIL=1 is reserved, treat it like AIL=0 */ + return; + } + + } else if (excp_model == POWERPC_EXCP_POWER10) { + if (!mmu_all_on && !hv_escalation) { + /* + * AIL works for HV interrupts even with guest MSR[IR/DR] disabled. + * Guest->guest and HV->HV interrupts do require MMU on. 
+ */ + return; + } + + if (*new_msr & MSR_HVB) { + if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) { + /* HV interrupts depend on LPCR[HAIL] */ + return; + } + ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */ + } else { + ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; + } + if (ail == 0) { + return; + } + if (ail == 1 || ail == 2) { + /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */ + return; + } + } else { + /* Other processors do not support AIL */ + return; } - return offset; + /* + * AIL applies, so the new MSR gets IR and DR set, and an offset applied + * to the new IP. + */ + *new_msr |= (1 << MSR_IR) | (1 << MSR_DR); + + if (excp != POWERPC_EXCP_SYSCALL_VECTORED) { + if (ail == 2) { + *vector |= 0x0000000000018000ull; + } else if (ail == 3) { + *vector |= 0xc000000000004000ull; + } + } else { + /* + * scv AIL is a little different. AIL=2 does not change the address, + * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000. + */ + if (ail == 3) { + *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */ + *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */ + } + } +#endif } static inline void powerpc_set_excp_state(PowerPCCPU *cpu, @@ -197,7 +329,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; target_ulong msr, new_msr, vector; - int srr0, srr1, asrr0, asrr1, lev = -1, ail; + int srr0, srr1, asrr0, asrr1, lev = -1; bool lpes0; qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx @@ -238,25 +370,17 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) * * On anything else, we behave as if LPES0 is 1 * (externals don't alter MSR:HV) - * - * AIL is initialized here but can be cleared by - * selected exceptions */ #if defined(TARGET_PPC64) if (excp_model == POWERPC_EXCP_POWER7 || excp_model == POWERPC_EXCP_POWER8 || - excp_model == POWERPC_EXCP_POWER9) { + excp_model == POWERPC_EXCP_POWER9 || + excp_model == POWERPC_EXCP_POWER10) { lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); - if (excp_model != POWERPC_EXCP_POWER7) { - ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; - } else { - ail = 0; - } } else #endif /* defined(TARGET_PPC64) */ { lpes0 = true; - ail = 0; } /* @@ -315,7 +439,6 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) */ new_msr |= (target_ulong)MSR_HVB; } - ail = 0; /* machine check exceptions don't have ME set */ new_msr &= ~((target_ulong)1 << MSR_ME); @@ -519,7 +642,6 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) "exception %d with no HV support\n", excp); } } - ail = 0; break; case POWERPC_EXCP_DSEG: /* Data segment exception */ case POWERPC_EXCP_ISEG: /* Instruction segment exception */ @@ -773,7 +895,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) } else if (env->spr[SPR_LPCR] & LPCR_ILE) { new_msr |= (target_ulong)1 << MSR_LE; } - } else if (excp_model == POWERPC_EXCP_POWER9) { + } else if (excp_model == POWERPC_EXCP_POWER9 || + excp_model == POWERPC_EXCP_POWER10) { if (new_msr & MSR_HVB) { if (env->spr[SPR_HID0] & HID0_POWER9_HILE) { new_msr |= (target_ulong)1 << MSR_LE; @@ -790,15 +913,6 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) } #endif - /* - * AIL only works if there is no HV transition and we are running - * with translations enabled - */ - if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) || - ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) { - ail = 0; - 
} - vector = env->excp_vectors[excp]; if (vector == (target_ulong)-1ULL) { cpu_abort(cs, "Raised an exception without defined vector %d\n", @@ -839,23 +953,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) /* Save MSR */ env->spr[srr1] = msr; - /* Handle AIL */ - if (ail) { - new_msr |= (1 << MSR_IR) | (1 << MSR_DR); - vector |= ppc_excp_vector_offset(cs, ail); - } - #if defined(TARGET_PPC64) } else { - /* scv AIL is a little different */ - if (ail) { - new_msr |= (1 << MSR_IR) | (1 << MSR_DR); - } - if (ail == AIL_C000_0000_0000_4000) { - vector |= 0xc000000000003000ull; - } else { - vector |= 0x0000000000017000ull; - } vector += lev * 0x20; env->lr = env->nip; @@ -863,6 +962,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) #endif } + /* This can update new_msr and vector if AIL applies */ + ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector); + powerpc_set_excp_state(cpu, vector, new_msr); } @@ -1130,6 +1232,15 @@ void helper_store_msr(CPUPPCState *env, target_ulong val) } #if defined(TARGET_PPC64) +void helper_scv(CPUPPCState *env, uint32_t lev) +{ + if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) { + raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev); + } else { + raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV); + } +} + void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn) { CPUState *cs; diff --git a/target/ppc/gdbstub.c b/target/ppc/gdbstub.c index c28319fb97..9339e7eafe 100644 --- a/target/ppc/gdbstub.c +++ b/target/ppc/gdbstub.c @@ -20,6 +20,8 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/gdbstub.h" +#include "exec/helper-proto.h" +#include "internal.h" static int ppc_gdb_register_len_apple(int n) { @@ -387,3 +389,259 @@ const char *ppc_gdb_get_dynamic_xml(CPUState *cs, const char *xml_name) return NULL; } #endif + +static bool avr_need_swap(CPUPPCState *env) +{ +#ifdef HOST_WORDS_BIGENDIAN + return msr_le; +#else + return !msr_le; +#endif +} + +#if !defined(CONFIG_USER_ONLY) +static int gdb_find_spr_idx(CPUPPCState *env, int n) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) { + ppc_spr_t *spr = &env->spr_cb[i]; + + if (spr->name && spr->gdb_id == n) { + return i; + } + } + return -1; +} + +static int gdb_get_spr_reg(CPUPPCState *env, GByteArray *buf, int n) +{ + int reg; + int len; + + reg = gdb_find_spr_idx(env, n); + if (reg < 0) { + return 0; + } + + len = TARGET_LONG_SIZE; + gdb_get_regl(buf, env->spr[reg]); + ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, len), len); + return len; +} + +static int gdb_set_spr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +{ + int reg; + int len; + + reg = gdb_find_spr_idx(env, n); + if (reg < 0) { + return 0; + } + + len = TARGET_LONG_SIZE; + ppc_maybe_bswap_register(env, mem_buf, len); + env->spr[reg] = ldn_p(mem_buf, len); + + return len; +} +#endif + +static int gdb_get_float_reg(CPUPPCState *env, GByteArray *buf, int n) +{ + uint8_t *mem_buf; + if (n < 32) { + gdb_get_reg64(buf, *cpu_fpr_ptr(env, n)); + mem_buf = gdb_get_reg_ptr(buf, 8); + ppc_maybe_bswap_register(env, mem_buf, 8); + return 8; + } + if (n == 32) { + gdb_get_reg32(buf, env->fpscr); + mem_buf = gdb_get_reg_ptr(buf, 4); + ppc_maybe_bswap_register(env, mem_buf, 4); + return 4; + } + return 0; +} + +static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +{ + if (n < 32) { + ppc_maybe_bswap_register(env, mem_buf, 8); + *cpu_fpr_ptr(env, n) = ldq_p(mem_buf); + return 8; + } + if (n == 32) { + ppc_maybe_bswap_register(env, mem_buf, 4); 
+ store_fpscr(env, ldl_p(mem_buf), 0xffffffff); + return 4; + } + return 0; +} + +static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n) +{ + uint8_t *mem_buf; + + if (n < 32) { + ppc_avr_t *avr = cpu_avr_ptr(env, n); + if (!avr_need_swap(env)) { + gdb_get_reg128(buf, avr->u64[0] , avr->u64[1]); + } else { + gdb_get_reg128(buf, avr->u64[1] , avr->u64[0]); + } + mem_buf = gdb_get_reg_ptr(buf, 16); + ppc_maybe_bswap_register(env, mem_buf, 8); + ppc_maybe_bswap_register(env, mem_buf + 8, 8); + return 16; + } + if (n == 32) { + gdb_get_reg32(buf, ppc_get_vscr(env)); + mem_buf = gdb_get_reg_ptr(buf, 4); + ppc_maybe_bswap_register(env, mem_buf, 4); + return 4; + } + if (n == 33) { + gdb_get_reg32(buf, (uint32_t)env->spr[SPR_VRSAVE]); + mem_buf = gdb_get_reg_ptr(buf, 4); + ppc_maybe_bswap_register(env, mem_buf, 4); + return 4; + } + return 0; +} + +static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +{ + if (n < 32) { + ppc_avr_t *avr = cpu_avr_ptr(env, n); + ppc_maybe_bswap_register(env, mem_buf, 8); + ppc_maybe_bswap_register(env, mem_buf + 8, 8); + if (!avr_need_swap(env)) { + avr->u64[0] = ldq_p(mem_buf); + avr->u64[1] = ldq_p(mem_buf + 8); + } else { + avr->u64[1] = ldq_p(mem_buf); + avr->u64[0] = ldq_p(mem_buf + 8); + } + return 16; + } + if (n == 32) { + ppc_maybe_bswap_register(env, mem_buf, 4); + ppc_store_vscr(env, ldl_p(mem_buf)); + return 4; + } + if (n == 33) { + ppc_maybe_bswap_register(env, mem_buf, 4); + env->spr[SPR_VRSAVE] = (target_ulong)ldl_p(mem_buf); + return 4; + } + return 0; +} + +static int gdb_get_spe_reg(CPUPPCState *env, GByteArray *buf, int n) +{ + if (n < 32) { +#if defined(TARGET_PPC64) + gdb_get_reg32(buf, env->gpr[n] >> 32); + ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 4), 4); +#else + gdb_get_reg32(buf, env->gprh[n]); +#endif + return 4; + } + if (n == 32) { + gdb_get_reg64(buf, env->spe_acc); + ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 8), 8); + return 8; + } + if (n == 33) { + gdb_get_reg32(buf, env->spe_fscr); + ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 4), 4); + return 4; + } + return 0; +} + +static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +{ + if (n < 32) { +#if defined(TARGET_PPC64) + target_ulong lo = (uint32_t)env->gpr[n]; + target_ulong hi; + + ppc_maybe_bswap_register(env, mem_buf, 4); + + hi = (target_ulong)ldl_p(mem_buf) << 32; + env->gpr[n] = lo | hi; +#else + env->gprh[n] = ldl_p(mem_buf); +#endif + return 4; + } + if (n == 32) { + ppc_maybe_bswap_register(env, mem_buf, 8); + env->spe_acc = ldq_p(mem_buf); + return 8; + } + if (n == 33) { + ppc_maybe_bswap_register(env, mem_buf, 4); + env->spe_fscr = ldl_p(mem_buf); + return 4; + } + return 0; +} + +static int gdb_get_vsx_reg(CPUPPCState *env, GByteArray *buf, int n) +{ + if (n < 32) { + gdb_get_reg64(buf, *cpu_vsrl_ptr(env, n)); + ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 8), 8); + return 8; + } + return 0; +} + +static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +{ + if (n < 32) { + ppc_maybe_bswap_register(env, mem_buf, 8); + *cpu_vsrl_ptr(env, n) = ldq_p(mem_buf); + return 8; + } + return 0; +} + +gchar *ppc_gdb_arch_name(CPUState *cs) +{ +#if defined(TARGET_PPC64) + return g_strdup("powerpc:common64"); +#else + return g_strdup("powerpc:common"); +#endif +} + +void ppc_gdb_init(CPUState *cs, PowerPCCPUClass *pcc) +{ + if (pcc->insns_flags & PPC_FLOAT) { + gdb_register_coprocessor(cs, gdb_get_float_reg, gdb_set_float_reg, + 33, "power-fpu.xml", 0); + } + if (pcc->insns_flags & 
PPC_ALTIVEC) { + gdb_register_coprocessor(cs, gdb_get_avr_reg, gdb_set_avr_reg, + 34, "power-altivec.xml", 0); + } + if (pcc->insns_flags & PPC_SPE) { + gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg, + 34, "power-spe.xml", 0); + } + if (pcc->insns_flags2 & PPC2_VSX) { + gdb_register_coprocessor(cs, gdb_get_vsx_reg, gdb_set_vsx_reg, + 32, "power-vsx.xml", 0); + } +#ifndef CONFIG_USER_ONLY + gdb_register_coprocessor(cs, gdb_get_spr_reg, gdb_set_spr_reg, + pcc->gdb_num_sprs, "power-spr.xml", 0); +#endif +} diff --git a/target/ppc/helper.h b/target/ppc/helper.h index 6a4dccf70c..ea9f2a236c 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -1,5 +1,5 @@ -DEF_HELPER_FLAGS_3(raise_exception_err, TCG_CALL_NO_WG, void, env, i32, i32) -DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, void, env, i32) +DEF_HELPER_FLAGS_3(raise_exception_err, TCG_CALL_NO_WG, noreturn, env, i32, i32) +DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, i32) DEF_HELPER_FLAGS_4(tw, TCG_CALL_NO_WG, void, env, tl, tl, i32) #if defined(TARGET_PPC64) DEF_HELPER_FLAGS_4(td, TCG_CALL_NO_WG, void, env, tl, tl, i32) @@ -13,6 +13,7 @@ DEF_HELPER_1(rfci, void, env) DEF_HELPER_1(rfdi, void, env) DEF_HELPER_1(rfmci, void, env) #if defined(TARGET_PPC64) +DEF_HELPER_2(scv, noreturn, env, i32) DEF_HELPER_2(pminsn, void, env, i32) DEF_HELPER_1(rfid, void, env) DEF_HELPER_1(rfscv, void, env) diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c new file mode 100644 index 0000000000..3723872aa6 --- /dev/null +++ b/target/ppc/helper_regs.c @@ -0,0 +1,280 @@ +/* + * PowerPC emulation special registers manipulation helpers for qemu. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "qemu/main-loop.h" +#include "exec/exec-all.h" +#include "sysemu/kvm.h" +#include "helper_regs.h" + +/* Swap temporary saved registers with GPRs */ +void hreg_swap_gpr_tgpr(CPUPPCState *env) +{ + target_ulong tmp; + + tmp = env->gpr[0]; + env->gpr[0] = env->tgpr[0]; + env->tgpr[0] = tmp; + tmp = env->gpr[1]; + env->gpr[1] = env->tgpr[1]; + env->tgpr[1] = tmp; + tmp = env->gpr[2]; + env->gpr[2] = env->tgpr[2]; + env->tgpr[2] = tmp; + tmp = env->gpr[3]; + env->gpr[3] = env->tgpr[3]; + env->tgpr[3] = tmp; +} + +static uint32_t hreg_compute_hflags_value(CPUPPCState *env) +{ + target_ulong msr = env->msr; + uint32_t ppc_flags = env->flags; + uint32_t hflags = 0; + uint32_t msr_mask; + + /* Some bits come straight across from MSR. 
*/ + QEMU_BUILD_BUG_ON(MSR_LE != HFLAGS_LE); + QEMU_BUILD_BUG_ON(MSR_PR != HFLAGS_PR); + QEMU_BUILD_BUG_ON(MSR_DR != HFLAGS_DR); + QEMU_BUILD_BUG_ON(MSR_FP != HFLAGS_FP); + msr_mask = ((1 << MSR_LE) | (1 << MSR_PR) | + (1 << MSR_DR) | (1 << MSR_FP)); + + if (ppc_flags & POWERPC_FLAG_HID0_LE) { + /* + * Note that MSR_LE is not set in env->msr_mask for this cpu, + * and so will never be set in msr. + */ + uint32_t le = extract32(env->spr[SPR_HID0], 3, 1); + hflags |= le << MSR_LE; + } + + if (ppc_flags & POWERPC_FLAG_DE) { + target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0]; + if (dbcr0 & DBCR0_ICMP) { + hflags |= 1 << HFLAGS_SE; + } + if (dbcr0 & DBCR0_BRT) { + hflags |= 1 << HFLAGS_BE; + } + } else { + if (ppc_flags & POWERPC_FLAG_BE) { + QEMU_BUILD_BUG_ON(MSR_BE != HFLAGS_BE); + msr_mask |= 1 << MSR_BE; + } + if (ppc_flags & POWERPC_FLAG_SE) { + QEMU_BUILD_BUG_ON(MSR_SE != HFLAGS_SE); + msr_mask |= 1 << MSR_SE; + } + } + + if (msr_is_64bit(env, msr)) { + hflags |= 1 << HFLAGS_64; + } + if ((ppc_flags & POWERPC_FLAG_SPE) && (msr & (1 << MSR_SPE))) { + hflags |= 1 << HFLAGS_SPE; + } + if (ppc_flags & POWERPC_FLAG_VRE) { + QEMU_BUILD_BUG_ON(MSR_VR != HFLAGS_VR); + msr_mask |= 1 << MSR_VR; + } + if (ppc_flags & POWERPC_FLAG_VSX) { + QEMU_BUILD_BUG_ON(MSR_VSX != HFLAGS_VSX); + msr_mask |= 1 << MSR_VSX; + } + if ((ppc_flags & POWERPC_FLAG_TM) && (msr & (1ull << MSR_TM))) { + hflags |= 1 << HFLAGS_TM; + } + if (env->spr[SPR_LPCR] & LPCR_GTSE) { + hflags |= 1 << HFLAGS_GTSE; + } + +#ifndef CONFIG_USER_ONLY + if (!env->has_hv_mode || (msr & (1ull << MSR_HV))) { + hflags |= 1 << HFLAGS_HV; + } + + /* + * This is our encoding for server processors. The architecture + * specifies that there is no such thing as userspace with + * translation off, however it appears that MacOS does it and some + * 32-bit CPUs support it. Weird... + * + * 0 = Guest User space virtual mode + * 1 = Guest Kernel space virtual mode + * 2 = Guest User space real mode + * 3 = Guest Kernel space real mode + * 4 = HV User space virtual mode + * 5 = HV Kernel space virtual mode + * 6 = HV User space real mode + * 7 = HV Kernel space real mode + * + * For BookE, we need 8 MMU modes as follow: + * + * 0 = AS 0 HV User space + * 1 = AS 0 HV Kernel space + * 2 = AS 1 HV User space + * 3 = AS 1 HV Kernel space + * 4 = AS 0 Guest User space + * 5 = AS 0 Guest Kernel space + * 6 = AS 1 Guest User space + * 7 = AS 1 Guest Kernel space + */ + unsigned immu_idx, dmmu_idx; + dmmu_idx = msr & (1 << MSR_PR) ? 0 : 1; + if (env->mmu_model & POWERPC_MMU_BOOKE) { + dmmu_idx |= msr & (1 << MSR_GS) ? 4 : 0; + immu_idx = dmmu_idx; + immu_idx |= msr & (1 << MSR_IS) ? 2 : 0; + dmmu_idx |= msr & (1 << MSR_DS) ? 2 : 0; + } else { + dmmu_idx |= msr & (1ull << MSR_HV) ? 4 : 0; + immu_idx = dmmu_idx; + immu_idx |= msr & (1 << MSR_IR) ? 0 : 2; + dmmu_idx |= msr & (1 << MSR_DR) ? 
0 : 2; + } + hflags |= immu_idx << HFLAGS_IMMU_IDX; + hflags |= dmmu_idx << HFLAGS_DMMU_IDX; +#endif + + return hflags | (msr & msr_mask); +} + +void hreg_compute_hflags(CPUPPCState *env) +{ + env->hflags = hreg_compute_hflags_value(env); +} + +#ifdef CONFIG_DEBUG_TCG +void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *flags) +{ + uint32_t hflags_current = env->hflags; + uint32_t hflags_rebuilt; + + *pc = env->nip; + *cs_base = 0; + *flags = hflags_current; + + hflags_rebuilt = hreg_compute_hflags_value(env); + if (unlikely(hflags_current != hflags_rebuilt)) { + cpu_abort(env_cpu(env), + "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n", + hflags_current, hflags_rebuilt); + } +} +#endif + +void cpu_interrupt_exittb(CPUState *cs) +{ + if (!kvm_enabled()) { + return; + } + + if (!qemu_mutex_iothread_locked()) { + qemu_mutex_lock_iothread(); + cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); + qemu_mutex_unlock_iothread(); + } else { + cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); + } +} + +int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv) +{ + int excp; +#if !defined(CONFIG_USER_ONLY) + CPUState *cs = env_cpu(env); +#endif + + excp = 0; + value &= env->msr_mask; +#if !defined(CONFIG_USER_ONLY) + /* Neither mtmsr nor guest state can alter HV */ + if (!alter_hv || !(env->msr & MSR_HVB)) { + value &= ~MSR_HVB; + value |= env->msr & MSR_HVB; + } + if (((value >> MSR_IR) & 1) != msr_ir || + ((value >> MSR_DR) & 1) != msr_dr) { + cpu_interrupt_exittb(cs); + } + if ((env->mmu_model & POWERPC_MMU_BOOKE) && + ((value >> MSR_GS) & 1) != msr_gs) { + cpu_interrupt_exittb(cs); + } + if (unlikely((env->flags & POWERPC_FLAG_TGPR) && + ((value ^ env->msr) & (1 << MSR_TGPR)))) { + /* Swap temporary saved registers with GPRs */ + hreg_swap_gpr_tgpr(env); + } + if (unlikely((value >> MSR_EP) & 1) != msr_ep) { + /* Change the exception prefix on PowerPC 601 */ + env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000; + } + /* + * If PR=1 then EE, IR and DR must be 1 + * + * Note: We only enforce this on 64-bit server processors. + * It appears that: + * - 32-bit implementations supports PR=1 and EE/DR/IR=0 and MacOS + * exploits it. + * - 64-bit embedded implementations do not need any operation to be + * performed when PR is set. 
+ */ + if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) { + value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR); + } +#endif + env->msr = value; + hreg_compute_hflags(env); +#if !defined(CONFIG_USER_ONLY) + if (unlikely(msr_pow == 1)) { + if (!env->pending_interrupts && (*env->check_pow)(env)) { + cs->halted = 1; + excp = EXCP_HALTED; + } + } +#endif + + return excp; +} + +#ifndef CONFIG_USER_ONLY +void check_tlb_flush(CPUPPCState *env, bool global) +{ + CPUState *cs = env_cpu(env); + + /* Handle global flushes first */ + if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) { + env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH; + env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH; + tlb_flush_all_cpus_synced(cs); + return; + } + + /* Then handle local ones */ + if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) { + env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH; + tlb_flush(cs); + } +} +#endif diff --git a/target/ppc/helper_regs.h b/target/ppc/helper_regs.h index efcc903427..42f26870b9 100644 --- a/target/ppc/helper_regs.h +++ b/target/ppc/helper_regs.h @@ -20,184 +20,15 @@ #ifndef HELPER_REGS_H #define HELPER_REGS_H -#include "qemu/main-loop.h" -#include "exec/exec-all.h" -#include "sysemu/kvm.h" +void hreg_swap_gpr_tgpr(CPUPPCState *env); +void hreg_compute_hflags(CPUPPCState *env); +void cpu_interrupt_exittb(CPUState *cs); +int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv); -/* Swap temporary saved registers with GPRs */ -static inline void hreg_swap_gpr_tgpr(CPUPPCState *env) -{ - target_ulong tmp; - - tmp = env->gpr[0]; - env->gpr[0] = env->tgpr[0]; - env->tgpr[0] = tmp; - tmp = env->gpr[1]; - env->gpr[1] = env->tgpr[1]; - env->tgpr[1] = tmp; - tmp = env->gpr[2]; - env->gpr[2] = env->tgpr[2]; - env->tgpr[2] = tmp; - tmp = env->gpr[3]; - env->gpr[3] = env->tgpr[3]; - env->tgpr[3] = tmp; -} - -static inline void hreg_compute_mem_idx(CPUPPCState *env) -{ - /* - * This is our encoding for server processors. The architecture - * specifies that there is no such thing as userspace with - * translation off, however it appears that MacOS does it and some - * 32-bit CPUs support it. Weird... - * - * 0 = Guest User space virtual mode - * 1 = Guest Kernel space virtual mode - * 2 = Guest User space real mode - * 3 = Guest Kernel space real mode - * 4 = HV User space virtual mode - * 5 = HV Kernel space virtual mode - * 6 = HV User space real mode - * 7 = HV Kernel space real mode - * - * For BookE, we need 8 MMU modes as follow: - * - * 0 = AS 0 HV User space - * 1 = AS 0 HV Kernel space - * 2 = AS 1 HV User space - * 3 = AS 1 HV Kernel space - * 4 = AS 0 Guest User space - * 5 = AS 0 Guest Kernel space - * 6 = AS 1 Guest User space - * 7 = AS 1 Guest Kernel space - */ - if (env->mmu_model & POWERPC_MMU_BOOKE) { - env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1; - env->immu_idx += msr_is ? 2 : 0; - env->dmmu_idx += msr_ds ? 2 : 0; - env->immu_idx += msr_gs ? 4 : 0; - env->dmmu_idx += msr_gs ? 4 : 0; - } else { - env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1; - env->immu_idx += msr_ir ? 0 : 2; - env->dmmu_idx += msr_dr ? 0 : 2; - env->immu_idx += msr_hv ? 4 : 0; - env->dmmu_idx += msr_hv ? 
4 : 0; - } -} - -static inline void hreg_compute_hflags(CPUPPCState *env) -{ - target_ulong hflags_mask; - - /* We 'forget' FE0 & FE1: we'll never generate imprecise exceptions */ - hflags_mask = (1 << MSR_VR) | (1 << MSR_AP) | (1 << MSR_SA) | - (1 << MSR_PR) | (1 << MSR_FP) | (1 << MSR_SE) | (1 << MSR_BE) | - (1 << MSR_LE) | (1 << MSR_VSX) | (1 << MSR_IR) | (1 << MSR_DR); - hflags_mask |= (1ULL << MSR_CM) | (1ULL << MSR_SF) | MSR_HVB; - hreg_compute_mem_idx(env); - env->hflags = env->msr & hflags_mask; - /* Merge with hflags coming from other registers */ - env->hflags |= env->hflags_nmsr; -} - -static inline void cpu_interrupt_exittb(CPUState *cs) -{ - if (!kvm_enabled()) { - return; - } - - if (!qemu_mutex_iothread_locked()) { - qemu_mutex_lock_iothread(); - cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); - qemu_mutex_unlock_iothread(); - } else { - cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); - } -} - -static inline int hreg_store_msr(CPUPPCState *env, target_ulong value, - int alter_hv) -{ - int excp; -#if !defined(CONFIG_USER_ONLY) - CPUState *cs = env_cpu(env); -#endif - - excp = 0; - value &= env->msr_mask; -#if !defined(CONFIG_USER_ONLY) - /* Neither mtmsr nor guest state can alter HV */ - if (!alter_hv || !(env->msr & MSR_HVB)) { - value &= ~MSR_HVB; - value |= env->msr & MSR_HVB; - } - if (((value >> MSR_IR) & 1) != msr_ir || - ((value >> MSR_DR) & 1) != msr_dr) { - cpu_interrupt_exittb(cs); - } - if ((env->mmu_model & POWERPC_MMU_BOOKE) && - ((value >> MSR_GS) & 1) != msr_gs) { - cpu_interrupt_exittb(cs); - } - if (unlikely((env->flags & POWERPC_FLAG_TGPR) && - ((value ^ env->msr) & (1 << MSR_TGPR)))) { - /* Swap temporary saved registers with GPRs */ - hreg_swap_gpr_tgpr(env); - } - if (unlikely((value >> MSR_EP) & 1) != msr_ep) { - /* Change the exception prefix on PowerPC 601 */ - env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000; - } - /* - * If PR=1 then EE, IR and DR must be 1 - * - * Note: We only enforce this on 64-bit server processors. - * It appears that: - * - 32-bit implementations supports PR=1 and EE/DR/IR=0 and MacOS - * exploits it. - * - 64-bit embedded implementations do not need any operation to be - * performed when PR is set. 
- */ - if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) { - value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR); - } -#endif - env->msr = value; - hreg_compute_hflags(env); -#if !defined(CONFIG_USER_ONLY) - if (unlikely(msr_pow == 1)) { - if (!env->pending_interrupts && (*env->check_pow)(env)) { - cs->halted = 1; - excp = EXCP_HALTED; - } - } -#endif - - return excp; -} - -#if !defined(CONFIG_USER_ONLY) -static inline void check_tlb_flush(CPUPPCState *env, bool global) -{ - CPUState *cs = env_cpu(env); - - /* Handle global flushes first */ - if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) { - env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH; - env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH; - tlb_flush_all_cpus_synced(cs); - return; - } - - /* Then handle local ones */ - if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) { - env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH; - tlb_flush(cs); - } -} -#else +#ifdef CONFIG_USER_ONLY static inline void check_tlb_flush(CPUPPCState *env, bool global) { } +#else +void check_tlb_flush(CPUPPCState *env, bool global); #endif #endif /* HELPER_REGS_H */ diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c index 429de28494..41f8477d4b 100644 --- a/target/ppc/int_helper.c +++ b/target/ppc/int_helper.c @@ -22,6 +22,7 @@ #include "internal.h" #include "qemu/host-utils.h" #include "qemu/main-loop.h" +#include "qemu/log.h" #include "exec/helper-proto.h" #include "crypto/aes.h" #include "fpu/softfloat.h" @@ -461,17 +462,12 @@ SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX) void helper_mtvscr(CPUPPCState *env, uint32_t vscr) { - env->vscr = vscr & ~(1u << VSCR_SAT); - /* Which bit we set is completely arbitrary, but clear the rest. */ - env->vscr_sat.u64[0] = vscr & (1u << VSCR_SAT); - env->vscr_sat.u64[1] = 0; - set_flush_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status); + ppc_store_vscr(env, vscr); } uint32_t helper_mfvscr(CPUPPCState *env) { - uint32_t sat = (env->vscr_sat.u64[0] | env->vscr_sat.u64[1]) != 0; - return env->vscr | (sat << VSCR_SAT); + return ppc_get_vscr(env); } static inline void set_vscr_sat(CPUPPCState *env) diff --git a/target/ppc/internal.h b/target/ppc/internal.h index d547448065..2b4b06eb76 100644 --- a/target/ppc/internal.h +++ b/target/ppc/internal.h @@ -215,4 +215,36 @@ void helper_compute_fprf_float128(CPUPPCState *env, float128 arg); void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); + +/* translate.c */ + +/* #define PPC_DUMP_CPU */ + +int ppc_fixup_cpu(PowerPCCPU *cpu); +void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp); +void destroy_ppc_opcodes(PowerPCCPU *cpu); + +/* gdbstub.c */ +void ppc_gdb_init(CPUState *cs, PowerPCCPUClass *ppc); +gchar *ppc_gdb_arch_name(CPUState *cs); + +/** + * prot_for_access_type: + * @access_type: Access type + * + * Return the protection bit required for the given access type. 
+ */ +static inline int prot_for_access_type(MMUAccessType access_type) +{ + switch (access_type) { + case MMU_INST_FETCH: + return PAGE_EXEC; + case MMU_DATA_LOAD: + return PAGE_READ; + case MMU_DATA_STORE: + return PAGE_WRITE; + } + g_assert_not_reached(); +} + #endif /* PPC_INTERNAL_H */ diff --git a/target/ppc/machine.c b/target/ppc/machine.c index 283db1d28a..93972df58e 100644 --- a/target/ppc/machine.c +++ b/target/ppc/machine.c @@ -8,7 +8,18 @@ #include "qapi/error.h" #include "qemu/main-loop.h" #include "kvm_ppc.h" -#include "exec/helper-proto.h" + +static void post_load_update_msr(CPUPPCState *env) +{ + target_ulong msr = env->msr; + + /* + * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB + * before restoring. Note that this recomputes hflags. + */ + env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB); + ppc_store_msr(env, msr); +} static int cpu_load_old(QEMUFile *f, void *opaque, int version_id) { @@ -95,7 +106,7 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id) ppc_store_sdr1(env, sdr1); } qemu_get_be32s(f, &vscr); - helper_mtvscr(env, vscr); + ppc_store_vscr(env, vscr); qemu_get_be64s(f, &env->spe_acc); qemu_get_be32s(f, &env->spe_fscr); qemu_get_betls(f, &env->msr_mask); @@ -111,13 +122,12 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id) qemu_get_betls(f, &env->ivpr_mask); qemu_get_betls(f, &env->hreset_vector); qemu_get_betls(f, &env->nip); - qemu_get_betls(f, &env->hflags); - qemu_get_betls(f, &env->hflags_nmsr); + qemu_get_sbetl(f); /* Discard unused hflags */ + qemu_get_sbetl(f); /* Discard unused hflags_nmsr */ qemu_get_sbe32(f); /* Discard unused mmu_idx */ qemu_get_sbe32(f); /* Discard unused power_mode */ - /* Recompute mmu indices */ - hreg_compute_mem_idx(env); + post_load_update_msr(env); return 0; } @@ -304,6 +314,10 @@ static int cpu_pre_save(void *opaque) } } + /* Retain migration compatibility for pre 6.0 for 601 machines. */ + env->hflags_compat_nmsr = (env->flags & POWERPC_FLAG_HID0_LE + ? env->hflags & MSR_LE : 0); + return 0; } @@ -333,7 +347,6 @@ static int cpu_post_load(void *opaque, int version_id) PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; int i; - target_ulong msr; /* * If we're operating in compat mode, we should be ok as long as @@ -407,15 +420,7 @@ static int cpu_post_load(void *opaque, int version_id) ppc_store_sdr1(env, env->spr[SPR_SDR1]); } - /* - * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB - * before restoring - */ - msr = env->msr; - env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB); - ppc_store_msr(env, msr); - - hreg_compute_mem_idx(env); + post_load_update_msr(env); return 0; } @@ -450,7 +455,7 @@ static int get_vscr(QEMUFile *f, void *opaque, size_t size, const VMStateField *field) { PowerPCCPU *cpu = opaque; - helper_mtvscr(&cpu->env, qemu_get_be32(f)); + ppc_store_vscr(&cpu->env, qemu_get_be32(f)); return 0; } @@ -458,7 +463,7 @@ static int put_vscr(QEMUFile *f, void *opaque, size_t size, const VMStateField *field, JSONWriter *vmdesc) { PowerPCCPU *cpu = opaque; - qemu_put_be32(f, helper_mfvscr(&cpu->env)); + qemu_put_be32(f, ppc_get_vscr(&cpu->env)); return 0; } @@ -825,9 +830,8 @@ const VMStateDescription vmstate_ppc_cpu = { /* Supervisor mode architected state */ VMSTATE_UINTTL(env.msr, PowerPCCPU), - /* Internal state */ - VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU), - /* FIXME: access_type? 
*/ + /* Backward compatible internal state */ + VMSTATE_UINTTL(env.hflags_compat_nmsr, PowerPCCPU), /* Sanity checking */ VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration), diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c index f4f7e730de..444b2a30ef 100644 --- a/target/ppc/mem_helper.c +++ b/target/ppc/mem_helper.c @@ -278,7 +278,7 @@ static void dcbz_common(CPUPPCState *env, target_ulong addr, target_ulong mask, dcbz_size = env->dcache_line_size; uint32_t i; void *haddr; - int mmu_idx = epid ? PPC_TLB_EPID_STORE : env->dmmu_idx; + int mmu_idx = epid ? PPC_TLB_EPID_STORE : cpu_mmu_index(env, false); #if defined(TARGET_PPC64) /* Check for dcbz vs dcbzl on 970 */ diff --git a/target/ppc/meson.build b/target/ppc/meson.build index bbfef90e08..d1aa7d5d39 100644 --- a/target/ppc/meson.build +++ b/target/ppc/meson.build @@ -2,10 +2,12 @@ ppc_ss = ss.source_set() ppc_ss.add(files( 'cpu-models.c', 'cpu.c', + 'cpu_init.c', 'dfp_helper.c', 'excp_helper.c', 'fpu_helper.c', 'gdbstub.c', + 'helper_regs.c', 'int_helper.c', 'mem_helper.c', 'misc_helper.c', diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c index 5d6e0de396..08a31da289 100644 --- a/target/ppc/misc_helper.c +++ b/target/ppc/misc_helper.c @@ -194,16 +194,14 @@ void helper_store_hid0_601(CPUPPCState *env, target_ulong val) target_ulong hid0; hid0 = env->spr[SPR_HID0]; + env->spr[SPR_HID0] = (uint32_t)val; + if ((val ^ hid0) & 0x00000008) { /* Change current endianness */ - env->hflags &= ~(1 << MSR_LE); - env->hflags_nmsr &= ~(1 << MSR_LE); - env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE); - env->hflags |= env->hflags_nmsr; - qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__, + hreg_compute_hflags(env); + qemu_log("%s: set endianness to %c => %08x\n", __func__, val & 0x8 ? 'l' : 'b', env->hflags); } - env->spr[SPR_HID0] = (uint32_t)val; } void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value) @@ -217,6 +215,9 @@ void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value) void helper_store_40x_dbcr0(CPUPPCState *env, target_ulong val) { + /* Bits 26 & 27 affect single-stepping. */ + hreg_compute_hflags(env); + /* Bits 28 & 29 affect reset or shutdown. */ store_40x_dbcr0(env, val); } @@ -260,6 +261,16 @@ void ppc_store_msr(CPUPPCState *env, target_ulong value) hreg_store_msr(env, value, 0); } +void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + CPUPPCState *env = &cpu->env; + + env->spr[SPR_LPCR] = val & pcc->lpcr_mask; + /* The gtse bit affects hflags */ + hreg_compute_hflags(env); +} + /* * This code is lifted from MacOnLinux. 
It is called whenever THRM1,2 * or 3 is read an fixes up the values in such a way that will make diff --git a/target/ppc/mmu-hash32.c b/target/ppc/mmu-hash32.c index 178cf090b7..744a763f44 100644 --- a/target/ppc/mmu-hash32.c +++ b/target/ppc/mmu-hash32.c @@ -24,6 +24,7 @@ #include "exec/helper-proto.h" #include "sysemu/kvm.h" #include "kvm_ppc.h" +#include "internal.h" #include "mmu-hash32.h" #include "exec/log.h" @@ -152,16 +153,17 @@ static int hash32_bat_601_prot(PowerPCCPU *cpu, return ppc_hash32_pp_prot(key, pp, 0); } -static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx, - int *prot) +static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, + MMUAccessType access_type, int *prot) { CPUPPCState *env = &cpu->env; target_ulong *BATlt, *BATut; + bool ifetch = access_type == MMU_INST_FETCH; int i; LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, - rwx == 2 ? 'I' : 'D', ea); - if (rwx == 2) { + ifetch ? 'I' : 'D', ea); + if (ifetch) { BATlt = env->IBAT[1]; BATut = env->IBAT[0]; } else { @@ -180,7 +182,7 @@ static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx, } LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__, - type == ACCESS_CODE ? 'I' : 'D', i, ea, batu, batl); + ifetch ? 'I' : 'D', i, ea, batu, batl); if (mask && ((ea & mask) == (batu & BATU32_BEPI))) { hwaddr raddr = (batl & mask) | (ea & ~mask); @@ -208,7 +210,7 @@ static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx, LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " TARGET_FMT_lx " " TARGET_FMT_lx "\n", - __func__, type == ACCESS_CODE ? 'I' : 'D', i, ea, + __func__, ifetch ? 'I' : 'D', i, ea, *BATu, *BATl, BEPIu, BEPIl, bl); } } @@ -218,7 +220,8 @@ static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx, } static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr, - target_ulong eaddr, int rwx, + target_ulong eaddr, + MMUAccessType access_type, hwaddr *raddr, int *prot) { CPUState *cs = CPU(cpu); @@ -239,7 +242,7 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr, return 0; } - if (rwx == 2) { + if (access_type == MMU_INST_FETCH) { /* No code fetch is allowed in direct-store areas */ cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x10000000; @@ -260,7 +263,7 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr, /* lwarx, ldarx or srwcx. 
*/ env->error_code = 0; env->spr[SPR_DAR] = eaddr; - if (rwx == 1) { + if (access_type == MMU_DATA_STORE) { env->spr[SPR_DSISR] = 0x06000000; } else { env->spr[SPR_DSISR] = 0x04000000; @@ -280,7 +283,7 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr, cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = eaddr; - if (rwx == 1) { + if (access_type == MMU_DATA_STORE) { env->spr[SPR_DSISR] = 0x06100000; } else { env->spr[SPR_DSISR] = 0x04100000; @@ -290,14 +293,15 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr, cpu_abort(cs, "ERROR: instruction should not need " "address translation\n"); } - if ((rwx == 1 || key != 1) && (rwx == 0 || key != 0)) { + if ((access_type == MMU_DATA_STORE || key != 1) && + (access_type == MMU_DATA_LOAD || key != 0)) { *raddr = eaddr; return 0; } else { cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = eaddr; - if (rwx == 1) { + if (access_type == MMU_DATA_STORE) { env->spr[SPR_DSISR] = 0x0a000000; } else { env->spr[SPR_DSISR] = 0x08000000; @@ -421,13 +425,16 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, hwaddr pte_offset; ppc_hash_pte32_t pte; int prot; - const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; + int need_prot; + MMUAccessType access_type; hwaddr raddr; assert((rwx == 0) || (rwx == 1) || (rwx == 2)); + access_type = rwx; + need_prot = prot_for_access_type(access_type); /* 1. Handle real mode accesses */ - if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { + if (access_type == MMU_INST_FETCH ? !msr_ir : !msr_dr) { /* Translation is off */ raddr = eaddr; tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, @@ -438,17 +445,17 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, /* 2. Check Block Address Translation entries (BATs) */ if (env->nb_BATs != 0) { - raddr = ppc_hash32_bat_lookup(cpu, eaddr, rwx, &prot); + raddr = ppc_hash32_bat_lookup(cpu, eaddr, access_type, &prot); if (raddr != -1) { - if (need_prot[rwx] & ~prot) { - if (rwx == 2) { + if (need_prot & ~prot) { + if (access_type == MMU_INST_FETCH) { cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x08000000; } else { cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = eaddr; - if (rwx == 1) { + if (access_type == MMU_DATA_STORE) { env->spr[SPR_DSISR] = 0x0a000000; } else { env->spr[SPR_DSISR] = 0x08000000; @@ -469,7 +476,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, /* 4. Handle direct store segments */ if (sr & SR32_T) { - if (ppc_hash32_direct_store(cpu, sr, eaddr, rwx, + if (ppc_hash32_direct_store(cpu, sr, eaddr, access_type, &raddr, &prot) == 0) { tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, prot, mmu_idx, @@ -481,7 +488,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, } /* 5. Check for segment level no-execute violation */ - if ((rwx == 2) && (sr & SR32_NX)) { + if (access_type == MMU_INST_FETCH && (sr & SR32_NX)) { cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x10000000; return 1; @@ -490,14 +497,14 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, /* 6. 
Locate the PTE in the hash table */ pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte); if (pte_offset == -1) { - if (rwx == 2) { + if (access_type == MMU_INST_FETCH) { cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x40000000; } else { cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = eaddr; - if (rwx == 1) { + if (access_type == MMU_DATA_STORE) { env->spr[SPR_DSISR] = 0x42000000; } else { env->spr[SPR_DSISR] = 0x40000000; @@ -513,17 +520,17 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, prot = ppc_hash32_pte_prot(cpu, sr, pte); - if (need_prot[rwx] & ~prot) { + if (need_prot & ~prot) { /* Access right violation */ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); - if (rwx == 2) { + if (access_type == MMU_INST_FETCH) { cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x08000000; } else { cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = eaddr; - if (rwx == 1) { + if (access_type == MMU_DATA_STORE) { env->spr[SPR_DSISR] = 0x0a000000; } else { env->spr[SPR_DSISR] = 0x08000000; @@ -540,7 +547,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, ppc_hash32_set_r(cpu, pte_offset, pte.pte1); } if (!(pte.pte1 & HPTE32_R_C)) { - if (rwx == 1) { + if (access_type == MMU_DATA_STORE) { ppc_hash32_set_c(cpu, pte_offset, pte.pte1); } else { /* diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index 0fabc10302..f48b625f48 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -29,7 +29,9 @@ #include "mmu-hash64.h" #include "exec/log.h" #include "hw/hw.h" +#include "internal.h" #include "mmu-book3s-v3.h" +#include "helper_regs.h" /* #define DEBUG_SLB */ @@ -875,10 +877,12 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, hwaddr ptex; ppc_hash_pte64_t pte; int exec_prot, pp_prot, amr_prot, prot; - const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; + MMUAccessType access_type; + int need_prot; hwaddr raddr; assert((rwx == 0) || (rwx == 1) || (rwx == 2)); + access_type = rwx; /* * Note on LPCR usage: 970 uses HID4, but our special variant of @@ -889,7 +893,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, */ /* 1. Handle real mode accesses */ - if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { + if (access_type == MMU_INST_FETCH ? 
!msr_ir : !msr_dr) { /* * Translation is supposedly "off", but in real mode the top 4 * effective address bits are (mostly) ignored @@ -922,14 +926,19 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, /* Emulated old-style RMO mode, bounds check against RMLS */ if (raddr >= limit) { - if (rwx == 2) { + switch (access_type) { + case MMU_INST_FETCH: ppc_hash64_set_isi(cs, SRR1_PROTFAULT); - } else { - int dsisr = DSISR_PROTFAULT; - if (rwx == 1) { - dsisr |= DSISR_ISSTORE; - } - ppc_hash64_set_dsi(cs, eaddr, dsisr); + break; + case MMU_DATA_LOAD: + ppc_hash64_set_dsi(cs, eaddr, DSISR_PROTFAULT); + break; + case MMU_DATA_STORE: + ppc_hash64_set_dsi(cs, eaddr, + DSISR_PROTFAULT | DSISR_ISSTORE); + break; + default: + g_assert_not_reached(); } return 1; } @@ -952,13 +961,19 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, exit(1); } /* Segment still not found, generate the appropriate interrupt */ - if (rwx == 2) { + switch (access_type) { + case MMU_INST_FETCH: cs->exception_index = POWERPC_EXCP_ISEG; env->error_code = 0; - } else { + break; + case MMU_DATA_LOAD: + case MMU_DATA_STORE: cs->exception_index = POWERPC_EXCP_DSEG; env->error_code = 0; env->spr[SPR_DAR] = eaddr; + break; + default: + g_assert_not_reached(); } return 1; } @@ -966,7 +981,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, skip_slb_search: /* 3. Check for segment level no-execute violation */ - if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) { + if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) { ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD); return 1; } @@ -974,14 +989,18 @@ skip_slb_search: /* 4. Locate the PTE in the hash table */ ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift); if (ptex == -1) { - if (rwx == 2) { + switch (access_type) { + case MMU_INST_FETCH: ppc_hash64_set_isi(cs, SRR1_NOPTE); - } else { - int dsisr = DSISR_NOPTE; - if (rwx == 1) { - dsisr |= DSISR_ISSTORE; - } - ppc_hash64_set_dsi(cs, eaddr, dsisr); + break; + case MMU_DATA_LOAD: + ppc_hash64_set_dsi(cs, eaddr, DSISR_NOPTE); + break; + case MMU_DATA_STORE: + ppc_hash64_set_dsi(cs, eaddr, DSISR_NOPTE | DSISR_ISSTORE); + break; + default: + g_assert_not_reached(); } return 1; } @@ -995,10 +1014,11 @@ skip_slb_search: amr_prot = ppc_hash64_amr_prot(cpu, pte); prot = exec_prot & pp_prot & amr_prot; - if ((need_prot[rwx] & ~prot) != 0) { + need_prot = prot_for_access_type(access_type); + if (need_prot & ~prot) { /* Access right violation */ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); - if (rwx == 2) { + if (access_type == MMU_INST_FETCH) { int srr1 = 0; if (PAGE_EXEC & ~exec_prot) { srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */ @@ -1011,13 +1031,13 @@ skip_slb_search: ppc_hash64_set_isi(cs, srr1); } else { int dsisr = 0; - if (need_prot[rwx] & ~pp_prot) { + if (need_prot & ~pp_prot) { dsisr |= DSISR_PROTFAULT; } - if (rwx == 1) { + if (access_type == MMU_DATA_STORE) { dsisr |= DSISR_ISSTORE; } - if (need_prot[rwx] & ~amr_prot) { + if (need_prot & ~amr_prot) { dsisr |= DSISR_AMR; } ppc_hash64_set_dsi(cs, eaddr, dsisr); @@ -1033,7 +1053,7 @@ skip_slb_search: ppc_hash64_set_r(cpu, ptex, pte.pte1); } if (!(pte.pte1 & HPTE64_R_C)) { - if (rwx == 1) { + if (access_type == MMU_DATA_STORE) { ppc_hash64_set_c(cpu, ptex, pte.pte1); } else { /* @@ -1119,14 +1139,6 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex, cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH; } -void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val) -{ - 
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); - CPUPPCState *env = &cpu->env; - - env->spr[SPR_LPCR] = val & pcc->lpcr_mask; -} - void helper_store_lpcr(CPUPPCState *env, target_ulong val) { PowerPCCPU *cpu = env_archcpu(env); @@ -1197,61 +1209,4 @@ const PPCHash64Options ppc_hash64_opts_POWER7 = { } }; -void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu, - bool (*cb)(void *, uint32_t, uint32_t), - void *opaque) -{ - PPCHash64Options *opts = cpu->hash64_opts; - int i; - int n = 0; - bool ci_largepage = false; - - assert(opts); - - n = 0; - for (i = 0; i < ARRAY_SIZE(opts->sps); i++) { - PPCHash64SegmentPageSizes *sps = &opts->sps[i]; - int j; - int m = 0; - assert(n <= i); - - if (!sps->page_shift) { - break; - } - - for (j = 0; j < ARRAY_SIZE(sps->enc); j++) { - PPCHash64PageSize *ps = &sps->enc[j]; - - assert(m <= j); - if (!ps->page_shift) { - break; - } - - if (cb(opaque, sps->page_shift, ps->page_shift)) { - if (ps->page_shift >= 16) { - ci_largepage = true; - } - sps->enc[m++] = *ps; - } - } - - /* Clear rest of the row */ - for (j = m; j < ARRAY_SIZE(sps->enc); j++) { - memset(&sps->enc[j], 0, sizeof(sps->enc[j])); - } - - if (m) { - n++; - } - } - - /* Clear the rest of the table */ - for (i = n; i < ARRAY_SIZE(opts->sps); i++) { - memset(&opts->sps[i], 0, sizeof(opts->sps[i])); - } - - if (!ci_largepage) { - opts->flags &= ~PPC_HASH64_CI_LARGEPAGE; - } -} diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h index 87729d48b3..4b8b8e7950 100644 --- a/target/ppc/mmu-hash64.h +++ b/target/ppc/mmu-hash64.h @@ -15,12 +15,8 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong pte0, target_ulong pte1); unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu, uint64_t pte0, uint64_t pte1); -void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val); void ppc_hash64_init(PowerPCCPU *cpu); void ppc_hash64_finalize(PowerPCCPU *cpu); -void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu, - bool (*cb)(void *, uint32_t, uint32_t), - void *opaque); #endif /* diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c index 30fcfcf11f..7972153f23 100644 --- a/target/ppc/mmu-radix64.c +++ b/target/ppc/mmu-radix64.c @@ -25,6 +25,7 @@ #include "sysemu/kvm.h" #include "kvm_ppc.h" #include "exec/log.h" +#include "internal.h" #include "mmu-radix64.h" #include "mmu-book3s-v3.h" @@ -74,71 +75,94 @@ static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env, return true; } -static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr) +static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type, + vaddr eaddr) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; - if (rwx == 2) { /* Instruction Segment Interrupt */ + switch (access_type) { + case MMU_INST_FETCH: + /* Instruction Segment Interrupt */ cs->exception_index = POWERPC_EXCP_ISEG; - } else { /* Data Segment Interrupt */ + break; + case MMU_DATA_STORE: + case MMU_DATA_LOAD: + /* Data Segment Interrupt */ cs->exception_index = POWERPC_EXCP_DSEG; env->spr[SPR_DAR] = eaddr; + break; + default: + g_assert_not_reached(); } env->error_code = 0; } -static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr, - uint32_t cause) +static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type, + vaddr eaddr, uint32_t cause) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; - if (rwx == 2) { /* Instruction Storage Interrupt */ + switch (access_type) { + case MMU_INST_FETCH: + /* Instruction Storage Interrupt */ cs->exception_index = POWERPC_EXCP_ISI; 
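Several hunks in this file and the other MMU files replace the per-rwx need_prot[] lookup tables with prot_for_access_type(), which this series adds to target/ppc/internal.h; its definition is not visible in this diff. A minimal sketch of such a helper, assuming it mirrors the old table, would be:

static inline int prot_for_access_type(MMUAccessType access_type)
{
    switch (access_type) {
    case MMU_INST_FETCH:
        return PAGE_EXEC;   /* was need_prot[2] */
    case MMU_DATA_LOAD:
        return PAGE_READ;   /* was need_prot[0] */
    case MMU_DATA_STORE:
        return PAGE_WRITE;  /* was need_prot[1] */
    default:
        g_assert_not_reached();
    }
}

With that, need_prot & ~prot keeps exactly the old need_prot[rwx] & ~prot semantics.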
env->error_code = cause; - } else { /* Data Storage Interrupt */ + break; + case MMU_DATA_STORE: + cause |= DSISR_ISSTORE; + /* fall through */ + case MMU_DATA_LOAD: + /* Data Storage Interrupt */ cs->exception_index = POWERPC_EXCP_DSI; - if (rwx == 1) { /* Write -> Store */ - cause |= DSISR_ISSTORE; - } env->spr[SPR_DSISR] = cause; env->spr[SPR_DAR] = eaddr; env->error_code = 0; + break; + default: + g_assert_not_reached(); } } -static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, int rwx, vaddr eaddr, - hwaddr g_raddr, uint32_t cause) +static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type, + vaddr eaddr, hwaddr g_raddr, uint32_t cause) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; - if (rwx == 2) { /* H Instruction Storage Interrupt */ + switch (access_type) { + case MMU_INST_FETCH: + /* H Instruction Storage Interrupt */ cs->exception_index = POWERPC_EXCP_HISI; env->spr[SPR_ASDR] = g_raddr; env->error_code = cause; - } else { /* H Data Storage Interrupt */ + break; + case MMU_DATA_STORE: + cause |= DSISR_ISSTORE; + /* fall through */ + case MMU_DATA_LOAD: + /* H Data Storage Interrupt */ cs->exception_index = POWERPC_EXCP_HDSI; - if (rwx == 1) { /* Write -> Store */ - cause |= DSISR_ISSTORE; - } env->spr[SPR_HDSISR] = cause; env->spr[SPR_HDAR] = eaddr; env->spr[SPR_ASDR] = g_raddr; env->error_code = 0; + break; + default: + g_assert_not_reached(); } } -static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte, - int *fault_cause, int *prot, +static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type, + uint64_t pte, int *fault_cause, int *prot, bool partition_scoped) { CPUPPCState *env = &cpu->env; - const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC }; + int need_prot; /* Check Page Attributes (pte58:59) */ - if (((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO) && (rwx == 2)) { + if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) { /* * Radix PTE entries with the non-idempotent I/O attribute are treated * as guarded storage @@ -158,7 +182,8 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte, } /* Check if requested access type is allowed */ - if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */ + need_prot = prot_for_access_type(access_type); + if (need_prot & ~*prot) { /* Page Protected for that Access */ *fault_cause |= DSISR_PROTFAULT; return true; } @@ -166,15 +191,15 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte, return false; } -static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte, - hwaddr pte_addr, int *prot) +static void ppc_radix64_set_rc(PowerPCCPU *cpu, MMUAccessType access_type, + uint64_t pte, hwaddr pte_addr, int *prot) { CPUState *cs = CPU(cpu); uint64_t npte; npte = pte | R_PTE_R; /* Always set reference bit */ - if (rwx == 1) { /* Store/Write */ + if (access_type == MMU_DATA_STORE) { /* Store/Write */ npte |= R_PTE_C; /* Set change bit */ } else { /* @@ -269,7 +294,8 @@ static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate) return true; } -static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu, int rwx, +static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu, + MMUAccessType access_type, vaddr eaddr, hwaddr g_raddr, ppc_v3_pate_t pate, hwaddr *h_raddr, int *h_prot, @@ -285,24 +311,25 @@ static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu, int rwx, if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB, pate.dw0 & PRTBE_R_RPDS, 
h_raddr, h_page_size, &pte, &fault_cause, &pte_addr) || - ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, h_prot, true)) { + ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause, h_prot, true)) { if (pde_addr) { /* address being translated was that of a guest pde */ fault_cause |= DSISR_PRTABLE_FAULT; } if (guest_visible) { - ppc_radix64_raise_hsi(cpu, rwx, eaddr, g_raddr, fault_cause); + ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr, fault_cause); } return 1; } if (guest_visible) { - ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, h_prot); + ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, h_prot); } return 0; } -static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx, +static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, + MMUAccessType access_type, vaddr eaddr, uint64_t pid, ppc_v3_pate_t pate, hwaddr *g_raddr, int *g_prot, int *g_page_size, @@ -321,7 +348,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx, if (offset >= size) { /* offset exceeds size of the process table */ if (guest_visible) { - ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE); + ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE); } return 1; } @@ -362,7 +389,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx, if (ret) { /* No valid PTE */ if (guest_visible) { - ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause); + ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause); } return ret; } @@ -391,7 +418,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx, if (ret) { /* No valid pte */ if (guest_visible) { - ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause); + ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause); } return ret; } @@ -405,16 +432,16 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx, *g_raddr = (rpn & ~mask) | (eaddr & mask); } - if (ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, g_prot, false)) { + if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause, g_prot, false)) { /* Access denied due to protection */ if (guest_visible) { - ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause); + ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause); } return 1; } if (guest_visible) { - ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, g_prot); + ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, g_prot); } return 0; @@ -437,7 +464,8 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx, * | = On | Process Scoped | Scoped | * +-------------+----------------+---------------+ */ -static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx, +static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, + MMUAccessType access_type, bool relocation, hwaddr *raddr, int *psizep, int *protp, bool guest_visible) @@ -451,7 +479,7 @@ static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx, /* Virtual Mode Access - get the fully qualified address */ if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) { if (guest_visible) { - ppc_radix64_raise_segi(cpu, rwx, eaddr); + ppc_radix64_raise_segi(cpu, access_type, eaddr); } return 1; } @@ -464,13 +492,13 @@ static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx, } else { if (!ppc64_v3_get_pate(cpu, lpid, &pate)) { if (guest_visible) { - ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE); + ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE); } return 1; } if (!validate_pate(cpu, lpid, &pate)) { if (guest_visible) { - ppc_radix64_raise_si(cpu, rwx, eaddr, 
DSISR_R_BADCONFIG); + ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG); } return 1; } @@ -488,7 +516,7 @@ static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx, * - Translates an effective address to a guest real address. */ if (relocation) { - int ret = ppc_radix64_process_scoped_xlate(cpu, rwx, eaddr, pid, + int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid, pate, &g_raddr, &prot, &psize, guest_visible); if (ret) { @@ -511,9 +539,10 @@ static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx, if (lpid || !msr_hv) { int ret; - ret = ppc_radix64_partition_scoped_xlate(cpu, rwx, eaddr, g_raddr, - pate, raddr, &prot, &psize, - false, guest_visible); + ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr, + g_raddr, pate, raddr, + &prot, &psize, false, + guest_visible); if (ret) { return ret; } @@ -534,12 +563,14 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, CPUPPCState *env = &cpu->env; int page_size, prot; bool relocation; + MMUAccessType access_type; hwaddr raddr; assert(!(msr_hv && cpu->vhyp)); assert((rwx == 0) || (rwx == 1) || (rwx == 2)); + access_type = rwx; - relocation = ((rwx == 2) && (msr_ir == 1)) || ((rwx != 2) && (msr_dr == 1)); + relocation = (access_type == MMU_INST_FETCH ? msr_ir : msr_dr); /* HV or virtual hypervisor Real Mode Access */ if (!relocation && (msr_hv || cpu->vhyp)) { /* In real mode top 4 effective addr bits (mostly) ignored */ @@ -568,7 +599,7 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, } /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */ - if (ppc_radix64_xlate(cpu, eaddr, rwx, relocation, &raddr, + if (ppc_radix64_xlate(cpu, eaddr, access_type, relocation, &raddr, &page_size, &prot, true)) { return 1; } diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c index ca88658cba..37986c59ba 100644 --- a/target/ppc/mmu_helper.c +++ b/target/ppc/mmu_helper.c @@ -32,6 +32,7 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/qemu-print.h" +#include "internal.h" #include "mmu-book3s-v3.h" #include "mmu-radix64.h" @@ -126,36 +127,14 @@ static int pp_check(int key, int pp, int nx) return access; } -static int check_prot(int prot, int rw, int access_type) +static int check_prot(int prot, MMUAccessType access_type) { - int ret; - - if (access_type == ACCESS_CODE) { - if (prot & PAGE_EXEC) { - ret = 0; - } else { - ret = -2; - } - } else if (rw) { - if (prot & PAGE_WRITE) { - ret = 0; - } else { - ret = -2; - } - } else { - if (prot & PAGE_READ) { - ret = 0; - } else { - ret = -2; - } - } - - return ret; + return prot & prot_for_access_type(access_type) ? 
0 : -2; } -static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, - target_ulong pte1, int h, - int rw, int type) +static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, + target_ulong pte1, int h, + MMUAccessType access_type) { target_ulong ptem, mmask; int access, ret, pteh, ptev, pp; @@ -182,7 +161,7 @@ static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, /* Keep the matching PTE information */ ctx->raddr = pte1; ctx->prot = access; - ret = check_prot(ctx->prot, rw, type); + ret = check_prot(ctx->prot, access_type); if (ret == 0) { /* Access granted */ qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); @@ -197,7 +176,7 @@ static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, } static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, - int ret, int rw) + int ret, MMUAccessType access_type) { int store = 0; @@ -208,7 +187,7 @@ static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, store = 1; } if (!(*pte1p & 0x00000080)) { - if (rw == 1 && ret == 0) { + if (access_type == MMU_DATA_STORE && ret == 0) { /* Update changed flag */ *pte1p |= 0x00000080; store = 1; @@ -308,8 +287,8 @@ static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way, env->last_way = way; } -static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, int rw, int access_type) +static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, MMUAccessType access_type) { ppc6xx_tlb_t *tlb; int nr, best, way; @@ -318,8 +297,7 @@ static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, best = -1; ret = -1; /* No TLB found */ for (way = 0; way < env->nb_ways; way++) { - nr = ppc6xx_tlb_getnum(env, eaddr, way, - access_type == ACCESS_CODE ? 1 : 0); + nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH); tlb = &env->tlb.tlb6[nr]; /* This test "emulates" the PTE index match for hardware TLBs */ if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { @@ -333,9 +311,10 @@ static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb, pte_is_valid(tlb->pte0) ? "valid" : "inval", tlb->EPN, eaddr, tlb->pte1, - rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D'); + access_type == MMU_DATA_STORE ? 'S' : 'L', + access_type == MMU_INST_FETCH ? 'I' : 'D'); switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, - 0, rw, access_type)) { + 0, access_type)) { case -3: /* TLB inconsistency */ return -1; @@ -366,7 +345,7 @@ static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n", ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); /* Update page flags */ - pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw); + pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type); } return ret; @@ -400,24 +379,22 @@ static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, } static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong virtual, int rw, int type) + target_ulong virtual, MMUAccessType access_type) { target_ulong *BATlt, *BATut, *BATu, *BATl; target_ulong BEPIl, BEPIu, bl; int i, valid, prot; int ret = -1; + bool ifetch = access_type == MMU_INST_FETCH; LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, - type == ACCESS_CODE ? 'I' : 'D', virtual); - switch (type) { - case ACCESS_CODE: + ifetch ? 
'I' : 'D', virtual); + if (ifetch) { BATlt = env->IBAT[1]; BATut = env->IBAT[0]; - break; - default: + } else { BATlt = env->DBAT[1]; BATut = env->DBAT[0]; - break; } for (i = 0; i < env->nb_BATs; i++) { BATu = &BATut[i]; @@ -427,7 +404,7 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__, - type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl); + ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl); if ((virtual & 0xF0000000) == BEPIu && ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { /* BAT matches */ @@ -438,7 +415,7 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, (virtual & 0x0001F000); /* Compute access rights */ ctx->prot = prot; - ret = check_prot(ctx->prot, rw, type); + ret = check_prot(ctx->prot, access_type); if (ret == 0) { LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n", i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', @@ -461,7 +438,7 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " TARGET_FMT_lx " " TARGET_FMT_lx "\n", - __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual, + __func__, ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl, BEPIu, BEPIl, bl); } } @@ -472,8 +449,9 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, } /* Perform segment based translation */ -static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, int rw, int type) +static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, MMUAccessType access_type, + int type) { PowerPCCPU *cpu = env_archcpu(env); hwaddr hash; @@ -497,7 +475,7 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx " ir=%d dr=%d pr=%d %d t=%d\n", eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, - (int)msr_dr, pr != 0 ? 1 : 0, rw, type); + (int)msr_dr, pr != 0 ? 
1 : 0, access_type == MMU_DATA_STORE, type); pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; hash = vsid ^ pgidx; ctx->ptem = (vsid << 7) | (pgidx >> 10); @@ -520,7 +498,7 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, /* Initialize real address with an invalid value */ ctx->raddr = (hwaddr)-1ULL; /* Software TLB search */ - ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type); + ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type); #if defined(DUMP_PAGE_TABLES) if (qemu_loglevel_mask(CPU_LOG_MMU)) { CPUState *cs = env_cpu(env); @@ -603,7 +581,8 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, "address translation\n"); return -4; } - if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) { + if ((access_type == MMU_DATA_STORE || ctx->key != 1) && + (access_type == MMU_DATA_LOAD || ctx->key != 0)) { ctx->raddr = eaddr; ret = 2; } else { @@ -682,8 +661,8 @@ static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env) } static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong address, int rw, - int access_type) + target_ulong address, + MMUAccessType access_type) { ppcemb_tlb_t *tlb; hwaddr raddr; @@ -700,8 +679,8 @@ static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, } zsel = (tlb->attr >> 4) & 0xF; zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; - LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n", - __func__, i, zsel, zpr, rw, tlb->attr); + LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n", + __func__, i, zsel, zpr, access_type, tlb->attr); /* Check execute enable bit */ switch (zpr) { case 0x2: @@ -727,7 +706,7 @@ static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, check_perms: /* Check from TLB entry */ ctx->prot = tlb->prot; - ret = check_prot(ctx->prot, rw, access_type); + ret = check_prot(ctx->prot, access_type); if (ret == -2) { env->spr[SPR_40x_ESR] = 0; } @@ -757,12 +736,11 @@ void store_40x_sler(CPUPPCState *env, uint32_t val) env->spr[SPR_405_SLER] = val; } -static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, - hwaddr *raddr, int *prot, - target_ulong address, int rw, - int access_type, int i) +static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, + hwaddr *raddr, int *prot, target_ulong address, + MMUAccessType access_type, int i) { - int ret, prot2; + int prot2; if (ppcemb_tlb_check(env, tlb, raddr, address, env->spr[SPR_BOOKE_PID], @@ -794,42 +772,24 @@ found_tlb: } /* Check the address space */ - if (access_type == ACCESS_CODE) { - if (msr_ir != (tlb->attr & 1)) { - LOG_SWTLB("%s: AS doesn't match\n", __func__); - return -1; - } - - *prot = prot2; - if (prot2 & PAGE_EXEC) { - LOG_SWTLB("%s: good TLB!\n", __func__); - return 0; - } - - LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2); - ret = -3; - } else { - if (msr_dr != (tlb->attr & 1)) { - LOG_SWTLB("%s: AS doesn't match\n", __func__); - return -1; - } - - *prot = prot2; - if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) { - LOG_SWTLB("%s: found TLB!\n", __func__); - return 0; - } + if ((access_type == MMU_INST_FETCH ? 
msr_ir : msr_dr) != (tlb->attr & 1)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } - LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2); - ret = -2; + *prot = prot2; + if (prot2 & prot_for_access_type(access_type)) { + LOG_SWTLB("%s: good TLB!\n", __func__); + return 0; } - return ret; + LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2); + return access_type == MMU_INST_FETCH ? -3 : -2; } static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong address, int rw, - int access_type) + target_ulong address, + MMUAccessType access_type) { ppcemb_tlb_t *tlb; hwaddr raddr; @@ -839,7 +799,7 @@ static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, raddr = (hwaddr)-1ULL; for (i = 0; i < env->nb_tlb; i++) { tlb = &env->tlb.tlbe[i]; - ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw, + ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, access_type, i); if (ret != -1) { break; @@ -938,10 +898,10 @@ static bool is_epid_mmu(int mmu_idx) return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; } -static uint32_t mmubooke206_esr(int mmu_idx, bool rw) +static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type) { uint32_t esr = 0; - if (rw) { + if (access_type == MMU_DATA_STORE) { esr |= ESR_ST; } if (is_epid_mmu(mmu_idx)) { @@ -983,10 +943,9 @@ static bool mmubooke206_get_as(CPUPPCState *env, /* Check if the tlb found by hashing really matches */ static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddr, int *prot, - target_ulong address, int rw, - int access_type, int mmu_idx) + target_ulong address, + MMUAccessType access_type, int mmu_idx) { - int ret; int prot2 = 0; uint32_t epid; bool as, pr; @@ -1043,44 +1002,31 @@ found_tlb: } /* Check the address space and permissions */ - if (access_type == ACCESS_CODE) { + if (access_type == MMU_INST_FETCH) { /* There is no way to fetch code using epid load */ assert(!use_epid); - if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { - LOG_SWTLB("%s: AS doesn't match\n", __func__); - return -1; - } - - *prot = prot2; - if (prot2 & PAGE_EXEC) { - LOG_SWTLB("%s: good TLB!\n", __func__); - return 0; - } - - LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2); - ret = -3; - } else { - if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { - LOG_SWTLB("%s: AS doesn't match\n", __func__); - return -1; - } + as = msr_ir; + } - *prot = prot2; - if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) { - LOG_SWTLB("%s: found TLB!\n", __func__); - return 0; - } + if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } - LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2); - ret = -2; + *prot = prot2; + if (prot2 & prot_for_access_type(access_type)) { + LOG_SWTLB("%s: good TLB!\n", __func__); + return 0; } - return ret; + LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2); + return access_type == MMU_INST_FETCH ? 
-3 : -2; } static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong address, int rw, - int access_type, int mmu_idx) + target_ulong address, + MMUAccessType access_type, + int mmu_idx) { ppcmas_tlb_t *tlb; hwaddr raddr; @@ -1098,7 +1044,7 @@ static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, continue; } ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, - rw, access_type, mmu_idx); + access_type, mmu_idx); if (ret != -1) { goto found_tlb; } @@ -1361,8 +1307,8 @@ void dump_mmu(CPUPPCState *env) } } -static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, int rw) +static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, + MMUAccessType access_type) { int in_plb, ret; @@ -1393,7 +1339,7 @@ static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0; if (in_plb ^ msr_px) { /* Access in protected area */ - if (rw == 1) { + if (access_type == MMU_DATA_STORE) { /* Access is not allowed */ ret = -2; } @@ -1413,28 +1359,28 @@ static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, return ret; } -static int get_physical_address_wtlb( - CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, int rw, int access_type, - int mmu_idx) +static int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, + MMUAccessType access_type, int type, + int mmu_idx) { int ret = -1; - bool real_mode = (access_type == ACCESS_CODE && msr_ir == 0) - || (access_type != ACCESS_CODE && msr_dr == 0); + bool real_mode = (type == ACCESS_CODE && msr_ir == 0) + || (type != ACCESS_CODE && msr_dr == 0); switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: if (real_mode) { - ret = check_physical(env, ctx, eaddr, rw); + ret = check_physical(env, ctx, eaddr, access_type); } else { /* Try to find a BAT */ if (env->nb_BATs != 0) { - ret = get_bat_6xx_tlb(env, ctx, eaddr, rw, access_type); + ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type); } if (ret < 0) { /* We didn't match any BAT entry or don't have BATs */ - ret = get_segment_6xx_tlb(env, ctx, eaddr, rw, access_type); + ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type); } } break; @@ -1442,19 +1388,17 @@ static int get_physical_address_wtlb( case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: if (real_mode) { - ret = check_physical(env, ctx, eaddr, rw); + ret = check_physical(env, ctx, eaddr, access_type); } else { - ret = mmu40x_get_physical_address(env, ctx, eaddr, - rw, access_type); + ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type); } break; case POWERPC_MMU_BOOKE: - ret = mmubooke_get_physical_address(env, ctx, eaddr, - rw, access_type); + ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type); break; case POWERPC_MMU_BOOKE206: - ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, - access_type, mmu_idx); + ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type, + mmu_idx); break; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ @@ -1462,7 +1406,7 @@ static int get_physical_address_wtlb( break; case POWERPC_MMU_REAL: if (real_mode) { - ret = check_physical(env, ctx, eaddr, rw); + ret = check_physical(env, ctx, eaddr, access_type); } else { cpu_abort(env_cpu(env), "PowerPC in real mode do not do any translation\n"); @@ -1476,11 +1420,11 @@ static int get_physical_address_wtlb( return ret; } -static int get_physical_address( - CPUPPCState *env, mmu_ctx_t *ctx, - 
target_ulong eaddr, int rw, int access_type) +static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, MMUAccessType access_type, + int type) { - return get_physical_address_wtlb(env, ctx, eaddr, rw, access_type, 0); + return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0); } hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) @@ -1508,14 +1452,15 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) ; } - if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) { + if (unlikely(get_physical_address(env, &ctx, addr, MMU_DATA_LOAD, + ACCESS_INT) != 0)) { /* * Some MMUs have separate TLBs for code and data. If we only * try an ACCESS_INT, we may not be able to read instructions * mapped by code TLBs, so we also try a ACCESS_CODE. */ - if (unlikely(get_physical_address(env, &ctx, addr, 0, + if (unlikely(get_physical_address(env, &ctx, addr, MMU_INST_FETCH, ACCESS_CODE) != 0)) { return -1; } @@ -1525,13 +1470,14 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) } static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, - int rw, int mmu_idx) + MMUAccessType access_type, int mmu_idx) { uint32_t epid; bool as, pr; uint32_t missed_tid = 0; bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); - if (rw == 2) { + + if (access_type == MMU_INST_FETCH) { as = msr_ir; } env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; @@ -1579,24 +1525,23 @@ static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, /* Perform address translation */ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, - int rw, int mmu_idx) + MMUAccessType access_type, int mmu_idx) { CPUState *cs = env_cpu(env); PowerPCCPU *cpu = POWERPC_CPU(cs); mmu_ctx_t ctx; - int access_type; + int type; int ret = 0; - if (rw == 2) { + if (access_type == MMU_INST_FETCH) { /* code access */ - rw = 0; - access_type = ACCESS_CODE; + type = ACCESS_CODE; } else { /* data access */ - access_type = env->access_type; + type = env->access_type; } - ret = get_physical_address_wtlb(env, &ctx, address, rw, - access_type, mmu_idx); + ret = get_physical_address_wtlb(env, &ctx, address, access_type, + type, mmu_idx); if (ret == 0) { tlb_set_page(cs, address & TARGET_PAGE_MASK, ctx.raddr & TARGET_PAGE_MASK, ctx.prot, @@ -1604,7 +1549,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, ret = 0; } else if (ret < 0) { LOG_MMU_STATE(cs); - if (access_type == ACCESS_CODE) { + if (type == ACCESS_CODE) { switch (ret) { case -1: /* No matches in page tables or TLB */ @@ -1632,7 +1577,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, cs->exception_index = POWERPC_EXCP_ITLB; env->error_code = 0; env->spr[SPR_BOOKE_DEAR] = address; - env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, 0); + env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD); return -1; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ @@ -1674,7 +1619,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, /* No matches in page tables or TLB */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: - if (rw == 1) { + if (access_type == MMU_DATA_STORE) { cs->exception_index = POWERPC_EXCP_DSTLB; env->error_code = 1 << 16; } else { @@ -1691,7 +1636,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, get_pteg_offset32(cpu, ctx.hash[1]); break; case POWERPC_MMU_SOFT_74xx: - if (rw == 1) { + if 
(access_type == MMU_DATA_STORE) { cs->exception_index = POWERPC_EXCP_DSTLB; } else { cs->exception_index = POWERPC_EXCP_DLTLB; @@ -1708,7 +1653,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, cs->exception_index = POWERPC_EXCP_DTLB; env->error_code = 0; env->spr[SPR_40x_DEAR] = address; - if (rw) { + if (access_type == MMU_DATA_STORE) { env->spr[SPR_40x_ESR] = 0x00800000; } else { env->spr[SPR_40x_ESR] = 0x00000000; @@ -1719,13 +1664,13 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); break; case POWERPC_MMU_BOOKE206: - booke206_update_mas_tlb_miss(env, address, rw, mmu_idx); + booke206_update_mas_tlb_miss(env, address, access_type, mmu_idx); /* fall through */ case POWERPC_MMU_BOOKE: cs->exception_index = POWERPC_EXCP_DTLB; env->error_code = 0; env->spr[SPR_BOOKE_DEAR] = address; - env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw); + env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); return -1; case POWERPC_MMU_REAL: cpu_abort(cs, "PowerPC in real mode should never raise " @@ -1743,16 +1688,16 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, if (env->mmu_model == POWERPC_MMU_SOFT_4xx || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { env->spr[SPR_40x_DEAR] = address; - if (rw) { + if (access_type == MMU_DATA_STORE) { env->spr[SPR_40x_ESR] |= 0x00800000; } } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || (env->mmu_model == POWERPC_MMU_BOOKE206)) { env->spr[SPR_BOOKE_DEAR] = address; - env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw); + env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); } else { env->spr[SPR_DAR] = address; - if (rw == 1) { + if (access_type == MMU_DATA_STORE) { env->spr[SPR_DSISR] = 0x0A000000; } else { env->spr[SPR_DSISR] = 0x08000000; @@ -1761,7 +1706,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, break; case -4: /* Direct store exception */ - switch (access_type) { + switch (type) { case ACCESS_FLOAT: /* Floating point load/store */ cs->exception_index = POWERPC_EXCP_ALIGN; @@ -1773,7 +1718,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = address; - if (rw == 1) { + if (access_type == MMU_DATA_STORE) { env->spr[SPR_DSISR] = 0x06000000; } else { env->spr[SPR_DSISR] = 0x04000000; @@ -1784,7 +1729,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = address; - if (rw == 1) { + if (access_type == MMU_DATA_STORE) { env->spr[SPR_DSISR] = 0x06100000; } else { env->spr[SPR_DSISR] = 0x04100000; @@ -2085,32 +2030,6 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) /*****************************************************************************/ /* Special registers manipulation */ -void ppc_store_sdr1(CPUPPCState *env, target_ulong value) -{ - PowerPCCPU *cpu = env_archcpu(env); - qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value); - assert(!cpu->vhyp); -#if defined(TARGET_PPC64) - if (mmu_is_64bit(env->mmu_model)) { - target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE; - target_ulong htabsize = value & SDR_64_HTABSIZE; - - if (value & ~sdr_mask) { - error_report("Invalid bits 0x"TARGET_FMT_lx" set in SDR1", - value & ~sdr_mask); - value &= sdr_mask; - } - if (htabsize > 28) { - error_report("Invalid 
HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1", - htabsize); - return; - } - } -#endif /* defined(TARGET_PPC64) */ - /* FIXME: Should check for valid HTABMASK values in 32-bit case */ - env->spr[SPR_SDR1] = value; -} - #if defined(TARGET_PPC64) void ppc_store_ptcr(CPUPPCState *env, target_ulong value) { diff --git a/target/ppc/spr_tcg.h b/target/ppc/spr_tcg.h new file mode 100644 index 0000000000..0be5f347d5 --- /dev/null +++ b/target/ppc/spr_tcg.h @@ -0,0 +1,136 @@ +/* + * PowerPC emulation for qemu: read/write callbacks for SPRs + * + * Copyright (C) 2021 Instituto de Pesquisas Eldorado + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#ifndef SPR_TCG_H +#define SPR_TCG_H + +#define SPR_NOACCESS (&spr_noaccess) + +/* prototypes for readers and writers for SPRs */ +void spr_noaccess(DisasContext *ctx, int gprn, int sprn); +void spr_read_generic(DisasContext *ctx, int gprn, int sprn); +void spr_write_generic(DisasContext *ctx, int sprn, int gprn); +void spr_read_xer(DisasContext *ctx, int gprn, int sprn); +void spr_write_xer(DisasContext *ctx, int sprn, int gprn); +void spr_read_lr(DisasContext *ctx, int gprn, int sprn); +void spr_write_lr(DisasContext *ctx, int sprn, int gprn); +void spr_read_ctr(DisasContext *ctx, int gprn, int sprn); +void spr_write_ctr(DisasContext *ctx, int sprn, int gprn); +void spr_read_ureg(DisasContext *ctx, int gprn, int sprn); +void spr_read_tbl(DisasContext *ctx, int gprn, int sprn); +void spr_read_tbu(DisasContext *ctx, int gprn, int sprn); +void spr_read_atbl(DisasContext *ctx, int gprn, int sprn); +void spr_read_atbu(DisasContext *ctx, int gprn, int sprn); +void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn); +void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn); +void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn); +void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn); + +#ifndef CONFIG_USER_ONLY +void spr_write_generic32(DisasContext *ctx, int sprn, int gprn); +void spr_write_clear(DisasContext *ctx, int sprn, int gprn); +void spr_access_nop(DisasContext *ctx, int sprn, int gprn); +void spr_read_decr(DisasContext *ctx, int gprn, int sprn); +void spr_write_decr(DisasContext *ctx, int sprn, int gprn); +void spr_write_tbl(DisasContext *ctx, int sprn, int gprn); +void spr_write_tbu(DisasContext *ctx, int sprn, int gprn); +void spr_write_atbl(DisasContext *ctx, int sprn, int gprn); +void spr_write_atbu(DisasContext *ctx, int sprn, int gprn); +void spr_read_ibat(DisasContext *ctx, int gprn, int sprn); +void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn); +void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn); +void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn); +void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn); +void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn); +void spr_read_dbat(DisasContext *ctx, int gprn, int sprn); +void spr_read_dbat_h(DisasContext 
*ctx, int gprn, int sprn);
+void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn);
+void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn);
+void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn);
+void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn);
+void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn);
+void spr_write_601_rtcu(DisasContext *ctx, int sprn, int gprn);
+void spr_write_601_rtcl(DisasContext *ctx, int sprn, int gprn);
+void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn);
+void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn);
+void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn);
+void spr_write_601_ubatl(DisasContext *ctx, int sprn, int gprn);
+void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn);
+void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn);
+void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn);
+void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn);
+void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_pir(DisasContext *ctx, int sprn, int gprn);
+void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn);
+void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn);
+void spr_read_thrm(DisasContext *ctx, int gprn, int sprn);
+void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn);
+void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn);
+void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn);
+void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn);
+void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn);
+void spr_write_eplc(DisasContext *ctx, int sprn, int gprn);
+void spr_write_epsc(DisasContext *ctx, int sprn, int gprn);
+void spr_write_mas73(DisasContext *ctx, int sprn, int gprn);
+void spr_read_mas73(DisasContext *ctx, int gprn, int sprn);
+#ifdef TARGET_PPC64
+void spr_read_cfar(DisasContext *ctx, int gprn, int sprn);
+void spr_write_cfar(DisasContext *ctx, int sprn, int gprn);
+void spr_write_ureg(DisasContext *ctx, int sprn, int gprn);
+void spr_read_purr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_purr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_vtb(DisasContext *ctx, int gprn, int sprn);
+void spr_write_vtb(DisasContext *ctx, int sprn, int gprn);
+void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn);
+void spr_write_pidr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_hior(DisasContext *ctx, int gprn, int sprn);
+void spr_write_hior(DisasContext *ctx, int sprn, int gprn);
+void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_pcr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn);
+void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn);
+void spr_write_amr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_uamor(DisasContext *ctx, int sprn, int gprn);
+void spr_write_iamr(DisasContext *ctx, int sprn, int gprn);
+#endif
+#endif
+
+#ifdef TARGET_PPC64
+void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn);
+void spr_write_prev_upper32(DisasContext *ctx, int
sprn, int gprn); +void spr_read_tar(DisasContext *ctx, int gprn, int sprn); +void spr_write_tar(DisasContext *ctx, int sprn, int gprn); +void spr_read_tm(DisasContext *ctx, int gprn, int sprn); +void spr_write_tm(DisasContext *ctx, int sprn, int gprn); +void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn); +void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn); +void spr_read_ebb(DisasContext *ctx, int gprn, int sprn); +void spr_write_ebb(DisasContext *ctx, int sprn, int gprn); +void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn); +void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn); +void spr_write_hmer(DisasContext *ctx, int sprn, int gprn); +void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn); +#endif + +#endif diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 0984ce637b..ea200f9637 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -36,7 +36,10 @@ #include "exec/translator.h" #include "exec/log.h" #include "qemu/atomic128.h" +#include "spr_tcg.h" +#include "qemu/qemu-print.h" +#include "qapi/error.h" #define CPU_SINGLE_STEP 0x1 #define CPU_BRANCH_STEP 0x2 @@ -154,8 +157,8 @@ void ppc_translate_init(void) /* internal defines */ struct DisasContext { DisasContextBase base; + target_ulong cia; /* current instruction address */ uint32_t opcode; - uint32_t exception; /* Routine used to access memory */ bool pr, hv, dr, le_mode; bool lazy_tlb_flush; @@ -173,7 +176,6 @@ struct DisasContext { bool vsx_enabled; bool spe_enabled; bool tm_enabled; - bool scv_enabled; bool gtse; ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */ int singlestep_enabled; @@ -182,6 +184,11 @@ struct DisasContext { uint64_t insns_flags2; }; +#define DISAS_EXIT DISAS_TARGET_0 /* exit to main loop, pc updated */ +#define DISAS_EXIT_UPDATE DISAS_TARGET_1 /* exit to main loop, pc stale */ +#define DISAS_CHAIN DISAS_TARGET_2 /* lookup next tb, pc updated */ +#define DISAS_CHAIN_UPDATE DISAS_TARGET_3 /* lookup next tb, pc stale */ + /* Return true iff byteswap is needed in a scalar memop */ static inline bool need_byteswap(const DisasContext *ctx) { @@ -253,15 +260,13 @@ static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error) * These are all synchronous exceptions, we set the PC back to the * faulting instruction */ - if (ctx->exception == POWERPC_EXCP_NONE) { - gen_update_nip(ctx, ctx->base.pc_next - 4); - } + gen_update_nip(ctx, ctx->cia); t0 = tcg_const_i32(excp); t1 = tcg_const_i32(error); gen_helper_raise_exception_err(cpu_env, t0, t1); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); - ctx->exception = (excp); + ctx->base.is_jmp = DISAS_NORETURN; } static void gen_exception(DisasContext *ctx, uint32_t excp) @@ -272,13 +277,11 @@ static void gen_exception(DisasContext *ctx, uint32_t excp) * These are all synchronous exceptions, we set the PC back to the * faulting instruction */ - if (ctx->exception == POWERPC_EXCP_NONE) { - gen_update_nip(ctx, ctx->base.pc_next - 4); - } + gen_update_nip(ctx, ctx->cia); t0 = tcg_const_i32(excp); gen_helper_raise_exception(cpu_env, t0); tcg_temp_free_i32(t0); - ctx->exception = (excp); + ctx->base.is_jmp = DISAS_NORETURN; } static void gen_exception_nip(DisasContext *ctx, uint32_t excp, @@ -290,7 +293,21 @@ static void gen_exception_nip(DisasContext *ctx, uint32_t excp, t0 = tcg_const_i32(excp); gen_helper_raise_exception(cpu_env, t0); tcg_temp_free_i32(t0); - ctx->exception = (excp); + ctx->base.is_jmp = DISAS_NORETURN; +} + +static void 
gen_icount_io_start(DisasContext *ctx) +{ + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + /* + * An I/O instruction must be last in the TB. + * Chain to the next TB, and let the code from gen_tb_start + * decide if we need to return to the main loop. + * Doing this first also allows this value to be overridden. + */ + ctx->base.is_jmp = DISAS_TOO_MANY; + } } /* @@ -323,19 +340,8 @@ static uint32_t gen_prep_dbgex(DisasContext *ctx) static void gen_debug_exception(DisasContext *ctx) { - TCGv_i32 t0; - - /* - * These are all synchronous exceptions, we set the PC back to the - * faulting instruction - */ - if ((ctx->exception != POWERPC_EXCP_BRANCH) && - (ctx->exception != POWERPC_EXCP_SYNC)) { - gen_update_nip(ctx, ctx->base.pc_next); - } - t0 = tcg_const_i32(EXCP_DEBUG); - gen_helper_raise_exception(cpu_env, t0); - tcg_temp_free_i32(t0); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG)); + ctx->base.is_jmp = DISAS_NORETURN; } static inline void gen_inval_exception(DisasContext *ctx, uint32_t error) @@ -355,18 +361,924 @@ static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error) gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error); } -/* Stop translation */ -static inline void gen_stop_exception(DisasContext *ctx) +/*****************************************************************************/ +/* SPR READ/WRITE CALLBACKS */ + +void spr_noaccess(DisasContext *ctx, int gprn, int sprn) { - gen_update_nip(ctx, ctx->base.pc_next); - ctx->exception = POWERPC_EXCP_STOP; +#if 0 + sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5); + printf("ERROR: try to access SPR %d !\n", sprn); +#endif +} + +/* #define PPC_DUMP_SPR_ACCESSES */ + +/* + * Generic callbacks: + * do nothing but store/retrieve spr value + */ +static void spr_load_dump_spr(int sprn) +{ +#ifdef PPC_DUMP_SPR_ACCESSES + TCGv_i32 t0 = tcg_const_i32(sprn); + gen_helper_load_dump_spr(cpu_env, t0); + tcg_temp_free_i32(t0); +#endif +} + +void spr_read_generic(DisasContext *ctx, int gprn, int sprn) +{ + gen_load_spr(cpu_gpr[gprn], sprn); + spr_load_dump_spr(sprn); +} + +static void spr_store_dump_spr(int sprn) +{ +#ifdef PPC_DUMP_SPR_ACCESSES + TCGv_i32 t0 = tcg_const_i32(sprn); + gen_helper_store_dump_spr(cpu_env, t0); + tcg_temp_free_i32(t0); +#endif +} + +void spr_write_generic(DisasContext *ctx, int sprn, int gprn) +{ + gen_store_spr(sprn, cpu_gpr[gprn]); + spr_store_dump_spr(sprn); } +#if !defined(CONFIG_USER_ONLY) +void spr_write_generic32(DisasContext *ctx, int sprn, int gprn) +{ +#ifdef TARGET_PPC64 + TCGv t0 = tcg_temp_new(); + tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]); + gen_store_spr(sprn, t0); + tcg_temp_free(t0); + spr_store_dump_spr(sprn); +#else + spr_write_generic(ctx, sprn, gprn); +#endif +} + +void spr_write_clear(DisasContext *ctx, int sprn, int gprn) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + gen_load_spr(t0, sprn); + tcg_gen_neg_tl(t1, cpu_gpr[gprn]); + tcg_gen_and_tl(t0, t0, t1); + gen_store_spr(sprn, t0); + tcg_temp_free(t0); + tcg_temp_free(t1); +} + +void spr_access_nop(DisasContext *ctx, int sprn, int gprn) +{ +} + +#endif + +/* SPR common to all PowerPC */ +/* XER */ +void spr_read_xer(DisasContext *ctx, int gprn, int sprn) +{ + TCGv dst = cpu_gpr[gprn]; + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + tcg_gen_mov_tl(dst, cpu_xer); + tcg_gen_shli_tl(t0, cpu_so, XER_SO); + tcg_gen_shli_tl(t1, cpu_ov, XER_OV); + tcg_gen_shli_tl(t2, cpu_ca, XER_CA); + tcg_gen_or_tl(t0, t0, t1); + tcg_gen_or_tl(dst, 
dst, t2); + tcg_gen_or_tl(dst, dst, t0); + if (is_isa300(ctx)) { + tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32); + tcg_gen_or_tl(dst, dst, t0); + tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32); + tcg_gen_or_tl(dst, dst, t0); + } + tcg_temp_free(t0); + tcg_temp_free(t1); + tcg_temp_free(t2); +} + +void spr_write_xer(DisasContext *ctx, int sprn, int gprn) +{ + TCGv src = cpu_gpr[gprn]; + /* Write all flags, while reading back check for isa300 */ + tcg_gen_andi_tl(cpu_xer, src, + ~((1u << XER_SO) | + (1u << XER_OV) | (1u << XER_OV32) | + (1u << XER_CA) | (1u << XER_CA32))); + tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1); + tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1); + tcg_gen_extract_tl(cpu_so, src, XER_SO, 1); + tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1); + tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1); +} + +/* LR */ +void spr_read_lr(DisasContext *ctx, int gprn, int sprn) +{ + tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr); +} + +void spr_write_lr(DisasContext *ctx, int sprn, int gprn) +{ + tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]); +} + +/* CFAR */ +#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) +void spr_read_cfar(DisasContext *ctx, int gprn, int sprn) +{ + tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar); +} + +void spr_write_cfar(DisasContext *ctx, int sprn, int gprn) +{ + tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]); +} +#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */ + +/* CTR */ +void spr_read_ctr(DisasContext *ctx, int gprn, int sprn) +{ + tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr); +} + +void spr_write_ctr(DisasContext *ctx, int sprn, int gprn) +{ + tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]); +} + +/* User read access to SPR */ +/* USPRx */ +/* UMMCRx */ +/* UPMCx */ +/* USIA */ +/* UDECR */ +void spr_read_ureg(DisasContext *ctx, int gprn, int sprn) +{ + gen_load_spr(cpu_gpr[gprn], sprn + 0x10); +} + +#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) +void spr_write_ureg(DisasContext *ctx, int sprn, int gprn) +{ + gen_store_spr(sprn + 0x10, cpu_gpr[gprn]); +} +#endif + +/* SPR common to all non-embedded PowerPC */ +/* DECR */ +#if !defined(CONFIG_USER_ONLY) +void spr_read_decr(DisasContext *ctx, int gprn, int sprn) +{ + gen_icount_io_start(ctx); + gen_helper_load_decr(cpu_gpr[gprn], cpu_env); +} + +void spr_write_decr(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_decr(cpu_env, cpu_gpr[gprn]); +} +#endif + +/* SPR common to all non-embedded PowerPC, except 601 */ +/* Time base */ +void spr_read_tbl(DisasContext *ctx, int gprn, int sprn) +{ + gen_icount_io_start(ctx); + gen_helper_load_tbl(cpu_gpr[gprn], cpu_env); +} + +void spr_read_tbu(DisasContext *ctx, int gprn, int sprn) +{ + gen_icount_io_start(ctx); + gen_helper_load_tbu(cpu_gpr[gprn], cpu_env); +} + +void spr_read_atbl(DisasContext *ctx, int gprn, int sprn) +{ + gen_helper_load_atbl(cpu_gpr[gprn], cpu_env); +} + +void spr_read_atbu(DisasContext *ctx, int gprn, int sprn) +{ + gen_helper_load_atbu(cpu_gpr[gprn], cpu_env); +} + +#if !defined(CONFIG_USER_ONLY) +void spr_write_tbl(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_tbu(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_atbl(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_atbu(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]); +} + +#if defined(TARGET_PPC64) 
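The generic SPR callbacks above (spr_read_generic(), spr_write_generic(), spr_write_generic32()) go through gen_load_spr()/gen_store_spr(), whose definitions live elsewhere in translate.c and are not shown in this diff. Judging from the call sites, they are presumably thin wrappers over the env->spr[] array, along the lines of:

static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}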
+void spr_read_purr(DisasContext *ctx, int gprn, int sprn) +{ + gen_icount_io_start(ctx); + gen_helper_load_purr(cpu_gpr[gprn], cpu_env); +} + +void spr_write_purr(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_purr(cpu_env, cpu_gpr[gprn]); +} + +/* HDECR */ +void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn) +{ + gen_icount_io_start(ctx); + gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env); +} + +void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]); +} + +void spr_read_vtb(DisasContext *ctx, int gprn, int sprn) +{ + gen_icount_io_start(ctx); + gen_helper_load_vtb(cpu_gpr[gprn], cpu_env); +} + +void spr_write_vtb(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]); +} + +#endif +#endif + +#if !defined(CONFIG_USER_ONLY) +/* IBAT0U...IBAT0U */ +/* IBAT0L...IBAT7L */ +void spr_read_ibat(DisasContext *ctx, int gprn, int sprn) +{ + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, + offsetof(CPUPPCState, + IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); +} + +void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn) +{ + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, + offsetof(CPUPPCState, + IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4])); +} + +void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); + gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} + +void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4); + gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} + +void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2); + gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} + +void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4); + gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} + +/* DBAT0U...DBAT7U */ +/* DBAT0L...DBAT7L */ +void spr_read_dbat(DisasContext *ctx, int gprn, int sprn) +{ + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, + offsetof(CPUPPCState, + DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2])); +} + +void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn) +{ + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, + offsetof(CPUPPCState, + DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4])); +} + +void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2); + gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} + +void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4); + gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} + +void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2); + gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} + +void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4); + gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} + +/* 
SDR1 */ +void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]); +} + +#if defined(TARGET_PPC64) +/* 64 bits PowerPC specific SPRs */ +/* PIDR */ +void spr_write_pidr(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]); +} + +void spr_read_hior(DisasContext *ctx, int gprn, int sprn) +{ + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix)); +} + +void spr_write_hior(DisasContext *ctx, int sprn, int gprn) +{ + TCGv t0 = tcg_temp_new(); + tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL); + tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix)); + tcg_temp_free(t0); +} +void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_pcr(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]); +} + +/* DPDES */ +void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn) +{ + gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env); +} + +void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]); +} +#endif +#endif + +/* PowerPC 601 specific registers */ +/* RTC */ +void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn) +{ + gen_helper_load_601_rtcl(cpu_gpr[gprn], cpu_env); +} + +void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn) +{ + gen_helper_load_601_rtcu(cpu_gpr[gprn], cpu_env); +} + +#if !defined(CONFIG_USER_ONLY) +void spr_write_601_rtcu(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_601_rtcu(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_601_rtcl(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_601_rtcl(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_hid0_601(cpu_env, cpu_gpr[gprn]); + /* Must stop the translation as endianness may have changed */ + ctx->base.is_jmp = DISAS_EXIT_UPDATE; +} +#endif + +/* Unified bats */ +#if !defined(CONFIG_USER_ONLY) +void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn) +{ + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, + offsetof(CPUPPCState, + IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); +} + +void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); + gen_helper_store_601_batl(cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} + +void spr_write_601_ubatl(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); + gen_helper_store_601_batu(cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} +#endif + +/* PowerPC 40x specific registers */ +#if !defined(CONFIG_USER_ONLY) +void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn) +{ + gen_icount_io_start(ctx); + gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env); +} + +void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_store_spr(sprn, cpu_gpr[gprn]); + gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]); + /* We must stop translation as we may have rebooted */ + ctx->base.is_jmp = DISAS_EXIT_UPDATE; +} + +void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + 
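/*
 * Reader's sketch (not part of the patch): the IBAT/DBAT callbacks above
 * index env->IBAT[]/DBAT[] straight from the SPR number, assuming the usual
 * interleaved numbering IBAT0U, IBAT0L, IBAT1U, IBAT1L, ... with the "U"
 * register on the even number of each pair.  The same arithmetic in plain C:
 */
static inline void bat_decode_sprn(int sprn, int sprn_base,
                                   int *upper_or_lower, int *bat_index)
{
    *upper_or_lower = sprn & 1;          /* 0 = upper half, 1 = lower half */
    *bat_index = (sprn - sprn_base) / 2; /* one U/L pair per BAT register */
}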
gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn) +{ + gen_icount_io_start(ctx); + gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]); +} +#endif + +/* PowerPC 403 specific registers */ +/* PBL1 / PBU1 / PBL2 / PBU2 */ +#if !defined(CONFIG_USER_ONLY) +void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn) +{ + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, + offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1])); +} + +void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32(sprn - SPR_403_PBL1); + gen_helper_store_403_pbr(cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} + +void spr_write_pir(DisasContext *ctx, int sprn, int gprn) +{ + TCGv t0 = tcg_temp_new(); + tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF); + gen_store_spr(SPR_PIR, t0); + tcg_temp_free(t0); +} +#endif + +/* SPE specific registers */ +void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn) +{ + TCGv_i32 t0 = tcg_temp_new_i32(); + tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr)); + tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0); + tcg_temp_free_i32(t0); +} + +void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]); + tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr)); + tcg_temp_free_i32(t0); +} + +#if !defined(CONFIG_USER_ONLY) +/* Callback used to write the exception vector base */ +void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn) +{ + TCGv t0 = tcg_temp_new(); + tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask)); + tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]); + tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix)); + gen_store_spr(sprn, t0); + tcg_temp_free(t0); +} + +void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn) +{ + int sprn_offs; + + if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) { + sprn_offs = sprn - SPR_BOOKE_IVOR0; + } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) { + sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32; + } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) { + sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38; + } else { + printf("Trying to write an unknown exception vector %d %03x\n", + sprn, sprn); + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); + return; + } + + TCGv t0 = tcg_temp_new(); + tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask)); + tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]); + tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs])); + gen_store_spr(sprn, t0); + tcg_temp_free(t0); +} +#endif + +#ifdef TARGET_PPC64 +#ifndef CONFIG_USER_ONLY +void spr_write_amr(DisasContext *ctx, int sprn, int gprn) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + + /* + * Note, the HV=1 PR=0 case is handled earlier by simply using + * spr_write_generic for HV mode in the SPR table + */ + + /* Build insertion mask into t1 based on context */ + if (ctx->pr) { + gen_load_spr(t1, SPR_UAMOR); + } else { + gen_load_spr(t1, SPR_AMOR); + } + + /* Mask new bits into t2 */ + tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]); + + /* Load AMR and clear new bits in t0 */ + gen_load_spr(t0, SPR_AMR); + tcg_gen_andc_tl(t0, t0, t1); + + /* Or'in new bits and write it out */ + tcg_gen_or_tl(t0, t0, t2); + gen_store_spr(SPR_AMR, t0); + 
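/*
 * Reader's sketch (not part of the patch): the TCG sequence in spr_write_amr
 * above is a masked read-modify-write -- only the bits permitted by AMOR
 * (or UAMOR in problem state) are taken from the new value.  The equivalent
 * operation on host integers:
 */
static inline uint64_t amr_masked_update(uint64_t old_amr, uint64_t new_val,
                                         uint64_t allowed_mask)
{
    /* keep protected bits from the old value, take allowed bits from the new */
    return (old_amr & ~allowed_mask) | (new_val & allowed_mask);
}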
spr_store_dump_spr(SPR_AMR); + + tcg_temp_free(t0); + tcg_temp_free(t1); + tcg_temp_free(t2); +} + +void spr_write_uamor(DisasContext *ctx, int sprn, int gprn) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + + /* + * Note, the HV=1 case is handled earlier by simply using + * spr_write_generic for HV mode in the SPR table + */ + + /* Build insertion mask into t1 based on context */ + gen_load_spr(t1, SPR_AMOR); + + /* Mask new bits into t2 */ + tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]); + + /* Load AMR and clear new bits in t0 */ + gen_load_spr(t0, SPR_UAMOR); + tcg_gen_andc_tl(t0, t0, t1); + + /* Or'in new bits and write it out */ + tcg_gen_or_tl(t0, t0, t2); + gen_store_spr(SPR_UAMOR, t0); + spr_store_dump_spr(SPR_UAMOR); + + tcg_temp_free(t0); + tcg_temp_free(t1); + tcg_temp_free(t2); +} + +void spr_write_iamr(DisasContext *ctx, int sprn, int gprn) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + + /* + * Note, the HV=1 case is handled earlier by simply using + * spr_write_generic for HV mode in the SPR table + */ + + /* Build insertion mask into t1 based on context */ + gen_load_spr(t1, SPR_AMOR); + + /* Mask new bits into t2 */ + tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]); + + /* Load AMR and clear new bits in t0 */ + gen_load_spr(t0, SPR_IAMR); + tcg_gen_andc_tl(t0, t0, t1); + + /* Or'in new bits and write it out */ + tcg_gen_or_tl(t0, t0, t2); + gen_store_spr(SPR_IAMR, t0); + spr_store_dump_spr(SPR_IAMR); + + tcg_temp_free(t0); + tcg_temp_free(t1); + tcg_temp_free(t2); +} +#endif +#endif + #ifndef CONFIG_USER_ONLY -/* No need to update nip here, as execution flow will change */ -static inline void gen_sync_exception(DisasContext *ctx) +void spr_read_thrm(DisasContext *ctx, int gprn, int sprn) +{ + gen_helper_fixup_thrm(cpu_env); + gen_load_spr(cpu_gpr[gprn], sprn); + spr_load_dump_spr(sprn); +} +#endif /* !CONFIG_USER_ONLY */ + +#if !defined(CONFIG_USER_ONLY) +void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn) +{ + TCGv t0 = tcg_temp_new(); + + tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE); + gen_store_spr(sprn, t0); + tcg_temp_free(t0); +} + +void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn) { - ctx->exception = POWERPC_EXCP_SYNC; + TCGv t0 = tcg_temp_new(); + + tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE); + gen_store_spr(sprn, t0); + tcg_temp_free(t0); +} + +void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn) +{ + TCGv t0 = tcg_temp_new(); + + tcg_gen_andi_tl(t0, cpu_gpr[gprn], + ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC)); + gen_store_spr(sprn, t0); + tcg_temp_free(t0); +} + +void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]); +} + +void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32(sprn); + gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} +void spr_write_eplc(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]); +} +void spr_write_epsc(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]); +} + +#endif + +#if !defined(CONFIG_USER_ONLY) +void spr_write_mas73(DisasContext *ctx, int sprn, int gprn) +{ + TCGv val = tcg_temp_new(); + tcg_gen_ext32u_tl(val, cpu_gpr[gprn]); + gen_store_spr(SPR_BOOKE_MAS3, val); + tcg_gen_shri_tl(val, cpu_gpr[gprn], 32); + gen_store_spr(SPR_BOOKE_MAS7, val); + 
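/*
 * Reader's sketch (not part of the patch): MAS7_MAS3 is presented as one
 * 64-bit SPR with MAS7 in the upper 32 bits and MAS3 in the lower 32 bits;
 * spr_write_mas73 above and spr_read_mas73 below simply split and recombine
 * it.  In plain C:
 */
static inline uint64_t mas73_compose(uint32_t mas7, uint32_t mas3)
{
    return ((uint64_t)mas7 << 32) | mas3;
}

static inline void mas73_split(uint64_t mas73, uint32_t *mas7, uint32_t *mas3)
{
    *mas7 = (uint32_t)(mas73 >> 32);
    *mas3 = (uint32_t)mas73;
}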
tcg_temp_free(val); +} + +void spr_read_mas73(DisasContext *ctx, int gprn, int sprn) +{ + TCGv mas7 = tcg_temp_new(); + TCGv mas3 = tcg_temp_new(); + gen_load_spr(mas7, SPR_BOOKE_MAS7); + tcg_gen_shli_tl(mas7, mas7, 32); + gen_load_spr(mas3, SPR_BOOKE_MAS3); + tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7); + tcg_temp_free(mas3); + tcg_temp_free(mas7); +} + +#endif + +#ifdef TARGET_PPC64 +static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn, + int bit, int sprn, int cause) +{ + TCGv_i32 t1 = tcg_const_i32(bit); + TCGv_i32 t2 = tcg_const_i32(sprn); + TCGv_i32 t3 = tcg_const_i32(cause); + + gen_helper_fscr_facility_check(cpu_env, t1, t2, t3); + + tcg_temp_free_i32(t3); + tcg_temp_free_i32(t2); + tcg_temp_free_i32(t1); +} + +static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn, + int bit, int sprn, int cause) +{ + TCGv_i32 t1 = tcg_const_i32(bit); + TCGv_i32 t2 = tcg_const_i32(sprn); + TCGv_i32 t3 = tcg_const_i32(cause); + + gen_helper_msr_facility_check(cpu_env, t1, t2, t3); + + tcg_temp_free_i32(t3); + tcg_temp_free_i32(t2); + tcg_temp_free_i32(t1); +} + +void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn) +{ + TCGv spr_up = tcg_temp_new(); + TCGv spr = tcg_temp_new(); + + gen_load_spr(spr, sprn - 1); + tcg_gen_shri_tl(spr_up, spr, 32); + tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up); + + tcg_temp_free(spr); + tcg_temp_free(spr_up); +} + +void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn) +{ + TCGv spr = tcg_temp_new(); + + gen_load_spr(spr, sprn - 1); + tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32); + gen_store_spr(sprn - 1, spr); + + tcg_temp_free(spr); +} + +#if !defined(CONFIG_USER_ONLY) +void spr_write_hmer(DisasContext *ctx, int sprn, int gprn) +{ + TCGv hmer = tcg_temp_new(); + + gen_load_spr(hmer, sprn); + tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer); + gen_store_spr(sprn, hmer); + spr_store_dump_spr(sprn); + tcg_temp_free(hmer); +} + +void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]); +} +#endif /* !defined(CONFIG_USER_ONLY) */ + +void spr_read_tar(DisasContext *ctx, int gprn, int sprn) +{ + gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR); + spr_read_generic(ctx, gprn, sprn); +} + +void spr_write_tar(DisasContext *ctx, int sprn, int gprn) +{ + gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR); + spr_write_generic(ctx, sprn, gprn); +} + +void spr_read_tm(DisasContext *ctx, int gprn, int sprn) +{ + gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); + spr_read_generic(ctx, gprn, sprn); +} + +void spr_write_tm(DisasContext *ctx, int sprn, int gprn) +{ + gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); + spr_write_generic(ctx, sprn, gprn); +} + +void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn) +{ + gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); + spr_read_prev_upper32(ctx, gprn, sprn); +} + +void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn) +{ + gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); + spr_write_prev_upper32(ctx, sprn, gprn); +} + +void spr_read_ebb(DisasContext *ctx, int gprn, int sprn) +{ + gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); + spr_read_generic(ctx, gprn, sprn); +} + +void spr_write_ebb(DisasContext *ctx, int sprn, int gprn) +{ + gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); + spr_write_generic(ctx, sprn, gprn); +} + +void spr_read_ebb_upper32(DisasContext *ctx, int gprn, 
int sprn) +{ + gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); + spr_read_prev_upper32(ctx, gprn, sprn); +} + +void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn) +{ + gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); + spr_write_prev_upper32(ctx, sprn, gprn); } #endif @@ -1852,18 +2764,13 @@ static void gen_darn(DisasContext *ctx) if (l > 2) { tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1); } else { - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } + gen_icount_io_start(ctx); if (l == 0) { gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]); } else { /* Return 64-bit random for both CRN and RRN */ gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]); } - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_stop_exception(ctx); - } } } #endif @@ -3113,7 +4020,7 @@ static void gen_eieio(DisasContext *ctx) */ if (!(ctx->insns_flags2 & PPC2_ISA300)) { qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @" - TARGET_FMT_lx "\n", ctx->base.pc_next - 4); + TARGET_FMT_lx "\n", ctx->cia); } else { bar = TCG_MO_ST_LD; } @@ -3158,7 +4065,7 @@ static void gen_isync(DisasContext *ctx) gen_check_tlb_flush(ctx, false); } tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); - gen_stop_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT_UPDATE; } #define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE)) @@ -3741,8 +4648,9 @@ static void gen_lookup_and_goto_ptr(DisasContext *ctx) } else if (sse & (CPU_SINGLE_STEP | CPU_BRANCH_STEP)) { uint32_t excp = gen_prep_dbgex(ctx); gen_exception(ctx, excp); + } else { + tcg_gen_exit_tb(NULL, 0); } - tcg_gen_exit_tb(NULL, 0); } else { tcg_gen_lookup_and_goto_ptr(); } @@ -3777,20 +4685,20 @@ static void gen_b(DisasContext *ctx) { target_ulong li, target; - ctx->exception = POWERPC_EXCP_BRANCH; /* sign extend LI */ li = LI(ctx->opcode); li = (li ^ 0x02000000) - 0x02000000; if (likely(AA(ctx->opcode) == 0)) { - target = ctx->base.pc_next + li - 4; + target = ctx->cia + li; } else { target = li; } if (LK(ctx->opcode)) { gen_setlr(ctx, ctx->base.pc_next); } - gen_update_cfar(ctx, ctx->base.pc_next - 4); + gen_update_cfar(ctx, ctx->cia); gen_goto_tb(ctx, 0, target); + ctx->base.is_jmp = DISAS_NORETURN; } #define BCOND_IM 0 @@ -3803,7 +4711,6 @@ static void gen_bcond(DisasContext *ctx, int type) uint32_t bo = BO(ctx->opcode); TCGLabel *l1; TCGv target; - ctx->exception = POWERPC_EXCP_BRANCH; if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) { target = tcg_temp_local_new(); @@ -3888,11 +4795,11 @@ static void gen_bcond(DisasContext *ctx, int type) } tcg_temp_free_i32(temp); } - gen_update_cfar(ctx, ctx->base.pc_next - 4); + gen_update_cfar(ctx, ctx->cia); if (type == BCOND_IM) { target_ulong li = (target_long)((int16_t)(BD(ctx->opcode))); if (likely(AA(ctx->opcode) == 0)) { - gen_goto_tb(ctx, 0, ctx->base.pc_next + li - 4); + gen_goto_tb(ctx, 0, ctx->cia + li); } else { gen_goto_tb(ctx, 0, li); } @@ -3910,6 +4817,7 @@ static void gen_bcond(DisasContext *ctx, int type) gen_set_label(l1); gen_goto_tb(ctx, 1, ctx->base.pc_next); } + ctx->base.is_jmp = DISAS_NORETURN; } static void gen_bc(DisasContext *ctx) @@ -4005,12 +4913,10 @@ static void gen_rfi(DisasContext *ctx) } /* Restore CPU state */ CHK_SV; - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_update_cfar(ctx, ctx->base.pc_next - 4); + gen_icount_io_start(ctx); + gen_update_cfar(ctx, ctx->cia); gen_helper_rfi(cpu_env); - gen_sync_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT; #endif } @@ -4022,12 +4928,10 @@ static void gen_rfid(DisasContext 
*ctx) #else /* Restore CPU state */ CHK_SV; - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_update_cfar(ctx, ctx->base.pc_next - 4); + gen_icount_io_start(ctx); + gen_update_cfar(ctx, ctx->cia); gen_helper_rfid(cpu_env); - gen_sync_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT; #endif } @@ -4039,12 +4943,10 @@ static void gen_rfscv(DisasContext *ctx) #else /* Restore CPU state */ CHK_SV; - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_update_cfar(ctx, ctx->base.pc_next - 4); + gen_icount_io_start(ctx); + gen_update_cfar(ctx, ctx->cia); gen_helper_rfscv(cpu_env); - gen_sync_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT; #endif } #endif @@ -4057,7 +4959,7 @@ static void gen_hrfid(DisasContext *ctx) /* Restore CPU state */ CHK_HV; gen_helper_hrfid(cpu_env); - gen_sync_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT; #endif } #endif @@ -4081,15 +4983,13 @@ static void gen_sc(DisasContext *ctx) #if !defined(CONFIG_USER_ONLY) static void gen_scv(DisasContext *ctx) { - uint32_t lev; + uint32_t lev = (ctx->opcode >> 5) & 0x7F; - if (unlikely(!ctx->scv_enabled)) { - gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_SCV); - return; - } + /* Set the PC back to the faulting instruction. */ + gen_update_nip(ctx, ctx->cia); + gen_helper_scv(cpu_env, tcg_constant_i32(lev)); - lev = (ctx->opcode >> 5) & 0x7F; - gen_exception_err(ctx, POWERPC_SYSCALL_VECTORED, lev); + ctx->base.is_jmp = DISAS_NORETURN; } #endif #endif @@ -4175,43 +5075,6 @@ static void gen_tdi(DisasContext *ctx) /*** Processor control ***/ -static void gen_read_xer(DisasContext *ctx, TCGv dst) -{ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - tcg_gen_mov_tl(dst, cpu_xer); - tcg_gen_shli_tl(t0, cpu_so, XER_SO); - tcg_gen_shli_tl(t1, cpu_ov, XER_OV); - tcg_gen_shli_tl(t2, cpu_ca, XER_CA); - tcg_gen_or_tl(t0, t0, t1); - tcg_gen_or_tl(dst, dst, t2); - tcg_gen_or_tl(dst, dst, t0); - if (is_isa300(ctx)) { - tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32); - tcg_gen_or_tl(dst, dst, t0); - tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32); - tcg_gen_or_tl(dst, dst, t0); - } - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); -} - -static void gen_write_xer(TCGv src) -{ - /* Write all flags, while reading back check for isa300 */ - tcg_gen_andi_tl(cpu_xer, src, - ~((1u << XER_SO) | - (1u << XER_OV) | (1u << XER_OV32) | - (1u << XER_CA) | (1u << XER_CA32))); - tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1); - tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1); - tcg_gen_extract_tl(cpu_so, src, XER_SO, 1); - tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1); - tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1); -} - /* mcrxr */ static void gen_mcrxr(DisasContext *ctx) { @@ -4299,15 +5162,6 @@ static void gen_mfmsr(DisasContext *ctx) tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr); } -static void spr_noaccess(DisasContext *ctx, int gprn, int sprn) -{ -#if 0 - sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5); - printf("ERROR: try to access SPR %d !\n", sprn); -#endif -} -#define SPR_NOACCESS (&spr_noaccess) - /* mfspr */ static inline void gen_op_mfspr(DisasContext *ctx) { @@ -4338,7 +5192,7 @@ static inline void gen_op_mfspr(DisasContext *ctx) if (sprn != SPR_PVR) { qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr " "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn, - ctx->base.pc_next - 4); + ctx->cia); } gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG); } @@ -4352,7 +5206,7 @@ static inline void gen_op_mfspr(DisasContext *ctx) /* Not defined */ 
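/*
 * Reader's sketch (not part of the patch): the removed gen_read_xer /
 * gen_write_xer above folded the separately tracked SO/OV/CA (and, on
 * ISA 3.00, OV32/CA32) bits back into the architected XER value.  The
 * equivalent host-side composition, using the XER_* bit positions that
 * code referenced:
 */
static inline uint64_t xer_compose(uint64_t xer_base, unsigned so, unsigned ov,
                                   unsigned ca, unsigned ov32, unsigned ca32,
                                   bool is_isa300)
{
    uint64_t xer = xer_base
                 | ((uint64_t)so << XER_SO)
                 | ((uint64_t)ov << XER_OV)
                 | ((uint64_t)ca << XER_CA);
    if (is_isa300) {
        xer |= ((uint64_t)ov32 << XER_OV32) | ((uint64_t)ca32 << XER_CA32);
    }
    return xer;
}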
qemu_log_mask(LOG_GUEST_ERROR, "Trying to read invalid spr %d (0x%03x) at " - TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); + TARGET_FMT_lx "\n", sprn, sprn, ctx->cia); /* * The behaviour depends on MSR:PR and SPR# bit 0x10, it can @@ -4416,9 +5270,7 @@ static void gen_mtmsrd(DisasContext *ctx) CHK_SV; #if !defined(CONFIG_USER_ONLY) - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } + gen_icount_io_start(ctx); if (ctx->opcode & 0x00010000) { /* L=1 form only updates EE and RI */ TCGv t0 = tcg_temp_new(); @@ -4443,7 +5295,7 @@ static void gen_mtmsrd(DisasContext *ctx) gen_helper_store_msr(cpu_env, cpu_gpr[rS(ctx->opcode)]); } /* Must stop the translation as machine state (may have) changed */ - gen_stop_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT_UPDATE; #endif /* !defined(CONFIG_USER_ONLY) */ } #endif /* defined(TARGET_PPC64) */ @@ -4453,9 +5305,7 @@ static void gen_mtmsr(DisasContext *ctx) CHK_SV; #if !defined(CONFIG_USER_ONLY) - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } + gen_icount_io_start(ctx); if (ctx->opcode & 0x00010000) { /* L=1 form only updates EE and RI */ TCGv t0 = tcg_temp_new(); @@ -4488,7 +5338,7 @@ static void gen_mtmsr(DisasContext *ctx) tcg_temp_free(msr); } /* Must stop the translation as machine state (may have) changed */ - gen_stop_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT_UPDATE; #endif } @@ -4516,7 +5366,7 @@ static void gen_mtspr(DisasContext *ctx) /* Privilege exception */ qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr " "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn, - ctx->base.pc_next - 4); + ctx->cia); gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG); } } else { @@ -4530,7 +5380,7 @@ static void gen_mtspr(DisasContext *ctx) /* Not defined */ qemu_log_mask(LOG_GUEST_ERROR, "Trying to write invalid spr %d (0x%03x) at " - TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); + TARGET_FMT_lx "\n", sprn, sprn, ctx->cia); /* @@ -5943,7 +6793,7 @@ static void gen_rfsvc(DisasContext *ctx) CHK_SV; gen_helper_rfsvc(cpu_env); - gen_sync_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT; #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6323,7 +7173,7 @@ static void gen_rfci_40x(DisasContext *ctx) CHK_SV; /* Restore CPU state */ gen_helper_40x_rfci(cpu_env); - gen_sync_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT; #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6335,7 +7185,7 @@ static void gen_rfci(DisasContext *ctx) CHK_SV; /* Restore CPU state */ gen_helper_rfci(cpu_env); - gen_sync_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT; #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6350,7 +7200,7 @@ static void gen_rfdi(DisasContext *ctx) CHK_SV; /* Restore CPU state */ gen_helper_rfdi(cpu_env); - gen_sync_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT; #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6363,7 +7213,7 @@ static void gen_rfmci(DisasContext *ctx) CHK_SV; /* Restore CPU state */ gen_helper_rfmci(cpu_env); - gen_sync_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT; #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6625,7 +7475,7 @@ static void gen_wrtee(DisasContext *ctx) * Stop translation to have a chance to raise an exception if we * just set msr_ee to 1 */ - gen_stop_exception(ctx); + ctx->base.is_jmp = DISAS_EXIT_UPDATE; #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6639,7 +7489,7 @@ static void gen_wrteei(DisasContext *ctx) if (ctx->opcode & 0x00008000) { tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE)); /* Stop translation to have a chance to raise an exception */ - gen_stop_exception(ctx); 
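/*
 * Reader's note (sketch, not part of the patch): throughout this file the
 * old gen_stop_exception()/gen_sync_exception() calls are replaced by
 * setting ctx->base.is_jmp; ppc_tr_tb_stop() further down then emits the
 * matching epilogue (gen_update_nip() plus exit_tb or lookup_and_goto_ptr).
 * A hypothetical MSR-bit helper following the gen_wrteei pattern would end
 * like this:
 */
static void gen_example_set_msr_bit(DisasContext *ctx, int bit)
{
    tcg_gen_ori_tl(cpu_msr, cpu_msr, (target_ulong)1 << bit);
    /* The write may unmask an interrupt: store NIP and leave the TB. */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}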
+ ctx->base.is_jmp = DISAS_EXIT_UPDATE; } else { tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE)); } @@ -7638,193 +8488,400 @@ GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \ #include "translate/spe-ops.c.inc" }; -#include "helper_regs.h" -#include "translate_init.c.inc" - /*****************************************************************************/ -/* Misc PowerPC helpers */ -void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags) +/* Opcode types */ +enum { + PPC_DIRECT = 0, /* Opcode routine */ + PPC_INDIRECT = 1, /* Indirect opcode table */ +}; + +#define PPC_OPCODE_MASK 0x3 + +static inline int is_indirect_opcode(void *handler) { -#define RGPL 4 -#define RFPL 4 + return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT; +} - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; +static inline opc_handler_t **ind_table(void *handler) +{ + return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK); +} + +/* Instruction table creation */ +/* Opcodes tables creation */ +static void fill_new_table(opc_handler_t **table, int len) +{ int i; - qemu_fprintf(f, "NIP " TARGET_FMT_lx " LR " TARGET_FMT_lx " CTR " - TARGET_FMT_lx " XER " TARGET_FMT_lx " CPU#%d\n", - env->nip, env->lr, env->ctr, cpu_read_xer(env), - cs->cpu_index); - qemu_fprintf(f, "MSR " TARGET_FMT_lx " HID0 " TARGET_FMT_lx " HF " - TARGET_FMT_lx " iidx %d didx %d\n", - env->msr, env->spr[SPR_HID0], - env->hflags, env->immu_idx, env->dmmu_idx); -#if !defined(NO_TIMER_DUMP) - qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64 -#if !defined(CONFIG_USER_ONLY) - " DECR " TARGET_FMT_lu -#endif - "\n", - cpu_ppc_load_tbu(env), cpu_ppc_load_tbl(env) -#if !defined(CONFIG_USER_ONLY) - , cpu_ppc_load_decr(env) -#endif - ); + for (i = 0; i < len; i++) { + table[i] = &invalid_handler; + } +} + +static int create_new_table(opc_handler_t **table, unsigned char idx) +{ + opc_handler_t **tmp; + + tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN); + fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN); + table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT); + + return 0; +} + +static int insert_in_table(opc_handler_t **table, unsigned char idx, + opc_handler_t *handler) +{ + if (table[idx] != &invalid_handler) { + return -1; + } + table[idx] = handler; + + return 0; +} + +static int register_direct_insn(opc_handler_t **ppc_opcodes, + unsigned char idx, opc_handler_t *handler) +{ + if (insert_in_table(ppc_opcodes, idx, handler) < 0) { + printf("*** ERROR: opcode %02x already assigned in main " + "opcode table\n", idx); +#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) + printf(" Registered handler '%s' - new handler '%s'\n", + ppc_opcodes[idx]->oname, handler->oname); #endif - for (i = 0; i < 32; i++) { - if ((i & (RGPL - 1)) == 0) { - qemu_fprintf(f, "GPR%02d", i); + return -1; + } + + return 0; +} + +static int register_ind_in_table(opc_handler_t **table, + unsigned char idx1, unsigned char idx2, + opc_handler_t *handler) +{ + if (table[idx1] == &invalid_handler) { + if (create_new_table(table, idx1) < 0) { + printf("*** ERROR: unable to create indirect table " + "idx=%02x\n", idx1); + return -1; } - qemu_fprintf(f, " %016" PRIx64, ppc_dump_gpr(env, i)); - if ((i & (RGPL - 1)) == (RGPL - 1)) { - qemu_fprintf(f, "\n"); + } else { + if (!is_indirect_opcode(table[idx1])) { + printf("*** ERROR: idx %02x already assigned to a direct " + "opcode\n", idx1); +#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) + printf(" Registered handler '%s' - new handler '%s'\n", + 
ind_table(table[idx1])[idx2]->oname, handler->oname); +#endif + return -1; } } - qemu_fprintf(f, "CR "); - for (i = 0; i < 8; i++) - qemu_fprintf(f, "%01x", env->crf[i]); - qemu_fprintf(f, " ["); - for (i = 0; i < 8; i++) { - char a = '-'; - if (env->crf[i] & 0x08) { - a = 'L'; - } else if (env->crf[i] & 0x04) { - a = 'G'; - } else if (env->crf[i] & 0x02) { - a = 'E'; - } - qemu_fprintf(f, " %c%c", a, env->crf[i] & 0x01 ? 'O' : ' '); + if (handler != NULL && + insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) { + printf("*** ERROR: opcode %02x already assigned in " + "opcode table %02x\n", idx2, idx1); +#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) + printf(" Registered handler '%s' - new handler '%s'\n", + ind_table(table[idx1])[idx2]->oname, handler->oname); +#endif + return -1; + } + + return 0; +} + +static int register_ind_insn(opc_handler_t **ppc_opcodes, + unsigned char idx1, unsigned char idx2, + opc_handler_t *handler) +{ + return register_ind_in_table(ppc_opcodes, idx1, idx2, handler); +} + +static int register_dblind_insn(opc_handler_t **ppc_opcodes, + unsigned char idx1, unsigned char idx2, + unsigned char idx3, opc_handler_t *handler) +{ + if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) { + printf("*** ERROR: unable to join indirect table idx " + "[%02x-%02x]\n", idx1, idx2); + return -1; + } + if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3, + handler) < 0) { + printf("*** ERROR: unable to insert opcode " + "[%02x-%02x-%02x]\n", idx1, idx2, idx3); + return -1; } - qemu_fprintf(f, " ] RES " TARGET_FMT_lx "\n", - env->reserve_addr); - if (flags & CPU_DUMP_FPU) { - for (i = 0; i < 32; i++) { - if ((i & (RFPL - 1)) == 0) { - qemu_fprintf(f, "FPR%02d", i); + return 0; +} + +static int register_trplind_insn(opc_handler_t **ppc_opcodes, + unsigned char idx1, unsigned char idx2, + unsigned char idx3, unsigned char idx4, + opc_handler_t *handler) +{ + opc_handler_t **table; + + if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) { + printf("*** ERROR: unable to join indirect table idx " + "[%02x-%02x]\n", idx1, idx2); + return -1; + } + table = ind_table(ppc_opcodes[idx1]); + if (register_ind_in_table(table, idx2, idx3, NULL) < 0) { + printf("*** ERROR: unable to join 2nd-level indirect table idx " + "[%02x-%02x-%02x]\n", idx1, idx2, idx3); + return -1; + } + table = ind_table(table[idx2]); + if (register_ind_in_table(table, idx3, idx4, handler) < 0) { + printf("*** ERROR: unable to insert opcode " + "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4); + return -1; + } + return 0; +} +static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn) +{ + if (insn->opc2 != 0xFF) { + if (insn->opc3 != 0xFF) { + if (insn->opc4 != 0xFF) { + if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2, + insn->opc3, insn->opc4, + &insn->handler) < 0) { + return -1; + } + } else { + if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2, + insn->opc3, &insn->handler) < 0) { + return -1; + } } - qemu_fprintf(f, " %016" PRIx64, *cpu_fpr_ptr(env, i)); - if ((i & (RFPL - 1)) == (RFPL - 1)) { - qemu_fprintf(f, "\n"); + } else { + if (register_ind_insn(ppc_opcodes, insn->opc1, + insn->opc2, &insn->handler) < 0) { + return -1; } } - qemu_fprintf(f, "FPSCR " TARGET_FMT_lx "\n", env->fpscr); + } else { + if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) { + return -1; + } } -#if !defined(CONFIG_USER_ONLY) - qemu_fprintf(f, " SRR0 " TARGET_FMT_lx " SRR1 " TARGET_FMT_lx - " PVR " TARGET_FMT_lx " VRSAVE " 
TARGET_FMT_lx "\n", - env->spr[SPR_SRR0], env->spr[SPR_SRR1], - env->spr[SPR_PVR], env->spr[SPR_VRSAVE]); - - qemu_fprintf(f, "SPRG0 " TARGET_FMT_lx " SPRG1 " TARGET_FMT_lx - " SPRG2 " TARGET_FMT_lx " SPRG3 " TARGET_FMT_lx "\n", - env->spr[SPR_SPRG0], env->spr[SPR_SPRG1], - env->spr[SPR_SPRG2], env->spr[SPR_SPRG3]); + return 0; +} - qemu_fprintf(f, "SPRG4 " TARGET_FMT_lx " SPRG5 " TARGET_FMT_lx - " SPRG6 " TARGET_FMT_lx " SPRG7 " TARGET_FMT_lx "\n", - env->spr[SPR_SPRG4], env->spr[SPR_SPRG5], - env->spr[SPR_SPRG6], env->spr[SPR_SPRG7]); +static int test_opcode_table(opc_handler_t **table, int len) +{ + int i, count, tmp; -#if defined(TARGET_PPC64) - if (env->excp_model == POWERPC_EXCP_POWER7 || - env->excp_model == POWERPC_EXCP_POWER8 || - env->excp_model == POWERPC_EXCP_POWER9) { - qemu_fprintf(f, "HSRR0 " TARGET_FMT_lx " HSRR1 " TARGET_FMT_lx "\n", - env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]); + for (i = 0, count = 0; i < len; i++) { + /* Consistency fixup */ + if (table[i] == NULL) { + table[i] = &invalid_handler; + } + if (table[i] != &invalid_handler) { + if (is_indirect_opcode(table[i])) { + tmp = test_opcode_table(ind_table(table[i]), + PPC_CPU_INDIRECT_OPCODES_LEN); + if (tmp == 0) { + free(table[i]); + table[i] = &invalid_handler; + } else { + count++; + } + } else { + count++; + } + } } -#endif - if (env->excp_model == POWERPC_EXCP_BOOKE) { - qemu_fprintf(f, "CSRR0 " TARGET_FMT_lx " CSRR1 " TARGET_FMT_lx - " MCSRR0 " TARGET_FMT_lx " MCSRR1 " TARGET_FMT_lx "\n", - env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1], - env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); - - qemu_fprintf(f, " TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx - " ESR " TARGET_FMT_lx " DEAR " TARGET_FMT_lx "\n", - env->spr[SPR_BOOKE_TCR], env->spr[SPR_BOOKE_TSR], - env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]); - - qemu_fprintf(f, " PIR " TARGET_FMT_lx " DECAR " TARGET_FMT_lx - " IVPR " TARGET_FMT_lx " EPCR " TARGET_FMT_lx "\n", - env->spr[SPR_BOOKE_PIR], env->spr[SPR_BOOKE_DECAR], - env->spr[SPR_BOOKE_IVPR], env->spr[SPR_BOOKE_EPCR]); - - qemu_fprintf(f, " MCSR " TARGET_FMT_lx " SPRG8 " TARGET_FMT_lx - " EPR " TARGET_FMT_lx "\n", - env->spr[SPR_BOOKE_MCSR], env->spr[SPR_BOOKE_SPRG8], - env->spr[SPR_BOOKE_EPR]); - - /* FSL-specific */ - qemu_fprintf(f, " MCAR " TARGET_FMT_lx " PID1 " TARGET_FMT_lx - " PID2 " TARGET_FMT_lx " SVR " TARGET_FMT_lx "\n", - env->spr[SPR_Exxx_MCAR], env->spr[SPR_BOOKE_PID1], - env->spr[SPR_BOOKE_PID2], env->spr[SPR_E500_SVR]); - /* - * IVORs are left out as they are large and do not change often -- - * they can be read with "p $ivor0", "p $ivor1", etc. 
- */ - } + return count; +} -#if defined(TARGET_PPC64) - if (env->flags & POWERPC_FLAG_CFAR) { - qemu_fprintf(f, " CFAR " TARGET_FMT_lx"\n", env->cfar); +static void fix_opcode_tables(opc_handler_t **ppc_opcodes) +{ + if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) { + printf("*** WARNING: no opcode defined !\n"); } -#endif +} - if (env->spr_cb[SPR_LPCR].name) { - qemu_fprintf(f, " LPCR " TARGET_FMT_lx "\n", env->spr[SPR_LPCR]); +/*****************************************************************************/ +void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + opcode_t *opc; + + fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN); + for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) { + if (((opc->handler.type & pcc->insns_flags) != 0) || + ((opc->handler.type2 & pcc->insns_flags2) != 0)) { + if (register_insn(cpu->opcodes, opc) < 0) { + error_setg(errp, "ERROR initializing PowerPC instruction " + "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2, + opc->opc3); + return; + } + } } + fix_opcode_tables(cpu->opcodes); + fflush(stdout); + fflush(stderr); +} - switch (env->mmu_model) { - case POWERPC_MMU_32B: - case POWERPC_MMU_601: - case POWERPC_MMU_SOFT_6xx: - case POWERPC_MMU_SOFT_74xx: -#if defined(TARGET_PPC64) - case POWERPC_MMU_64B: - case POWERPC_MMU_2_03: - case POWERPC_MMU_2_06: - case POWERPC_MMU_2_07: - case POWERPC_MMU_3_00: -#endif - if (env->spr_cb[SPR_SDR1].name) { /* SDR1 Exists */ - qemu_fprintf(f, " SDR1 " TARGET_FMT_lx " ", env->spr[SPR_SDR1]); +void destroy_ppc_opcodes(PowerPCCPU *cpu) +{ + opc_handler_t **table, **table_2; + int i, j, k; + + for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) { + if (cpu->opcodes[i] == &invalid_handler) { + continue; } - if (env->spr_cb[SPR_PTCR].name) { /* PTCR Exists */ - qemu_fprintf(f, " PTCR " TARGET_FMT_lx " ", env->spr[SPR_PTCR]); + if (is_indirect_opcode(cpu->opcodes[i])) { + table = ind_table(cpu->opcodes[i]); + for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) { + if (table[j] == &invalid_handler) { + continue; + } + if (is_indirect_opcode(table[j])) { + table_2 = ind_table(table[j]); + for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) { + if (table_2[k] != &invalid_handler && + is_indirect_opcode(table_2[k])) { + g_free((opc_handler_t *)((uintptr_t)table_2[k] & + ~PPC_INDIRECT)); + } + } + g_free((opc_handler_t *)((uintptr_t)table[j] & + ~PPC_INDIRECT)); + } + } + g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] & + ~PPC_INDIRECT)); + } + } +} + +#if defined(PPC_DUMP_CPU) +static void dump_ppc_insns(CPUPPCState *env) +{ + opc_handler_t **table, *handler; + const char *p, *q; + uint8_t opc1, opc2, opc3, opc4; + + printf("Instructions set:\n"); + /* opc1 is 6 bits long */ + for (opc1 = 0x00; opc1 < PPC_CPU_OPCODES_LEN; opc1++) { + table = env->opcodes; + handler = table[opc1]; + if (is_indirect_opcode(handler)) { + /* opc2 is 5 bits long */ + for (opc2 = 0; opc2 < PPC_CPU_INDIRECT_OPCODES_LEN; opc2++) { + table = env->opcodes; + handler = env->opcodes[opc1]; + table = ind_table(handler); + handler = table[opc2]; + if (is_indirect_opcode(handler)) { + table = ind_table(handler); + /* opc3 is 5 bits long */ + for (opc3 = 0; opc3 < PPC_CPU_INDIRECT_OPCODES_LEN; + opc3++) { + handler = table[opc3]; + if (is_indirect_opcode(handler)) { + table = ind_table(handler); + /* opc4 is 5 bits long */ + for (opc4 = 0; opc4 < PPC_CPU_INDIRECT_OPCODES_LEN; + opc4++) { + handler = table[opc4]; + if (handler->handler != &gen_invalid) { + printf("INSN: %02x %02x %02x %02x 
-- " + "(%02d %04d %02d) : %s\n", + opc1, opc2, opc3, opc4, + opc1, (opc3 << 5) | opc2, opc4, + handler->oname); + } + } + } else { + if (handler->handler != &gen_invalid) { + /* Special hack to properly dump SPE insns */ + p = strchr(handler->oname, '_'); + if (p == NULL) { + printf("INSN: %02x %02x %02x (%02d %04d) : " + "%s\n", + opc1, opc2, opc3, opc1, + (opc3 << 5) | opc2, + handler->oname); + } else { + q = "speundef"; + if ((p - handler->oname) != strlen(q) + || (memcmp(handler->oname, q, strlen(q)) + != 0)) { + /* First instruction */ + printf("INSN: %02x %02x %02x" + "(%02d %04d) : %.*s\n", + opc1, opc2 << 1, opc3, opc1, + (opc3 << 6) | (opc2 << 1), + (int)(p - handler->oname), + handler->oname); + } + if (strcmp(p + 1, q) != 0) { + /* Second instruction */ + printf("INSN: %02x %02x %02x " + "(%02d %04d) : %s\n", opc1, + (opc2 << 1) | 1, opc3, opc1, + (opc3 << 6) | (opc2 << 1) | 1, + p + 1); + } + } + } + } + } + } else { + if (handler->handler != &gen_invalid) { + printf("INSN: %02x %02x -- (%02d %04d) : %s\n", + opc1, opc2, opc1, opc2, handler->oname); + } + } + } + } else { + if (handler->handler != &gen_invalid) { + printf("INSN: %02x -- -- (%02d ----) : %s\n", + opc1, opc1, handler->oname); + } } - qemu_fprintf(f, " DAR " TARGET_FMT_lx " DSISR " TARGET_FMT_lx "\n", - env->spr[SPR_DAR], env->spr[SPR_DSISR]); - break; - case POWERPC_MMU_BOOKE206: - qemu_fprintf(f, " MAS0 " TARGET_FMT_lx " MAS1 " TARGET_FMT_lx - " MAS2 " TARGET_FMT_lx " MAS3 " TARGET_FMT_lx "\n", - env->spr[SPR_BOOKE_MAS0], env->spr[SPR_BOOKE_MAS1], - env->spr[SPR_BOOKE_MAS2], env->spr[SPR_BOOKE_MAS3]); - - qemu_fprintf(f, " MAS4 " TARGET_FMT_lx " MAS6 " TARGET_FMT_lx - " MAS7 " TARGET_FMT_lx " PID " TARGET_FMT_lx "\n", - env->spr[SPR_BOOKE_MAS4], env->spr[SPR_BOOKE_MAS6], - env->spr[SPR_BOOKE_MAS7], env->spr[SPR_BOOKE_PID]); - - qemu_fprintf(f, "MMUCFG " TARGET_FMT_lx " TLB0CFG " TARGET_FMT_lx - " TLB1CFG " TARGET_FMT_lx "\n", - env->spr[SPR_MMUCFG], env->spr[SPR_BOOKE_TLB0CFG], - env->spr[SPR_BOOKE_TLB1CFG]); - break; - default: - break; } +} #endif +int ppc_fixup_cpu(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; -#undef RGPL -#undef RFPL + /* + * TCG doesn't (yet) emulate some groups of instructions that are + * implemented on some otherwise supported CPUs (e.g. VSX and + * decimal floating point instructions on POWER7). We remove + * unsupported instruction groups from the cpu state's instruction + * masks and hope the guest can cope. For at least the pseries + * machine, the unavailability of these instructions can be + * advertised to the guest via the device tree. + */ + if ((env->insns_flags & ~PPC_TCG_INSNS) + || (env->insns_flags2 & ~PPC_TCG_INSNS2)) { + warn_report("Disabling some instructions which are not " + "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")", + env->insns_flags & ~PPC_TCG_INSNS, + env->insns_flags2 & ~PPC_TCG_INSNS2); + } + env->insns_flags &= PPC_TCG_INSNS; + env->insns_flags2 &= PPC_TCG_INSNS2; + return 0; } + void ppc_cpu_dump_statistics(CPUState *cs, int flags) { #if defined(DO_PPC_STATISTICS) @@ -7875,94 +8932,112 @@ void ppc_cpu_dump_statistics(CPUState *cs, int flags) #endif } +static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn) +{ + opc_handler_t **table, *handler; + uint32_t inval; + + ctx->opcode = insn; + + LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n", + insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn), + ctx->le_mode ? 
"little" : "big"); + + table = cpu->opcodes; + handler = table[opc1(insn)]; + if (is_indirect_opcode(handler)) { + table = ind_table(handler); + handler = table[opc2(insn)]; + if (is_indirect_opcode(handler)) { + table = ind_table(handler); + handler = table[opc3(insn)]; + if (is_indirect_opcode(handler)) { + table = ind_table(handler); + handler = table[opc4(insn)]; + } + } + } + + /* Is opcode *REALLY* valid ? */ + if (unlikely(handler->handler == &gen_invalid)) { + qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: " + "%02x - %02x - %02x - %02x (%08x) " + TARGET_FMT_lx "\n", + opc1(insn), opc2(insn), opc3(insn), opc4(insn), + insn, ctx->cia); + return false; + } + + if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE) + && Rc(insn))) { + inval = handler->inval2; + } else { + inval = handler->inval1; + } + + if (unlikely((insn & inval) != 0)) { + qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: " + "%02x - %02x - %02x - %02x (%08x) " + TARGET_FMT_lx "\n", insn & inval, + opc1(insn), opc2(insn), opc3(insn), opc4(insn), + insn, ctx->cia); + return false; + } + + handler->handler(ctx); + return true; +} + static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPUPPCState *env = cs->env_ptr; - int bound; + uint32_t hflags = ctx->base.tb->flags; - ctx->exception = POWERPC_EXCP_NONE; ctx->spr_cb = env->spr_cb; - ctx->pr = msr_pr; - ctx->mem_idx = env->dmmu_idx; - ctx->dr = msr_dr; -#if !defined(CONFIG_USER_ONLY) - ctx->hv = msr_hv || !env->has_hv_mode; -#endif + ctx->pr = (hflags >> HFLAGS_PR) & 1; + ctx->mem_idx = (hflags >> HFLAGS_DMMU_IDX) & 7; + ctx->dr = (hflags >> HFLAGS_DR) & 1; + ctx->hv = (hflags >> HFLAGS_HV) & 1; ctx->insns_flags = env->insns_flags; ctx->insns_flags2 = env->insns_flags2; ctx->access_type = -1; ctx->need_access_type = !mmu_is_64bit(env->mmu_model); - ctx->le_mode = !!(env->hflags & (1 << MSR_LE)); + ctx->le_mode = (hflags >> HFLAGS_LE) & 1; ctx->default_tcg_memop_mask = ctx->le_mode ? 
MO_LE : MO_BE; ctx->flags = env->flags; #if defined(TARGET_PPC64) - ctx->sf_mode = msr_is_64bit(env, env->msr); + ctx->sf_mode = (hflags >> HFLAGS_64) & 1; ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR); #endif ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B || env->mmu_model == POWERPC_MMU_601 || env->mmu_model & POWERPC_MMU_64; - ctx->fpu_enabled = !!msr_fp; - if ((env->flags & POWERPC_FLAG_SPE) && msr_spe) { - ctx->spe_enabled = !!msr_spe; - } else { - ctx->spe_enabled = false; - } - if ((env->flags & POWERPC_FLAG_VRE) && msr_vr) { - ctx->altivec_enabled = !!msr_vr; - } else { - ctx->altivec_enabled = false; - } - if ((env->flags & POWERPC_FLAG_VSX) && msr_vsx) { - ctx->vsx_enabled = !!msr_vsx; - } else { - ctx->vsx_enabled = false; - } - if ((env->flags & POWERPC_FLAG_SCV) - && (env->spr[SPR_FSCR] & (1ull << FSCR_SCV))) { - ctx->scv_enabled = true; - } else { - ctx->scv_enabled = false; - } -#if defined(TARGET_PPC64) - if ((env->flags & POWERPC_FLAG_TM) && msr_tm) { - ctx->tm_enabled = !!msr_tm; - } else { - ctx->tm_enabled = false; - } -#endif - ctx->gtse = !!(env->spr[SPR_LPCR] & LPCR_GTSE); - if ((env->flags & POWERPC_FLAG_SE) && msr_se) { - ctx->singlestep_enabled = CPU_SINGLE_STEP; - } else { - ctx->singlestep_enabled = 0; + ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1; + ctx->spe_enabled = (hflags >> HFLAGS_SPE) & 1; + ctx->altivec_enabled = (hflags >> HFLAGS_VR) & 1; + ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1; + ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1; + ctx->gtse = (hflags >> HFLAGS_GTSE) & 1; + + ctx->singlestep_enabled = 0; + if ((hflags >> HFLAGS_SE) & 1) { + ctx->singlestep_enabled |= CPU_SINGLE_STEP; } - if ((env->flags & POWERPC_FLAG_BE) && msr_be) { + if ((hflags >> HFLAGS_BE) & 1) { ctx->singlestep_enabled |= CPU_BRANCH_STEP; } - if ((env->flags & POWERPC_FLAG_DE) && msr_de) { - ctx->singlestep_enabled = 0; - target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0]; - if (dbcr0 & DBCR0_ICMP) { - ctx->singlestep_enabled |= CPU_SINGLE_STEP; - } - if (dbcr0 & DBCR0_BRT) { - ctx->singlestep_enabled |= CPU_BRANCH_STEP; - } - - } if (unlikely(ctx->base.singlestep_enabled)) { ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP; } -#if defined(DO_SINGLE_STEP) && 0 - /* Single step trace mode */ - msr_se = 1; -#endif - bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; - ctx->base.max_insns = MIN(ctx->base.max_insns, bound); + if (ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP)) { + ctx->base.max_insns = 1; + } else { + int bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; + ctx->base.max_insns = MIN(ctx->base.max_insns, bound); + } } static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs) @@ -7979,8 +9054,8 @@ static bool ppc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs, { DisasContext *ctx = container_of(dcbase, DisasContext, base); + gen_update_nip(ctx, ctx->base.pc_next); gen_debug_exception(ctx); - dcbase->is_jmp = DISAS_NORETURN; /* * The address covered by the breakpoint must be included in * [tb->pc, tb->pc + tb->size) in order to for it to be properly @@ -7996,100 +9071,93 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) DisasContext *ctx = container_of(dcbase, DisasContext, base); PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = cs->env_ptr; - opc_handler_t **table, *handler; + uint32_t insn; + bool ok; LOG_DISAS("----------------\n"); LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n", ctx->base.pc_next, ctx->mem_idx, (int)msr_ir); - ctx->opcode = translator_ldl_swap(env, 
ctx->base.pc_next, - need_byteswap(ctx)); - - LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n", - ctx->opcode, opc1(ctx->opcode), opc2(ctx->opcode), - opc3(ctx->opcode), opc4(ctx->opcode), - ctx->le_mode ? "little" : "big"); + ctx->cia = ctx->base.pc_next; + insn = translator_ldl_swap(env, ctx->base.pc_next, need_byteswap(ctx)); ctx->base.pc_next += 4; - table = cpu->opcodes; - handler = table[opc1(ctx->opcode)]; - if (is_indirect_opcode(handler)) { - table = ind_table(handler); - handler = table[opc2(ctx->opcode)]; - if (is_indirect_opcode(handler)) { - table = ind_table(handler); - handler = table[opc3(ctx->opcode)]; - if (is_indirect_opcode(handler)) { - table = ind_table(handler); - handler = table[opc4(ctx->opcode)]; - } - } - } - /* Is opcode *REALLY* valid ? */ - if (unlikely(handler->handler == &gen_invalid)) { - qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: " - "%02x - %02x - %02x - %02x (%08x) " - TARGET_FMT_lx " %d\n", - opc1(ctx->opcode), opc2(ctx->opcode), - opc3(ctx->opcode), opc4(ctx->opcode), - ctx->opcode, ctx->base.pc_next - 4, (int)msr_ir); - } else { - uint32_t inval; - if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE) - && Rc(ctx->opcode))) { - inval = handler->inval2; - } else { - inval = handler->inval1; - } - - if (unlikely((ctx->opcode & inval) != 0)) { - qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: " - "%02x - %02x - %02x - %02x (%08x) " - TARGET_FMT_lx "\n", ctx->opcode & inval, - opc1(ctx->opcode), opc2(ctx->opcode), - opc3(ctx->opcode), opc4(ctx->opcode), - ctx->opcode, ctx->base.pc_next - 4); - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - ctx->base.is_jmp = DISAS_NORETURN; - return; - } + ok = decode_legacy(cpu, ctx, insn); + if (!ok) { + gen_invalid(ctx); } - (*(handler->handler))(ctx); + #if defined(DO_PPC_STATISTICS) handler->count++; #endif - /* Check trace mode exceptions */ - if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP && - (ctx->base.pc_next <= 0x100 || ctx->base.pc_next > 0xF00) && - ctx->exception != POWERPC_SYSCALL && - ctx->exception != POWERPC_EXCP_TRAP && - ctx->exception != POWERPC_EXCP_BRANCH)) { - uint32_t excp = gen_prep_dbgex(ctx); - gen_exception_nip(ctx, excp, ctx->base.pc_next); - } - - if (tcg_check_temp_count()) { - qemu_log("Opcode %02x %02x %02x %02x (%08x) leaked " - "temporaries\n", opc1(ctx->opcode), opc2(ctx->opcode), - opc3(ctx->opcode), opc4(ctx->opcode), ctx->opcode); - } - ctx->base.is_jmp = ctx->exception == POWERPC_EXCP_NONE ? - DISAS_NEXT : DISAS_NORETURN; + translator_loop_temp_check(&ctx->base); } static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); + DisasJumpType is_jmp = ctx->base.is_jmp; + target_ulong nip = ctx->base.pc_next; + int sse; + + if (is_jmp == DISAS_NORETURN) { + /* We have already exited the TB. */ + return; + } - if (ctx->exception == POWERPC_EXCP_NONE) { - gen_goto_tb(ctx, 0, ctx->base.pc_next); - } else if (ctx->exception != POWERPC_EXCP_BRANCH) { - if (unlikely(ctx->base.singlestep_enabled)) { + /* Honor single stepping. 
*/ + sse = ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP); + if (unlikely(sse)) { + switch (is_jmp) { + case DISAS_TOO_MANY: + case DISAS_EXIT_UPDATE: + case DISAS_CHAIN_UPDATE: + gen_update_nip(ctx, nip); + break; + case DISAS_EXIT: + case DISAS_CHAIN: + break; + default: + g_assert_not_reached(); + } + + if (sse & GDBSTUB_SINGLE_STEP) { gen_debug_exception(ctx); + return; } - /* Generate the return instruction */ + /* else CPU_SINGLE_STEP... */ + if (nip <= 0x100 || nip > 0xf00) { + gen_exception(ctx, gen_prep_dbgex(ctx)); + return; + } + } + + switch (is_jmp) { + case DISAS_TOO_MANY: + if (use_goto_tb(ctx, nip)) { + tcg_gen_goto_tb(0); + gen_update_nip(ctx, nip); + tcg_gen_exit_tb(ctx->base.tb, 0); + break; + } + /* fall through */ + case DISAS_CHAIN_UPDATE: + gen_update_nip(ctx, nip); + /* fall through */ + case DISAS_CHAIN: + tcg_gen_lookup_and_goto_ptr(); + break; + + case DISAS_EXIT_UPDATE: + gen_update_nip(ctx, nip); + /* fall through */ + case DISAS_EXIT: tcg_gen_exit_tb(NULL, 0); + break; + + default: + g_assert_not_reached(); } } diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc index b817d31260..57a7f73bba 100644 --- a/target/ppc/translate/vsx-impl.c.inc +++ b/target/ppc/translate/vsx-impl.c.inc @@ -139,7 +139,7 @@ static void gen_lxvwsx(DisasContext *ctx) gen_addr_reg_index(ctx, EA); data = tcg_temp_new_i32(); - tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, MO_TEUL); + tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UL)); tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(xT(ctx->opcode)), 16, 16, data); tcg_temp_free(EA); @@ -162,7 +162,7 @@ static void gen_lxvdsx(DisasContext *ctx) gen_addr_reg_index(ctx, EA); data = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, MO_TEQ); + tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_Q)); tcg_gen_gvec_dup_i64(MO_Q, vsr_full_offset(xT(ctx->opcode)), 16, 16, data); tcg_temp_free(EA); diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 7d6ed80f6b..3191fd0082 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -88,8 +88,8 @@ const char * const riscv_intr_names[] = { "vs_timer", "m_timer", "u_external", + "s_external", "vs_external", - "h_external", "m_external", "reserved", "reserved", @@ -137,7 +137,7 @@ static void set_feature(CPURISCVState *env, int feature) env->features |= (1ULL << feature); } -static void set_resetvec(CPURISCVState *env, int resetvec) +static void set_resetvec(CPURISCVState *env, target_ulong resetvec) { #ifndef CONFIG_USER_ONLY env->resetvec = resetvec; @@ -147,7 +147,11 @@ static void set_resetvec(CPURISCVState *env, int resetvec) static void riscv_any_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RVXLEN | RVI | RVM | RVA | RVF | RVD | RVC | RVU); +#if defined(TARGET_RISCV32) + set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVU); +#elif defined(TARGET_RISCV64) + set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVU); +#endif set_priv_version(env, PRIV_VERSION_1_11_0); } @@ -202,6 +206,7 @@ static void rv32_ibex_cpu_init(Object *obj) set_misa(env, RV32 | RVI | RVM | RVC | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); qdev_prop_set_bit(DEVICE(obj), "mmu", false); + qdev_prop_set_bit(DEVICE(obj), "x-epmp", true); } static void rv32_imafcu_nommu_cpu_init(Object *obj) @@ -282,7 +287,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vscause ", env->vscause); } qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", 
"mtval ", env->mtval); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "stval ", env->sbadaddr); + qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "stval ", env->stval); if (riscv_has_ext(env, RVH)) { qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "htval ", env->htval); qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mtval2 ", env->mtval2); @@ -358,7 +363,7 @@ static void riscv_cpu_reset(DeviceState *dev) env->pc = env->resetvec; env->two_stage_lookup = false; #endif - cs->exception_index = EXCP_NONE; + cs->exception_index = RISCV_EXCP_NONE; env->load_res = -1; set_default_nan_mode(1, &env->fp_status); } @@ -412,6 +417,14 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) if (cpu->cfg.pmp) { set_feature(env, RISCV_FEATURE_PMP); + + /* + * Enhanced PMP should only be available + * on harts with PMP support + */ + if (cpu->cfg.epmp) { + set_feature(env, RISCV_FEATURE_EPMP); + } } set_resetvec(env, cpu->cfg.resetvec); @@ -554,6 +567,8 @@ static Property riscv_cpu_properties[] = { DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), + DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), + DEFINE_PROP_UINT64("resetvec", RISCVCPU, cfg.resetvec, DEFAULT_RSTVEC), DEFINE_PROP_END_OF_LIST(), }; @@ -708,6 +723,7 @@ static const TypeInfo riscv_cpu_type_infos[] = { DEFINE_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), + DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), #endif }; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 0a33d387ba..7e879fb9ca 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -38,6 +38,7 @@ #define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32") #define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64") #define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex") +#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c") #define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31") #define TYPE_RISCV_CPU_SIFIVE_E34 RISCV_CPU_TYPE_NAME("sifive-e34") #define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51") @@ -53,12 +54,6 @@ #define RV32 ((target_ulong)1 << (TARGET_LONG_BITS - 2)) #define RV64 ((target_ulong)2 << (TARGET_LONG_BITS - 2)) -#if defined(TARGET_RISCV32) -#define RVXLEN RV32 -#elif defined(TARGET_RISCV64) -#define RVXLEN RV64 -#endif - #define RV(x) ((target_ulong)1 << (x - 'A')) #define RVI RV('I') @@ -80,6 +75,7 @@ enum { RISCV_FEATURE_MMU, RISCV_FEATURE_PMP, + RISCV_FEATURE_EPMP, RISCV_FEATURE_MISA }; @@ -163,10 +159,8 @@ struct CPURISCVState { target_ulong mie; target_ulong mideleg; - target_ulong sptbr; /* until: priv-1.9.1 */ target_ulong satp; /* since: priv-1.10.0 */ - target_ulong sbadaddr; - target_ulong mbadaddr; + target_ulong stval; target_ulong medeleg; target_ulong stvec; @@ -230,6 +224,7 @@ struct CPURISCVState { /* physical memory protection */ pmp_table_t pmp_state; + target_ulong mseccfg; /* machine specific rdtime callback */ uint64_t (*rdtime_fn)(uint32_t); @@ -303,6 +298,7 @@ struct RISCVCPU { uint16_t elen; bool mmu; bool pmp; + bool epmp; uint64_t resetvec; } cfg; }; @@ -455,10 +451,13 @@ static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, *pflags = flags; } -int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask); -int riscv_csrrw_debug(CPURISCVState *env, int csrno, 
target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask); +RISCVException riscv_csrrw(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask); +RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, + target_ulong write_mask); static inline void riscv_csr_write(CPURISCVState *env, int csrno, target_ulong val) @@ -473,13 +472,16 @@ static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno) return val; } -typedef int (*riscv_csr_predicate_fn)(CPURISCVState *env, int csrno); -typedef int (*riscv_csr_read_fn)(CPURISCVState *env, int csrno, - target_ulong *ret_value); -typedef int (*riscv_csr_write_fn)(CPURISCVState *env, int csrno, - target_ulong new_value); -typedef int (*riscv_csr_op_fn)(CPURISCVState *env, int csrno, - target_ulong *ret_value, target_ulong new_value, target_ulong write_mask); +typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env, + int csrno); +typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno, + target_ulong *ret_value); +typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno, + target_ulong new_value); +typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, + target_ulong write_mask); typedef struct { const char *name; diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index caf4599207..52640e6856 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -153,12 +153,6 @@ /* 32-bit only */ #define CSR_MSTATUSH 0x310 -/* Legacy Counter Setup (priv v1.9.1) */ -/* Update to #define CSR_MCOUNTINHIBIT 0x320 for 1.11.0 */ -#define CSR_MUCOUNTEREN 0x320 -#define CSR_MSCOUNTEREN 0x321 -#define CSR_MHCOUNTEREN 0x322 - /* Machine Trap Handling */ #define CSR_MSCRATCH 0x340 #define CSR_MEPC 0x341 @@ -166,9 +160,6 @@ #define CSR_MTVAL 0x343 #define CSR_MIP 0x344 -/* Legacy Machine Trap Handling (priv v1.9.1) */ -#define CSR_MBADADDR 0x343 - /* Supervisor Trap Setup */ #define CSR_SSTATUS 0x100 #define CSR_SEDELEG 0x102 @@ -184,9 +175,6 @@ #define CSR_STVAL 0x143 #define CSR_SIP 0x144 -/* Legacy Supervisor Trap Handling (priv v1.9.1) */ -#define CSR_SBADADDR 0x143 - /* Supervisor Protection and Translation */ #define CSR_SPTBR 0x180 #define CSR_SATP 0x180 @@ -207,17 +195,6 @@ #define CSR_HTIMEDELTA 0x605 #define CSR_HTIMEDELTAH 0x615 -#if defined(TARGET_RISCV32) -#define HGATP_MODE SATP32_MODE -#define HGATP_VMID SATP32_ASID -#define HGATP_PPN SATP32_PPN -#endif -#if defined(TARGET_RISCV64) -#define HGATP_MODE SATP64_MODE -#define HGATP_VMID SATP64_ASID -#define HGATP_PPN SATP64_PPN -#endif - /* Virtual CSRs */ #define CSR_VSSTATUS 0x200 #define CSR_VSIE 0x204 @@ -232,6 +209,9 @@ #define CSR_MTINST 0x34a #define CSR_MTVAL2 0x34b +/* Enhanced Physical Memory Protection (ePMP) */ +#define CSR_MSECCFG 0x390 +#define CSR_MSECCFGH 0x391 /* Physical Memory Protection */ #define CSR_PMPCFG0 0x3a0 #define CSR_PMPCFG1 0x3a1 @@ -354,14 +334,6 @@ #define CSR_MHPMCOUNTER30H 0xb9e #define CSR_MHPMCOUNTER31H 0xb9f -/* Legacy Machine Protection and Translation (priv v1.9.1) */ -#define CSR_MBASE 0x380 -#define CSR_MBOUND 0x381 -#define CSR_MIBASE 0x382 -#define CSR_MIBOUND 0x383 -#define CSR_MDBASE 0x384 -#define CSR_MDBOUND 0x385 - /* mstatus CSR bits */ #define MSTATUS_UIE 0x00000001 #define MSTATUS_SIE 0x00000002 @@ -375,10 +347,8 @@ #define MSTATUS_FS 0x00006000 #define MSTATUS_XS 0x00018000 #define 
MSTATUS_MPRV 0x00020000 -#define MSTATUS_PUM 0x00040000 /* until: priv-1.9.1 */ #define MSTATUS_SUM 0x00040000 /* since: priv-1.10 */ #define MSTATUS_MXR 0x00080000 -#define MSTATUS_VM 0x1F000000 /* until: priv-1.9.1 */ #define MSTATUS_TVM 0x00100000 /* since: priv-1.10 */ #define MSTATUS_TW 0x00200000 /* since: priv-1.10 */ #define MSTATUS_TSR 0x00400000 /* since: priv-1.10 */ @@ -398,16 +368,6 @@ #define MXL_RV64 2 #define MXL_RV128 3 -#if defined(TARGET_RISCV32) -#define MSTATUS_SD MSTATUS32_SD -#define MISA_MXL MISA32_MXL -#define MXL_VAL MXL_RV32 -#elif defined(TARGET_RISCV64) -#define MSTATUS_SD MSTATUS64_SD -#define MISA_MXL MISA64_MXL -#define MXL_VAL MXL_RV64 -#endif - /* sstatus CSR bits */ #define SSTATUS_UIE 0x00000001 #define SSTATUS_SIE 0x00000002 @@ -416,19 +376,12 @@ #define SSTATUS_SPP 0x00000100 #define SSTATUS_FS 0x00006000 #define SSTATUS_XS 0x00018000 -#define SSTATUS_PUM 0x00040000 /* until: priv-1.9.1 */ #define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */ #define SSTATUS_MXR 0x00080000 #define SSTATUS32_SD 0x80000000 #define SSTATUS64_SD 0x8000000000000000ULL -#if defined(TARGET_RISCV32) -#define SSTATUS_SD SSTATUS32_SD -#elif defined(TARGET_RISCV64) -#define SSTATUS_SD SSTATUS64_SD -#endif - /* hstatus CSR bits */ #define HSTATUS_VSBE 0x00000020 #define HSTATUS_GVA 0x00000040 @@ -443,12 +396,6 @@ #define HSTATUS32_WPRI 0xFF8FF87E #define HSTATUS64_WPRI 0xFFFFFFFFFF8FF87EULL -#if defined(TARGET_RISCV32) -#define HSTATUS_WPRI HSTATUS32_WPRI -#elif defined(TARGET_RISCV64) -#define HSTATUS_WPRI HSTATUS64_WPRI -#endif - #define HCOUNTEREN_CY (1 << 0) #define HCOUNTEREN_TM (1 << 1) #define HCOUNTEREN_IR (1 << 2) @@ -479,17 +426,6 @@ #define SATP64_ASID 0x0FFFF00000000000ULL #define SATP64_PPN 0x00000FFFFFFFFFFFULL -#if defined(TARGET_RISCV32) -#define SATP_MODE SATP32_MODE -#define SATP_ASID SATP32_ASID -#define SATP_PPN SATP32_PPN -#endif -#if defined(TARGET_RISCV64) -#define SATP_MODE SATP64_MODE -#define SATP_ASID SATP64_ASID -#define SATP_PPN SATP64_PPN -#endif - /* VM modes (mstatus.vm) privileged ISA 1.9.1 */ #define VM_1_09_MBARE 0 #define VM_1_09_MBB 1 @@ -527,27 +463,29 @@ #define DEFAULT_RSTVEC 0x1000 /* Exception causes */ -#define EXCP_NONE -1 /* sentinel value */ -#define RISCV_EXCP_INST_ADDR_MIS 0x0 -#define RISCV_EXCP_INST_ACCESS_FAULT 0x1 -#define RISCV_EXCP_ILLEGAL_INST 0x2 -#define RISCV_EXCP_BREAKPOINT 0x3 -#define RISCV_EXCP_LOAD_ADDR_MIS 0x4 -#define RISCV_EXCP_LOAD_ACCESS_FAULT 0x5 -#define RISCV_EXCP_STORE_AMO_ADDR_MIS 0x6 -#define RISCV_EXCP_STORE_AMO_ACCESS_FAULT 0x7 -#define RISCV_EXCP_U_ECALL 0x8 -#define RISCV_EXCP_S_ECALL 0x9 -#define RISCV_EXCP_VS_ECALL 0xa -#define RISCV_EXCP_M_ECALL 0xb -#define RISCV_EXCP_INST_PAGE_FAULT 0xc /* since: priv-1.10.0 */ -#define RISCV_EXCP_LOAD_PAGE_FAULT 0xd /* since: priv-1.10.0 */ -#define RISCV_EXCP_STORE_PAGE_FAULT 0xf /* since: priv-1.10.0 */ -#define RISCV_EXCP_SEMIHOST 0x10 -#define RISCV_EXCP_INST_GUEST_PAGE_FAULT 0x14 -#define RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT 0x15 -#define RISCV_EXCP_VIRT_INSTRUCTION_FAULT 0x16 -#define RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT 0x17 +typedef enum RISCVException { + RISCV_EXCP_NONE = -1, /* sentinel value */ + RISCV_EXCP_INST_ADDR_MIS = 0x0, + RISCV_EXCP_INST_ACCESS_FAULT = 0x1, + RISCV_EXCP_ILLEGAL_INST = 0x2, + RISCV_EXCP_BREAKPOINT = 0x3, + RISCV_EXCP_LOAD_ADDR_MIS = 0x4, + RISCV_EXCP_LOAD_ACCESS_FAULT = 0x5, + RISCV_EXCP_STORE_AMO_ADDR_MIS = 0x6, + RISCV_EXCP_STORE_AMO_ACCESS_FAULT = 0x7, + RISCV_EXCP_U_ECALL = 0x8, + RISCV_EXCP_S_ECALL = 0x9, + 
RISCV_EXCP_VS_ECALL = 0xa, + RISCV_EXCP_M_ECALL = 0xb, + RISCV_EXCP_INST_PAGE_FAULT = 0xc, /* since: priv-1.10.0 */ + RISCV_EXCP_LOAD_PAGE_FAULT = 0xd, /* since: priv-1.10.0 */ + RISCV_EXCP_STORE_PAGE_FAULT = 0xf, /* since: priv-1.10.0 */ + RISCV_EXCP_SEMIHOST = 0x10, + RISCV_EXCP_INST_GUEST_PAGE_FAULT = 0x14, + RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT = 0x15, + RISCV_EXCP_VIRT_INSTRUCTION_FAULT = 0x16, + RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT = 0x17, +} RISCVException; #define RISCV_EXCP_INT_FLAG 0x80000000 #define RISCV_EXCP_INT_MASK 0x7fffffff diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index 21c54ef561..968cb8046f 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -72,7 +72,7 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env) if (irqs) { return ctz64(irqs); /* since non-zero */ } else { - return EXCP_NONE; /* indicates no pending interrupt */ + return RISCV_EXCP_NONE; /* indicates no pending interrupt */ } } #endif @@ -136,8 +136,8 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) env->vscause = env->scause; env->scause = env->scause_hs; - env->vstval = env->sbadaddr; - env->sbadaddr = env->stval_hs; + env->vstval = env->stval; + env->stval = env->stval_hs; env->vsatp = env->satp; env->satp = env->satp_hs; @@ -159,8 +159,8 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) env->scause_hs = env->scause; env->scause = env->vscause; - env->stval_hs = env->sbadaddr; - env->sbadaddr = env->vstval; + env->stval_hs = env->stval; + env->stval = env->vstval; env->satp_hs = env->satp; env->satp = env->vsatp; @@ -342,12 +342,14 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot, * @first_stage: Are we in first stage translation? * Second stage is used for hypervisor guest translation * @two_stage: Are we going to perform two stage translation + * @is_debug: Is this access from a debugger or the monitor? 
*/ static int get_physical_address(CPURISCVState *env, hwaddr *physical, int *prot, target_ulong addr, target_ulong *fault_pte_addr, int access_type, int mmu_idx, - bool first_stage, bool two_stage) + bool first_stage, bool two_stage, + bool is_debug) { /* NOTE: the env->pc value visible here will not be * correct, but the value visible to the exception handler @@ -403,20 +405,35 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, if (first_stage == true) { if (use_background) { - base = (hwaddr)get_field(env->vsatp, SATP_PPN) << PGSHIFT; - vm = get_field(env->vsatp, SATP_MODE); + if (riscv_cpu_is_32bit(env)) { + base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT; + vm = get_field(env->vsatp, SATP32_MODE); + } else { + base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT; + vm = get_field(env->vsatp, SATP64_MODE); + } } else { - base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT; - vm = get_field(env->satp, SATP_MODE); + if (riscv_cpu_is_32bit(env)) { + base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT; + vm = get_field(env->satp, SATP32_MODE); + } else { + base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT; + vm = get_field(env->satp, SATP64_MODE); + } } widened = 0; } else { - base = (hwaddr)get_field(env->hgatp, HGATP_PPN) << PGSHIFT; - vm = get_field(env->hgatp, HGATP_MODE); + if (riscv_cpu_is_32bit(env)) { + base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT; + vm = get_field(env->hgatp, SATP32_MODE); + } else { + base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT; + vm = get_field(env->hgatp, SATP64_MODE); + } widened = 2; } /* status.SUM will be ignored if execute on background */ - sum = get_field(env->mstatus, MSTATUS_SUM) || use_background; + sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug; switch (vm) { case VM_1_10_SV32: levels = 2; ptidxbits = 10; ptesize = 4; break; @@ -475,7 +492,8 @@ restart: /* Do the second stage translation on the base PTE address. 
*/ int vbase_ret = get_physical_address(env, &vbase, &vbase_prot, base, NULL, MMU_DATA_LOAD, - mmu_idx, false, true); + mmu_idx, false, true, + is_debug); if (vbase_ret != TRANSLATE_SUCCESS) { if (fault_pte_addr) { @@ -615,16 +633,23 @@ static void raise_mmu_exception(CPURISCVState *env, target_ulong address, bool first_stage, bool two_stage) { CPUState *cs = env_cpu(env); - int page_fault_exceptions; + int page_fault_exceptions, vm; + uint64_t stap_mode; + + if (riscv_cpu_is_32bit(env)) { + stap_mode = SATP32_MODE; + } else { + stap_mode = SATP64_MODE; + } + if (first_stage) { - page_fault_exceptions = - get_field(env->satp, SATP_MODE) != VM_1_10_MBARE && - !pmp_violation; + vm = get_field(env->satp, stap_mode); } else { - page_fault_exceptions = - get_field(env->hgatp, HGATP_MODE) != VM_1_10_MBARE && - !pmp_violation; + vm = get_field(env->hgatp, stap_mode); } + + page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation; + switch (access_type) { case MMU_INST_FETCH: if (riscv_cpu_virt_enabled(env) && !first_stage) { @@ -666,13 +691,13 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) int mmu_idx = cpu_mmu_index(&cpu->env, false); if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx, - true, riscv_cpu_virt_enabled(env))) { + true, riscv_cpu_virt_enabled(env), true)) { return -1; } if (riscv_cpu_virt_enabled(env)) { if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL, - 0, mmu_idx, false, true)) { + 0, mmu_idx, false, true, true)) { return -1; } } @@ -691,8 +716,10 @@ void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, if (access_type == MMU_DATA_STORE) { cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT; - } else { + } else if (access_type == MMU_DATA_LOAD) { cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT; + } else { + cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT; } env->badaddr = addr; @@ -768,7 +795,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, /* Two stage lookup */ ret = get_physical_address(env, &pa, &prot, address, &env->guest_phys_fault_addr, access_type, - mmu_idx, true, true); + mmu_idx, true, true, false); /* * A G-stage exception may be triggered during two state lookup. @@ -790,7 +817,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, im_address = pa; ret = get_physical_address(env, &pa, &prot2, im_address, NULL, - access_type, mmu_idx, false, true); + access_type, mmu_idx, false, true, + false); qemu_log_mask(CPU_LOG_MMU, "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical " @@ -825,7 +853,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } else { /* Single stage lookup */ ret = get_physical_address(env, &pa, &prot, address, NULL, - access_type, mmu_idx, true, false); + access_type, mmu_idx, true, false, false); qemu_log_mask(CPU_LOG_MMU, "%s address=%" VADDR_PRIx " ret %d physical " @@ -1023,7 +1051,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) env->mstatus = s; env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1)); env->sepc = env->pc; - env->sbadaddr = tval; + env->stval = tval; env->htval = htval; env->pc = (env->stvec >> 2 << 2) + ((async && (env->stvec & 3) == 1) ? cause * 4 : 0); @@ -1054,7 +1082,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) env->mstatus = s; env->mcause = cause | ~(((target_ulong)-1) >> async); env->mepc = env->pc; - env->mbadaddr = tval; + env->mtval = tval; env->mtval2 = mtval2; env->pc = (env->mtvec >> 2 << 2) + ((async && (env->mtvec & 3) == 1) ? 
cause * 4 : 0); @@ -1069,5 +1097,5 @@ void riscv_cpu_do_interrupt(CPUState *cs) env->two_stage_lookup = false; #endif - cs->exception_index = EXCP_NONE; /* mark handled to qemu */ + cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */ } diff --git a/target/riscv/csr.c b/target/riscv/csr.c index d2585395bf..fe5628fea6 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -35,29 +35,29 @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops) } /* Predicates */ -static int fs(CPURISCVState *env, int csrno) +static RISCVException fs(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) /* loose check condition for fcsr in vector extension */ if ((csrno == CSR_FCSR) && (env->misa & RVV)) { - return 0; + return RISCV_EXCP_NONE; } if (!env->debugger && !riscv_cpu_fp_enabled(env)) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } #endif - return 0; + return RISCV_EXCP_NONE; } -static int vs(CPURISCVState *env, int csrno) +static RISCVException vs(CPURISCVState *env, int csrno) { if (env->misa & RVV) { - return 0; + return RISCV_EXCP_NONE; } - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } -static int ctr(CPURISCVState *env, int csrno) +static RISCVException ctr(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) CPUState *cs = env_cpu(env); @@ -65,7 +65,7 @@ static int ctr(CPURISCVState *env, int csrno) if (!cpu->cfg.ext_counters) { /* The Counters extensions is not enabled */ - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } if (riscv_cpu_virt_enabled(env)) { @@ -73,25 +73,25 @@ static int ctr(CPURISCVState *env, int csrno) case CSR_CYCLE: if (!get_field(env->hcounteren, HCOUNTEREN_CY) && get_field(env->mcounteren, HCOUNTEREN_CY)) { - return -RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } break; case CSR_TIME: if (!get_field(env->hcounteren, HCOUNTEREN_TM) && get_field(env->mcounteren, HCOUNTEREN_TM)) { - return -RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } break; case CSR_INSTRET: if (!get_field(env->hcounteren, HCOUNTEREN_IR) && get_field(env->mcounteren, HCOUNTEREN_IR)) { - return -RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } break; case CSR_HPMCOUNTER3...CSR_HPMCOUNTER31: if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3)) && get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3))) { - return -RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } break; } @@ -100,148 +100,174 @@ static int ctr(CPURISCVState *env, int csrno) case CSR_CYCLEH: if (!get_field(env->hcounteren, HCOUNTEREN_CY) && get_field(env->mcounteren, HCOUNTEREN_CY)) { - return -RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } break; case CSR_TIMEH: if (!get_field(env->hcounteren, HCOUNTEREN_TM) && get_field(env->mcounteren, HCOUNTEREN_TM)) { - return -RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } break; case CSR_INSTRETH: if (!get_field(env->hcounteren, HCOUNTEREN_IR) && get_field(env->mcounteren, HCOUNTEREN_IR)) { - return -RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } break; case CSR_HPMCOUNTER3H...CSR_HPMCOUNTER31H: if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3H)) && get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3H))) { - return -RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } break; } } } #endif - return 0; + 
return RISCV_EXCP_NONE; } -static int ctr32(CPURISCVState *env, int csrno) +static RISCVException ctr32(CPURISCVState *env, int csrno) { if (!riscv_cpu_is_32bit(env)) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } return ctr(env, csrno); } #if !defined(CONFIG_USER_ONLY) -static int any(CPURISCVState *env, int csrno) +static RISCVException any(CPURISCVState *env, int csrno) { - return 0; + return RISCV_EXCP_NONE; } -static int any32(CPURISCVState *env, int csrno) +static RISCVException any32(CPURISCVState *env, int csrno) { if (!riscv_cpu_is_32bit(env)) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } return any(env, csrno); } -static int smode(CPURISCVState *env, int csrno) +static RISCVException smode(CPURISCVState *env, int csrno) { - return -!riscv_has_ext(env, RVS); + if (riscv_has_ext(env, RVS)) { + return RISCV_EXCP_NONE; + } + + return RISCV_EXCP_ILLEGAL_INST; } -static int hmode(CPURISCVState *env, int csrno) +static RISCVException hmode(CPURISCVState *env, int csrno) { if (riscv_has_ext(env, RVS) && riscv_has_ext(env, RVH)) { /* Hypervisor extension is supported */ if ((env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) || env->priv == PRV_M) { - return 0; + return RISCV_EXCP_NONE; } else { - return -RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } } - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } -static int hmode32(CPURISCVState *env, int csrno) +static RISCVException hmode32(CPURISCVState *env, int csrno) { if (!riscv_cpu_is_32bit(env)) { - return 0; + if (riscv_cpu_virt_enabled(env)) { + return RISCV_EXCP_ILLEGAL_INST; + } else { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } } return hmode(env, csrno); } -static int pmp(CPURISCVState *env, int csrno) +static RISCVException pmp(CPURISCVState *env, int csrno) +{ + if (riscv_feature(env, RISCV_FEATURE_PMP)) { + return RISCV_EXCP_NONE; + } + + return RISCV_EXCP_ILLEGAL_INST; +} + +static RISCVException epmp(CPURISCVState *env, int csrno) { - return -!riscv_feature(env, RISCV_FEATURE_PMP); + if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) { + return RISCV_EXCP_NONE; + } + + return RISCV_EXCP_ILLEGAL_INST; } #endif /* User Floating-Point CSRs */ -static int read_fflags(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_fflags(CPURISCVState *env, int csrno, + target_ulong *val) { #if !defined(CONFIG_USER_ONLY) if (!env->debugger && !riscv_cpu_fp_enabled(env)) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } #endif *val = riscv_cpu_get_fflags(env); - return 0; + return RISCV_EXCP_NONE; } -static int write_fflags(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_fflags(CPURISCVState *env, int csrno, + target_ulong val) { #if !defined(CONFIG_USER_ONLY) if (!env->debugger && !riscv_cpu_fp_enabled(env)) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } env->mstatus |= MSTATUS_FS; #endif riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT)); - return 0; + return RISCV_EXCP_NONE; } -static int read_frm(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_frm(CPURISCVState *env, int csrno, + target_ulong *val) { #if !defined(CONFIG_USER_ONLY) if (!env->debugger && !riscv_cpu_fp_enabled(env)) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } #endif *val = env->frm; - return 0; + return RISCV_EXCP_NONE; } -static int write_frm(CPURISCVState *env, int csrno, 
target_ulong val) +static RISCVException write_frm(CPURISCVState *env, int csrno, + target_ulong val) { #if !defined(CONFIG_USER_ONLY) if (!env->debugger && !riscv_cpu_fp_enabled(env)) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } env->mstatus |= MSTATUS_FS; #endif env->frm = val & (FSR_RD >> FSR_RD_SHIFT); - return 0; + return RISCV_EXCP_NONE; } -static int read_fcsr(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_fcsr(CPURISCVState *env, int csrno, + target_ulong *val) { #if !defined(CONFIG_USER_ONLY) if (!env->debugger && !riscv_cpu_fp_enabled(env)) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } #endif *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT) @@ -250,14 +276,15 @@ static int read_fcsr(CPURISCVState *env, int csrno, target_ulong *val) *val |= (env->vxrm << FSR_VXRM_SHIFT) | (env->vxsat << FSR_VXSAT_SHIFT); } - return 0; + return RISCV_EXCP_NONE; } -static int write_fcsr(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_fcsr(CPURISCVState *env, int csrno, + target_ulong val) { #if !defined(CONFIG_USER_ONLY) if (!env->debugger && !riscv_cpu_fp_enabled(env)) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } env->mstatus |= MSTATUS_FS; #endif @@ -267,59 +294,68 @@ static int write_fcsr(CPURISCVState *env, int csrno, target_ulong val) env->vxsat = (val & FSR_VXSAT) >> FSR_VXSAT_SHIFT; } riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT); - return 0; + return RISCV_EXCP_NONE; } -static int read_vtype(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vtype(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vtype; - return 0; + return RISCV_EXCP_NONE; } -static int read_vl(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vl(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vl; - return 0; + return RISCV_EXCP_NONE; } -static int read_vxrm(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vxrm(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vxrm; - return 0; + return RISCV_EXCP_NONE; } -static int write_vxrm(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vxrm(CPURISCVState *env, int csrno, + target_ulong val) { env->vxrm = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_vxsat(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vxsat(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vxsat; - return 0; + return RISCV_EXCP_NONE; } -static int write_vxsat(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vxsat(CPURISCVState *env, int csrno, + target_ulong val) { env->vxsat = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_vstart(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vstart(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vstart; - return 0; + return RISCV_EXCP_NONE; } -static int write_vstart(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vstart(CPURISCVState *env, int csrno, + target_ulong val) { env->vstart = val; - return 0; + return RISCV_EXCP_NONE; } /* User Timers and Counters */ -static int read_instret(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_instret(CPURISCVState *env, int csrno, + target_ulong *val) { #if !defined(CONFIG_USER_ONLY) if (icount_enabled()) { 
@@ -330,10 +366,11 @@ static int read_instret(CPURISCVState *env, int csrno, target_ulong *val) #else *val = cpu_get_host_ticks(); #endif - return 0; + return RISCV_EXCP_NONE; } -static int read_instreth(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_instreth(CPURISCVState *env, int csrno, + target_ulong *val) { #if !defined(CONFIG_USER_ONLY) if (icount_enabled()) { @@ -344,46 +381,50 @@ static int read_instreth(CPURISCVState *env, int csrno, target_ulong *val) #else *val = cpu_get_host_ticks() >> 32; #endif - return 0; + return RISCV_EXCP_NONE; } #if defined(CONFIG_USER_ONLY) -static int read_time(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_time(CPURISCVState *env, int csrno, + target_ulong *val) { *val = cpu_get_host_ticks(); - return 0; + return RISCV_EXCP_NONE; } -static int read_timeh(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_timeh(CPURISCVState *env, int csrno, + target_ulong *val) { *val = cpu_get_host_ticks() >> 32; - return 0; + return RISCV_EXCP_NONE; } #else /* CONFIG_USER_ONLY */ -static int read_time(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_time(CPURISCVState *env, int csrno, + target_ulong *val) { uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0; if (!env->rdtime_fn) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } *val = env->rdtime_fn(env->rdtime_fn_arg) + delta; - return 0; + return RISCV_EXCP_NONE; } -static int read_timeh(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_timeh(CPURISCVState *env, int csrno, + target_ulong *val) { uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0; if (!env->rdtime_fn) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32; - return 0; + return RISCV_EXCP_NONE; } /* Machine constants */ @@ -418,7 +459,7 @@ static const target_ulong delegable_excps = (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)); static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS | - SSTATUS_SUM | SSTATUS_MXR | SSTATUS_SD; + SSTATUS_SUM | SSTATUS_MXR; static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP; static const target_ulong hip_writable_mask = MIP_VSSIP; static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP; @@ -437,22 +478,26 @@ static const char valid_vm_1_10_64[16] = { }; /* Machine Information Registers */ -static int read_zero(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_zero(CPURISCVState *env, int csrno, + target_ulong *val) { - return *val = 0; + *val = 0; + return RISCV_EXCP_NONE; } -static int read_mhartid(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mhartid(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mhartid; - return 0; + return RISCV_EXCP_NONE; } /* Machine Trap Setup */ -static int read_mstatus(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mstatus(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mstatus; - return 0; + return RISCV_EXCP_NONE; } static int validate_vm(CPURISCVState *env, target_ulong vm) @@ -464,7 +509,8 @@ static int validate_vm(CPURISCVState *env, target_ulong vm) } } -static int write_mstatus(CPURISCVState *env, int csrno, target_ulong val) +static 
RISCVException write_mstatus(CPURISCVState *env, int csrno, + target_ulong val) { uint64_t mstatus = env->mstatus; uint64_t mask = 0; @@ -492,19 +538,25 @@ static int write_mstatus(CPURISCVState *env, int csrno, target_ulong val) dirty = ((mstatus & MSTATUS_FS) == MSTATUS_FS) | ((mstatus & MSTATUS_XS) == MSTATUS_XS); - mstatus = set_field(mstatus, MSTATUS_SD, dirty); + if (riscv_cpu_is_32bit(env)) { + mstatus = set_field(mstatus, MSTATUS32_SD, dirty); + } else { + mstatus = set_field(mstatus, MSTATUS64_SD, dirty); + } env->mstatus = mstatus; - return 0; + return RISCV_EXCP_NONE; } -static int read_mstatush(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mstatush(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mstatus >> 32; - return 0; + return RISCV_EXCP_NONE; } -static int write_mstatush(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mstatush(CPURISCVState *env, int csrno, + target_ulong val) { uint64_t valh = (uint64_t)val << 32; uint64_t mask = MSTATUS_MPV | MSTATUS_GVA; @@ -515,26 +567,28 @@ static int write_mstatush(CPURISCVState *env, int csrno, target_ulong val) env->mstatus = (env->mstatus & ~mask) | (valh & mask); - return 0; + return RISCV_EXCP_NONE; } -static int read_misa(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_misa(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->misa; - return 0; + return RISCV_EXCP_NONE; } -static int write_misa(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_misa(CPURISCVState *env, int csrno, + target_ulong val) { if (!riscv_feature(env, RISCV_FEATURE_MISA)) { /* drop write to misa */ - return 0; + return RISCV_EXCP_NONE; } /* 'I' or 'E' must be present */ if (!(val & (RVI | RVE))) { /* It is not, drop write to misa */ - return 0; + return RISCV_EXCP_NONE; } /* 'E' excludes all other extensions */ @@ -542,7 +596,7 @@ static int write_misa(CPURISCVState *env, int csrno, target_ulong val) /* when we support 'E' we can do "val = RVE;" however * for now we just drop writes if 'E' is present. 
*/ - return 0; + return RISCV_EXCP_NONE; } /* Mask extensions that are not supported by this hart */ @@ -564,7 +618,11 @@ static int write_misa(CPURISCVState *env, int csrno, target_ulong val) } /* misa.MXL writes are not supported by QEMU */ - val = (env->misa & MISA_MXL) | (val & ~MISA_MXL); + if (riscv_cpu_is_32bit(env)) { + val = (env->misa & MISA32_MXL) | (val & ~MISA32_MXL); + } else { + val = (env->misa & MISA64_MXL) | (val & ~MISA64_MXL); + } /* flush translation cache */ if (val != env->misa) { @@ -573,55 +631,63 @@ static int write_misa(CPURISCVState *env, int csrno, target_ulong val) env->misa = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_medeleg(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_medeleg(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->medeleg; - return 0; + return RISCV_EXCP_NONE; } -static int write_medeleg(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_medeleg(CPURISCVState *env, int csrno, + target_ulong val) { env->medeleg = (env->medeleg & ~delegable_excps) | (val & delegable_excps); - return 0; + return RISCV_EXCP_NONE; } -static int read_mideleg(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mideleg(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mideleg; - return 0; + return RISCV_EXCP_NONE; } -static int write_mideleg(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mideleg(CPURISCVState *env, int csrno, + target_ulong val) { env->mideleg = (env->mideleg & ~delegable_ints) | (val & delegable_ints); if (riscv_has_ext(env, RVH)) { env->mideleg |= VS_MODE_INTERRUPTS; } - return 0; + return RISCV_EXCP_NONE; } -static int read_mie(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mie(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mie; - return 0; + return RISCV_EXCP_NONE; } -static int write_mie(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mie(CPURISCVState *env, int csrno, + target_ulong val) { env->mie = (env->mie & ~all_ints) | (val & all_ints); - return 0; + return RISCV_EXCP_NONE; } -static int read_mtvec(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mtvec(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mtvec; - return 0; + return RISCV_EXCP_NONE; } -static int write_mtvec(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mtvec(CPURISCVState *env, int csrno, + target_ulong val) { /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ if ((val & 3) < 2) { @@ -629,92 +695,83 @@ static int write_mtvec(CPURISCVState *env, int csrno, target_ulong val) } else { qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n"); } - return 0; -} - -static int read_mcounteren(CPURISCVState *env, int csrno, target_ulong *val) -{ - *val = env->mcounteren; - return 0; -} - -static int write_mcounteren(CPURISCVState *env, int csrno, target_ulong val) -{ - env->mcounteren = val; - return 0; + return RISCV_EXCP_NONE; } -/* This regiser is replaced with CSR_MCOUNTINHIBIT in 1.11.0 */ -static int read_mscounteren(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mcounteren(CPURISCVState *env, int csrno, + target_ulong *val) { - if (env->priv_ver < PRIV_VERSION_1_11_0) { - return -RISCV_EXCP_ILLEGAL_INST; - } *val = env->mcounteren; - return 0; + return RISCV_EXCP_NONE; } -/* This regiser is replaced 
with CSR_MCOUNTINHIBIT in 1.11.0 */ -static int write_mscounteren(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mcounteren(CPURISCVState *env, int csrno, + target_ulong val) { - if (env->priv_ver < PRIV_VERSION_1_11_0) { - return -RISCV_EXCP_ILLEGAL_INST; - } env->mcounteren = val; - return 0; + return RISCV_EXCP_NONE; } /* Machine Trap Handling */ -static int read_mscratch(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mscratch(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mscratch; - return 0; + return RISCV_EXCP_NONE; } -static int write_mscratch(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mscratch(CPURISCVState *env, int csrno, + target_ulong val) { env->mscratch = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_mepc(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mepc(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mepc; - return 0; + return RISCV_EXCP_NONE; } -static int write_mepc(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mepc(CPURISCVState *env, int csrno, + target_ulong val) { env->mepc = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_mcause(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mcause(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mcause; - return 0; + return RISCV_EXCP_NONE; } -static int write_mcause(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mcause(CPURISCVState *env, int csrno, + target_ulong val) { env->mcause = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_mbadaddr(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mtval(CPURISCVState *env, int csrno, + target_ulong *val) { - *val = env->mbadaddr; - return 0; + *val = env->mtval; + return RISCV_EXCP_NONE; } -static int write_mbadaddr(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mtval(CPURISCVState *env, int csrno, + target_ulong val) { - env->mbadaddr = val; - return 0; + env->mtval = val; + return RISCV_EXCP_NONE; } -static int rmw_mip(CPURISCVState *env, int csrno, target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) +static RISCVException rmw_mip(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) { RISCVCPU *cpu = env_archcpu(env); /* Allow software control of delegable interrupts not claimed by hardware */ @@ -731,42 +788,54 @@ static int rmw_mip(CPURISCVState *env, int csrno, target_ulong *ret_value, *ret_value = old_mip; } - return 0; + return RISCV_EXCP_NONE; } /* Supervisor Trap Setup */ -static int read_sstatus(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_sstatus(CPURISCVState *env, int csrno, + target_ulong *val) { target_ulong mask = (sstatus_v1_10_mask); + + if (riscv_cpu_is_32bit(env)) { + mask |= SSTATUS32_SD; + } else { + mask |= SSTATUS64_SD; + } + *val = env->mstatus & mask; - return 0; + return RISCV_EXCP_NONE; } -static int write_sstatus(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_sstatus(CPURISCVState *env, int csrno, + target_ulong val) { target_ulong mask = (sstatus_v1_10_mask); target_ulong newval = (env->mstatus & ~mask) | (val & mask); return write_mstatus(env, CSR_MSTATUS, newval); } -static int read_vsie(CPURISCVState *env, int csrno, target_ulong *val) 
+static RISCVException read_vsie(CPURISCVState *env, int csrno, + target_ulong *val) { /* Shift the VS bits to their S bit location in vsie */ *val = (env->mie & env->hideleg & VS_MODE_INTERRUPTS) >> 1; - return 0; + return RISCV_EXCP_NONE; } -static int read_sie(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_sie(CPURISCVState *env, int csrno, + target_ulong *val) { if (riscv_cpu_virt_enabled(env)) { read_vsie(env, CSR_VSIE, val); } else { *val = env->mie & env->mideleg; } - return 0; + return RISCV_EXCP_NONE; } -static int write_vsie(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vsie(CPURISCVState *env, int csrno, + target_ulong val) { /* Shift the S bits to their VS bit location in mie */ target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) | @@ -784,16 +853,18 @@ static int write_sie(CPURISCVState *env, int csrno, target_ulong val) write_mie(env, CSR_MIE, newval); } - return 0; + return RISCV_EXCP_NONE; } -static int read_stvec(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_stvec(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->stvec; - return 0; + return RISCV_EXCP_NONE; } -static int write_stvec(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_stvec(CPURISCVState *env, int csrno, + target_ulong val) { /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ if ((val & 3) < 2) { @@ -801,72 +872,83 @@ static int write_stvec(CPURISCVState *env, int csrno, target_ulong val) } else { qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n"); } - return 0; + return RISCV_EXCP_NONE; } -static int read_scounteren(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_scounteren(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->scounteren; - return 0; + return RISCV_EXCP_NONE; } -static int write_scounteren(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_scounteren(CPURISCVState *env, int csrno, + target_ulong val) { env->scounteren = val; - return 0; + return RISCV_EXCP_NONE; } /* Supervisor Trap Handling */ -static int read_sscratch(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_sscratch(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->sscratch; - return 0; + return RISCV_EXCP_NONE; } -static int write_sscratch(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_sscratch(CPURISCVState *env, int csrno, + target_ulong val) { env->sscratch = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_sepc(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_sepc(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->sepc; - return 0; + return RISCV_EXCP_NONE; } -static int write_sepc(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_sepc(CPURISCVState *env, int csrno, + target_ulong val) { env->sepc = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_scause(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_scause(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->scause; - return 0; + return RISCV_EXCP_NONE; } -static int write_scause(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_scause(CPURISCVState *env, int csrno, + target_ulong val) { env->scause = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_sbadaddr(CPURISCVState *env, 
int csrno, target_ulong *val) +static RISCVException read_stval(CPURISCVState *env, int csrno, + target_ulong *val) { - *val = env->sbadaddr; - return 0; + *val = env->stval; + return RISCV_EXCP_NONE; } -static int write_sbadaddr(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_stval(CPURISCVState *env, int csrno, + target_ulong val) { - env->sbadaddr = val; - return 0; + env->stval = val; + return RISCV_EXCP_NONE; } -static int rmw_vsip(CPURISCVState *env, int csrno, target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) +static RISCVException rmw_vsip(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) { /* Shift the S bits to their VS bit location in mip */ int ret = rmw_mip(env, 0, ret_value, new_value << 1, @@ -877,8 +959,9 @@ static int rmw_vsip(CPURISCVState *env, int csrno, target_ulong *ret_value, return ret; } -static int rmw_sip(CPURISCVState *env, int csrno, target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) +static RISCVException rmw_sip(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) { int ret; @@ -894,44 +977,58 @@ static int rmw_sip(CPURISCVState *env, int csrno, target_ulong *ret_value, } /* Supervisor Protection and Translation */ -static int read_satp(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_satp(CPURISCVState *env, int csrno, + target_ulong *val) { if (!riscv_feature(env, RISCV_FEATURE_MMU)) { *val = 0; - return 0; + return RISCV_EXCP_NONE; } if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } else { *val = env->satp; } - return 0; + return RISCV_EXCP_NONE; } -static int write_satp(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_satp(CPURISCVState *env, int csrno, + target_ulong val) { + int vm, mask, asid; + if (!riscv_feature(env, RISCV_FEATURE_MMU)) { - return 0; + return RISCV_EXCP_NONE; + } + + if (riscv_cpu_is_32bit(env)) { + vm = validate_vm(env, get_field(val, SATP32_MODE)); + mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN); + asid = (val ^ env->satp) & SATP32_ASID; + } else { + vm = validate_vm(env, get_field(val, SATP64_MODE)); + mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN); + asid = (val ^ env->satp) & SATP64_ASID; } - if (validate_vm(env, get_field(val, SATP_MODE)) && - ((val ^ env->satp) & (SATP_MODE | SATP_ASID | SATP_PPN))) - { + + if (vm && mask) { if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } else { - if ((val ^ env->satp) & SATP_ASID) { + if (asid) { tlb_flush(env_cpu(env)); } env->satp = val; } } - return 0; + return RISCV_EXCP_NONE; } /* Hypervisor Extensions */ -static int read_hstatus(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hstatus(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->hstatus; if (!riscv_cpu_is_32bit(env)) { @@ -940,10 +1037,11 @@ static int read_hstatus(CPURISCVState *env, int csrno, target_ulong *val) } /* We only support little endian */ *val = set_field(*val, HSTATUS_VSBE, 0); - return 0; + return RISCV_EXCP_NONE; } -static int write_hstatus(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hstatus(CPURISCVState *env, int csrno, + target_ulong val) { env->hstatus = val; if 
(!riscv_cpu_is_32bit(env) && get_field(val, HSTATUS_VSXL) != 2) { @@ -952,35 +1050,40 @@ static int write_hstatus(CPURISCVState *env, int csrno, target_ulong val) if (get_field(val, HSTATUS_VSBE) != 0) { qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests."); } - return 0; + return RISCV_EXCP_NONE; } -static int read_hedeleg(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hedeleg(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->hedeleg; - return 0; + return RISCV_EXCP_NONE; } -static int write_hedeleg(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hedeleg(CPURISCVState *env, int csrno, + target_ulong val) { env->hedeleg = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_hideleg(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hideleg(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->hideleg; - return 0; + return RISCV_EXCP_NONE; } -static int write_hideleg(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hideleg(CPURISCVState *env, int csrno, + target_ulong val) { env->hideleg = val; - return 0; + return RISCV_EXCP_NONE; } -static int rmw_hvip(CPURISCVState *env, int csrno, target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) +static RISCVException rmw_hvip(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) { int ret = rmw_mip(env, 0, ret_value, new_value, write_mask & hvip_writable_mask); @@ -990,8 +1093,9 @@ static int rmw_hvip(CPURISCVState *env, int csrno, target_ulong *ret_value, return ret; } -static int rmw_hip(CPURISCVState *env, int csrno, target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) +static RISCVException rmw_hip(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) { int ret = rmw_mip(env, 0, ret_value, new_value, write_mask & hip_writable_mask); @@ -1001,103 +1105,119 @@ static int rmw_hip(CPURISCVState *env, int csrno, target_ulong *ret_value, return ret; } -static int read_hie(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hie(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mie & VS_MODE_INTERRUPTS; - return 0; + return RISCV_EXCP_NONE; } -static int write_hie(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hie(CPURISCVState *env, int csrno, + target_ulong val) { target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) | (val & VS_MODE_INTERRUPTS); return write_mie(env, CSR_MIE, newval); } -static int read_hcounteren(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hcounteren(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->hcounteren; - return 0; + return RISCV_EXCP_NONE; } -static int write_hcounteren(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hcounteren(CPURISCVState *env, int csrno, + target_ulong val) { env->hcounteren = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_hgeie(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hgeie(CPURISCVState *env, int csrno, + target_ulong *val) { qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN."); - return 0; + return RISCV_EXCP_NONE; } -static int write_hgeie(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hgeie(CPURISCVState *env, int csrno, + target_ulong 
val) { qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN."); - return 0; + return RISCV_EXCP_NONE; } -static int read_htval(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_htval(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->htval; - return 0; + return RISCV_EXCP_NONE; } -static int write_htval(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_htval(CPURISCVState *env, int csrno, + target_ulong val) { env->htval = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_htinst(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_htinst(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->htinst; - return 0; + return RISCV_EXCP_NONE; } -static int write_htinst(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_htinst(CPURISCVState *env, int csrno, + target_ulong val) { - return 0; + return RISCV_EXCP_NONE; } -static int read_hgeip(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hgeip(CPURISCVState *env, int csrno, + target_ulong *val) { qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN."); - return 0; + return RISCV_EXCP_NONE; } -static int write_hgeip(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hgeip(CPURISCVState *env, int csrno, + target_ulong val) { qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN."); - return 0; + return RISCV_EXCP_NONE; } -static int read_hgatp(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hgatp(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->hgatp; - return 0; + return RISCV_EXCP_NONE; } -static int write_hgatp(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hgatp(CPURISCVState *env, int csrno, + target_ulong val) { env->hgatp = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_htimedelta(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_htimedelta(CPURISCVState *env, int csrno, + target_ulong *val) { if (!env->rdtime_fn) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } *val = env->htimedelta; - return 0; + return RISCV_EXCP_NONE; } -static int write_htimedelta(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_htimedelta(CPURISCVState *env, int csrno, + target_ulong val) { if (!env->rdtime_fn) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } if (riscv_cpu_is_32bit(env)) { @@ -1105,162 +1225,199 @@ static int write_htimedelta(CPURISCVState *env, int csrno, target_ulong val) } else { env->htimedelta = val; } - return 0; + return RISCV_EXCP_NONE; } -static int read_htimedeltah(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_htimedeltah(CPURISCVState *env, int csrno, + target_ulong *val) { if (!env->rdtime_fn) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } *val = env->htimedelta >> 32; - return 0; + return RISCV_EXCP_NONE; } -static int write_htimedeltah(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_htimedeltah(CPURISCVState *env, int csrno, + target_ulong val) { if (!env->rdtime_fn) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val); - return 0; + return RISCV_EXCP_NONE; } /* Virtual CSR Registers */ -static int read_vsstatus(CPURISCVState *env, int csrno, target_ulong 
*val) +static RISCVException read_vsstatus(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vsstatus; - return 0; + return RISCV_EXCP_NONE; } -static int write_vsstatus(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vsstatus(CPURISCVState *env, int csrno, + target_ulong val) { uint64_t mask = (target_ulong)-1; env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val; - return 0; + return RISCV_EXCP_NONE; } static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->vstvec; - return 0; + return RISCV_EXCP_NONE; } -static int write_vstvec(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vstvec(CPURISCVState *env, int csrno, + target_ulong val) { env->vstvec = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_vsscratch(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vsscratch(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vsscratch; - return 0; + return RISCV_EXCP_NONE; } -static int write_vsscratch(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vsscratch(CPURISCVState *env, int csrno, + target_ulong val) { env->vsscratch = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_vsepc(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vsepc(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vsepc; - return 0; + return RISCV_EXCP_NONE; } -static int write_vsepc(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vsepc(CPURISCVState *env, int csrno, + target_ulong val) { env->vsepc = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_vscause(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vscause(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vscause; - return 0; + return RISCV_EXCP_NONE; } -static int write_vscause(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vscause(CPURISCVState *env, int csrno, + target_ulong val) { env->vscause = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_vstval(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vstval(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vstval; - return 0; + return RISCV_EXCP_NONE; } -static int write_vstval(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vstval(CPURISCVState *env, int csrno, + target_ulong val) { env->vstval = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_vsatp(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vsatp(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vsatp; - return 0; + return RISCV_EXCP_NONE; } -static int write_vsatp(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vsatp(CPURISCVState *env, int csrno, + target_ulong val) { env->vsatp = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_mtval2(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mtval2(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mtval2; - return 0; + return RISCV_EXCP_NONE; } -static int write_mtval2(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mtval2(CPURISCVState *env, int csrno, + target_ulong val) { env->mtval2 = val; - return 0; + return RISCV_EXCP_NONE; } -static int read_mtinst(CPURISCVState *env, int csrno, 
target_ulong *val) +static RISCVException read_mtinst(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mtinst; - return 0; + return RISCV_EXCP_NONE; } -static int write_mtinst(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mtinst(CPURISCVState *env, int csrno, + target_ulong val) { env->mtinst = val; - return 0; + return RISCV_EXCP_NONE; } /* Physical Memory Protection */ -static int read_pmpcfg(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mseccfg(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = mseccfg_csr_read(env); + return RISCV_EXCP_NONE; +} + +static RISCVException write_mseccfg(CPURISCVState *env, int csrno, + target_ulong val) +{ + mseccfg_csr_write(env, val); + return RISCV_EXCP_NONE; +} + +static RISCVException read_pmpcfg(CPURISCVState *env, int csrno, + target_ulong *val) { *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0); - return 0; + return RISCV_EXCP_NONE; } -static int write_pmpcfg(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_pmpcfg(CPURISCVState *env, int csrno, + target_ulong val) { pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val); - return 0; + return RISCV_EXCP_NONE; } -static int read_pmpaddr(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_pmpaddr(CPURISCVState *env, int csrno, + target_ulong *val) { *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0); - return 0; + return RISCV_EXCP_NONE; } -static int write_pmpaddr(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_pmpaddr(CPURISCVState *env, int csrno, + target_ulong val) { pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val); - return 0; + return RISCV_EXCP_NONE; } #endif @@ -1274,10 +1431,11 @@ static int write_pmpaddr(CPURISCVState *env, int csrno, target_ulong val) * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value); */ -int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) +RISCVException riscv_csrrw(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) { - int ret; + RISCVException ret; target_ulong old_value; RISCVCPU *cpu = env_archcpu(env); @@ -1299,21 +1457,21 @@ int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, if ((write_mask && read_only) || (!env->debugger && (effective_priv < get_field(csrno, 0x300)))) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } #endif /* ensure the CSR extension is enabled. 
*/ if (!cpu->cfg.ext_icsr) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } /* check predicate */ if (!csr_ops[csrno].predicate) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } ret = csr_ops[csrno].predicate(env, csrno); - if (ret < 0) { + if (ret != RISCV_EXCP_NONE) { return ret; } @@ -1324,12 +1482,11 @@ int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, /* if no accessor exists then return failure */ if (!csr_ops[csrno].read) { - return -RISCV_EXCP_ILLEGAL_INST; + return RISCV_EXCP_ILLEGAL_INST; } - /* read old value */ ret = csr_ops[csrno].read(env, csrno, &old_value); - if (ret < 0) { + if (ret != RISCV_EXCP_NONE) { return ret; } @@ -1338,7 +1495,7 @@ int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, new_value = (old_value & ~write_mask) | (new_value & write_mask); if (csr_ops[csrno].write) { ret = csr_ops[csrno].write(env, csrno, new_value); - if (ret < 0) { + if (ret != RISCV_EXCP_NONE) { return ret; } } @@ -1349,17 +1506,19 @@ int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, *ret_value = old_value; } - return 0; + return RISCV_EXCP_NONE; } /* * Debugger support. If not in user mode, set env->debugger before the * riscv_csrrw call and clear it after the call. */ -int riscv_csrrw_debug(CPURISCVState *env, int csrno, target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) +RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, + target_ulong *ret_value, + target_ulong new_value, + target_ulong write_mask) { - int ret; + RISCVException ret; #if !defined(CONFIG_USER_ONLY) env->debugger = true; #endif @@ -1419,13 +1578,11 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, write_mstatush }, - [CSR_MSCOUNTEREN] = { "msounteren", any, read_mscounteren, write_mscounteren }, - /* Machine Trap Handling */ [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch }, [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc }, [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause }, - [CSR_MBADADDR] = { "mbadaddr", any, read_mbadaddr, write_mbadaddr }, + [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval }, [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip }, /* Supervisor Trap Setup */ @@ -1438,7 +1595,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch }, [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc }, [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause }, - [CSR_SBADADDR] = { "sbadaddr", smode, read_sbadaddr, write_sbadaddr }, + [CSR_STVAL] = { "stval", smode, read_stval, write_stval }, [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip }, /* Supervisor Protection and Translation */ @@ -1473,6 +1630,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst }, /* Physical Memory Protection */ + [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg }, [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg }, [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg }, [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg }, diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c index 7c4ab92ecb..8700516a14 100644 --- a/target/riscv/fpu_helper.c +++ b/target/riscv/fpu_helper.c @@ -223,13 +223,13 @@ target_ulong helper_fcvt_wu_s(CPURISCVState *env, uint64_t rs1) return (int32_t)float32_to_uint32(frs1, &env->fp_status); } -uint64_t 
helper_fcvt_l_s(CPURISCVState *env, uint64_t rs1) +target_ulong helper_fcvt_l_s(CPURISCVState *env, uint64_t rs1) { float32 frs1 = check_nanbox_s(rs1); return float32_to_int64(frs1, &env->fp_status); } -uint64_t helper_fcvt_lu_s(CPURISCVState *env, uint64_t rs1) +target_ulong helper_fcvt_lu_s(CPURISCVState *env, uint64_t rs1) { float32 frs1 = check_nanbox_s(rs1); return float32_to_uint64(frs1, &env->fp_status); @@ -245,12 +245,12 @@ uint64_t helper_fcvt_s_wu(CPURISCVState *env, target_ulong rs1) return nanbox_s(uint32_to_float32((uint32_t)rs1, &env->fp_status)); } -uint64_t helper_fcvt_s_l(CPURISCVState *env, uint64_t rs1) +uint64_t helper_fcvt_s_l(CPURISCVState *env, target_ulong rs1) { return nanbox_s(int64_to_float32(rs1, &env->fp_status)); } -uint64_t helper_fcvt_s_lu(CPURISCVState *env, uint64_t rs1) +uint64_t helper_fcvt_s_lu(CPURISCVState *env, target_ulong rs1) { return nanbox_s(uint64_to_float32(rs1, &env->fp_status)); } @@ -332,12 +332,12 @@ target_ulong helper_fcvt_wu_d(CPURISCVState *env, uint64_t frs1) return (int32_t)float64_to_uint32(frs1, &env->fp_status); } -uint64_t helper_fcvt_l_d(CPURISCVState *env, uint64_t frs1) +target_ulong helper_fcvt_l_d(CPURISCVState *env, uint64_t frs1) { return float64_to_int64(frs1, &env->fp_status); } -uint64_t helper_fcvt_lu_d(CPURISCVState *env, uint64_t frs1) +target_ulong helper_fcvt_lu_d(CPURISCVState *env, uint64_t frs1) { return float64_to_uint64(frs1, &env->fp_status); } @@ -352,12 +352,12 @@ uint64_t helper_fcvt_d_wu(CPURISCVState *env, target_ulong rs1) return uint32_to_float64((uint32_t)rs1, &env->fp_status); } -uint64_t helper_fcvt_d_l(CPURISCVState *env, uint64_t rs1) +uint64_t helper_fcvt_d_l(CPURISCVState *env, target_ulong rs1) { return int64_to_float64(rs1, &env->fp_status); } -uint64_t helper_fcvt_d_lu(CPURISCVState *env, uint64_t rs1) +uint64_t helper_fcvt_d_lu(CPURISCVState *env, target_ulong rs1) { return uint64_to_float64(rs1, &env->fp_status); } diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c index 5f96b7ea2a..ca78682cf4 100644 --- a/target/riscv/gdbstub.c +++ b/target/riscv/gdbstub.c @@ -71,7 +71,7 @@ static int riscv_gdb_get_fpu(CPURISCVState *env, GByteArray *buf, int n) */ result = riscv_csrrw_debug(env, n - 32, &val, 0, 0); - if (result == 0) { + if (result == RISCV_EXCP_NONE) { return gdb_get_regl(buf, val); } } @@ -94,7 +94,7 @@ static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n) */ result = riscv_csrrw_debug(env, n - 32, NULL, val, -1); - if (result == 0) { + if (result == RISCV_EXCP_NONE) { return sizeof(target_ulong); } } @@ -108,7 +108,7 @@ static int riscv_gdb_get_csr(CPURISCVState *env, GByteArray *buf, int n) int result; result = riscv_csrrw_debug(env, n, &val, 0, 0); - if (result == 0) { + if (result == RISCV_EXCP_NONE) { return gdb_get_regl(buf, val); } } @@ -122,7 +122,7 @@ static int riscv_gdb_set_csr(CPURISCVState *env, uint8_t *mem_buf, int n) int result; result = riscv_csrrw_debug(env, n, NULL, val, -1); - if (result == 0) { + if (result == RISCV_EXCP_NONE) { return sizeof(target_ulong); } } diff --git a/target/riscv/helper.h b/target/riscv/helper.h index e3f3f41e89..c7267593c3 100644 --- a/target/riscv/helper.h +++ b/target/riscv/helper.h @@ -27,12 +27,12 @@ DEF_HELPER_FLAGS_3(flt_s, TCG_CALL_NO_RWG, tl, env, i64, i64) DEF_HELPER_FLAGS_3(feq_s, TCG_CALL_NO_RWG, tl, env, i64, i64) DEF_HELPER_FLAGS_2(fcvt_w_s, TCG_CALL_NO_RWG, tl, env, i64) DEF_HELPER_FLAGS_2(fcvt_wu_s, TCG_CALL_NO_RWG, tl, env, i64) -DEF_HELPER_FLAGS_2(fcvt_l_s, TCG_CALL_NO_RWG, i64, env, i64) 
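The helper.h hunk here switches the fcvt helpers from an i64 return type to tl. A minimal sketch of what that implies for the C prototypes, assuming the usual mapping of tl to the guest word type (the *_sketch names below are hypothetical, not QEMU identifiers):

#include <stdint.h>

/*
 * Illustrative sketch only: a DEF_HELPER_FLAGS_2(..., tl, env, i64) line
 * is assumed to declare a C function returning the guest word type,
 * which is 32 bits on a riscv32 build and 64 bits on riscv64.  That is
 * why the fpu_helper.c definitions above move from uint64_t to
 * target_ulong in lockstep with helper.h.
 */
#if defined(TARGET_RISCV32)
typedef uint32_t target_ulong_sketch;   /* 32-bit guest word */
#else
typedef uint64_t target_ulong_sketch;   /* 64-bit guest word */
#endif

struct CPURISCVStateSketch;

/* What the edited fcvt_l_s helper declaration boils down to: */
target_ulong_sketch helper_fcvt_l_s_sketch(struct CPURISCVStateSketch *env,
                                           uint64_t rs1);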
-DEF_HELPER_FLAGS_2(fcvt_lu_s, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fcvt_l_s, TCG_CALL_NO_RWG, tl, env, i64) +DEF_HELPER_FLAGS_2(fcvt_lu_s, TCG_CALL_NO_RWG, tl, env, i64) DEF_HELPER_FLAGS_2(fcvt_s_w, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fcvt_s_wu, TCG_CALL_NO_RWG, i64, env, tl) -DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, i64) -DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, tl) +DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_1(fclass_s, TCG_CALL_NO_RWG_SE, tl, i64) /* Floating Point - Double Precision */ @@ -50,12 +50,12 @@ DEF_HELPER_FLAGS_3(flt_d, TCG_CALL_NO_RWG, tl, env, i64, i64) DEF_HELPER_FLAGS_3(feq_d, TCG_CALL_NO_RWG, tl, env, i64, i64) DEF_HELPER_FLAGS_2(fcvt_w_d, TCG_CALL_NO_RWG, tl, env, i64) DEF_HELPER_FLAGS_2(fcvt_wu_d, TCG_CALL_NO_RWG, tl, env, i64) -DEF_HELPER_FLAGS_2(fcvt_l_d, TCG_CALL_NO_RWG, i64, env, i64) -DEF_HELPER_FLAGS_2(fcvt_lu_d, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fcvt_l_d, TCG_CALL_NO_RWG, tl, env, i64) +DEF_HELPER_FLAGS_2(fcvt_lu_d, TCG_CALL_NO_RWG, tl, env, i64) DEF_HELPER_FLAGS_2(fcvt_d_w, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fcvt_d_wu, TCG_CALL_NO_RWG, i64, env, tl) -DEF_HELPER_FLAGS_2(fcvt_d_l, TCG_CALL_NO_RWG, i64, env, i64) -DEF_HELPER_FLAGS_2(fcvt_d_lu, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fcvt_d_l, TCG_CALL_NO_RWG, i64, env, tl) +DEF_HELPER_FLAGS_2(fcvt_d_lu, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_1(fclass_d, TCG_CALL_NO_RWG_SE, tl, i64) /* Special functions */ @@ -241,7 +241,6 @@ DEF_HELPER_5(vlhuff_v_w, void, ptr, ptr, tl, env, i32) DEF_HELPER_5(vlhuff_v_d, void, ptr, ptr, tl, env, i32) DEF_HELPER_5(vlwuff_v_w, void, ptr, ptr, tl, env, i32) DEF_HELPER_5(vlwuff_v_d, void, ptr, ptr, tl, env, i32) -#ifdef TARGET_RISCV64 DEF_HELPER_6(vamoswapw_v_d, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vamoswapd_v_d, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vamoaddw_v_d, void, ptr, ptr, tl, ptr, env, i32) @@ -260,7 +259,6 @@ DEF_HELPER_6(vamominuw_v_d, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vamominud_v_d, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vamomaxuw_v_d, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vamomaxud_v_d, void, ptr, ptr, tl, ptr, env, i32) -#endif DEF_HELPER_6(vamoswapw_v_w, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vamoaddw_v_w, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vamoxorw_v_w, void, ptr, ptr, tl, ptr, env, i32) diff --git a/target/riscv/insn16-32.decode b/target/riscv/insn16-32.decode deleted file mode 100644 index 0819b17028..0000000000 --- a/target/riscv/insn16-32.decode +++ /dev/null @@ -1,28 +0,0 @@ -# -# RISC-V translation routines for the RVXI Base Integer Instruction Set. -# -# Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de -# Bastian Koppelmann, kbastian@mail.uni-paderborn.de -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2 or later, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program. If not, see <http://www.gnu.org/licenses/>. 
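The RV32C-only and RV64C-only decode files deleted in this part of the series are folded into insn16.decode further down using decodetree overlap groups, with the width check moved to run time via REQUIRE_64BIT. A toy model of the resulting fall-through, under the assumption that patterns inside a { ... } group are tried in order until a trans_* handler returns true (the Toy* names are hypothetical):

#include <stdbool.h>
#include <stdint.h>

/*
 * Toy model, not the literal generated decoder: on a 32-bit hart the
 * RV64C handler refuses the encoding, so the same 16-bit pattern is
 * retried as the RV32C form (c.ld vs. c.flw in quadrant 0, for example).
 */
typedef struct { bool is_rv32; } ToyCtx;

static bool toy_trans_ld(ToyCtx *ctx, uint16_t insn)
{
    (void)insn;
    return !ctx->is_rv32;            /* stands in for REQUIRE_64BIT(ctx) */
}

static bool toy_trans_flw(ToyCtx *ctx, uint16_t insn)
{
    (void)insn;
    return ctx->is_rv32;             /* RV32FC-only encoding */
}

static bool toy_decode_cl_011(ToyCtx *ctx, uint16_t insn)
{
    if (toy_trans_ld(ctx, insn)) {   /* first pattern in the group */
        return true;
    }
    return toy_trans_flw(ctx, insn); /* fall through to the second */
}

int main(void)
{
    ToyCtx rv32 = { .is_rv32 = true };
    ToyCtx rv64 = { .is_rv32 = false };
    /* The same encoding decodes on both, to different instructions. */
    return (toy_decode_cl_011(&rv32, 0) && toy_decode_cl_011(&rv64, 0)) ? 0 : 1;
}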
- -# *** RV32C Standard Extension (Quadrant 0) *** -flw 011 ... ... .. ... 00 @cl_w -fsw 111 ... ... .. ... 00 @cs_w - -# *** RV32C Standard Extension (Quadrant 1) *** -jal 001 ........... 01 @cj rd=1 # C.JAL - -# *** RV32C Standard Extension (Quadrant 2) *** -flw 011 . ..... ..... 10 @c_lwsp -fsw 111 . ..... ..... 10 @c_swsp diff --git a/target/riscv/insn16-64.decode b/target/riscv/insn16-64.decode deleted file mode 100644 index 672e1e916f..0000000000 --- a/target/riscv/insn16-64.decode +++ /dev/null @@ -1,36 +0,0 @@ -# -# RISC-V translation routines for the RVXI Base Integer Instruction Set. -# -# Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de -# Bastian Koppelmann, kbastian@mail.uni-paderborn.de -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2 or later, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program. If not, see <http://www.gnu.org/licenses/>. - -# *** RV64C Standard Extension (Quadrant 0) *** -ld 011 ... ... .. ... 00 @cl_d -sd 111 ... ... .. ... 00 @cs_d - -# *** RV64C Standard Extension (Quadrant 1) *** -{ - illegal 001 - 00000 ----- 01 # c.addiw, RES rd=0 - addiw 001 . ..... ..... 01 @ci -} -subw 100 1 11 ... 00 ... 01 @cs_2 -addw 100 1 11 ... 01 ... 01 @cs_2 - -# *** RV64C Standard Extension (Quadrant 2) *** -{ - illegal 011 - 00000 ----- 10 # c.ldsp, RES rd=0 - ld 011 . ..... ..... 10 @c_ldsp -} -sd 111 . ..... ..... 10 @c_sdsp diff --git a/target/riscv/insn16.decode b/target/riscv/insn16.decode index 1cb93876fe..2e9212663c 100644 --- a/target/riscv/insn16.decode +++ b/target/riscv/insn16.decode @@ -92,6 +92,16 @@ lw 010 ... ... .. ... 00 @cl_w fsd 101 ... ... .. ... 00 @cs_d sw 110 ... ... .. ... 00 @cs_w +# *** RV32C and RV64C specific Standard Extension (Quadrant 0) *** +{ + ld 011 ... ... .. ... 00 @cl_d + flw 011 ... ... .. ... 00 @cl_w +} +{ + sd 111 ... ... .. ... 00 @cs_d + fsw 111 ... ... .. ... 00 @cs_w +} + # *** RV32/64C Standard Extension (Quadrant 1) *** addi 000 . ..... ..... 01 @ci addi 010 . ..... ..... 01 @c_li @@ -111,6 +121,15 @@ jal 101 ........... 01 @cj rd=0 # C.J beq 110 ... ... ..... 01 @cb_z bne 111 ... ... ..... 01 @cb_z +# *** RV64C and RV32C specific Standard Extension (Quadrant 1) *** +{ + c64_illegal 001 - 00000 ----- 01 # c.addiw, RES rd=0 + addiw 001 . ..... ..... 01 @ci + jal 001 ........... 01 @cj rd=1 # C.JAL +} +subw 100 1 11 ... 00 ... 01 @cs_2 +addw 100 1 11 ... 01 ... 01 @cs_2 + # *** RV32/64C Standard Extension (Quadrant 2) *** slli 000 . ..... ..... 10 @c_shift2 fld 001 . ..... ..... 10 @c_ldsp @@ -130,3 +149,14 @@ fld 001 . ..... ..... 10 @c_ldsp } fsd 101 ...... ..... 10 @c_sdsp sw 110 . ..... ..... 10 @c_swsp + +# *** RV32C and RV64C specific Standard Extension (Quadrant 2) *** +{ + c64_illegal 011 - 00000 ----- 10 # c.ldsp, RES rd=0 + ld 011 . ..... ..... 10 @c_ldsp + flw 011 . ..... ..... 10 @c_lwsp +} +{ + sd 111 . ..... ..... 10 @c_sdsp + fsw 111 . ..... ..... 
10 @c_swsp +} diff --git a/target/riscv/insn32-64.decode b/target/riscv/insn32-64.decode deleted file mode 100644 index 8157dee8b7..0000000000 --- a/target/riscv/insn32-64.decode +++ /dev/null @@ -1,88 +0,0 @@ -# -# RISC-V translation routines for the RV Instruction Set. -# -# Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de -# Bastian Koppelmann, kbastian@mail.uni-paderborn.de -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2 or later, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program. If not, see <http://www.gnu.org/licenses/>. - -# This is concatenated with insn32.decode for risc64 targets. -# Most of the fields and formats are there. - -%sh5 20:5 - -@sh5 ....... ..... ..... ... ..... ....... &shift shamt=%sh5 %rs1 %rd - -# *** RV64I Base Instruction Set (in addition to RV32I) *** -lwu ............ ..... 110 ..... 0000011 @i -ld ............ ..... 011 ..... 0000011 @i -sd ....... ..... ..... 011 ..... 0100011 @s -addiw ............ ..... 000 ..... 0011011 @i -slliw 0000000 ..... ..... 001 ..... 0011011 @sh5 -srliw 0000000 ..... ..... 101 ..... 0011011 @sh5 -sraiw 0100000 ..... ..... 101 ..... 0011011 @sh5 -addw 0000000 ..... ..... 000 ..... 0111011 @r -subw 0100000 ..... ..... 000 ..... 0111011 @r -sllw 0000000 ..... ..... 001 ..... 0111011 @r -srlw 0000000 ..... ..... 101 ..... 0111011 @r -sraw 0100000 ..... ..... 101 ..... 0111011 @r - -# *** RV64M Standard Extension (in addition to RV32M) *** -mulw 0000001 ..... ..... 000 ..... 0111011 @r -divw 0000001 ..... ..... 100 ..... 0111011 @r -divuw 0000001 ..... ..... 101 ..... 0111011 @r -remw 0000001 ..... ..... 110 ..... 0111011 @r -remuw 0000001 ..... ..... 111 ..... 0111011 @r - -# *** RV64A Standard Extension (in addition to RV32A) *** -lr_d 00010 . . 00000 ..... 011 ..... 0101111 @atom_ld -sc_d 00011 . . ..... ..... 011 ..... 0101111 @atom_st -amoswap_d 00001 . . ..... ..... 011 ..... 0101111 @atom_st -amoadd_d 00000 . . ..... ..... 011 ..... 0101111 @atom_st -amoxor_d 00100 . . ..... ..... 011 ..... 0101111 @atom_st -amoand_d 01100 . . ..... ..... 011 ..... 0101111 @atom_st -amoor_d 01000 . . ..... ..... 011 ..... 0101111 @atom_st -amomin_d 10000 . . ..... ..... 011 ..... 0101111 @atom_st -amomax_d 10100 . . ..... ..... 011 ..... 0101111 @atom_st -amominu_d 11000 . . ..... ..... 011 ..... 0101111 @atom_st -amomaxu_d 11100 . . ..... ..... 011 ..... 0101111 @atom_st - -#*** Vector AMO operations (in addition to Zvamo) *** -vamoswapd_v 00001 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamoaddd_v 00000 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamoxord_v 00100 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamoandd_v 01100 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamoord_v 01000 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamomind_v 10000 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamomaxd_v 10100 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamominud_v 11000 . . ..... ..... 111 ..... 0101111 @r_wdvm -vamomaxud_v 11100 . . ..... ..... 111 ..... 0101111 @r_wdvm - -# *** RV64F Standard Extension (in addition to RV32F) *** -fcvt_l_s 1100000 00010 ..... ... ..... 
1010011 @r2_rm -fcvt_lu_s 1100000 00011 ..... ... ..... 1010011 @r2_rm -fcvt_s_l 1101000 00010 ..... ... ..... 1010011 @r2_rm -fcvt_s_lu 1101000 00011 ..... ... ..... 1010011 @r2_rm - -# *** RV64D Standard Extension (in addition to RV32D) *** -fcvt_l_d 1100001 00010 ..... ... ..... 1010011 @r2_rm -fcvt_lu_d 1100001 00011 ..... ... ..... 1010011 @r2_rm -fmv_x_d 1110001 00000 ..... 000 ..... 1010011 @r2 -fcvt_d_l 1101001 00010 ..... ... ..... 1010011 @r2_rm -fcvt_d_lu 1101001 00011 ..... ... ..... 1010011 @r2_rm -fmv_d_x 1111001 00000 ..... 000 ..... 1010011 @r2 - -# *** RV32H Base Instruction Set *** -hlv_wu 0110100 00001 ..... 100 ..... 1110011 @r2 -hlv_d 0110110 00000 ..... 100 ..... 1110011 @r2 -hsv_d 0110111 ..... ..... 100 00000 1110011 @r2_s diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index 84080dd18c..8901ba1e1b 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -21,6 +21,7 @@ %rs2 20:5 %rs1 15:5 %rd 7:5 +%sh5 20:5 %sh10 20:10 %csr 20:12 @@ -86,6 +87,8 @@ @sfence_vma ....... ..... ..... ... ..... ....... %rs2 %rs1 @sfence_vm ....... ..... ..... ... ..... ....... %rs1 +# Formats 64: +@sh5 ....... ..... ..... ... ..... ....... &shift shamt=%sh5 %rs1 %rd # *** Privileged Instructions *** ecall 000000000000 00000 000 00000 1110011 @@ -144,6 +147,20 @@ csrrwi ............ ..... 101 ..... 1110011 @csr csrrsi ............ ..... 110 ..... 1110011 @csr csrrci ............ ..... 111 ..... 1110011 @csr +# *** RV64I Base Instruction Set (in addition to RV32I) *** +lwu ............ ..... 110 ..... 0000011 @i +ld ............ ..... 011 ..... 0000011 @i +sd ....... ..... ..... 011 ..... 0100011 @s +addiw ............ ..... 000 ..... 0011011 @i +slliw 0000000 ..... ..... 001 ..... 0011011 @sh5 +srliw 0000000 ..... ..... 101 ..... 0011011 @sh5 +sraiw 0100000 ..... ..... 101 ..... 0011011 @sh5 +addw 0000000 ..... ..... 000 ..... 0111011 @r +subw 0100000 ..... ..... 000 ..... 0111011 @r +sllw 0000000 ..... ..... 001 ..... 0111011 @r +srlw 0000000 ..... ..... 101 ..... 0111011 @r +sraw 0100000 ..... ..... 101 ..... 0111011 @r + # *** RV32M Standard Extension *** mul 0000001 ..... ..... 000 ..... 0110011 @r mulh 0000001 ..... ..... 001 ..... 0110011 @r @@ -154,6 +171,13 @@ divu 0000001 ..... ..... 101 ..... 0110011 @r rem 0000001 ..... ..... 110 ..... 0110011 @r remu 0000001 ..... ..... 111 ..... 0110011 @r +# *** RV64M Standard Extension (in addition to RV32M) *** +mulw 0000001 ..... ..... 000 ..... 0111011 @r +divw 0000001 ..... ..... 100 ..... 0111011 @r +divuw 0000001 ..... ..... 101 ..... 0111011 @r +remw 0000001 ..... ..... 110 ..... 0111011 @r +remuw 0000001 ..... ..... 111 ..... 0111011 @r + # *** RV32A Standard Extension *** lr_w 00010 . . 00000 ..... 010 ..... 0101111 @atom_ld sc_w 00011 . . ..... ..... 010 ..... 0101111 @atom_st @@ -167,6 +191,19 @@ amomax_w 10100 . . ..... ..... 010 ..... 0101111 @atom_st amominu_w 11000 . . ..... ..... 010 ..... 0101111 @atom_st amomaxu_w 11100 . . ..... ..... 010 ..... 0101111 @atom_st +# *** RV64A Standard Extension (in addition to RV32A) *** +lr_d 00010 . . 00000 ..... 011 ..... 0101111 @atom_ld +sc_d 00011 . . ..... ..... 011 ..... 0101111 @atom_st +amoswap_d 00001 . . ..... ..... 011 ..... 0101111 @atom_st +amoadd_d 00000 . . ..... ..... 011 ..... 0101111 @atom_st +amoxor_d 00100 . . ..... ..... 011 ..... 0101111 @atom_st +amoand_d 01100 . . ..... ..... 011 ..... 0101111 @atom_st +amoor_d 01000 . . ..... ..... 011 ..... 0101111 @atom_st +amomin_d 10000 . . ..... ..... 011 ..... 
0101111 @atom_st +amomax_d 10100 . . ..... ..... 011 ..... 0101111 @atom_st +amominu_d 11000 . . ..... ..... 011 ..... 0101111 @atom_st +amomaxu_d 11100 . . ..... ..... 011 ..... 0101111 @atom_st + # *** RV32F Standard Extension *** flw ............ ..... 010 ..... 0000111 @i fsw ....... ..... ..... 010 ..... 0100111 @s @@ -195,6 +232,12 @@ fcvt_s_w 1101000 00000 ..... ... ..... 1010011 @r2_rm fcvt_s_wu 1101000 00001 ..... ... ..... 1010011 @r2_rm fmv_w_x 1111000 00000 ..... 000 ..... 1010011 @r2 +# *** RV64F Standard Extension (in addition to RV32F) *** +fcvt_l_s 1100000 00010 ..... ... ..... 1010011 @r2_rm +fcvt_lu_s 1100000 00011 ..... ... ..... 1010011 @r2_rm +fcvt_s_l 1101000 00010 ..... ... ..... 1010011 @r2_rm +fcvt_s_lu 1101000 00011 ..... ... ..... 1010011 @r2_rm + # *** RV32D Standard Extension *** fld ............ ..... 011 ..... 0000111 @i fsd ....... ..... ..... 011 ..... 0100111 @s @@ -223,6 +266,14 @@ fcvt_wu_d 1100001 00001 ..... ... ..... 1010011 @r2_rm fcvt_d_w 1101001 00000 ..... ... ..... 1010011 @r2_rm fcvt_d_wu 1101001 00001 ..... ... ..... 1010011 @r2_rm +# *** RV64D Standard Extension (in addition to RV32D) *** +fcvt_l_d 1100001 00010 ..... ... ..... 1010011 @r2_rm +fcvt_lu_d 1100001 00011 ..... ... ..... 1010011 @r2_rm +fmv_x_d 1110001 00000 ..... 000 ..... 1010011 @r2 +fcvt_d_l 1101001 00010 ..... ... ..... 1010011 @r2_rm +fcvt_d_lu 1101001 00011 ..... ... ..... 1010011 @r2_rm +fmv_d_x 1111001 00000 ..... 000 ..... 1010011 @r2 + # *** RV32H Base Instruction Set *** hlv_b 0110000 00000 ..... 100 ..... 1110011 @r2 hlv_bu 0110000 00001 ..... 100 ..... 1110011 @r2 @@ -237,7 +288,10 @@ hsv_w 0110101 ..... ..... 100 00000 1110011 @r2_s hfence_gvma 0110001 ..... ..... 000 00000 1110011 @hfence_gvma hfence_vvma 0010001 ..... ..... 000 00000 1110011 @hfence_vvma -# *** RV32V Extension *** +# *** RV64H Base Instruction Set *** +hlv_wu 0110100 00001 ..... 100 ..... 1110011 @r2 +hlv_d 0110110 00000 ..... 100 ..... 1110011 @r2 +hsv_d 0110111 ..... ..... 100 00000 1110011 @r2_s # *** Vector loads and stores are encoded within LOADFP/STORE-FP *** vlb_v ... 100 . 00000 ..... 000 ..... 0000111 @r2_nfvm @@ -592,3 +646,14 @@ vcompress_vm 010111 - ..... ..... 010 ..... 1010111 @r vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm vsetvl 1000000 ..... ..... 111 ..... 1010111 @r + +#*** Vector AMO operations (in addition to Zvamo) *** +vamoswapd_v 00001 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamoaddd_v 00000 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamoxord_v 00100 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamoandd_v 01100 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamoord_v 01000 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamomind_v 10000 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamomaxd_v 10100 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamominud_v 11000 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamomaxud_v 11100 . . ..... ..... 111 ..... 
0101111 @r_wdvm diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc index be8a9f06dd..ab2ec4f0a5 100644 --- a/target/riscv/insn_trans/trans_rva.c.inc +++ b/target/riscv/insn_trans/trans_rva.c.inc @@ -165,60 +165,68 @@ static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a) return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL)); } -#ifdef TARGET_RISCV64 - static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a) { + REQUIRE_64BIT(ctx); return gen_lr(ctx, a, MO_ALIGN | MO_TEQ); } static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a) { + REQUIRE_64BIT(ctx); return gen_sc(ctx, a, (MO_ALIGN | MO_TEQ)); } static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a) { + REQUIRE_64BIT(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a) { + REQUIRE_64BIT(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a) { + REQUIRE_64BIT(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a) { + REQUIRE_64BIT(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a) { + REQUIRE_64BIT(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a) { + REQUIRE_64BIT(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a) { + REQUIRE_64BIT(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a) { + REQUIRE_64BIT(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a) { + REQUIRE_64BIT(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEQ)); } -#endif diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc index 4f832637fa..7e45538ae0 100644 --- a/target/riscv/insn_trans/trans_rvd.c.inc +++ b/target/riscv/insn_trans/trans_rvd.c.inc @@ -358,10 +358,9 @@ static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a) return true; } -#ifdef TARGET_RISCV64 - static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a) { + REQUIRE_64BIT(ctx); REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); @@ -375,6 +374,7 @@ static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a) static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a) { + REQUIRE_64BIT(ctx); REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); @@ -388,15 +388,21 @@ static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a) static bool trans_fmv_x_d(DisasContext *ctx, arg_fmv_x_d *a) { + REQUIRE_64BIT(ctx); REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); +#ifdef TARGET_RISCV64 gen_set_gpr(a->rd, cpu_fpr[a->rs1]); return true; +#else + qemu_build_not_reached(); +#endif } static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a) { + REQUIRE_64BIT(ctx); REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); @@ -412,6 +418,7 @@ static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a) static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a) { + REQUIRE_64BIT(ctx); REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); @@ -427,9 +434,11 @@ static bool 
trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a) static bool trans_fmv_d_x(DisasContext *ctx, arg_fmv_d_x *a) { + REQUIRE_64BIT(ctx); REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); +#ifdef TARGET_RISCV64 TCGv t0 = tcg_temp_new(); gen_get_gpr(t0, a->rs1); @@ -437,5 +446,7 @@ static bool trans_fmv_d_x(DisasContext *ctx, arg_fmv_d_x *a) tcg_temp_free(t0); mark_fs_dirty(ctx); return true; -} +#else + qemu_build_not_reached(); #endif +} diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc index 3dfec8211d..db1c0c9974 100644 --- a/target/riscv/insn_trans/trans_rvf.c.inc +++ b/target/riscv/insn_trans/trans_rvf.c.inc @@ -415,9 +415,9 @@ static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a) return true; } -#ifdef TARGET_RISCV64 static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a) { + REQUIRE_64BIT(ctx); REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); @@ -431,6 +431,7 @@ static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a) static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a) { + REQUIRE_64BIT(ctx); REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); @@ -444,6 +445,7 @@ static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a) static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a) { + REQUIRE_64BIT(ctx); REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); @@ -460,6 +462,7 @@ static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a) static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a) { + REQUIRE_64BIT(ctx); REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); @@ -473,4 +476,3 @@ static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a) tcg_temp_free(t0); return true; } -#endif diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc index ce7ed5affb..6b5edf82b7 100644 --- a/target/riscv/insn_trans/trans_rvh.c.inc +++ b/target/riscv/insn_trans/trans_rvh.c.inc @@ -203,10 +203,11 @@ static bool trans_hsv_w(DisasContext *ctx, arg_hsv_w *a) #endif } -#ifdef TARGET_RISCV64 static bool trans_hlv_wu(DisasContext *ctx, arg_hlv_wu *a) { + REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVH); + #ifndef CONFIG_USER_ONLY TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); @@ -228,7 +229,9 @@ static bool trans_hlv_wu(DisasContext *ctx, arg_hlv_wu *a) static bool trans_hlv_d(DisasContext *ctx, arg_hlv_d *a) { + REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVH); + #ifndef CONFIG_USER_ONLY TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); @@ -250,7 +253,9 @@ static bool trans_hlv_d(DisasContext *ctx, arg_hlv_d *a) static bool trans_hsv_d(DisasContext *ctx, arg_hsv_d *a) { + REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVH); + #ifndef CONFIG_USER_ONLY TCGv t0 = tcg_temp_new(); TCGv dat = tcg_temp_new(); @@ -269,7 +274,6 @@ static bool trans_hsv_d(DisasContext *ctx, arg_hsv_d *a) return false; #endif } -#endif static bool trans_hlvx_hu(DisasContext *ctx, arg_hlvx_hu *a) { diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc index d04ca0394c..bd93f634cf 100644 --- a/target/riscv/insn_trans/trans_rvi.c.inc +++ b/target/riscv/insn_trans/trans_rvi.c.inc @@ -24,6 +24,12 @@ static bool trans_illegal(DisasContext *ctx, arg_empty *a) return true; } +static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a) +{ + REQUIRE_64BIT(ctx); + return trans_illegal(ctx, a); +} + static bool trans_lui(DisasContext *ctx, arg_lui *a) { if (a->rd != 0) { @@ -204,22 +210,23 @@ static bool trans_sw(DisasContext *ctx, arg_sw *a) return gen_store(ctx, a, MO_TESL); } -#ifdef TARGET_RISCV64 static bool trans_lwu(DisasContext *ctx, arg_lwu 
*a) { + REQUIRE_64BIT(ctx); return gen_load(ctx, a, MO_TEUL); } static bool trans_ld(DisasContext *ctx, arg_ld *a) { + REQUIRE_64BIT(ctx); return gen_load(ctx, a, MO_TEQ); } static bool trans_sd(DisasContext *ctx, arg_sd *a) { + REQUIRE_64BIT(ctx); return gen_store(ctx, a, MO_TEQ); } -#endif static bool trans_addi(DisasContext *ctx, arg_addi *a) { @@ -361,14 +368,15 @@ static bool trans_and(DisasContext *ctx, arg_and *a) return gen_arith(ctx, a, &tcg_gen_and_tl); } -#ifdef TARGET_RISCV64 static bool trans_addiw(DisasContext *ctx, arg_addiw *a) { + REQUIRE_64BIT(ctx); return gen_arith_imm_tl(ctx, a, &gen_addw); } static bool trans_slliw(DisasContext *ctx, arg_slliw *a) { + REQUIRE_64BIT(ctx); TCGv source1; source1 = tcg_temp_new(); gen_get_gpr(source1, a->rs1); @@ -383,6 +391,7 @@ static bool trans_slliw(DisasContext *ctx, arg_slliw *a) static bool trans_srliw(DisasContext *ctx, arg_srliw *a) { + REQUIRE_64BIT(ctx); TCGv t = tcg_temp_new(); gen_get_gpr(t, a->rs1); tcg_gen_extract_tl(t, t, a->shamt, 32 - a->shamt); @@ -395,6 +404,7 @@ static bool trans_srliw(DisasContext *ctx, arg_srliw *a) static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a) { + REQUIRE_64BIT(ctx); TCGv t = tcg_temp_new(); gen_get_gpr(t, a->rs1); tcg_gen_sextract_tl(t, t, a->shamt, 32 - a->shamt); @@ -405,16 +415,19 @@ static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a) static bool trans_addw(DisasContext *ctx, arg_addw *a) { + REQUIRE_64BIT(ctx); return gen_arith(ctx, a, &gen_addw); } static bool trans_subw(DisasContext *ctx, arg_subw *a) { + REQUIRE_64BIT(ctx); return gen_arith(ctx, a, &gen_subw); } static bool trans_sllw(DisasContext *ctx, arg_sllw *a) { + REQUIRE_64BIT(ctx); TCGv source1 = tcg_temp_new(); TCGv source2 = tcg_temp_new(); @@ -433,6 +446,7 @@ static bool trans_sllw(DisasContext *ctx, arg_sllw *a) static bool trans_srlw(DisasContext *ctx, arg_srlw *a) { + REQUIRE_64BIT(ctx); TCGv source1 = tcg_temp_new(); TCGv source2 = tcg_temp_new(); @@ -453,6 +467,7 @@ static bool trans_srlw(DisasContext *ctx, arg_srlw *a) static bool trans_sraw(DisasContext *ctx, arg_sraw *a) { + REQUIRE_64BIT(ctx); TCGv source1 = tcg_temp_new(); TCGv source2 = tcg_temp_new(); @@ -473,7 +488,6 @@ static bool trans_sraw(DisasContext *ctx, arg_sraw *a) return true; } -#endif static bool trans_fence(DisasContext *ctx, arg_fence *a) { diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc index 47cd6edc72..10ecc456fc 100644 --- a/target/riscv/insn_trans/trans_rvm.c.inc +++ b/target/riscv/insn_trans/trans_rvm.c.inc @@ -87,34 +87,42 @@ static bool trans_remu(DisasContext *ctx, arg_remu *a) return gen_arith(ctx, a, &gen_remu); } -#ifdef TARGET_RISCV64 static bool trans_mulw(DisasContext *ctx, arg_mulw *a) { + REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVM); + return gen_arith(ctx, a, &gen_mulw); } static bool trans_divw(DisasContext *ctx, arg_divw *a) { + REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVM); + return gen_arith_div_w(ctx, a, &gen_div); } static bool trans_divuw(DisasContext *ctx, arg_divuw *a) { + REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVM); + return gen_arith_div_uw(ctx, a, &gen_divu); } static bool trans_remw(DisasContext *ctx, arg_remw *a) { + REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVM); + return gen_arith_div_w(ctx, a, &gen_rem); } static bool trans_remuw(DisasContext *ctx, arg_remuw *a) { + REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVM); + return gen_arith_div_uw(ctx, a, &gen_remu); } -#endif diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index 
887c6b8883..47914a3b69 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -705,7 +705,6 @@ static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq) gen_helper_vamominuw_v_w, gen_helper_vamomaxuw_v_w }; -#ifdef TARGET_RISCV64 static gen_helper_amo *const fnsd[18] = { gen_helper_vamoswapw_v_d, gen_helper_vamoaddw_v_d, @@ -726,7 +725,6 @@ static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq) gen_helper_vamominud_v_d, gen_helper_vamomaxud_v_d }; -#endif if (tb_cflags(s->base.tb) & CF_PARALLEL) { gen_helper_exit_atomic(cpu_env); @@ -734,12 +732,12 @@ static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq) return true; } else { if (s->sew == 3) { -#ifdef TARGET_RISCV64 - fn = fnsd[seq]; -#else - /* Check done in amo_check(). */ - g_assert_not_reached(); -#endif + if (!is_32bit(s)) { + fn = fnsd[seq]; + } else { + /* Check done in amo_check(). */ + g_assert_not_reached(); + } } else { assert(seq < ARRAY_SIZE(fnsw)); fn = fnsw[seq]; @@ -769,6 +767,11 @@ static bool amo_check(DisasContext *s, arg_rwdvm* a) ((1 << s->sew) >= 4)); } +static bool amo_check64(DisasContext *s, arg_rwdvm* a) +{ + return !is_32bit(s) && amo_check(s, a); +} + GEN_VEXT_TRANS(vamoswapw_v, 0, rwdvm, amo_op, amo_check) GEN_VEXT_TRANS(vamoaddw_v, 1, rwdvm, amo_op, amo_check) GEN_VEXT_TRANS(vamoxorw_v, 2, rwdvm, amo_op, amo_check) @@ -778,17 +781,15 @@ GEN_VEXT_TRANS(vamominw_v, 5, rwdvm, amo_op, amo_check) GEN_VEXT_TRANS(vamomaxw_v, 6, rwdvm, amo_op, amo_check) GEN_VEXT_TRANS(vamominuw_v, 7, rwdvm, amo_op, amo_check) GEN_VEXT_TRANS(vamomaxuw_v, 8, rwdvm, amo_op, amo_check) -#ifdef TARGET_RISCV64 -GEN_VEXT_TRANS(vamoswapd_v, 9, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamoaddd_v, 10, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamoxord_v, 11, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamoandd_v, 12, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamoord_v, 13, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamomind_v, 14, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamomaxd_v, 15, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamominud_v, 16, rwdvm, amo_op, amo_check) -GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check) -#endif +GEN_VEXT_TRANS(vamoswapd_v, 9, rwdvm, amo_op, amo_check64) +GEN_VEXT_TRANS(vamoaddd_v, 10, rwdvm, amo_op, amo_check64) +GEN_VEXT_TRANS(vamoxord_v, 11, rwdvm, amo_op, amo_check64) +GEN_VEXT_TRANS(vamoandd_v, 12, rwdvm, amo_op, amo_check64) +GEN_VEXT_TRANS(vamoord_v, 13, rwdvm, amo_op, amo_check64) +GEN_VEXT_TRANS(vamomind_v, 14, rwdvm, amo_op, amo_check64) +GEN_VEXT_TRANS(vamomaxd_v, 15, rwdvm, amo_op, amo_check64) +GEN_VEXT_TRANS(vamominud_v, 16, rwdvm, amo_op, amo_check64) +GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check64) /* *** Vector Integer Arithmetic Instructions diff --git a/target/riscv/machine.c b/target/riscv/machine.c index 44d4015bd6..16a08302da 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -140,8 +140,8 @@ static const VMStateDescription vmstate_hyper = { const VMStateDescription vmstate_riscv_cpu = { .name = "cpu", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .fields = (VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32), VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32), @@ -165,10 +165,8 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_UINT32(env.miclaim, RISCVCPU), VMSTATE_UINTTL(env.mie, RISCVCPU), VMSTATE_UINTTL(env.mideleg, RISCVCPU), - VMSTATE_UINTTL(env.sptbr, RISCVCPU), VMSTATE_UINTTL(env.satp, RISCVCPU), - 
VMSTATE_UINTTL(env.sbadaddr, RISCVCPU), - VMSTATE_UINTTL(env.mbadaddr, RISCVCPU), + VMSTATE_UINTTL(env.stval, RISCVCPU), VMSTATE_UINTTL(env.medeleg, RISCVCPU), VMSTATE_UINTTL(env.stvec, RISCVCPU), VMSTATE_UINTTL(env.sepc, RISCVCPU), diff --git a/target/riscv/meson.build b/target/riscv/meson.build index 88ab850682..af6c3416b7 100644 --- a/target/riscv/meson.build +++ b/target/riscv/meson.build @@ -1,18 +1,13 @@ # FIXME extra_args should accept files() dir = meson.current_source_dir() -gen32 = [ - decodetree.process('insn16.decode', extra_args: [dir / 'insn16-32.decode', '--static-decode=decode_insn16', '--insnwidth=16']), - decodetree.process('insn32.decode', extra_args: '--static-decode=decode_insn32'), -] -gen64 = [ - decodetree.process('insn16.decode', extra_args: [dir / 'insn16-64.decode', '--static-decode=decode_insn16', '--insnwidth=16']), - decodetree.process('insn32.decode', extra_args: [dir / 'insn32-64.decode', '--static-decode=decode_insn32']), +gen = [ + decodetree.process('insn16.decode', extra_args: ['--static-decode=decode_insn16', '--insnwidth=16']), + decodetree.process('insn32.decode', extra_args: '--static-decode=decode_insn32'), ] riscv_ss = ss.source_set() -riscv_ss.add(when: 'TARGET_RISCV32', if_true: gen32) -riscv_ss.add(when: 'TARGET_RISCV64', if_true: gen64) +riscv_ss.add(gen) riscv_ss.add(files( 'cpu.c', 'cpu_helper.c', diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c index e51188f919..f7e6ea72b3 100644 --- a/target/riscv/monitor.c +++ b/target/riscv/monitor.c @@ -150,9 +150,14 @@ static void mem_info_svxx(Monitor *mon, CPUArchState *env) target_ulong last_size; int last_attr; - base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT; + if (riscv_cpu_is_32bit(env)) { + base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT; + vm = get_field(env->satp, SATP32_MODE); + } else { + base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT; + vm = get_field(env->satp, SATP64_MODE); + } - vm = get_field(env->satp, SATP_MODE); switch (vm) { case VM_1_10_SV32: levels = 2; @@ -215,9 +220,16 @@ void hmp_info_mem(Monitor *mon, const QDict *qdict) return; } - if (!(env->satp & SATP_MODE)) { - monitor_printf(mon, "No translation or protection\n"); - return; + if (riscv_cpu_is_32bit(env)) { + if (!(env->satp & SATP32_MODE)) { + monitor_printf(mon, "No translation or protection\n"); + return; + } + } else { + if (!(env->satp & SATP64_MODE)) { + monitor_printf(mon, "No translation or protection\n"); + return; + } } mem_info_svxx(mon, env); diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index 1eddcb94de..170b494227 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "qemu/log.h" #include "cpu.h" #include "qemu/main-loop.h" #include "exec/exec-all.h" @@ -42,10 +41,10 @@ target_ulong helper_csrrw(CPURISCVState *env, target_ulong src, target_ulong csr) { target_ulong val = 0; - int ret = riscv_csrrw(env, csr, &val, src, -1); + RISCVException ret = riscv_csrrw(env, csr, &val, src, -1); - if (ret < 0) { - riscv_raise_exception(env, -ret, GETPC()); + if (ret != RISCV_EXCP_NONE) { + riscv_raise_exception(env, ret, GETPC()); } return val; } @@ -54,10 +53,10 @@ target_ulong helper_csrrs(CPURISCVState *env, target_ulong src, target_ulong csr, target_ulong rs1_pass) { target_ulong val = 0; - int ret = riscv_csrrw(env, csr, &val, -1, rs1_pass ? src : 0); + RISCVException ret = riscv_csrrw(env, csr, &val, -1, rs1_pass ? 
src : 0); - if (ret < 0) { - riscv_raise_exception(env, -ret, GETPC()); + if (ret != RISCV_EXCP_NONE) { + riscv_raise_exception(env, ret, GETPC()); } return val; } @@ -66,10 +65,10 @@ target_ulong helper_csrrc(CPURISCVState *env, target_ulong src, target_ulong csr, target_ulong rs1_pass) { target_ulong val = 0; - int ret = riscv_csrrw(env, csr, &val, 0, rs1_pass ? src : 0); + RISCVException ret = riscv_csrrw(env, csr, &val, 0, rs1_pass ? src : 0); - if (ret < 0) { - riscv_raise_exception(env, -ret, GETPC()); + if (ret != RISCV_EXCP_NONE) { + riscv_raise_exception(env, ret, GETPC()); } return val; } diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c index cff020122a..78203291de 100644 --- a/target/riscv/pmp.c +++ b/target/riscv/pmp.c @@ -19,10 +19,6 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ -/* - * PMP (Physical Memory Protection) is as-of-yet unused and needs testing. - */ - #include "qemu/osdep.h" #include "qemu/log.h" #include "qapi/error.h" @@ -59,16 +55,6 @@ static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index) return 0; } - /* In TOR mode, need to check the lock bit of the next pmp - * (if there is a next) - */ - const uint8_t a_field = - pmp_get_a_field(env->pmp_state.pmp[pmp_index + 1].cfg_reg); - if ((env->pmp_state.pmp[pmp_index + 1u].cfg_reg & PMP_LOCK) && - (PMP_AMATCH_TOR == a_field)) { - return 1; - } - return 0; } @@ -100,11 +86,42 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index) static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val) { if (pmp_index < MAX_RISCV_PMPS) { - if (!pmp_is_locked(env, pmp_index)) { - env->pmp_state.pmp[pmp_index].cfg_reg = val; - pmp_update_rule(env, pmp_index); + bool locked = true; + + if (riscv_feature(env, RISCV_FEATURE_EPMP)) { + /* mseccfg.RLB is set */ + if (MSECCFG_RLB_ISSET(env)) { + locked = false; + } + + /* mseccfg.MML is not set */ + if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) { + locked = false; + } + + /* mseccfg.MML is set */ + if (MSECCFG_MML_ISSET(env)) { + /* not adding execute bit */ + if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) { + locked = false; + } + /* shared region and not adding X bit */ + if ((val & PMP_LOCK) != PMP_LOCK && + (val & 0x7) != (PMP_WRITE | PMP_EXEC)) { + locked = false; + } + } } else { + if (!pmp_is_locked(env, pmp_index)) { + locked = false; + } + } + + if (locked) { qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n"); + } else { + env->pmp_state.pmp[pmp_index].cfg_reg = val; + pmp_update_rule(env, pmp_index); } } else { qemu_log_mask(LOG_GUEST_ERROR, @@ -227,6 +244,32 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr, { bool ret; + if (riscv_feature(env, RISCV_FEATURE_EPMP)) { + if (MSECCFG_MMWP_ISSET(env)) { + /* + * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set + * so we default to deny all, even for M-mode. + */ + *allowed_privs = 0; + return false; + } else if (MSECCFG_MML_ISSET(env)) { + /* + * The Machine Mode Lockdown (mseccfg.MML) bit is set + * so we can only execute code in M-mode with an applicable + * rule. Other modes are disabled. 
+ */ + if (mode == PRV_M && !(privs & PMP_EXEC)) { + ret = true; + *allowed_privs = PMP_READ | PMP_WRITE; + } else { + ret = false; + *allowed_privs = 0; + } + + return ret; + } + } + if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) { /* * Privileged spec v1.10 states if HW doesn't implement any PMP entry @@ -304,13 +347,94 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); /* - * If the PMP entry is not off and the address is in range, do the priv - * check + * Convert the PMP permissions to match the truth table in the + * ePMP spec. */ + const uint8_t epmp_operation = + ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) | + ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) | + (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) | + ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2); + if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) { - *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC; - if ((mode != PRV_M) || pmp_is_locked(env, i)) { - *allowed_privs &= env->pmp_state.pmp[i].cfg_reg; + /* + * If the PMP entry is not off and the address is in range, + * do the priv check + */ + if (!MSECCFG_MML_ISSET(env)) { + /* + * If mseccfg.MML Bit is not set, do pmp priv check + * This will always apply to regular PMP. + */ + *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC; + if ((mode != PRV_M) || pmp_is_locked(env, i)) { + *allowed_privs &= env->pmp_state.pmp[i].cfg_reg; + } + } else { + /* + * If mseccfg.MML Bit set, do the enhanced pmp priv check + */ + if (mode == PRV_M) { + switch (epmp_operation) { + case 0: + case 1: + case 4: + case 5: + case 6: + case 7: + case 8: + *allowed_privs = 0; + break; + case 2: + case 3: + case 14: + *allowed_privs = PMP_READ | PMP_WRITE; + break; + case 9: + case 10: + *allowed_privs = PMP_EXEC; + break; + case 11: + case 13: + *allowed_privs = PMP_READ | PMP_EXEC; + break; + case 12: + case 15: + *allowed_privs = PMP_READ; + break; + } + } else { + switch (epmp_operation) { + case 0: + case 8: + case 9: + case 12: + case 13: + case 14: + *allowed_privs = 0; + break; + case 1: + case 10: + case 11: + *allowed_privs = PMP_EXEC; + break; + case 2: + case 4: + case 15: + *allowed_privs = PMP_READ; + break; + case 3: + case 6: + *allowed_privs = PMP_READ | PMP_WRITE; + break; + case 5: + *allowed_privs = PMP_READ | PMP_EXEC; + break; + case 7: + *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC; + break; + } + } } ret = ((privs & *allowed_privs) == privs); @@ -380,7 +504,23 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, target_ulong val) { trace_pmpaddr_csr_write(env->mhartid, addr_index, val); + if (addr_index < MAX_RISCV_PMPS) { + /* + * In TOR mode, need to check the lock bit of the next pmp + * (if there is a next). 
+ */ + if (addr_index + 1 < MAX_RISCV_PMPS) { + uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg; + + if (pmp_cfg & PMP_LOCK && + PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg)) { + qemu_log_mask(LOG_GUEST_ERROR, + "ignoring pmpaddr write - pmpcfg + 1 locked\n"); + return; + } + } + if (!pmp_is_locked(env, addr_index)) { env->pmp_state.pmp[addr_index].addr_reg = val; pmp_update_rule(env, addr_index); @@ -414,6 +554,40 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index) } /* + * Handle a write to a mseccfg CSR + */ +void mseccfg_csr_write(CPURISCVState *env, target_ulong val) +{ + int i; + + trace_mseccfg_csr_write(env->mhartid, val); + + /* RLB cannot be enabled if it's already 0 and if any regions are locked */ + if (!MSECCFG_RLB_ISSET(env)) { + for (i = 0; i < MAX_RISCV_PMPS; i++) { + if (pmp_is_locked(env, i)) { + val &= ~MSECCFG_RLB; + break; + } + } + } + + /* Sticky bits */ + val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML)); + + env->mseccfg = val; +} + +/* + * Handle a read from a mseccfg CSR + */ +target_ulong mseccfg_csr_read(CPURISCVState *env) +{ + trace_mseccfg_csr_read(env->mhartid, env->mseccfg); + return env->mseccfg; +} + +/* * Calculate the TLB size if the start address or the end address of * PMP entry is presented in thie TLB page. */ diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h index b82a30f0d5..a9a0b363a7 100644 --- a/target/riscv/pmp.h +++ b/target/riscv/pmp.h @@ -36,6 +36,12 @@ typedef enum { PMP_AMATCH_NAPOT /* Naturally aligned power-of-two region */ } pmp_am_t; +typedef enum { + MSECCFG_MML = 1 << 0, + MSECCFG_MMWP = 1 << 1, + MSECCFG_RLB = 1 << 2 +} mseccfg_field_t; + typedef struct { target_ulong addr_reg; uint8_t cfg_reg; @@ -55,6 +61,10 @@ typedef struct { void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index, target_ulong val); target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index); + +void mseccfg_csr_write(CPURISCVState *env, target_ulong val); +target_ulong mseccfg_csr_read(CPURISCVState *env); + void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, target_ulong val); target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index); @@ -68,4 +78,8 @@ void pmp_update_rule_nums(CPURISCVState *env); uint32_t pmp_get_num_rules(CPURISCVState *env); int pmp_priv_to_page_prot(pmp_priv_t pmp_priv); +#define MSECCFG_MML_ISSET(env) get_field(env->mseccfg, MSECCFG_MML) +#define MSECCFG_MMWP_ISSET(env) get_field(env->mseccfg, MSECCFG_MMWP) +#define MSECCFG_RLB_ISSET(env) get_field(env->mseccfg, MSECCFG_RLB) + #endif diff --git a/target/riscv/trace-events b/target/riscv/trace-events index b7e371ee97..49ec4d3b7d 100644 --- a/target/riscv/trace-events +++ b/target/riscv/trace-events @@ -6,3 +6,6 @@ pmpcfg_csr_read(uint64_t mhartid, uint32_t reg_index, uint64_t val) "hart %" PRI pmpcfg_csr_write(uint64_t mhartid, uint32_t reg_index, uint64_t val) "hart %" PRIu64 ": write reg%" PRIu32", val: 0x%" PRIx64 pmpaddr_csr_read(uint64_t mhartid, uint32_t addr_index, uint64_t val) "hart %" PRIu64 ": read addr%" PRIu32", val: 0x%" PRIx64 pmpaddr_csr_write(uint64_t mhartid, uint32_t addr_index, uint64_t val) "hart %" PRIu64 ": write addr%" PRIu32", val: 0x%" PRIx64 + +mseccfg_csr_read(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": read mseccfg, val: 0x%" PRIx64 +mseccfg_csr_write(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": write mseccfg, val: 0x%" PRIx64 diff --git a/target/riscv/translate.c b/target/riscv/translate.c index 2f9f5ccc62..e945352bca 100644 --- a/target/riscv/translate.c +++ 
b/target/riscv/translate.c @@ -67,17 +67,22 @@ typedef struct DisasContext { CPUState *cs; } DisasContext; -#ifdef TARGET_RISCV64 -#define CASE_OP_32_64(X) case X: case glue(X, W) -#else -#define CASE_OP_32_64(X) case X -#endif - static inline bool has_ext(DisasContext *ctx, uint32_t ext) { return ctx->misa & ext; } +#ifdef TARGET_RISCV32 +# define is_32bit(ctx) true +#elif defined(CONFIG_USER_ONLY) +# define is_32bit(ctx) false +#else +static inline bool is_32bit(DisasContext *ctx) +{ + return (ctx->misa & RV32) == RV32; +} +#endif + /* * RISC-V requires NaN-boxing of narrower width floating point values. * This applies when a 32-bit value is assigned to a 64-bit FP register. @@ -116,7 +121,7 @@ static void generate_exception(DisasContext *ctx, int excp) ctx->base.is_jmp = DISAS_NORETURN; } -static void generate_exception_mbadaddr(DisasContext *ctx, int excp) +static void generate_exception_mtval(DisasContext *ctx, int excp) { tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr)); @@ -160,7 +165,7 @@ static void gen_exception_illegal(DisasContext *ctx) static void gen_exception_inst_addr_mis(DisasContext *ctx) { - generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS); + generate_exception_mtval(ctx, RISCV_EXCP_INST_ADDR_MIS); } static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) @@ -369,6 +374,8 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) static void mark_fs_dirty(DisasContext *ctx) { TCGv tmp; + target_ulong sd; + if (ctx->mstatus_fs == MSTATUS_FS) { return; } @@ -376,13 +383,15 @@ static void mark_fs_dirty(DisasContext *ctx) ctx->mstatus_fs = MSTATUS_FS; tmp = tcg_temp_new(); + sd = is_32bit(ctx) ? MSTATUS32_SD : MSTATUS64_SD; + tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); - tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | MSTATUS_SD); + tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd); tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); if (ctx->virt_enabled) { tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); - tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | MSTATUS_SD); + tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd); tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); } tcg_temp_free(tmp); @@ -426,6 +435,12 @@ EX_SH(12) } \ } while (0) +#define REQUIRE_64BIT(ctx) do { \ + if (is_32bit(ctx)) { \ + return false; \ + } \ +} while (0) + static int ex_rvc_register(DisasContext *ctx, int reg) { return 8 + reg; @@ -473,7 +488,6 @@ static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, return true; } -#ifdef TARGET_RISCV64 static void gen_addw(TCGv ret, TCGv arg1, TCGv arg2) { tcg_gen_add_tl(ret, arg1, arg2); @@ -534,8 +548,6 @@ static bool gen_arith_div_uw(DisasContext *ctx, arg_r *a, return true; } -#endif - static bool gen_arith(DisasContext *ctx, arg_r *a, void(*func)(TCGv, TCGv, TCGv)) { diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index a156573d28..12c31aa4b4 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -751,7 +751,6 @@ GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_w, 32, 32, H4, DO_MIN, l) GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_w, 32, 32, H4, DO_MAX, l) GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_w, 32, 32, H4, DO_MINU, l) GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_w, 32, 32, H4, DO_MAXU, l) -#ifdef TARGET_RISCV64 GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_d, 64, 32, H8, DO_SWAP, l) GEN_VEXT_AMO_NOATOMIC_OP(vamoswapd_v_d, 64, 64, H8, DO_SWAP, q) GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_d, 64, 32, H8, DO_ADD, l) @@ -770,7 +769,6 
@@ GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_d, 64, 32, H8, DO_MINU, l) GEN_VEXT_AMO_NOATOMIC_OP(vamominud_v_d, 64, 64, H8, DO_MINU, q) GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_d, 64, 32, H8, DO_MAXU, l) GEN_VEXT_AMO_NOATOMIC_OP(vamomaxud_v_d, 64, 64, H8, DO_MAXU, q) -#endif static inline void vext_amo_noatomic(void *vs3, void *v0, target_ulong base, @@ -814,7 +812,6 @@ void HELPER(NAME)(void *vs3, void *v0, target_ulong base, \ GETPC()); \ } -#ifdef TARGET_RISCV64 GEN_VEXT_AMO(vamoswapw_v_d, int32_t, int64_t, idx_d, clearq) GEN_VEXT_AMO(vamoswapd_v_d, int64_t, int64_t, idx_d, clearq) GEN_VEXT_AMO(vamoaddw_v_d, int32_t, int64_t, idx_d, clearq) @@ -833,7 +830,6 @@ GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d, clearq) GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d, clearq) GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d, clearq) GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d, clearq) -#endif GEN_VEXT_AMO(vamoswapw_v_w, int32_t, int32_t, idx_w, clearl) GEN_VEXT_AMO(vamoaddw_v_w, int32_t, int32_t, idx_w, clearl) GEN_VEXT_AMO(vamoxorw_v_w, int32_t, int32_t, idx_w, clearl) @@ -2451,7 +2447,7 @@ static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) { int8_t res = a - b; if ((res ^ a) & (a ^ b) & INT8_MIN) { - res = a > 0 ? INT8_MAX : INT8_MIN; + res = a >= 0 ? INT8_MAX : INT8_MIN; env->vxsat = 0x1; } return res; @@ -2461,7 +2457,7 @@ static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b) { int16_t res = a - b; if ((res ^ a) & (a ^ b) & INT16_MIN) { - res = a > 0 ? INT16_MAX : INT16_MIN; + res = a >= 0 ? INT16_MAX : INT16_MIN; env->vxsat = 0x1; } return res; @@ -2471,7 +2467,7 @@ static inline int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) { int32_t res = a - b; if ((res ^ a) & (a ^ b) & INT32_MIN) { - res = a > 0 ? INT32_MAX : INT32_MIN; + res = a >= 0 ? INT32_MAX : INT32_MIN; env->vxsat = 0x1; } return res; @@ -2481,7 +2477,7 @@ static inline int64_t ssub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) { int64_t res = a - b; if ((res ^ a) & (a ^ b) & INT64_MIN) { - res = a > 0 ? INT64_MAX : INT64_MIN; + res = a >= 0 ? 
INT64_MAX : INT64_MIN; env->vxsat = 0x1; } return res; @@ -4796,7 +4792,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t index, i; \ + uint64_t index; \ + uint32_t i; \ \ for (i = 0; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, mlen, i)) { \ @@ -4826,7 +4823,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t index = s1, i; \ + uint64_t index = s1; \ + uint32_t i; \ \ for (i = 0; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, mlen, i)) { \ diff --git a/target/rx/helper.c b/target/rx/helper.c index 3e380a94fe..db6b07e389 100644 --- a/target/rx/helper.c +++ b/target/rx/helper.c @@ -21,7 +21,6 @@ #include "cpu.h" #include "exec/log.h" #include "exec/cpu_ldst.h" -#include "sysemu/sysemu.h" #include "hw/irq.h" void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte) diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c index d35eb39a1b..64455cf309 100644 --- a/target/s390x/cpu.c +++ b/target/s390x/cpu.c @@ -40,7 +40,6 @@ #include "hw/s390x/pv.h" #include "hw/boards.h" #include "sysemu/arch_init.h" -#include "sysemu/sysemu.h" #include "sysemu/tcg.h" #endif #include "fpu/softfloat-helpers.h" diff --git a/target/s390x/diag.c b/target/s390x/diag.c index 1a48429564..d620cd4bd4 100644 --- a/target/s390x/diag.c +++ b/target/s390x/diag.c @@ -15,7 +15,6 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" -#include "exec/address-spaces.h" #include "hw/watchdog/wdt_diag288.h" #include "sysemu/cpus.h" #include "hw/s390x/ipl.h" diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c index c48cd6b46f..20625c2c8f 100644 --- a/target/s390x/excp_helper.c +++ b/target/s390x/excp_helper.c @@ -29,7 +29,6 @@ #include "exec/address-spaces.h" #include "tcg_s390x.h" #ifndef CONFIG_USER_ONLY -#include "sysemu/sysemu.h" #include "hw/s390x/s390_flic.h" #include "hw/boards.h" #endif diff --git a/target/s390x/interrupt.c b/target/s390x/interrupt.c index 4cdbbc8849..9b4d08f2be 100644 --- a/target/s390x/interrupt.c +++ b/target/s390x/interrupt.c @@ -8,7 +8,6 @@ */ #include "qemu/osdep.h" -#include "qemu/log.h" #include "cpu.h" #include "kvm_s390x.h" #include "internal.h" diff --git a/target/s390x/translate.c b/target/s390x/translate.c index 4f953ddfba..e243624d2a 100644 --- a/target/s390x/translate.c +++ b/target/s390x/translate.c @@ -6412,7 +6412,8 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n", s->fields.op, s->fields.op2); gen_illegal_opcode(s); - return DISAS_NORETURN; + ret = DISAS_NORETURN; + goto out; } #ifndef CONFIG_USER_ONLY @@ -6428,7 +6429,8 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) /* privileged instruction */ if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) { gen_program_exception(s, PGM_PRIVILEGED); - return DISAS_NORETURN; + ret = DISAS_NORETURN; + goto out; } /* if AFP is not enabled, instructions and registers are forbidden */ @@ -6455,7 +6457,8 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) } if (dxc) { gen_data_exception(dxc); - return DISAS_NORETURN; + ret = DISAS_NORETURN; + goto out; } } @@ -6463,7 +6466,8 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) if (insn->flags & IF_VEC) { if 
(!((s->base.tb->flags & FLAG_MASK_VECTOR))) { gen_data_exception(0xfe); - return DISAS_NORETURN; + ret = DISAS_NORETURN; + goto out; } } @@ -6484,7 +6488,8 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) || (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) { gen_program_exception(s, PGM_SPECIFICATION); - return DISAS_NORETURN; + ret = DISAS_NORETURN; + goto out; } } @@ -6544,6 +6549,7 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) } #endif +out: /* Advance to the next instruction. */ s->base.pc_next = s->pc_tmp; return ret; diff --git a/target/sh4/helper.c b/target/sh4/helper.c index bd8e034f17..2d622081e8 100644 --- a/target/sh4/helper.c +++ b/target/sh4/helper.c @@ -441,9 +441,12 @@ hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) target_ulong physical; int prot; - get_physical_address(&cpu->env, &physical, &prot, addr, MMU_DATA_LOAD); + if (get_physical_address(&cpu->env, &physical, &prot, addr, MMU_DATA_LOAD) + == MMU_OK) { + return physical; + } - return physical; + return -1; } void cpu_load_tlb(CPUSH4State * env) diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h index 4b2290650b..ff8ae73002 100644 --- a/target/sparc/cpu.h +++ b/target/sparc/cpu.h @@ -615,15 +615,9 @@ int cpu_cwp_inc(CPUSPARCState *env1, int cwp); int cpu_cwp_dec(CPUSPARCState *env1, int cwp); void cpu_set_cwp(CPUSPARCState *env1, int new_cwp); -/* int_helper.c */ -void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno); - /* sun4m.c, sun4u.c */ void cpu_check_irqs(CPUSPARCState *env); -/* leon3.c */ -void leon3_irq_ack(void *irq_manager, int intno); - #if defined (TARGET_SPARC64) static inline int compare_masked(uint64_t x, uint64_t y, uint64_t mask) diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c index 817a463a17..82e8418e46 100644 --- a/target/sparc/int32_helper.c +++ b/target/sparc/int32_helper.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "qemu/main-loop.h" #include "cpu.h" #include "trace.h" #include "exec/log.h" @@ -64,6 +65,38 @@ static const char *excp_name_str(int32_t exception_index) return excp_names[exception_index]; } +void cpu_check_irqs(CPUSPARCState *env) +{ + CPUState *cs; + + /* We should be holding the BQL before we mess with IRQs */ + g_assert(qemu_mutex_iothread_locked()); + + if (env->pil_in && (env->interrupt_index == 0 || + (env->interrupt_index & ~15) == TT_EXTINT)) { + unsigned int i; + + for (i = 15; i > 0; i--) { + if (env->pil_in & (1 << i)) { + int old_interrupt = env->interrupt_index; + + env->interrupt_index = TT_EXTINT | i; + if (old_interrupt != env->interrupt_index) { + cs = env_cpu(env); + trace_sun4m_cpu_interrupt(i); + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } + break; + } + } + } else if (!env->pil_in && (env->interrupt_index & ~15) == TT_EXTINT) { + cs = env_cpu(env); + trace_sun4m_cpu_reset_interrupt(env->interrupt_index & 15); + env->interrupt_index = 0; + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } +} + void sparc_cpu_do_interrupt(CPUState *cs) { SPARCCPU *cpu = SPARC_CPU(cs); @@ -136,40 +169,3 @@ void sparc_cpu_do_interrupt(CPUState *cs) } #endif } - -#if !defined(CONFIG_USER_ONLY) -static void leon3_cache_control_int(CPUSPARCState *env) -{ - uint32_t state = 0; - - if (env->cache_control & CACHE_CTRL_IF) { - /* Instruction cache state */ - state = env->cache_control & CACHE_STATE_MASK; - if (state == CACHE_ENABLED) { - state = CACHE_FROZEN; - 
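
The cpu_check_irqs() body added to int32_helper.c above is essentially a priority encoder: the highest pending level in pil_in (scanned from 15 down to 1) becomes the new interrupt_index, tagged with TT_EXTINT. A minimal sketch of that selection, stripped of the BQL assertion and tracing of the real function (illustrative names only):

/* Pick the level that the sun4m cpu_check_irqs() above would deliver. */
static int highest_pending_level(unsigned int pil_in)
{
    int i;

    for (i = 15; i > 0; i--) {
        if (pil_in & (1u << i)) {
            return i;    /* becomes interrupt_index = TT_EXTINT | i */
        }
    }
    return 0;            /* nothing pending: interrupt_index is cleared */
}
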
trace_int_helper_icache_freeze(); - } - - env->cache_control &= ~CACHE_STATE_MASK; - env->cache_control |= state; - } - - if (env->cache_control & CACHE_CTRL_DF) { - /* Data cache state */ - state = (env->cache_control >> 2) & CACHE_STATE_MASK; - if (state == CACHE_ENABLED) { - state = CACHE_FROZEN; - trace_int_helper_dcache_freeze(); - } - - env->cache_control &= ~(CACHE_STATE_MASK << 2); - env->cache_control |= (state << 2); - } -} - -void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno) -{ - leon3_irq_ack(irq_manager, intno); - leon3_cache_control_int(env); -} -#endif diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c index 7fb8ab211c..793e57c536 100644 --- a/target/sparc/int64_helper.c +++ b/target/sparc/int64_helper.c @@ -62,6 +62,72 @@ static const char * const excp_names[0x80] = { }; #endif +void cpu_check_irqs(CPUSPARCState *env) +{ + CPUState *cs; + uint32_t pil = env->pil_in | + (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER)); + + /* We should be holding the BQL before we mess with IRQs */ + g_assert(qemu_mutex_iothread_locked()); + + /* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */ + if (env->ivec_status & 0x20) { + return; + } + cs = env_cpu(env); + /* + * check if TM or SM in SOFTINT are set + * setting these also causes interrupt 14 + */ + if (env->softint & (SOFTINT_TIMER | SOFTINT_STIMER)) { + pil |= 1 << 14; + } + + /* + * The bit corresponding to psrpil is (1<< psrpil), + * the next bit is (2 << psrpil). + */ + if (pil < (2 << env->psrpil)) { + if (cs->interrupt_request & CPU_INTERRUPT_HARD) { + trace_sparc64_cpu_check_irqs_reset_irq(env->interrupt_index); + env->interrupt_index = 0; + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } + return; + } + + if (cpu_interrupts_enabled(env)) { + + unsigned int i; + + for (i = 15; i > env->psrpil; i--) { + if (pil & (1 << i)) { + int old_interrupt = env->interrupt_index; + int new_interrupt = TT_EXTINT | i; + + if (unlikely(env->tl > 0 && cpu_tsptr(env)->tt > new_interrupt + && ((cpu_tsptr(env)->tt & 0x1f0) == TT_EXTINT))) { + trace_sparc64_cpu_check_irqs_noset_irq(env->tl, + cpu_tsptr(env)->tt, + new_interrupt); + } else if (old_interrupt != new_interrupt) { + env->interrupt_index = new_interrupt; + trace_sparc64_cpu_check_irqs_set_irq(i, old_interrupt, + new_interrupt); + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } + break; + } + } + } else if (cs->interrupt_request & CPU_INTERRUPT_HARD) { + trace_sparc64_cpu_check_irqs_disabled(pil, env->pil_in, env->softint, + env->interrupt_index); + env->interrupt_index = 0; + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } +} + void sparc_cpu_do_interrupt(CPUState *cs) { SPARCCPU *cpu = SPARC_CPU(cs); diff --git a/target/sparc/trace-events b/target/sparc/trace-events index 6a064e2327..75e7093d5f 100644 --- a/target/sparc/trace-events +++ b/target/sparc/trace-events @@ -10,14 +10,18 @@ mmu_helper_get_phys_addr_code(uint32_t tl, int mmu_idx, uint64_t prim_context, u mmu_helper_get_phys_addr_data(uint32_t tl, int mmu_idx, uint64_t prim_context, uint64_t sec_context, uint64_t address) "tl=%d mmu_idx=%d primary context=0x%"PRIx64" secondary context=0x%"PRIx64" address=0x%"PRIx64 mmu_helper_mmu_fault(uint64_t address, uint64_t paddr, int mmu_idx, uint32_t tl, uint64_t prim_context, uint64_t sec_context) "Translate at 0x%"PRIx64" -> 0x%"PRIx64", mmu_idx=%d tl=%d primary context=0x%"PRIx64" secondary context=0x%"PRIx64 +# int32_helper.c +sun4m_cpu_interrupt(unsigned int level) "Set CPU IRQ %d" +sun4m_cpu_reset_interrupt(unsigned int level) 
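
The sparc64 cpu_check_irqs() added above masks pending interrupts against PSTATE.PIL with the test "pil < (2 << env->psrpil)": bit i of pil stands for level i, so a deliverable level strictly greater than psrpil exists exactly when pil reaches at least 2 << psrpil. A small sketch of just that comparison (illustrative only, not part of the patch):

#include <stdint.h>

/* True when some pending level i > psrpil exists, matching the hunk above. */
static int has_deliverable_irq(uint32_t pil, uint32_t psrpil)
{
    /* e.g. psrpil = 13: threshold is 1 << 14, so only levels 14 and 15 pass */
    return pil >= (2u << psrpil);
}
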
"Reset CPU IRQ %d" + # int64_helper.c int_helper_set_softint(uint32_t softint) "new 0x%08x" int_helper_clear_softint(uint32_t softint) "new 0x%08x" int_helper_write_softint(uint32_t softint) "new 0x%08x" - -# int32_helper.c -int_helper_icache_freeze(void) "Instruction cache: freeze" -int_helper_dcache_freeze(void) "Data cache: freeze" +sparc64_cpu_check_irqs_reset_irq(int intno) "Reset CPU IRQ (current interrupt 0x%x)" +sparc64_cpu_check_irqs_noset_irq(uint32_t tl, uint32_t tt, int intno) "Not setting CPU IRQ: TL=%d current 0x%x >= pending 0x%x" +sparc64_cpu_check_irqs_set_irq(unsigned int i, int old, int new) "Set CPU IRQ %d old=0x%x new=0x%x" +sparc64_cpu_check_irqs_disabled(uint32_t pil, uint32_t pil_in, uint32_t softint, int intno) "Interrupts disabled, pil=0x%08x pil_in=0x%08x softint=0x%08x current interrupt 0x%x" # win_helper.c win_helper_gregset_error(uint32_t pstate) "ERROR in get_gregset: active pstate bits=0x%x" diff --git a/target/unicore32/cpu-param.h b/target/unicore32/cpu-param.h deleted file mode 100644 index 94d8a5daa1..0000000000 --- a/target/unicore32/cpu-param.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * UniCore32 cpu parameters for qemu. - * - * Copyright (C) 2010-2012 Guan Xuetao - * SPDX-License-Identifier: GPL-2.0+ - */ - -#ifndef UNICORE32_CPU_PARAM_H -#define UNICORE32_CPU_PARAM_H 1 - -#define TARGET_LONG_BITS 32 -#define TARGET_PAGE_BITS 12 -#define TARGET_PHYS_ADDR_SPACE_BITS 32 -#define TARGET_VIRT_ADDR_SPACE_BITS 32 -#define NB_MMU_MODES 2 - -#endif diff --git a/target/unicore32/cpu-qom.h b/target/unicore32/cpu-qom.h deleted file mode 100644 index 43621e7479..0000000000 --- a/target/unicore32/cpu-qom.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * QEMU UniCore32 CPU - * - * Copyright (c) 2012 SUSE LINUX Products GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation, or (at your option) any - * later version. See the COPYING file in the top-level directory. - */ -#ifndef QEMU_UC32_CPU_QOM_H -#define QEMU_UC32_CPU_QOM_H - -#include "hw/core/cpu.h" -#include "qom/object.h" - -#define TYPE_UNICORE32_CPU "unicore32-cpu" - -OBJECT_DECLARE_TYPE(UniCore32CPU, UniCore32CPUClass, - UNICORE32_CPU) - -/** - * UniCore32CPUClass: - * @parent_realize: The parent class' realize handler. - * - * A UniCore32 CPU model. - */ -struct UniCore32CPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; -}; - - -#endif diff --git a/target/unicore32/cpu.c b/target/unicore32/cpu.c deleted file mode 100644 index 0258884f84..0000000000 --- a/target/unicore32/cpu.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * QEMU UniCore32 CPU - * - * Copyright (c) 2010-2012 Guan Xuetao - * Copyright (c) 2012 SUSE LINUX Products GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Contributions from 2012-04-01 on are considered under GPL version 2, - * or (at your option) any later version. 
- */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "cpu.h" -#include "migration/vmstate.h" -#include "exec/exec-all.h" - -static void uc32_cpu_set_pc(CPUState *cs, vaddr value) -{ - UniCore32CPU *cpu = UNICORE32_CPU(cs); - - cpu->env.regs[31] = value; -} - -static bool uc32_cpu_has_work(CPUState *cs) -{ - return cs->interrupt_request & - (CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB); -} - -static inline void set_feature(CPUUniCore32State *env, int feature) -{ - env->features |= feature; -} - -/* CPU models */ - -static ObjectClass *uc32_cpu_class_by_name(const char *cpu_model) -{ - ObjectClass *oc; - char *typename; - - typename = g_strdup_printf(UNICORE32_CPU_TYPE_NAME("%s"), cpu_model); - oc = object_class_by_name(typename); - g_free(typename); - if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_UNICORE32_CPU) || - object_class_is_abstract(oc))) { - oc = NULL; - } - return oc; -} - -static void unicore_ii_cpu_initfn(Object *obj) -{ - UniCore32CPU *cpu = UNICORE32_CPU(obj); - CPUUniCore32State *env = &cpu->env; - - env->cp0.c0_cpuid = 0x4d000863; - env->cp0.c0_cachetype = 0x0d152152; - env->cp0.c1_sys = 0x2000; - env->cp0.c2_base = 0x0; - env->cp0.c3_faultstatus = 0x0; - env->cp0.c4_faultaddr = 0x0; - env->ucf64.xregs[UC32_UCF64_FPSCR] = 0; - - set_feature(env, UC32_HWCAP_CMOV); - set_feature(env, UC32_HWCAP_UCF64); -} - -static void uc32_any_cpu_initfn(Object *obj) -{ - UniCore32CPU *cpu = UNICORE32_CPU(obj); - CPUUniCore32State *env = &cpu->env; - - env->cp0.c0_cpuid = 0xffffffff; - env->ucf64.xregs[UC32_UCF64_FPSCR] = 0; - - set_feature(env, UC32_HWCAP_CMOV); - set_feature(env, UC32_HWCAP_UCF64); -} - -static void uc32_cpu_realizefn(DeviceState *dev, Error **errp) -{ - CPUState *cs = CPU(dev); - UniCore32CPUClass *ucc = UNICORE32_CPU_GET_CLASS(dev); - Error *local_err = NULL; - - cpu_exec_realizefn(cs, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return; - } - - qemu_init_vcpu(cs); - - ucc->parent_realize(dev, errp); -} - -static void uc32_cpu_initfn(Object *obj) -{ - UniCore32CPU *cpu = UNICORE32_CPU(obj); - CPUUniCore32State *env = &cpu->env; - - cpu_set_cpustate_pointers(cpu); - -#ifdef CONFIG_USER_ONLY - env->uncached_asr = ASR_MODE_USER; - env->regs[31] = 0; -#else - env->uncached_asr = ASR_MODE_PRIV; - env->regs[31] = 0x03000000; -#endif -} - -static const VMStateDescription vmstate_uc32_cpu = { - .name = "cpu", - .unmigratable = 1, -}; - -#include "hw/core/tcg-cpu-ops.h" - -static struct TCGCPUOps uc32_tcg_ops = { - .initialize = uc32_translate_init, - .cpu_exec_interrupt = uc32_cpu_exec_interrupt, - .tlb_fill = uc32_cpu_tlb_fill, - -#ifndef CONFIG_USER_ONLY - .do_interrupt = uc32_cpu_do_interrupt, -#endif /* !CONFIG_USER_ONLY */ -}; - -static void uc32_cpu_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - CPUClass *cc = CPU_CLASS(oc); - UniCore32CPUClass *ucc = UNICORE32_CPU_CLASS(oc); - - device_class_set_parent_realize(dc, uc32_cpu_realizefn, - &ucc->parent_realize); - - cc->class_by_name = uc32_cpu_class_by_name; - cc->has_work = uc32_cpu_has_work; - cc->dump_state = uc32_cpu_dump_state; - cc->set_pc = uc32_cpu_set_pc; - cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug; - dc->vmsd = &vmstate_uc32_cpu; - cc->tcg_ops = &uc32_tcg_ops; -} - -#define DEFINE_UNICORE32_CPU_TYPE(cpu_model, initfn) \ - { \ - .parent = TYPE_UNICORE32_CPU, \ - .instance_init = initfn, \ - .name = UNICORE32_CPU_TYPE_NAME(cpu_model), \ - } - -static const TypeInfo uc32_cpu_type_infos[] = { - { - .name = 
TYPE_UNICORE32_CPU, - .parent = TYPE_CPU, - .instance_size = sizeof(UniCore32CPU), - .instance_init = uc32_cpu_initfn, - .abstract = true, - .class_size = sizeof(UniCore32CPUClass), - .class_init = uc32_cpu_class_init, - }, - DEFINE_UNICORE32_CPU_TYPE("UniCore-II", unicore_ii_cpu_initfn), - DEFINE_UNICORE32_CPU_TYPE("any", uc32_any_cpu_initfn), -}; - -DEFINE_TYPES(uc32_cpu_type_infos) diff --git a/target/unicore32/cpu.h b/target/unicore32/cpu.h deleted file mode 100644 index 7a32e086ed..0000000000 --- a/target/unicore32/cpu.h +++ /dev/null @@ -1,168 +0,0 @@ -/* - * UniCore32 virtual CPU header - * - * Copyright (C) 2010-2012 Guan Xuetao - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation, or (at your option) any - * later version. See the COPYING file in the top-level directory. - */ - -#ifndef UNICORE32_CPU_H -#define UNICORE32_CPU_H - -#include "cpu-qom.h" -#include "exec/cpu-defs.h" - -typedef struct CPUUniCore32State { - /* Regs for current mode. */ - uint32_t regs[32]; - /* Frequently accessed ASR bits are stored separately for efficiently. - This contains all the other bits. Use asr_{read,write} to access - the whole ASR. */ - uint32_t uncached_asr; - uint32_t bsr; - - /* Banked registers. */ - uint32_t banked_bsr[6]; - uint32_t banked_r29[6]; - uint32_t banked_r30[6]; - - /* asr flag cache for faster execution */ - uint32_t CF; /* 0 or 1 */ - uint32_t VF; /* V is the bit 31. All other bits are undefined */ - uint32_t NF; /* N is bit 31. All other bits are undefined. */ - uint32_t ZF; /* Z set if zero. */ - - /* System control coprocessor (cp0) */ - struct { - uint32_t c0_cpuid; - uint32_t c0_cachetype; - uint32_t c1_sys; /* System control register. */ - uint32_t c2_base; /* MMU translation table base. */ - uint32_t c3_faultstatus; /* Fault status registers. */ - uint32_t c4_faultaddr; /* Fault address registers. */ - uint32_t c5_cacheop; /* Cache operation registers. */ - uint32_t c6_tlbop; /* TLB operation registers. */ - } cp0; - - /* UniCore-F64 coprocessor state. */ - struct { - float64 regs[16]; - uint32_t xregs[32]; - float_status fp_status; - } ucf64; - - /* Internal CPU feature flags. */ - uint32_t features; - -} CPUUniCore32State; - -/** - * UniCore32CPU: - * @env: #CPUUniCore32State - * - * A UniCore32 CPU. - */ -struct UniCore32CPU { - /*< private >*/ - CPUState parent_obj; - /*< public >*/ - - CPUNegativeOffsetState neg; - CPUUniCore32State env; -}; - - -void uc32_cpu_do_interrupt(CPUState *cpu); -bool uc32_cpu_exec_interrupt(CPUState *cpu, int int_req); -void uc32_cpu_dump_state(CPUState *cpu, FILE *f, int flags); -hwaddr uc32_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); - -#define ASR_M (0x1f) -#define ASR_MODE_USER (0x10) -#define ASR_MODE_INTR (0x12) -#define ASR_MODE_PRIV (0x13) -#define ASR_MODE_TRAP (0x17) -#define ASR_MODE_EXTN (0x1b) -#define ASR_MODE_SUSR (0x1f) -#define ASR_I (1 << 7) -#define ASR_V (1 << 28) -#define ASR_C (1 << 29) -#define ASR_Z (1 << 30) -#define ASR_N (1 << 31) -#define ASR_NZCV (ASR_N | ASR_Z | ASR_C | ASR_V) -#define ASR_RESERVED (~(ASR_M | ASR_I | ASR_NZCV)) - -#define UC32_EXCP_PRIV (1) -#define UC32_EXCP_ITRAP (2) -#define UC32_EXCP_DTRAP (3) -#define UC32_EXCP_INTR (4) - -/* Return the current ASR value. */ -target_ulong cpu_asr_read(CPUUniCore32State *env1); -/* Set the ASR. Note that some bits of mask must be all-set or all-clear. 
*/ -void cpu_asr_write(CPUUniCore32State *env1, target_ulong val, target_ulong mask); - -/* UniCore-F64 system registers. */ -#define UC32_UCF64_FPSCR (31) -#define UCF64_FPSCR_MASK (0x27ffffff) -#define UCF64_FPSCR_RND_MASK (0x7) -#define UCF64_FPSCR_RND(r) (((r) >> 0) & UCF64_FPSCR_RND_MASK) -#define UCF64_FPSCR_TRAPEN_MASK (0x7f) -#define UCF64_FPSCR_TRAPEN(r) (((r) >> 10) & UCF64_FPSCR_TRAPEN_MASK) -#define UCF64_FPSCR_FLAG_MASK (0x3ff) -#define UCF64_FPSCR_FLAG(r) (((r) >> 17) & UCF64_FPSCR_FLAG_MASK) -#define UCF64_FPSCR_FLAG_ZERO (1 << 17) -#define UCF64_FPSCR_FLAG_INFINITY (1 << 18) -#define UCF64_FPSCR_FLAG_INVALID (1 << 19) -#define UCF64_FPSCR_FLAG_UNDERFLOW (1 << 20) -#define UCF64_FPSCR_FLAG_OVERFLOW (1 << 21) -#define UCF64_FPSCR_FLAG_INEXACT (1 << 22) -#define UCF64_FPSCR_FLAG_HUGEINT (1 << 23) -#define UCF64_FPSCR_FLAG_DENORMAL (1 << 24) -#define UCF64_FPSCR_FLAG_UNIMP (1 << 25) -#define UCF64_FPSCR_FLAG_DIVZERO (1 << 26) - -#define UC32_HWCAP_CMOV 4 /* 1 << 2 */ -#define UC32_HWCAP_UCF64 8 /* 1 << 3 */ - -#define cpu_signal_handler uc32_cpu_signal_handler - -int uc32_cpu_signal_handler(int host_signum, void *pinfo, void *puc); - -/* MMU modes definitions */ -#define MMU_USER_IDX 1 -static inline int cpu_mmu_index(CPUUniCore32State *env, bool ifetch) -{ - return (env->uncached_asr & ASR_M) == ASR_MODE_USER ? 1 : 0; -} - -typedef CPUUniCore32State CPUArchState; -typedef UniCore32CPU ArchCPU; - -#include "exec/cpu-all.h" - -#define UNICORE32_CPU_TYPE_SUFFIX "-" TYPE_UNICORE32_CPU -#define UNICORE32_CPU_TYPE_NAME(model) model UNICORE32_CPU_TYPE_SUFFIX -#define CPU_RESOLVING_TYPE TYPE_UNICORE32_CPU - -static inline void cpu_get_tb_cpu_state(CPUUniCore32State *env, target_ulong *pc, - target_ulong *cs_base, uint32_t *flags) -{ - *pc = env->regs[31]; - *cs_base = 0; - *flags = 0; - if ((env->uncached_asr & ASR_M) != ASR_MODE_USER) { - *flags |= (1 << 6); - } -} - -bool uc32_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); -void uc32_translate_init(void); -void switch_mode(CPUUniCore32State *, int); - -#endif /* UNICORE32_CPU_H */ diff --git a/target/unicore32/helper.c b/target/unicore32/helper.c deleted file mode 100644 index 704393c27f..0000000000 --- a/target/unicore32/helper.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright (C) 2010-2012 Guan Xuetao - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Contributions from 2012-04-01 on are considered under GPL version 2, - * or (at your option) any later version. - */ - -#include "qemu/osdep.h" -#include "qemu/log.h" -#include "cpu.h" -#include "exec/exec-all.h" -#include "exec/helper-proto.h" -#include "semihosting/console.h" - -#undef DEBUG_UC32 - -#ifdef DEBUG_UC32 -#define DPRINTF(fmt, ...) printf("%s: " fmt , __func__, ## __VA_ARGS__) -#else -#define DPRINTF(fmt, ...) do {} while (0) -#endif - -#ifndef CONFIG_USER_ONLY -void helper_cp0_set(CPUUniCore32State *env, uint32_t val, uint32_t creg, - uint32_t cop) -{ - /* - * movc pp.nn, rn, #imm9 - * rn: UCOP_REG_D - * nn: UCOP_REG_N - * 1: sys control reg. - * 2: page table base reg. - * 3: data fault status reg. - * 4: insn fault status reg. - * 5: cache op. reg. - * 6: tlb op. reg. 
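
The removed UCF64_FPSCR_* macros above pack three fields into the UCF64 FPSCR word: the rounding mode in bits [2:0], trap enables in bits [16:10] and the sticky exception flags in bits [26:17]. A small decode sketch of that layout (illustrative function and parameter names):

#include <stdint.h>

/* Unpack the FPSCR fields exactly as the removed macros above did. */
static void decode_ucf64_fpscr(uint32_t fpscr, unsigned *rnd,
                               unsigned *trapen, unsigned *flags)
{
    *rnd    = fpscr & 0x7;            /* UCF64_FPSCR_RND    */
    *trapen = (fpscr >> 10) & 0x7f;   /* UCF64_FPSCR_TRAPEN */
    *flags  = (fpscr >> 17) & 0x3ff;  /* UCF64_FPSCR_FLAG   */
}
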
- * imm9: split UCOP_IMM10 with bit5 is 0 - */ - switch (creg) { - case 1: - if (cop != 0) { - goto unrecognized; - } - env->cp0.c1_sys = val; - break; - case 2: - if (cop != 0) { - goto unrecognized; - } - env->cp0.c2_base = val; - break; - case 3: - if (cop != 0) { - goto unrecognized; - } - env->cp0.c3_faultstatus = val; - break; - case 4: - if (cop != 0) { - goto unrecognized; - } - env->cp0.c4_faultaddr = val; - break; - case 5: - switch (cop) { - case 28: - DPRINTF("Invalidate Entire I&D cache\n"); - return; - case 20: - DPRINTF("Invalidate Entire Icache\n"); - return; - case 12: - DPRINTF("Invalidate Entire Dcache\n"); - return; - case 10: - DPRINTF("Clean Entire Dcache\n"); - return; - case 14: - DPRINTF("Flush Entire Dcache\n"); - return; - case 13: - DPRINTF("Invalidate Dcache line\n"); - return; - case 11: - DPRINTF("Clean Dcache line\n"); - return; - case 15: - DPRINTF("Flush Dcache line\n"); - return; - } - break; - case 6: - if ((cop <= 6) && (cop >= 2)) { - /* invalid all tlb */ - tlb_flush(env_cpu(env)); - return; - } - break; - default: - goto unrecognized; - } - return; -unrecognized: - qemu_log_mask(LOG_GUEST_ERROR, - "Wrong register (%d) or wrong operation (%d) in cp0_set!\n", - creg, cop); -} - -uint32_t helper_cp0_get(CPUUniCore32State *env, uint32_t creg, uint32_t cop) -{ - /* - * movc rd, pp.nn, #imm9 - * rd: UCOP_REG_D - * nn: UCOP_REG_N - * 0: cpuid and cachetype - * 1: sys control reg. - * 2: page table base reg. - * 3: data fault status reg. - * 4: insn fault status reg. - * imm9: split UCOP_IMM10 with bit5 is 0 - */ - switch (creg) { - case 0: - switch (cop) { - case 0: - return env->cp0.c0_cpuid; - case 1: - return env->cp0.c0_cachetype; - } - break; - case 1: - if (cop == 0) { - return env->cp0.c1_sys; - } - break; - case 2: - if (cop == 0) { - return env->cp0.c2_base; - } - break; - case 3: - if (cop == 0) { - return env->cp0.c3_faultstatus; - } - break; - case 4: - if (cop == 0) { - return env->cp0.c4_faultaddr; - } - break; - } - qemu_log_mask(LOG_GUEST_ERROR, - "Wrong register (%d) or wrong operation (%d) in cp0_set!\n", - creg, cop); - return 0; -} - -void helper_cp1_putc(target_ulong regval) -{ - const char c = regval; - - qemu_semihosting_log_out(&c, sizeof(c)); -} -#endif /* !CONFIG_USER_ONLY */ - -bool uc32_cpu_exec_interrupt(CPUState *cs, int interrupt_request) -{ - if (interrupt_request & CPU_INTERRUPT_HARD) { - UniCore32CPU *cpu = UNICORE32_CPU(cs); - CPUUniCore32State *env = &cpu->env; - - if (!(env->uncached_asr & ASR_I)) { - cs->exception_index = UC32_EXCP_INTR; - uc32_cpu_do_interrupt(cs); - return true; - } - } - return false; -} diff --git a/target/unicore32/helper.h b/target/unicore32/helper.h deleted file mode 100644 index a4a5d45d1d..0000000000 --- a/target/unicore32/helper.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (C) 2010-2012 Guan Xuetao - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation, or (at your option) any - * later version. See the COPYING file in the top-level directory. 
- */ - -#ifndef CONFIG_USER_ONLY -DEF_HELPER_4(cp0_set, void, env, i32, i32, i32) -DEF_HELPER_3(cp0_get, i32, env, i32, i32) -DEF_HELPER_1(cp1_putc, void, i32) -#endif - -DEF_HELPER_2(exception, void, env, i32) - -DEF_HELPER_3(asr_write, void, env, i32, i32) -DEF_HELPER_1(asr_read, i32, env) - -DEF_HELPER_2(get_user_reg, i32, env, i32) -DEF_HELPER_3(set_user_reg, void, env, i32, i32) - -DEF_HELPER_3(add_cc, i32, env, i32, i32) -DEF_HELPER_3(adc_cc, i32, env, i32, i32) -DEF_HELPER_3(sub_cc, i32, env, i32, i32) -DEF_HELPER_3(sbc_cc, i32, env, i32, i32) - -DEF_HELPER_2(shl, i32, i32, i32) -DEF_HELPER_2(shr, i32, i32, i32) -DEF_HELPER_2(sar, i32, i32, i32) -DEF_HELPER_3(shl_cc, i32, env, i32, i32) -DEF_HELPER_3(shr_cc, i32, env, i32, i32) -DEF_HELPER_3(sar_cc, i32, env, i32, i32) -DEF_HELPER_3(ror_cc, i32, env, i32, i32) - -DEF_HELPER_1(ucf64_get_fpscr, i32, env) -DEF_HELPER_2(ucf64_set_fpscr, void, env, i32) - -DEF_HELPER_3(ucf64_adds, f32, f32, f32, env) -DEF_HELPER_3(ucf64_addd, f64, f64, f64, env) -DEF_HELPER_3(ucf64_subs, f32, f32, f32, env) -DEF_HELPER_3(ucf64_subd, f64, f64, f64, env) -DEF_HELPER_3(ucf64_muls, f32, f32, f32, env) -DEF_HELPER_3(ucf64_muld, f64, f64, f64, env) -DEF_HELPER_3(ucf64_divs, f32, f32, f32, env) -DEF_HELPER_3(ucf64_divd, f64, f64, f64, env) -DEF_HELPER_1(ucf64_negs, f32, f32) -DEF_HELPER_1(ucf64_negd, f64, f64) -DEF_HELPER_1(ucf64_abss, f32, f32) -DEF_HELPER_1(ucf64_absd, f64, f64) -DEF_HELPER_4(ucf64_cmps, void, f32, f32, i32, env) -DEF_HELPER_4(ucf64_cmpd, void, f64, f64, i32, env) - -DEF_HELPER_2(ucf64_sf2df, f64, f32, env) -DEF_HELPER_2(ucf64_df2sf, f32, f64, env) - -DEF_HELPER_2(ucf64_si2sf, f32, f32, env) -DEF_HELPER_2(ucf64_si2df, f64, f32, env) - -DEF_HELPER_2(ucf64_sf2si, f32, f32, env) -DEF_HELPER_2(ucf64_df2si, f32, f64, env) diff --git a/target/unicore32/meson.build b/target/unicore32/meson.build deleted file mode 100644 index 0fa78772eb..0000000000 --- a/target/unicore32/meson.build +++ /dev/null @@ -1,14 +0,0 @@ -unicore32_ss = ss.source_set() -unicore32_ss.add(files( - 'cpu.c', - 'helper.c', - 'op_helper.c', - 'translate.c', - 'ucf64_helper.c', -), curses) - -unicore32_softmmu_ss = ss.source_set() -unicore32_softmmu_ss.add(files('softmmu.c')) - -target_arch += {'unicore32': unicore32_ss} -target_softmmu_arch += {'unicore32': unicore32_softmmu_ss} diff --git a/target/unicore32/op_helper.c b/target/unicore32/op_helper.c deleted file mode 100644 index eeaa78601a..0000000000 --- a/target/unicore32/op_helper.c +++ /dev/null @@ -1,244 +0,0 @@ -/* - * UniCore32 helper routines - * - * Copyright (C) 2010-2012 Guan Xuetao - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation, or (at your option) any - * later version. See the COPYING file in the top-level directory. 
- */ -#include "qemu/osdep.h" -#include "cpu.h" -#include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" - -#define SIGNBIT (uint32_t)0x80000000 -#define SIGNBIT64 ((uint64_t)1 << 63) - -void HELPER(exception)(CPUUniCore32State *env, uint32_t excp) -{ - CPUState *cs = env_cpu(env); - - cs->exception_index = excp; - cpu_loop_exit(cs); -} - -static target_ulong asr_read(CPUUniCore32State *env) -{ - int ZF; - ZF = (env->ZF == 0); - return env->uncached_asr | (env->NF & 0x80000000) | (ZF << 30) | - (env->CF << 29) | ((env->VF & 0x80000000) >> 3); -} - -target_ulong cpu_asr_read(CPUUniCore32State *env) -{ - return asr_read(env); -} - -target_ulong HELPER(asr_read)(CPUUniCore32State *env) -{ - return asr_read(env); -} - -static void asr_write(CPUUniCore32State *env, target_ulong val, - target_ulong mask) -{ - if (mask & ASR_NZCV) { - env->ZF = (~val) & ASR_Z; - env->NF = val; - env->CF = (val >> 29) & 1; - env->VF = (val << 3) & 0x80000000; - } - - if ((env->uncached_asr ^ val) & mask & ASR_M) { - switch_mode(env, val & ASR_M); - } - mask &= ~ASR_NZCV; - env->uncached_asr = (env->uncached_asr & ~mask) | (val & mask); -} - -void cpu_asr_write(CPUUniCore32State *env, target_ulong val, target_ulong mask) -{ - asr_write(env, val, mask); -} - -void HELPER(asr_write)(CPUUniCore32State *env, target_ulong val, - target_ulong mask) -{ - asr_write(env, val, mask); -} - -/* Access to user mode registers from privileged modes. */ -uint32_t HELPER(get_user_reg)(CPUUniCore32State *env, uint32_t regno) -{ - uint32_t val; - - if (regno == 29) { - val = env->banked_r29[0]; - } else if (regno == 30) { - val = env->banked_r30[0]; - } else { - val = env->regs[regno]; - } - return val; -} - -void HELPER(set_user_reg)(CPUUniCore32State *env, uint32_t regno, uint32_t val) -{ - if (regno == 29) { - env->banked_r29[0] = val; - } else if (regno == 30) { - env->banked_r30[0] = val; - } else { - env->regs[regno] = val; - } -} - -/* ??? Flag setting arithmetic is awkward because we need to do comparisons. - The only way to do that in TCG is a conditional branch, which clobbers - all our temporaries. For now implement these as helper functions. */ - -uint32_t HELPER(add_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b) -{ - uint32_t result; - result = a + b; - env->NF = env->ZF = result; - env->CF = result < a; - env->VF = (a ^ b ^ -1) & (a ^ result); - return result; -} - -uint32_t HELPER(adc_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b) -{ - uint32_t result; - if (!env->CF) { - result = a + b; - env->CF = result < a; - } else { - result = a + b + 1; - env->CF = result <= a; - } - env->VF = (a ^ b ^ -1) & (a ^ result); - env->NF = env->ZF = result; - return result; -} - -uint32_t HELPER(sub_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b) -{ - uint32_t result; - result = a - b; - env->NF = env->ZF = result; - env->CF = a >= b; - env->VF = (a ^ b) & (a ^ result); - return result; -} - -uint32_t HELPER(sbc_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b) -{ - uint32_t result; - if (!env->CF) { - result = a - b - 1; - env->CF = a > b; - } else { - result = a - b; - env->CF = a >= b; - } - env->VF = (a ^ b) & (a ^ result); - env->NF = env->ZF = result; - return result; -} - -/* Similarly for variable shift instructions. 
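
The removed add_cc/adc_cc/sub_cc helpers above derive the flag cache directly from the operands: carry out of an unsigned add is simply "result < a", and signed overflow is recorded when both operands share a sign that the result does not (the helpers keep that indication in bit 31 of VF). A stand-alone sketch of the addition case (illustrative names, not the removed helper itself):

#include <stdint.h>

/* Flag derivation used by the removed add_cc helper above. */
static uint32_t add_with_flags(uint32_t a, uint32_t b,
                               uint32_t *cf, uint32_t *vf)
{
    uint32_t result = a + b;

    *cf = result < a;                      /* unsigned carry out */
    *vf = (a ^ b ^ ~0u) & (a ^ result);    /* bit 31 set on signed overflow */
    return result;
}
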
*/ - -uint32_t HELPER(shl)(uint32_t x, uint32_t i) -{ - int shift = i & 0xff; - if (shift >= 32) { - return 0; - } - return x << shift; -} - -uint32_t HELPER(shr)(uint32_t x, uint32_t i) -{ - int shift = i & 0xff; - if (shift >= 32) { - return 0; - } - return (uint32_t)x >> shift; -} - -uint32_t HELPER(sar)(uint32_t x, uint32_t i) -{ - int shift = i & 0xff; - if (shift >= 32) { - shift = 31; - } - return (int32_t)x >> shift; -} - -uint32_t HELPER(shl_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i) -{ - int shift = i & 0xff; - if (shift >= 32) { - if (shift == 32) { - env->CF = x & 1; - } else { - env->CF = 0; - } - return 0; - } else if (shift != 0) { - env->CF = (x >> (32 - shift)) & 1; - return x << shift; - } - return x; -} - -uint32_t HELPER(shr_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i) -{ - int shift = i & 0xff; - if (shift >= 32) { - if (shift == 32) { - env->CF = (x >> 31) & 1; - } else { - env->CF = 0; - } - return 0; - } else if (shift != 0) { - env->CF = (x >> (shift - 1)) & 1; - return x >> shift; - } - return x; -} - -uint32_t HELPER(sar_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i) -{ - int shift = i & 0xff; - if (shift >= 32) { - env->CF = (x >> 31) & 1; - return (int32_t)x >> 31; - } else if (shift != 0) { - env->CF = (x >> (shift - 1)) & 1; - return (int32_t)x >> shift; - } - return x; -} - -uint32_t HELPER(ror_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i) -{ - int shift1, shift; - shift1 = i & 0xff; - shift = shift1 & 0x1f; - if (shift == 0) { - if (shift1 != 0) { - env->CF = (x >> 31) & 1; - } - return x; - } else { - env->CF = (x >> (shift - 1)) & 1; - return ((uint32_t)x >> shift) | (x << (32 - shift)); - } -} diff --git a/target/unicore32/softmmu.c b/target/unicore32/softmmu.c deleted file mode 100644 index cbdaa500b7..0000000000 --- a/target/unicore32/softmmu.c +++ /dev/null @@ -1,280 +0,0 @@ -/* - * Softmmu related functions - * - * Copyright (C) 2010-2012 Guan Xuetao - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation, or any later version. - * See the COPYING file in the top-level directory. - */ -#ifdef CONFIG_USER_ONLY -#error This file only exist under softmmu circumstance -#endif - -#include "qemu/osdep.h" -#include "cpu.h" -#include "exec/exec-all.h" -#include "qemu/error-report.h" - -#undef DEBUG_UC32 - -#ifdef DEBUG_UC32 -#define DPRINTF(fmt, ...) printf("%s: " fmt , __func__, ## __VA_ARGS__) -#else -#define DPRINTF(fmt, ...) do {} while (0) -#endif - -#define SUPERPAGE_SIZE (1 << 22) -#define UC32_PAGETABLE_READ (1 << 8) -#define UC32_PAGETABLE_WRITE (1 << 7) -#define UC32_PAGETABLE_EXEC (1 << 6) -#define UC32_PAGETABLE_EXIST (1 << 2) -#define PAGETABLE_TYPE(x) ((x) & 3) - - -/* Map CPU modes onto saved register banks. 
*/ -static inline int bank_number(CPUUniCore32State *env, int mode) -{ - switch (mode) { - case ASR_MODE_USER: - case ASR_MODE_SUSR: - return 0; - case ASR_MODE_PRIV: - return 1; - case ASR_MODE_TRAP: - return 2; - case ASR_MODE_EXTN: - return 3; - case ASR_MODE_INTR: - return 4; - } - cpu_abort(env_cpu(env), "Bad mode %x\n", mode); - return -1; -} - -void switch_mode(CPUUniCore32State *env, int mode) -{ - int old_mode; - int i; - - old_mode = env->uncached_asr & ASR_M; - if (mode == old_mode) { - return; - } - - i = bank_number(env, old_mode); - env->banked_r29[i] = env->regs[29]; - env->banked_r30[i] = env->regs[30]; - env->banked_bsr[i] = env->bsr; - - i = bank_number(env, mode); - env->regs[29] = env->banked_r29[i]; - env->regs[30] = env->banked_r30[i]; - env->bsr = env->banked_bsr[i]; -} - -/* Handle a CPU exception. */ -void uc32_cpu_do_interrupt(CPUState *cs) -{ - UniCore32CPU *cpu = UNICORE32_CPU(cs); - CPUUniCore32State *env = &cpu->env; - uint32_t addr; - int new_mode; - - switch (cs->exception_index) { - case UC32_EXCP_PRIV: - new_mode = ASR_MODE_PRIV; - addr = 0x08; - break; - case UC32_EXCP_ITRAP: - DPRINTF("itrap happened at %x\n", env->regs[31]); - new_mode = ASR_MODE_TRAP; - addr = 0x0c; - break; - case UC32_EXCP_DTRAP: - DPRINTF("dtrap happened at %x\n", env->regs[31]); - new_mode = ASR_MODE_TRAP; - addr = 0x10; - break; - case UC32_EXCP_INTR: - new_mode = ASR_MODE_INTR; - addr = 0x18; - break; - default: - cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); - return; - } - /* High vectors. */ - if (env->cp0.c1_sys & (1 << 13)) { - addr += 0xffff0000; - } - - switch_mode(env, new_mode); - env->bsr = cpu_asr_read(env); - env->uncached_asr = (env->uncached_asr & ~ASR_M) | new_mode; - env->uncached_asr |= ASR_I; - /* The PC already points to the proper instruction. */ - env->regs[30] = env->regs[31]; - env->regs[31] = addr; - cs->interrupt_request |= CPU_INTERRUPT_EXITTB; -} - -static int get_phys_addr_ucv2(CPUUniCore32State *env, uint32_t address, - int access_type, int is_user, uint32_t *phys_ptr, int *prot, - target_ulong *page_size) -{ - CPUState *cs = env_cpu(env); - int code; - uint32_t table; - uint32_t desc; - uint32_t phys_addr; - - /* Pagetable walk. */ - /* Lookup l1 descriptor. */ - table = env->cp0.c2_base & 0xfffff000; - table |= (address >> 20) & 0xffc; - desc = ldl_phys(cs->as, table); - code = 0; - switch (PAGETABLE_TYPE(desc)) { - case 3: - /* Superpage */ - if (!(desc & UC32_PAGETABLE_EXIST)) { - code = 0x0b; /* superpage miss */ - goto do_fault; - } - phys_addr = (desc & 0xffc00000) | (address & 0x003fffff); - *page_size = SUPERPAGE_SIZE; - break; - case 0: - /* Lookup l2 entry. */ - if (is_user) { - DPRINTF("PGD address %x, desc %x\n", table, desc); - } - if (!(desc & UC32_PAGETABLE_EXIST)) { - code = 0x05; /* second pagetable miss */ - goto do_fault; - } - table = (desc & 0xfffff000) | ((address >> 10) & 0xffc); - desc = ldl_phys(cs->as, table); - /* 4k page. */ - if (is_user) { - DPRINTF("PTE address %x, desc %x\n", table, desc); - } - if (!(desc & UC32_PAGETABLE_EXIST)) { - code = 0x08; /* page miss */ - goto do_fault; - } - switch (PAGETABLE_TYPE(desc)) { - case 0: - phys_addr = (desc & 0xfffff000) | (address & 0xfff); - *page_size = TARGET_PAGE_SIZE; - break; - default: - cpu_abort(cs, "wrong page type!"); - } - break; - default: - cpu_abort(cs, "wrong page type!"); - } - - *phys_ptr = phys_addr; - *prot = 0; - /* Check access permissions. 
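
The removed switch_mode() above banks r29, r30 and BSR per CPU mode: the outgoing mode's copies are parked in the banked_* arrays and the incoming mode's copies are loaded back into the live registers. A condensed sketch of that swap (structure and names are illustrative, not the real CPUUniCore32State layout):

#include <stdint.h>

struct banked_regs {
    uint32_t r29[5], r30[5], bsr[5];   /* one slot per bank_number() result */
};

/* Swap the banked registers when changing mode, as switch_mode() above did. */
static void switch_bank(struct banked_regs *bank, uint32_t regs[32],
                        uint32_t *bsr, int old_bank, int new_bank)
{
    bank->r29[old_bank] = regs[29];    /* save the outgoing mode */
    bank->r30[old_bank] = regs[30];
    bank->bsr[old_bank] = *bsr;

    regs[29] = bank->r29[new_bank];    /* restore the incoming mode */
    regs[30] = bank->r30[new_bank];
    *bsr = bank->bsr[new_bank];
}
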
*/ - if (desc & UC32_PAGETABLE_READ) { - *prot |= PAGE_READ; - } else { - if (is_user && (access_type == 0)) { - code = 0x11; /* access unreadable area */ - goto do_fault; - } - } - - if (desc & UC32_PAGETABLE_WRITE) { - *prot |= PAGE_WRITE; - } else { - if (is_user && (access_type == 1)) { - code = 0x12; /* access unwritable area */ - goto do_fault; - } - } - - if (desc & UC32_PAGETABLE_EXEC) { - *prot |= PAGE_EXEC; - } else { - if (is_user && (access_type == 2)) { - code = 0x13; /* access unexecutable area */ - goto do_fault; - } - } - -do_fault: - return code; -} - -bool uc32_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - UniCore32CPU *cpu = UNICORE32_CPU(cs); - CPUUniCore32State *env = &cpu->env; - uint32_t phys_addr; - target_ulong page_size; - int prot; - int ret, is_user; - - ret = 1; - is_user = mmu_idx == MMU_USER_IDX; - - if ((env->cp0.c1_sys & 1) == 0) { - /* MMU disabled. */ - phys_addr = address; - prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - page_size = TARGET_PAGE_SIZE; - ret = 0; - } else { - if ((address & (1 << 31)) || (is_user)) { - ret = get_phys_addr_ucv2(env, address, access_type, is_user, - &phys_addr, &prot, &page_size); - if (is_user) { - DPRINTF("user space access: ret %x, address %" VADDR_PRIx ", " - "access_type %x, phys_addr %x, prot %x\n", - ret, address, access_type, phys_addr, prot); - } - } else { - /*IO memory */ - phys_addr = address | (1 << 31); - prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - page_size = TARGET_PAGE_SIZE; - ret = 0; - } - } - - if (ret == 0) { - /* Map a single page. */ - phys_addr &= TARGET_PAGE_MASK; - address &= TARGET_PAGE_MASK; - tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size); - return true; - } - - if (probe) { - return false; - } - - env->cp0.c3_faultstatus = ret; - env->cp0.c4_faultaddr = address; - if (access_type == 2) { - cs->exception_index = UC32_EXCP_ITRAP; - } else { - cs->exception_index = UC32_EXCP_DTRAP; - } - cpu_loop_exit_restore(cs, retaddr); -} - -hwaddr uc32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) -{ - error_report("function uc32_cpu_get_phys_page_debug not " - "implemented, aborting"); - return -1; -} diff --git a/target/unicore32/translate.c b/target/unicore32/translate.c deleted file mode 100644 index 370709c9ea..0000000000 --- a/target/unicore32/translate.c +++ /dev/null @@ -1,2083 +0,0 @@ -/* - * UniCore32 translation - * - * Copyright (C) 2010-2012 Guan Xuetao - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation, or (at your option) any - * later version. See the COPYING file in the top-level directory. - */ -#include "qemu/osdep.h" - -#include "cpu.h" -#include "disas/disas.h" -#include "exec/exec-all.h" -#include "tcg/tcg-op.h" -#include "qemu/log.h" -#include "exec/cpu_ldst.h" -#include "exec/translator.h" -#include "qemu/qemu-print.h" - -#include "exec/helper-proto.h" -#include "exec/helper-gen.h" - -#include "trace-tcg.h" -#include "exec/log.h" - - -/* internal defines */ -typedef struct DisasContext { - target_ulong pc; - int is_jmp; - /* Nonzero if this instruction has been conditionally skipped. */ - int condjmp; - /* The label that will be jumped to when the instruction is skipped. 
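
In the removed get_phys_addr_ucv2() above, the descriptor's READ/WRITE/EXEC bits (bits 8, 7 and 6) map directly onto page protection flags, and a missing bit only raises a fault for user-mode accesses of the matching type. A sketch of the descriptor-to-protection mapping; the PROT_* values are stand-ins, not QEMU's PAGE_* constants:

#include <stdint.h>

#define UC32_PT_READ   (1u << 8)   /* UC32_PAGETABLE_READ in the hunk above */
#define UC32_PT_WRITE  (1u << 7)
#define UC32_PT_EXEC   (1u << 6)

#define PROT_READ_BIT  1           /* stand-ins for PAGE_READ/WRITE/EXEC */
#define PROT_WRITE_BIT 2
#define PROT_EXEC_BIT  4

/* Derive the protection bits the resulting TLB entry would carry. */
static int desc_to_prot(uint32_t desc)
{
    int prot = 0;

    if (desc & UC32_PT_READ)  { prot |= PROT_READ_BIT;  }
    if (desc & UC32_PT_WRITE) { prot |= PROT_WRITE_BIT; }
    if (desc & UC32_PT_EXEC)  { prot |= PROT_EXEC_BIT;  }
    return prot;
}
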
*/ - TCGLabel *condlabel; - TranslationBlock *tb; - int singlestep_enabled; -#ifndef CONFIG_USER_ONLY - int user; -#endif -} DisasContext; - -#ifndef CONFIG_USER_ONLY -#define IS_USER(s) (s->user) -#else -#define IS_USER(s) 1 -#endif - -/* is_jmp field values */ -#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ -#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */ -#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */ -/* These instructions trap after executing, so defer them until after the - conditional executions state has been updated. */ -#define DISAS_SYSCALL DISAS_TARGET_3 - -static TCGv_i32 cpu_R[32]; - -/* FIXME: These should be removed. */ -static TCGv cpu_F0s, cpu_F1s; -static TCGv_i64 cpu_F0d, cpu_F1d; - -#include "exec/gen-icount.h" - -static const char *regnames[] = { - "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07", - "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15", - "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", - "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" }; - -/* initialize TCG globals. */ -void uc32_translate_init(void) -{ - int i; - - for (i = 0; i < 32; i++) { - cpu_R[i] = tcg_global_mem_new_i32(cpu_env, - offsetof(CPUUniCore32State, regs[i]), regnames[i]); - } -} - -static int num_temps; - -/* Allocate a temporary variable. */ -static TCGv_i32 new_tmp(void) -{ - num_temps++; - return tcg_temp_new_i32(); -} - -/* Release a temporary variable. */ -static void dead_tmp(TCGv tmp) -{ - tcg_temp_free(tmp); - num_temps--; -} - -static inline TCGv load_cpu_offset(int offset) -{ - TCGv tmp = new_tmp(); - tcg_gen_ld_i32(tmp, cpu_env, offset); - return tmp; -} - -#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name)) - -static inline void store_cpu_offset(TCGv var, int offset) -{ - tcg_gen_st_i32(var, cpu_env, offset); - dead_tmp(var); -} - -#define store_cpu_field(var, name) \ - store_cpu_offset(var, offsetof(CPUUniCore32State, name)) - -/* Set a variable to the value of a CPU register. */ -static void load_reg_var(DisasContext *s, TCGv var, int reg) -{ - if (reg == 31) { - uint32_t addr; - /* normaly, since we updated PC */ - addr = (long)s->pc; - tcg_gen_movi_i32(var, addr); - } else { - tcg_gen_mov_i32(var, cpu_R[reg]); - } -} - -/* Create a new temporary and set it to the value of a CPU register. */ -static inline TCGv load_reg(DisasContext *s, int reg) -{ - TCGv tmp = new_tmp(); - load_reg_var(s, tmp, reg); - return tmp; -} - -/* Set a CPU register. The source must be a temporary and will be - marked as dead. */ -static void store_reg(DisasContext *s, int reg, TCGv var) -{ - if (reg == 31) { - tcg_gen_andi_i32(var, var, ~3); - s->is_jmp = DISAS_JUMP; - } - tcg_gen_mov_i32(cpu_R[reg], var); - dead_tmp(var); -} - -/* Value extensions. 
*/ -#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var) -#define gen_uxth(var) tcg_gen_ext16u_i32(var, var) -#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var) -#define gen_sxth(var) tcg_gen_ext16s_i32(var, var) - -#define UCOP_REG_M (((insn) >> 0) & 0x1f) -#define UCOP_REG_N (((insn) >> 19) & 0x1f) -#define UCOP_REG_D (((insn) >> 14) & 0x1f) -#define UCOP_REG_S (((insn) >> 9) & 0x1f) -#define UCOP_REG_LO (((insn) >> 14) & 0x1f) -#define UCOP_REG_HI (((insn) >> 9) & 0x1f) -#define UCOP_SH_OP (((insn) >> 6) & 0x03) -#define UCOP_SH_IM (((insn) >> 9) & 0x1f) -#define UCOP_OPCODES (((insn) >> 25) & 0x0f) -#define UCOP_IMM_9 (((insn) >> 0) & 0x1ff) -#define UCOP_IMM10 (((insn) >> 0) & 0x3ff) -#define UCOP_IMM14 (((insn) >> 0) & 0x3fff) -#define UCOP_COND (((insn) >> 25) & 0x0f) -#define UCOP_CMOV_COND (((insn) >> 19) & 0x0f) -#define UCOP_CPNUM (((insn) >> 10) & 0x0f) -#define UCOP_UCF64_FMT (((insn) >> 24) & 0x03) -#define UCOP_UCF64_FUNC (((insn) >> 6) & 0x0f) -#define UCOP_UCF64_COND (((insn) >> 6) & 0x0f) - -#define UCOP_SET(i) ((insn) & (1 << (i))) -#define UCOP_SET_P UCOP_SET(28) -#define UCOP_SET_U UCOP_SET(27) -#define UCOP_SET_B UCOP_SET(26) -#define UCOP_SET_W UCOP_SET(25) -#define UCOP_SET_L UCOP_SET(24) -#define UCOP_SET_S UCOP_SET(24) - -#define ILLEGAL cpu_abort(env_cpu(env), \ - "Illegal UniCore32 instruction %x at line %d!", \ - insn, __LINE__) - -#ifndef CONFIG_USER_ONLY -static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s, - uint32_t insn) -{ - TCGv tmp, tmp2, tmp3; - if ((insn & 0xfe000000) == 0xe0000000) { - tmp2 = new_tmp(); - tmp3 = new_tmp(); - tcg_gen_movi_i32(tmp2, UCOP_REG_N); - tcg_gen_movi_i32(tmp3, UCOP_IMM10); - if (UCOP_SET_L) { - tmp = new_tmp(); - gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3); - store_reg(s, UCOP_REG_D, tmp); - } else { - tmp = load_reg(s, UCOP_REG_D); - gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3); - dead_tmp(tmp); - } - dead_tmp(tmp2); - dead_tmp(tmp3); - return; - } - ILLEGAL; -} - -static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s, - uint32_t insn) -{ - TCGv tmp; - - if ((insn & 0xff003fff) == 0xe1000400) { - /* - * movc rd, pp.nn, #imm9 - * rd: UCOP_REG_D - * nn: UCOP_REG_N (must be 0) - * imm9: 0 - */ - if (UCOP_REG_N == 0) { - tmp = new_tmp(); - tcg_gen_movi_i32(tmp, 0); - store_reg(s, UCOP_REG_D, tmp); - return; - } else { - ILLEGAL; - } - } - if ((insn & 0xff003fff) == 0xe0000401) { - /* - * movc pp.nn, rn, #imm9 - * rn: UCOP_REG_D - * nn: UCOP_REG_N (must be 1) - * imm9: 1 - */ - if (UCOP_REG_N == 1) { - tmp = load_reg(s, UCOP_REG_D); - gen_helper_cp1_putc(tmp); - dead_tmp(tmp); - return; - } else { - ILLEGAL; - } - } - ILLEGAL; -} -#endif - -static inline void gen_set_asr(TCGv var, uint32_t mask) -{ - TCGv tmp_mask = tcg_const_i32(mask); - gen_helper_asr_write(cpu_env, var, tmp_mask); - tcg_temp_free_i32(tmp_mask); -} -/* Set NZCV flags from the high 4 bits of var. */ -#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV) - -static void gen_exception(int excp) -{ - TCGv tmp = new_tmp(); - tcg_gen_movi_i32(tmp, excp); - gen_helper_exception(cpu_env, tmp); - dead_tmp(tmp); -} - -#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF)) - -/* Set CF to the top bit of var. */ -static void gen_set_CF_bit31(TCGv var) -{ - TCGv tmp = new_tmp(); - tcg_gen_shri_i32(tmp, var, 31); - gen_set_CF(tmp); - dead_tmp(tmp); -} - -/* Set N and Z flags from var. 
*/ -static inline void gen_logic_CC(TCGv var) -{ - tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF)); - tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF)); -} - -/* dest = T0 + T1 + CF. */ -static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1) -{ - TCGv tmp; - tcg_gen_add_i32(dest, t0, t1); - tmp = load_cpu_field(CF); - tcg_gen_add_i32(dest, dest, tmp); - dead_tmp(tmp); -} - -/* dest = T0 - T1 + CF - 1. */ -static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1) -{ - TCGv tmp; - tcg_gen_sub_i32(dest, t0, t1); - tmp = load_cpu_field(CF); - tcg_gen_add_i32(dest, dest, tmp); - tcg_gen_subi_i32(dest, dest, 1); - dead_tmp(tmp); -} - -static void shifter_out_im(TCGv var, int shift) -{ - TCGv tmp = new_tmp(); - if (shift == 0) { - tcg_gen_andi_i32(tmp, var, 1); - } else { - tcg_gen_shri_i32(tmp, var, shift); - if (shift != 31) { - tcg_gen_andi_i32(tmp, tmp, 1); - } - } - gen_set_CF(tmp); - dead_tmp(tmp); -} - -/* Shift by immediate. Includes special handling for shift == 0. */ -static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift, - int flags) -{ - switch (shiftop) { - case 0: /* LSL */ - if (shift != 0) { - if (flags) { - shifter_out_im(var, 32 - shift); - } - tcg_gen_shli_i32(var, var, shift); - } - break; - case 1: /* LSR */ - if (shift == 0) { - if (flags) { - tcg_gen_shri_i32(var, var, 31); - gen_set_CF(var); - } - tcg_gen_movi_i32(var, 0); - } else { - if (flags) { - shifter_out_im(var, shift - 1); - } - tcg_gen_shri_i32(var, var, shift); - } - break; - case 2: /* ASR */ - if (shift == 0) { - shift = 32; - } - if (flags) { - shifter_out_im(var, shift - 1); - } - if (shift == 32) { - shift = 31; - } - tcg_gen_sari_i32(var, var, shift); - break; - case 3: /* ROR/RRX */ - if (shift != 0) { - if (flags) { - shifter_out_im(var, shift - 1); - } - tcg_gen_rotri_i32(var, var, shift); break; - } else { - TCGv tmp = load_cpu_field(CF); - if (flags) { - shifter_out_im(var, 0); - } - tcg_gen_shri_i32(var, var, 1); - tcg_gen_shli_i32(tmp, tmp, 31); - tcg_gen_or_i32(var, var, tmp); - dead_tmp(tmp); - } - } -}; - -static inline void gen_uc32_shift_reg(TCGv var, int shiftop, - TCGv shift, int flags) -{ - if (flags) { - switch (shiftop) { - case 0: - gen_helper_shl_cc(var, cpu_env, var, shift); - break; - case 1: - gen_helper_shr_cc(var, cpu_env, var, shift); - break; - case 2: - gen_helper_sar_cc(var, cpu_env, var, shift); - break; - case 3: - gen_helper_ror_cc(var, cpu_env, var, shift); - break; - } - } else { - switch (shiftop) { - case 0: - gen_helper_shl(var, var, shift); - break; - case 1: - gen_helper_shr(var, var, shift); - break; - case 2: - gen_helper_sar(var, var, shift); - break; - case 3: - tcg_gen_andi_i32(shift, shift, 0x1f); - tcg_gen_rotr_i32(var, var, shift); - break; - } - } - dead_tmp(shift); -} - -static void gen_test_cc(int cc, TCGLabel *label) -{ - TCGv tmp; - TCGv tmp2; - TCGLabel *inv; - - switch (cc) { - case 0: /* eq: Z */ - tmp = load_cpu_field(ZF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); - break; - case 1: /* ne: !Z */ - tmp = load_cpu_field(ZF); - tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); - break; - case 2: /* cs: C */ - tmp = load_cpu_field(CF); - tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); - break; - case 3: /* cc: !C */ - tmp = load_cpu_field(CF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); - break; - case 4: /* mi: N */ - tmp = load_cpu_field(NF); - tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); - break; - case 5: /* pl: !N */ - tmp = load_cpu_field(NF); - tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, 
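
The removed gen_uc32_shift_im() above gives an encoded shift amount of 0 special meanings: for LSR and ASR it denotes a shift by 32, and for ROR it selects rotate-through-carry (RRX). A plain-C rendering of the value computation only, with the flag updates omitted (illustrative names; carry_in is the current C flag, 0 or 1):

#include <stdint.h>

/* shiftop: 0 = LSL, 1 = LSR, 2 = ASR, 3 = ROR/RRX; shift is the 5-bit field. */
static uint32_t shift_by_immediate(uint32_t x, int shiftop, int shift,
                                   uint32_t carry_in)
{
    switch (shiftop) {
    case 0:  /* LSL */
        return shift ? x << shift : x;
    case 1:  /* LSR: #0 encodes a shift by 32, i.e. result 0 */
        return shift ? x >> shift : 0;
    case 2:  /* ASR: #0 encodes 32, which behaves like an arithmetic >> 31 */
        return (uint32_t)((int32_t)x >> (shift ? shift : 31));
    default: /* ROR, or RRX when the immediate is 0 */
        return shift ? (x >> shift) | (x << (32 - shift))
                     : (x >> 1) | (carry_in << 31);
    }
}
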
label); - break; - case 6: /* vs: V */ - tmp = load_cpu_field(VF); - tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); - break; - case 7: /* vc: !V */ - tmp = load_cpu_field(VF); - tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); - break; - case 8: /* hi: C && !Z */ - inv = gen_new_label(); - tmp = load_cpu_field(CF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); - dead_tmp(tmp); - tmp = load_cpu_field(ZF); - tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); - gen_set_label(inv); - break; - case 9: /* ls: !C || Z */ - tmp = load_cpu_field(CF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); - dead_tmp(tmp); - tmp = load_cpu_field(ZF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); - break; - case 10: /* ge: N == V -> N ^ V == 0 */ - tmp = load_cpu_field(VF); - tmp2 = load_cpu_field(NF); - tcg_gen_xor_i32(tmp, tmp, tmp2); - dead_tmp(tmp2); - tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); - break; - case 11: /* lt: N != V -> N ^ V != 0 */ - tmp = load_cpu_field(VF); - tmp2 = load_cpu_field(NF); - tcg_gen_xor_i32(tmp, tmp, tmp2); - dead_tmp(tmp2); - tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); - break; - case 12: /* gt: !Z && N == V */ - inv = gen_new_label(); - tmp = load_cpu_field(ZF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); - dead_tmp(tmp); - tmp = load_cpu_field(VF); - tmp2 = load_cpu_field(NF); - tcg_gen_xor_i32(tmp, tmp, tmp2); - dead_tmp(tmp2); - tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); - gen_set_label(inv); - break; - case 13: /* le: Z || N != V */ - tmp = load_cpu_field(ZF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); - dead_tmp(tmp); - tmp = load_cpu_field(VF); - tmp2 = load_cpu_field(NF); - tcg_gen_xor_i32(tmp, tmp, tmp2); - dead_tmp(tmp2); - tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); - break; - default: - fprintf(stderr, "Bad condition code 0x%x\n", cc); - abort(); - } - dead_tmp(tmp); -} - -static const uint8_t table_logic_cc[16] = { - 1, /* and */ 1, /* xor */ 0, /* sub */ 0, /* rsb */ - 0, /* add */ 0, /* adc */ 0, /* sbc */ 0, /* rsc */ - 1, /* andl */ 1, /* xorl */ 0, /* cmp */ 0, /* cmn */ - 1, /* orr */ 1, /* mov */ 1, /* bic */ 1, /* mvn */ -}; - -/* Set PC state from an immediate address. */ -static inline void gen_bx_im(DisasContext *s, uint32_t addr) -{ - s->is_jmp = DISAS_UPDATE; - tcg_gen_movi_i32(cpu_R[31], addr & ~3); -} - -/* Set PC state from var. var is marked as dead. 
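
The removed gen_test_cc() above evaluates each condition from the split flag cache (ZF holds the last result, so Z means ZF == 0; NF and VF keep their flag in bit 31; CF is stored as 0 or 1) and derives the signed comparisons from the sign of NF ^ VF. A plain-C equivalent of the more involved cases, illustrative only and not TCG code:

#include <stdint.h>

static int cond_holds(int cc, uint32_t NF, uint32_t ZF, uint32_t CF, uint32_t VF)
{
    int Z = (ZF == 0);
    int N = (int32_t)NF < 0;      /* flag lives in bit 31 */
    int V = (int32_t)VF < 0;
    int C = (CF != 0);

    switch (cc) {
    case 0:  return Z;                 /* eq */
    case 1:  return !Z;                /* ne */
    case 8:  return C && !Z;           /* hi */
    case 9:  return !C || Z;           /* ls */
    case 10: return N == V;            /* ge: sign of NF ^ VF is clear */
    case 11: return N != V;            /* lt */
    case 12: return !Z && (N == V);    /* gt */
    case 13: return Z || (N != V);     /* le */
    default: return 0;                 /* remaining codes omitted in this sketch */
    }
}
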
*/ -static inline void gen_bx(DisasContext *s, TCGv var) -{ - s->is_jmp = DISAS_UPDATE; - tcg_gen_andi_i32(cpu_R[31], var, ~3); - dead_tmp(var); -} - -static inline void store_reg_bx(DisasContext *s, int reg, TCGv var) -{ - store_reg(s, reg, var); -} - -static inline TCGv gen_ld8s(TCGv addr, int index) -{ - TCGv tmp = new_tmp(); - tcg_gen_qemu_ld8s(tmp, addr, index); - return tmp; -} - -static inline TCGv gen_ld8u(TCGv addr, int index) -{ - TCGv tmp = new_tmp(); - tcg_gen_qemu_ld8u(tmp, addr, index); - return tmp; -} - -static inline TCGv gen_ld16s(TCGv addr, int index) -{ - TCGv tmp = new_tmp(); - tcg_gen_qemu_ld16s(tmp, addr, index); - return tmp; -} - -static inline TCGv gen_ld16u(TCGv addr, int index) -{ - TCGv tmp = new_tmp(); - tcg_gen_qemu_ld16u(tmp, addr, index); - return tmp; -} - -static inline TCGv gen_ld32(TCGv addr, int index) -{ - TCGv tmp = new_tmp(); - tcg_gen_qemu_ld32u(tmp, addr, index); - return tmp; -} - -static inline void gen_st8(TCGv val, TCGv addr, int index) -{ - tcg_gen_qemu_st8(val, addr, index); - dead_tmp(val); -} - -static inline void gen_st16(TCGv val, TCGv addr, int index) -{ - tcg_gen_qemu_st16(val, addr, index); - dead_tmp(val); -} - -static inline void gen_st32(TCGv val, TCGv addr, int index) -{ - tcg_gen_qemu_st32(val, addr, index); - dead_tmp(val); -} - -static inline void gen_set_pc_im(uint32_t val) -{ - tcg_gen_movi_i32(cpu_R[31], val); -} - -/* Force a TB lookup after an instruction that changes the CPU state. */ -static inline void gen_lookup_tb(DisasContext *s) -{ - tcg_gen_movi_i32(cpu_R[31], s->pc & ~1); - s->is_jmp = DISAS_UPDATE; -} - -static inline void gen_add_data_offset(DisasContext *s, unsigned int insn, - TCGv var) -{ - int val; - TCGv offset; - - if (UCOP_SET(29)) { - /* immediate */ - val = UCOP_IMM14; - if (!UCOP_SET_U) { - val = -val; - } - if (val != 0) { - tcg_gen_addi_i32(var, var, val); - } - } else { - /* shift/register */ - offset = load_reg(s, UCOP_REG_M); - gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0); - if (!UCOP_SET_U) { - tcg_gen_sub_i32(var, var, offset); - } else { - tcg_gen_add_i32(var, var, offset); - } - dead_tmp(offset); - } -} - -static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn, - TCGv var) -{ - int val; - TCGv offset; - - if (UCOP_SET(26)) { - /* immediate */ - val = (insn & 0x1f) | ((insn >> 4) & 0x3e0); - if (!UCOP_SET_U) { - val = -val; - } - if (val != 0) { - tcg_gen_addi_i32(var, var, val); - } - } else { - /* register */ - offset = load_reg(s, UCOP_REG_M); - if (!UCOP_SET_U) { - tcg_gen_sub_i32(var, var, offset); - } else { - tcg_gen_add_i32(var, var, offset); - } - dead_tmp(offset); - } -} - -static inline long ucf64_reg_offset(int reg) -{ - if (reg & 1) { - return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1]) - + offsetof(CPU_DoubleU, l.upper); - } else { - return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1]) - + offsetof(CPU_DoubleU, l.lower); - } -} - -#define ucf64_gen_ld32(reg) load_cpu_offset(ucf64_reg_offset(reg)) -#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg)) - -/* UniCore-F64 single load/store I_offset */ -static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - int offset; - TCGv tmp; - TCGv addr; - - addr = load_reg(s, UCOP_REG_N); - if (!UCOP_SET_P && !UCOP_SET_W) { - ILLEGAL; - } - - if (UCOP_SET_P) { - offset = UCOP_IMM10 << 2; - if (!UCOP_SET_U) { - offset = -offset; - } - if (offset != 0) { - tcg_gen_addi_i32(addr, addr, offset); - } - } - - if (UCOP_SET_L) { /* load */ - tmp = 
gen_ld32(addr, IS_USER(s)); - ucf64_gen_st32(tmp, UCOP_REG_D); - } else { /* store */ - tmp = ucf64_gen_ld32(UCOP_REG_D); - gen_st32(tmp, addr, IS_USER(s)); - } - - if (!UCOP_SET_P) { - offset = UCOP_IMM10 << 2; - if (!UCOP_SET_U) { - offset = -offset; - } - if (offset != 0) { - tcg_gen_addi_i32(addr, addr, offset); - } - } - if (UCOP_SET_W) { - store_reg(s, UCOP_REG_N, addr); - } else { - dead_tmp(addr); - } -} - -/* UniCore-F64 load/store multiple words */ -static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - unsigned int i; - int j, n, freg; - TCGv tmp; - TCGv addr; - - if (UCOP_REG_D != 0) { - ILLEGAL; - } - if (UCOP_REG_N == 31) { - ILLEGAL; - } - if ((insn << 24) == 0) { - ILLEGAL; - } - - addr = load_reg(s, UCOP_REG_N); - - n = 0; - for (i = 0; i < 8; i++) { - if (UCOP_SET(i)) { - n++; - } - } - - if (UCOP_SET_U) { - if (UCOP_SET_P) { /* pre increment */ - tcg_gen_addi_i32(addr, addr, 4); - } /* unnecessary to do anything when post increment */ - } else { - if (UCOP_SET_P) { /* pre decrement */ - tcg_gen_addi_i32(addr, addr, -(n * 4)); - } else { /* post decrement */ - if (n != 1) { - tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); - } - } - } - - freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */ - - for (i = 0, j = 0; i < 8; i++, freg++) { - if (!UCOP_SET(i)) { - continue; - } - - if (UCOP_SET_L) { /* load */ - tmp = gen_ld32(addr, IS_USER(s)); - ucf64_gen_st32(tmp, freg); - } else { /* store */ - tmp = ucf64_gen_ld32(freg); - gen_st32(tmp, addr, IS_USER(s)); - } - - j++; - /* unnecessary to add after the last transfer */ - if (j != n) { - tcg_gen_addi_i32(addr, addr, 4); - } - } - - if (UCOP_SET_W) { /* write back */ - if (UCOP_SET_U) { - if (!UCOP_SET_P) { /* post increment */ - tcg_gen_addi_i32(addr, addr, 4); - } /* unnecessary to do anything when pre increment */ - } else { - if (UCOP_SET_P) { - /* pre decrement */ - if (n != 1) { - tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); - } - } else { - /* post decrement */ - tcg_gen_addi_i32(addr, addr, -(n * 4)); - } - } - store_reg(s, UCOP_REG_N, addr); - } else { - dead_tmp(addr); - } -} - -/* UniCore-F64 mrc/mcr */ -static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - TCGv tmp; - - if ((insn & 0xfe0003ff) == 0xe2000000) { - /* control register */ - if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) { - ILLEGAL; - } - if (UCOP_SET(24)) { - /* CFF */ - tmp = new_tmp(); - gen_helper_ucf64_get_fpscr(tmp, cpu_env); - store_reg(s, UCOP_REG_D, tmp); - } else { - /* CTF */ - tmp = load_reg(s, UCOP_REG_D); - gen_helper_ucf64_set_fpscr(cpu_env, tmp); - dead_tmp(tmp); - gen_lookup_tb(s); - } - return; - } - if ((insn & 0xfe0003ff) == 0xe0000000) { - /* general register */ - if (UCOP_REG_D == 31) { - ILLEGAL; - } - if (UCOP_SET(24)) { /* MFF */ - tmp = ucf64_gen_ld32(UCOP_REG_N); - store_reg(s, UCOP_REG_D, tmp); - } else { /* MTF */ - tmp = load_reg(s, UCOP_REG_D); - ucf64_gen_st32(tmp, UCOP_REG_N); - } - return; - } - if ((insn & 0xfb000000) == 0xe9000000) { - /* MFFC */ - if (UCOP_REG_D != 31) { - ILLEGAL; - } - if (UCOP_UCF64_COND & 0x8) { - ILLEGAL; - } - - tmp = new_tmp(); - tcg_gen_movi_i32(tmp, UCOP_UCF64_COND); - if (UCOP_SET(26)) { - tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N)); - tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M)); - gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env); - } else { - tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N)); - tcg_gen_ld_i32(cpu_F1s, cpu_env, 
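
The removed do_ucf64_ldst_m() above adjusts the base address once before the transfer loop, so that an n-register block lands correctly for every pre/post increment/decrement combination. A sketch of where the first access ends up (illustrative names; up and pre mirror the U and P bits tested in the hunk above):

#include <stdint.h>

static uint32_t first_transfer_addr(uint32_t base, int n, int up, int pre)
{
    if (up) {
        return pre ? base + 4 : base;                /* increment before/after */
    }
    return pre ? base - n * 4 : base - (n - 1) * 4;  /* decrement before/after */
}
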
ucf64_reg_offset(UCOP_REG_M)); - gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env); - } - dead_tmp(tmp); - return; - } - ILLEGAL; -} - -/* UniCore-F64 convert instructions */ -static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - if (UCOP_UCF64_FMT == 3) { - ILLEGAL; - } - if (UCOP_REG_N != 0) { - ILLEGAL; - } - switch (UCOP_UCF64_FUNC) { - case 0: /* cvt.s */ - switch (UCOP_UCF64_FMT) { - case 1 /* d */: - tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M)); - gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env); - tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D)); - break; - case 2 /* w */: - tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); - gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env); - tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D)); - break; - default /* s */: - ILLEGAL; - break; - } - break; - case 1: /* cvt.d */ - switch (UCOP_UCF64_FMT) { - case 0 /* s */: - tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); - gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env); - tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D)); - break; - case 2 /* w */: - tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); - gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env); - tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D)); - break; - default /* d */: - ILLEGAL; - break; - } - break; - case 4: /* cvt.w */ - switch (UCOP_UCF64_FMT) { - case 0 /* s */: - tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); - gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env); - tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D)); - break; - case 1 /* d */: - tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M)); - gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env); - tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D)); - break; - default /* w */: - ILLEGAL; - break; - } - break; - default: - ILLEGAL; - } -} - -/* UniCore-F64 compare instructions */ -static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - if (UCOP_SET(25)) { - ILLEGAL; - } - if (UCOP_REG_D != 0) { - ILLEGAL; - } - - ILLEGAL; /* TODO */ - if (UCOP_SET(24)) { - tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N)); - tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M)); - /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */ - } else { - tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N)); - tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); - /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */ - } -} - -#define gen_helper_ucf64_movs(x, y) do { } while (0) -#define gen_helper_ucf64_movd(x, y) do { } while (0) - -#define UCF64_OP1(name) do { \ - if (UCOP_REG_N != 0) { \ - ILLEGAL; \ - } \ - switch (UCOP_UCF64_FMT) { \ - case 0 /* s */: \ - tcg_gen_ld_i32(cpu_F0s, cpu_env, \ - ucf64_reg_offset(UCOP_REG_M)); \ - gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \ - tcg_gen_st_i32(cpu_F0s, cpu_env, \ - ucf64_reg_offset(UCOP_REG_D)); \ - break; \ - case 1 /* d */: \ - tcg_gen_ld_i64(cpu_F0d, cpu_env, \ - ucf64_reg_offset(UCOP_REG_M)); \ - gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \ - tcg_gen_st_i64(cpu_F0d, cpu_env, \ - ucf64_reg_offset(UCOP_REG_D)); \ - break; \ - case 2 /* w */: \ - ILLEGAL; \ - break; \ - } \ - } while (0) - -#define UCF64_OP2(name) do { \ - switch (UCOP_UCF64_FMT) { \ - case 0 /* s */: \ - tcg_gen_ld_i32(cpu_F0s, cpu_env, \ - ucf64_reg_offset(UCOP_REG_N)); \ - tcg_gen_ld_i32(cpu_F1s, 
cpu_env, \ - ucf64_reg_offset(UCOP_REG_M)); \ - gen_helper_ucf64_##name##s(cpu_F0s, \ - cpu_F0s, cpu_F1s, cpu_env); \ - tcg_gen_st_i32(cpu_F0s, cpu_env, \ - ucf64_reg_offset(UCOP_REG_D)); \ - break; \ - case 1 /* d */: \ - tcg_gen_ld_i64(cpu_F0d, cpu_env, \ - ucf64_reg_offset(UCOP_REG_N)); \ - tcg_gen_ld_i64(cpu_F1d, cpu_env, \ - ucf64_reg_offset(UCOP_REG_M)); \ - gen_helper_ucf64_##name##d(cpu_F0d, \ - cpu_F0d, cpu_F1d, cpu_env); \ - tcg_gen_st_i64(cpu_F0d, cpu_env, \ - ucf64_reg_offset(UCOP_REG_D)); \ - break; \ - case 2 /* w */: \ - ILLEGAL; \ - break; \ - } \ - } while (0) - -/* UniCore-F64 data processing */ -static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - if (UCOP_UCF64_FMT == 3) { - ILLEGAL; - } - switch (UCOP_UCF64_FUNC) { - case 0: /* add */ - UCF64_OP2(add); - break; - case 1: /* sub */ - UCF64_OP2(sub); - break; - case 2: /* mul */ - UCF64_OP2(mul); - break; - case 4: /* div */ - UCF64_OP2(div); - break; - case 5: /* abs */ - UCF64_OP1(abs); - break; - case 6: /* mov */ - UCF64_OP1(mov); - break; - case 7: /* neg */ - UCF64_OP1(neg); - break; - default: - ILLEGAL; - } -} - -/* Disassemble an F64 instruction */ -static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - if (!UCOP_SET(29)) { - if (UCOP_SET(26)) { - do_ucf64_ldst_m(env, s, insn); - } else { - do_ucf64_ldst_i(env, s, insn); - } - } else { - if (UCOP_SET(5)) { - switch ((insn >> 26) & 0x3) { - case 0: - do_ucf64_datap(env, s, insn); - break; - case 1: - ILLEGAL; - break; - case 2: - do_ucf64_fcvt(env, s, insn); - break; - case 3: - do_ucf64_fcmp(env, s, insn); - break; - } - } else { - do_ucf64_trans(env, s, insn); - } - } -} - -static inline bool use_goto_tb(DisasContext *s, uint32_t dest) -{ -#ifndef CONFIG_USER_ONLY - return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); -#else - return true; -#endif -} - -static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest) -{ - if (use_goto_tb(s, dest)) { - tcg_gen_goto_tb(n); - gen_set_pc_im(dest); - tcg_gen_exit_tb(s->tb, n); - } else { - gen_set_pc_im(dest); - tcg_gen_exit_tb(NULL, 0); - } -} - -static inline void gen_jmp(DisasContext *s, uint32_t dest) -{ - if (unlikely(s->singlestep_enabled)) { - /* An indirect jump so that we still trigger the debug exception. */ - gen_bx_im(s, dest); - } else { - gen_goto_tb(s, 0, dest); - s->is_jmp = DISAS_TB_JUMP; - } -} - -/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */ -static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0) -{ - TCGv tmp; - if (bsr) { - /* ??? This is also undefined in system mode. */ - if (IS_USER(s)) { - return 1; - } - - tmp = load_cpu_field(bsr); - tcg_gen_andi_i32(tmp, tmp, ~mask); - tcg_gen_andi_i32(t0, t0, mask); - tcg_gen_or_i32(tmp, tmp, t0); - store_cpu_field(tmp, bsr); - } else { - gen_set_asr(t0, mask); - } - dead_tmp(t0); - gen_lookup_tb(s); - return 0; -} - -/* Generate an old-style exception return. Marks pc as dead. 
*/ -static void gen_exception_return(DisasContext *s, TCGv pc) -{ - TCGv tmp; - store_reg(s, 31, pc); - tmp = load_cpu_field(bsr); - gen_set_asr(tmp, 0xffffffff); - dead_tmp(tmp); - s->is_jmp = DISAS_UPDATE; -} - -static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s, - uint32_t insn) -{ - switch (UCOP_CPNUM) { -#ifndef CONFIG_USER_ONLY - case 0: - disas_cp0_insn(env, s, insn); - break; - case 1: - disas_ocd_insn(env, s, insn); - break; -#endif - case 2: - disas_ucf64_insn(env, s, insn); - break; - default: - /* Unknown coprocessor. */ - cpu_abort(env_cpu(env), "Unknown coprocessor!"); - } -} - -/* data processing instructions */ -static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - TCGv tmp; - TCGv tmp2; - int logic_cc; - - if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) { - if (UCOP_SET(23)) { /* CMOV instructions */ - if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) { - ILLEGAL; - } - /* if not always execute, we generate a conditional jump to - next instruction */ - s->condlabel = gen_new_label(); - gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel); - s->condjmp = 1; - } - } - - logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24); - - if (UCOP_SET(29)) { - unsigned int val; - /* immediate operand */ - val = UCOP_IMM_9; - if (UCOP_SH_IM) { - val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM)); - } - tmp2 = new_tmp(); - tcg_gen_movi_i32(tmp2, val); - if (logic_cc && UCOP_SH_IM) { - gen_set_CF_bit31(tmp2); - } - } else { - /* register */ - tmp2 = load_reg(s, UCOP_REG_M); - if (UCOP_SET(5)) { - tmp = load_reg(s, UCOP_REG_S); - gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc); - } else { - gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc); - } - } - - if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) { - tmp = load_reg(s, UCOP_REG_N); - } else { - tmp = NULL; - } - - switch (UCOP_OPCODES) { - case 0x00: - tcg_gen_and_i32(tmp, tmp, tmp2); - if (logic_cc) { - gen_logic_CC(tmp); - } - store_reg_bx(s, UCOP_REG_D, tmp); - break; - case 0x01: - tcg_gen_xor_i32(tmp, tmp, tmp2); - if (logic_cc) { - gen_logic_CC(tmp); - } - store_reg_bx(s, UCOP_REG_D, tmp); - break; - case 0x02: - if (UCOP_SET_S && UCOP_REG_D == 31) { - /* SUBS r31, ... is used for exception return. 
*/ - if (IS_USER(s)) { - ILLEGAL; - } - gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); - gen_exception_return(s, tmp); - } else { - if (UCOP_SET_S) { - gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); - } else { - tcg_gen_sub_i32(tmp, tmp, tmp2); - } - store_reg_bx(s, UCOP_REG_D, tmp); - } - break; - case 0x03: - if (UCOP_SET_S) { - gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp); - } else { - tcg_gen_sub_i32(tmp, tmp2, tmp); - } - store_reg_bx(s, UCOP_REG_D, tmp); - break; - case 0x04: - if (UCOP_SET_S) { - gen_helper_add_cc(tmp, cpu_env, tmp, tmp2); - } else { - tcg_gen_add_i32(tmp, tmp, tmp2); - } - store_reg_bx(s, UCOP_REG_D, tmp); - break; - case 0x05: - if (UCOP_SET_S) { - gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2); - } else { - gen_add_carry(tmp, tmp, tmp2); - } - store_reg_bx(s, UCOP_REG_D, tmp); - break; - case 0x06: - if (UCOP_SET_S) { - gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2); - } else { - gen_sub_carry(tmp, tmp, tmp2); - } - store_reg_bx(s, UCOP_REG_D, tmp); - break; - case 0x07: - if (UCOP_SET_S) { - gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp); - } else { - gen_sub_carry(tmp, tmp2, tmp); - } - store_reg_bx(s, UCOP_REG_D, tmp); - break; - case 0x08: - if (UCOP_SET_S) { - tcg_gen_and_i32(tmp, tmp, tmp2); - gen_logic_CC(tmp); - } - dead_tmp(tmp); - break; - case 0x09: - if (UCOP_SET_S) { - tcg_gen_xor_i32(tmp, tmp, tmp2); - gen_logic_CC(tmp); - } - dead_tmp(tmp); - break; - case 0x0a: - if (UCOP_SET_S) { - gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); - } - dead_tmp(tmp); - break; - case 0x0b: - if (UCOP_SET_S) { - gen_helper_add_cc(tmp, cpu_env, tmp, tmp2); - } - dead_tmp(tmp); - break; - case 0x0c: - tcg_gen_or_i32(tmp, tmp, tmp2); - if (logic_cc) { - gen_logic_CC(tmp); - } - store_reg_bx(s, UCOP_REG_D, tmp); - break; - case 0x0d: - if (logic_cc && UCOP_REG_D == 31) { - /* MOVS r31, ... is used for exception return. 
*/ - if (IS_USER(s)) { - ILLEGAL; - } - gen_exception_return(s, tmp2); - } else { - if (logic_cc) { - gen_logic_CC(tmp2); - } - store_reg_bx(s, UCOP_REG_D, tmp2); - } - break; - case 0x0e: - tcg_gen_andc_i32(tmp, tmp, tmp2); - if (logic_cc) { - gen_logic_CC(tmp); - } - store_reg_bx(s, UCOP_REG_D, tmp); - break; - default: - case 0x0f: - tcg_gen_not_i32(tmp2, tmp2); - if (logic_cc) { - gen_logic_CC(tmp2); - } - store_reg_bx(s, UCOP_REG_D, tmp2); - break; - } - if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) { - dead_tmp(tmp2); - } -} - -/* multiply */ -static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - TCGv tmp, tmp2, tmp3, tmp4; - - if (UCOP_SET(27)) { - /* 64 bit mul */ - tmp = load_reg(s, UCOP_REG_M); - tmp2 = load_reg(s, UCOP_REG_N); - if (UCOP_SET(26)) { - tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2); - } else { - tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2); - } - if (UCOP_SET(25)) { /* mult accumulate */ - tmp3 = load_reg(s, UCOP_REG_LO); - tmp4 = load_reg(s, UCOP_REG_HI); - tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4); - dead_tmp(tmp3); - dead_tmp(tmp4); - } - store_reg(s, UCOP_REG_LO, tmp); - store_reg(s, UCOP_REG_HI, tmp2); - } else { - /* 32 bit mul */ - tmp = load_reg(s, UCOP_REG_M); - tmp2 = load_reg(s, UCOP_REG_N); - tcg_gen_mul_i32(tmp, tmp, tmp2); - dead_tmp(tmp2); - if (UCOP_SET(25)) { - /* Add */ - tmp2 = load_reg(s, UCOP_REG_S); - tcg_gen_add_i32(tmp, tmp, tmp2); - dead_tmp(tmp2); - } - if (UCOP_SET_S) { - gen_logic_CC(tmp); - } - store_reg(s, UCOP_REG_D, tmp); - } -} - -/* miscellaneous instructions */ -static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - unsigned int val; - TCGv tmp; - - if ((insn & 0xffffffe0) == 0x10ffc120) { - /* Trivial implementation equivalent to bx. */ - tmp = load_reg(s, UCOP_REG_M); - gen_bx(s, tmp); - return; - } - - if ((insn & 0xfbffc000) == 0x30ffc000) { - /* PSR = immediate */ - val = UCOP_IMM_9; - if (UCOP_SH_IM) { - val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM)); - } - tmp = new_tmp(); - tcg_gen_movi_i32(tmp, val); - if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) { - ILLEGAL; - } - return; - } - - if ((insn & 0xfbffffe0) == 0x12ffc020) { - /* PSR.flag = reg */ - tmp = load_reg(s, UCOP_REG_M); - if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) { - ILLEGAL; - } - return; - } - - if ((insn & 0xfbffffe0) == 0x10ffc020) { - /* PSR = reg */ - tmp = load_reg(s, UCOP_REG_M); - if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) { - ILLEGAL; - } - return; - } - - if ((insn & 0xfbf83fff) == 0x10f80000) { - /* reg = PSR */ - if (UCOP_SET_B) { - if (IS_USER(s)) { - ILLEGAL; - } - tmp = load_cpu_field(bsr); - } else { - tmp = new_tmp(); - gen_helper_asr_read(tmp, cpu_env); - } - store_reg(s, UCOP_REG_D, tmp); - return; - } - - if ((insn & 0xfbf83fe0) == 0x12f80120) { - /* clz */ - tmp = load_reg(s, UCOP_REG_M); - if (UCOP_SET(26)) { - /* clo */ - tcg_gen_not_i32(tmp, tmp); - } - tcg_gen_clzi_i32(tmp, tmp, 32); - store_reg(s, UCOP_REG_D, tmp); - return; - } - - /* otherwise */ - ILLEGAL; -} - -/* load/store I_offset and R_offset */ -static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - unsigned int mmu_idx; - TCGv tmp; - TCGv tmp2; - - tmp2 = load_reg(s, UCOP_REG_N); - mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W)); - - /* immediate */ - if (UCOP_SET_P) { - gen_add_data_offset(s, insn, tmp2); - } - - if (UCOP_SET_L) { - /* load */ - if (UCOP_SET_B) { - tmp = gen_ld8u(tmp2, mmu_idx); - } else { - tmp = gen_ld32(tmp2, mmu_idx); - } - } else { - /* 
store */ - tmp = load_reg(s, UCOP_REG_D); - if (UCOP_SET_B) { - gen_st8(tmp, tmp2, mmu_idx); - } else { - gen_st32(tmp, tmp2, mmu_idx); - } - } - if (!UCOP_SET_P) { - gen_add_data_offset(s, insn, tmp2); - store_reg(s, UCOP_REG_N, tmp2); - } else if (UCOP_SET_W) { - store_reg(s, UCOP_REG_N, tmp2); - } else { - dead_tmp(tmp2); - } - if (UCOP_SET_L) { - /* Complete the load. */ - if (UCOP_REG_D == 31) { - gen_bx(s, tmp); - } else { - store_reg(s, UCOP_REG_D, tmp); - } - } -} - -/* SWP instruction */ -static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - TCGv addr; - TCGv tmp; - TCGv tmp2; - - if ((insn & 0xff003fe0) != 0x40000120) { - ILLEGAL; - } - - /* ??? This is not really atomic. However we know - we never have multiple CPUs running in parallel, - so it is good enough. */ - addr = load_reg(s, UCOP_REG_N); - tmp = load_reg(s, UCOP_REG_M); - if (UCOP_SET_B) { - tmp2 = gen_ld8u(addr, IS_USER(s)); - gen_st8(tmp, addr, IS_USER(s)); - } else { - tmp2 = gen_ld32(addr, IS_USER(s)); - gen_st32(tmp, addr, IS_USER(s)); - } - dead_tmp(addr); - store_reg(s, UCOP_REG_D, tmp2); -} - -/* load/store hw/sb */ -static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - TCGv addr; - TCGv tmp; - - if (UCOP_SH_OP == 0) { - do_swap(env, s, insn); - return; - } - - addr = load_reg(s, UCOP_REG_N); - if (UCOP_SET_P) { - gen_add_datah_offset(s, insn, addr); - } - - if (UCOP_SET_L) { /* load */ - switch (UCOP_SH_OP) { - case 1: - tmp = gen_ld16u(addr, IS_USER(s)); - break; - case 2: - tmp = gen_ld8s(addr, IS_USER(s)); - break; - default: /* see do_swap */ - case 3: - tmp = gen_ld16s(addr, IS_USER(s)); - break; - } - } else { /* store */ - if (UCOP_SH_OP != 1) { - ILLEGAL; - } - tmp = load_reg(s, UCOP_REG_D); - gen_st16(tmp, addr, IS_USER(s)); - } - /* Perform base writeback before the loaded value to - ensure correct behavior with overlapping index registers. */ - if (!UCOP_SET_P) { - gen_add_datah_offset(s, insn, addr); - store_reg(s, UCOP_REG_N, addr); - } else if (UCOP_SET_W) { - store_reg(s, UCOP_REG_N, addr); - } else { - dead_tmp(addr); - } - if (UCOP_SET_L) { - /* Complete the load. */ - store_reg(s, UCOP_REG_D, tmp); - } -} - -/* load/store multiple words */ -static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - unsigned int val, i, mmu_idx; - int j, n, reg, user, loaded_base; - TCGv tmp; - TCGv tmp2; - TCGv addr; - TCGv loaded_var; - - if (UCOP_SET(7)) { - ILLEGAL; - } - /* XXX: store correct base if write back */ - user = 0; - if (UCOP_SET_B) { /* S bit in instruction table */ - if (IS_USER(s)) { - ILLEGAL; /* only usable in supervisor mode */ - } - if (UCOP_SET(18) == 0) { /* pc reg */ - user = 1; - } - } - - mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W)); - addr = load_reg(s, UCOP_REG_N); - - /* compute total size */ - loaded_base = 0; - loaded_var = NULL; - n = 0; - for (i = 0; i < 6; i++) { - if (UCOP_SET(i)) { - n++; - } - } - for (i = 9; i < 19; i++) { - if (UCOP_SET(i)) { - n++; - } - } - /* XXX: test invalid n == 0 case ? */ - if (UCOP_SET_U) { - if (UCOP_SET_P) { - /* pre increment */ - tcg_gen_addi_i32(addr, addr, 4); - } else { - /* post increment */ - } - } else { - if (UCOP_SET_P) { - /* pre decrement */ - tcg_gen_addi_i32(addr, addr, -(n * 4)); - } else { - /* post decrement */ - if (n != 1) { - tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); - } - } - } - - j = 0; - reg = UCOP_SET(6) ? 
16 : 0; - for (i = 0; i < 19; i++, reg++) { - if (i == 6) { - i = i + 3; - } - if (UCOP_SET(i)) { - if (UCOP_SET_L) { /* load */ - tmp = gen_ld32(addr, mmu_idx); - if (reg == 31) { - gen_bx(s, tmp); - } else if (user) { - tmp2 = tcg_const_i32(reg); - gen_helper_set_user_reg(cpu_env, tmp2, tmp); - tcg_temp_free_i32(tmp2); - dead_tmp(tmp); - } else if (reg == UCOP_REG_N) { - loaded_var = tmp; - loaded_base = 1; - } else { - store_reg(s, reg, tmp); - } - } else { /* store */ - if (reg == 31) { - /* special case: r31 = PC + 4 */ - val = (long)s->pc; - tmp = new_tmp(); - tcg_gen_movi_i32(tmp, val); - } else if (user) { - tmp = new_tmp(); - tmp2 = tcg_const_i32(reg); - gen_helper_get_user_reg(tmp, cpu_env, tmp2); - tcg_temp_free_i32(tmp2); - } else { - tmp = load_reg(s, reg); - } - gen_st32(tmp, addr, mmu_idx); - } - j++; - /* no need to add after the last transfer */ - if (j != n) { - tcg_gen_addi_i32(addr, addr, 4); - } - } - } - if (UCOP_SET_W) { /* write back */ - if (UCOP_SET_U) { - if (UCOP_SET_P) { - /* pre increment */ - } else { - /* post increment */ - tcg_gen_addi_i32(addr, addr, 4); - } - } else { - if (UCOP_SET_P) { - /* pre decrement */ - if (n != 1) { - tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); - } - } else { - /* post decrement */ - tcg_gen_addi_i32(addr, addr, -(n * 4)); - } - } - store_reg(s, UCOP_REG_N, addr); - } else { - dead_tmp(addr); - } - if (loaded_base) { - store_reg(s, UCOP_REG_N, loaded_var); - } - if (UCOP_SET_B && !user) { - /* Restore ASR from BSR. */ - tmp = load_cpu_field(bsr); - gen_set_asr(tmp, 0xffffffff); - dead_tmp(tmp); - s->is_jmp = DISAS_UPDATE; - } -} - -/* branch (and link) */ -static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn) -{ - unsigned int val; - int32_t offset; - TCGv tmp; - - if (UCOP_COND == 0xf) { - ILLEGAL; - } - - if (UCOP_COND != 0xe) { - /* if not always execute, we generate a conditional jump to - next instruction */ - s->condlabel = gen_new_label(); - gen_test_cc(UCOP_COND ^ 1, s->condlabel); - s->condjmp = 1; - } - - val = (int32_t)s->pc; - if (UCOP_SET_L) { - tmp = new_tmp(); - tcg_gen_movi_i32(tmp, val); - store_reg(s, 30, tmp); - } - offset = (((int32_t)insn << 8) >> 8); - val += (offset << 2); /* unicore is pc+4 */ - gen_jmp(s, val); -} - -static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s) -{ - unsigned int insn; - - insn = cpu_ldl_code(env, s->pc); - s->pc += 4; - - /* UniCore instructions class: - * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx - * AAA : see switch case - * BBBB : opcodes or cond or PUBW - * C : S OR L - * D : 8 - * E : 5 - */ - switch (insn >> 29) { - case 0x0: - if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) { - do_mult(env, s, insn); - break; - } - - if (UCOP_SET(8)) { - do_misc(env, s, insn); - break; - } - /* fallthrough */ - case 0x1: - if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) { - do_misc(env, s, insn); - break; - } - do_datap(env, s, insn); - break; - - case 0x2: - if (UCOP_SET(8) && UCOP_SET(5)) { - do_ldst_hwsb(env, s, insn); - break; - } - if (UCOP_SET(8) || UCOP_SET(5)) { - ILLEGAL; - } - /* fallthrough */ - case 0x3: - do_ldst_ir(env, s, insn); - break; - - case 0x4: - if (UCOP_SET(8)) { - ILLEGAL; /* extended instructions */ - } - do_ldst_m(env, s, insn); - break; - case 0x5: - do_branch(env, s, insn); - break; - case 0x6: - /* Coprocessor. 
*/ - disas_coproc_insn(env, s, insn); - break; - case 0x7: - if (!UCOP_SET(28)) { - disas_coproc_insn(env, s, insn); - break; - } - if ((insn & 0xff000000) == 0xff000000) { /* syscall */ - gen_set_pc_im(s->pc); - s->is_jmp = DISAS_SYSCALL; - break; - } - ILLEGAL; - } -} - -/* generate intermediate code for basic block 'tb'. */ -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) -{ - CPUUniCore32State *env = cs->env_ptr; - DisasContext dc1, *dc = &dc1; - target_ulong pc_start; - uint32_t page_start; - int num_insns; - - /* generate intermediate code */ - num_temps = 0; - - pc_start = tb->pc; - - dc->tb = tb; - - dc->is_jmp = DISAS_NEXT; - dc->pc = pc_start; - dc->singlestep_enabled = cs->singlestep_enabled; - dc->condjmp = 0; - cpu_F0s = tcg_temp_new_i32(); - cpu_F1s = tcg_temp_new_i32(); - cpu_F0d = tcg_temp_new_i64(); - cpu_F1d = tcg_temp_new_i64(); - page_start = pc_start & TARGET_PAGE_MASK; - num_insns = 0; - -#ifndef CONFIG_USER_ONLY - if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) { - dc->user = 1; - } else { - dc->user = 0; - } -#endif - - gen_tb_start(tb); - do { - tcg_gen_insn_start(dc->pc); - num_insns++; - - if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) { - gen_set_pc_im(dc->pc); - gen_exception(EXCP_DEBUG); - dc->is_jmp = DISAS_JUMP; - /* The address covered by the breakpoint must be included in - [tb->pc, tb->pc + tb->size) in order to for it to be - properly cleared -- thus we increment the PC here so that - the logic setting tb->size below does the right thing. */ - dc->pc += 4; - goto done_generating; - } - - if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) { - gen_io_start(); - } - - disas_uc32_insn(env, dc); - - if (num_temps) { - fprintf(stderr, "Internal resource leak before %08x\n", dc->pc); - num_temps = 0; - } - - if (dc->condjmp && !dc->is_jmp) { - gen_set_label(dc->condlabel); - dc->condjmp = 0; - } - /* Translation stops when a conditional branch is encountered. - * Otherwise the subsequent code could get translated several times. - * Also stop translation when a page boundary is reached. This - * ensures prefetch aborts occur at the right place. */ - } while (!dc->is_jmp && !tcg_op_buf_full() && - !cs->singlestep_enabled && - !singlestep && - dc->pc - page_start < TARGET_PAGE_SIZE && - num_insns < max_insns); - - if (tb_cflags(tb) & CF_LAST_IO) { - if (dc->condjmp) { - /* FIXME: This can theoretically happen with self-modifying - code. */ - cpu_abort(cs, "IO on conditional branch instruction"); - } - } - - /* At this stage dc->condjmp will only be set when the skipped - instruction was a conditional branch or trap, and the PC has - already been written. */ - if (unlikely(cs->singlestep_enabled)) { - /* Make sure the pc is updated, and raise a debug exception. */ - if (dc->condjmp) { - if (dc->is_jmp == DISAS_SYSCALL) { - gen_exception(UC32_EXCP_PRIV); - } else { - gen_exception(EXCP_DEBUG); - } - gen_set_label(dc->condlabel); - } - if (dc->condjmp || !dc->is_jmp) { - gen_set_pc_im(dc->pc); - dc->condjmp = 0; - } - if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) { - gen_exception(UC32_EXCP_PRIV); - } else { - gen_exception(EXCP_DEBUG); - } - } else { - /* While branches must always occur at the end of an IT block, - there are a few other things that can cause us to terminate - the TB in the middel of an IT block: - - Exception generating instructions (bkpt, swi, undefined). - - Page boundaries. - - Hardware watchpoints. - Hardware breakpoints have already been handled and skip this code. 
- */ - switch (dc->is_jmp) { - case DISAS_NEXT: - gen_goto_tb(dc, 1, dc->pc); - break; - default: - case DISAS_JUMP: - case DISAS_UPDATE: - /* indicate that the hash table must be used to find the next TB */ - tcg_gen_exit_tb(NULL, 0); - break; - case DISAS_TB_JUMP: - /* nothing more to generate */ - break; - case DISAS_SYSCALL: - gen_exception(UC32_EXCP_PRIV); - break; - } - if (dc->condjmp) { - gen_set_label(dc->condlabel); - gen_goto_tb(dc, 1, dc->pc); - dc->condjmp = 0; - } - } - -done_generating: - gen_tb_end(tb, num_insns); - -#ifdef DEBUG_DISAS - if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) - && qemu_log_in_addr_range(pc_start)) { - FILE *logfile = qemu_log_lock(); - qemu_log("----------------\n"); - qemu_log("IN: %s\n", lookup_symbol(pc_start)); - log_target_disas(cs, pc_start, dc->pc - pc_start); - qemu_log("\n"); - qemu_log_unlock(logfile); - } -#endif - tb->size = dc->pc - pc_start; - tb->icount = num_insns; -} - -static const char *cpu_mode_names[16] = { - "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP", - "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR" -}; - -#undef UCF64_DUMP_STATE -#ifdef UCF64_DUMP_STATE -static void cpu_dump_state_ucf64(CPUUniCore32State *env, int flags) -{ - int i; - union { - uint32_t i; - float s; - } s0, s1; - CPU_DoubleU d; - /* ??? This assumes float64 and double have the same layout. - Oh well, it's only debug dumps. */ - union { - float64 f64; - double d; - } d0; - - for (i = 0; i < 16; i++) { - d.d = env->ucf64.regs[i]; - s0.i = d.l.lower; - s1.i = d.l.upper; - d0.f64 = d.d; - qemu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)", - i * 2, (int)s0.i, s0.s, - i * 2 + 1, (int)s1.i, s1.s); - qemu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n", - i, (uint64_t)d0.f64, d0.d); - } - qemu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]); -} -#else -#define cpu_dump_state_ucf64(env, file, pr, flags) do { } while (0) -#endif - -void uc32_cpu_dump_state(CPUState *cs, FILE *f, int flags) -{ - UniCore32CPU *cpu = UNICORE32_CPU(cs); - CPUUniCore32State *env = &cpu->env; - int i; - uint32_t psr; - - for (i = 0; i < 32; i++) { - qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]); - if ((i % 4) == 3) { - qemu_fprintf(f, "\n"); - } else { - qemu_fprintf(f, " "); - } - } - psr = cpu_asr_read(env); - qemu_fprintf(f, "PSR=%08x %c%c%c%c %s\n", - psr, - psr & (1 << 31) ? 'N' : '-', - psr & (1 << 30) ? 'Z' : '-', - psr & (1 << 29) ? 'C' : '-', - psr & (1 << 28) ? 'V' : '-', - cpu_mode_names[psr & 0xf]); - - if (flags & CPU_DUMP_FPU) { - cpu_dump_state_ucf64(env, f, cpu_fprintf, flags); - } -} - -void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb, - target_ulong *data) -{ - env->regs[31] = data[0]; -} diff --git a/target/unicore32/ucf64_helper.c b/target/unicore32/ucf64_helper.c deleted file mode 100644 index 12a91900f6..0000000000 --- a/target/unicore32/ucf64_helper.c +++ /dev/null @@ -1,324 +0,0 @@ -/* - * UniCore-F64 simulation helpers for QEMU. - * - * Copyright (C) 2010-2012 Guan Xuetao - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation, or any later version. - * See the COPYING file in the top-level directory. - */ -#include "qemu/osdep.h" -#include "cpu.h" -#include "exec/helper-proto.h" -#include "fpu/softfloat.h" - -/* - * The convention used for UniCore-F64 instructions: - * Single precition routines have a "s" suffix - * Double precision routines have a "d" suffix. 
- */ - -/* Convert host exception flags to f64 form. */ -static inline int ucf64_exceptbits_from_host(int host_bits) -{ - int target_bits = 0; - - if (host_bits & float_flag_invalid) { - target_bits |= UCF64_FPSCR_FLAG_INVALID; - } - if (host_bits & float_flag_divbyzero) { - target_bits |= UCF64_FPSCR_FLAG_DIVZERO; - } - if (host_bits & float_flag_overflow) { - target_bits |= UCF64_FPSCR_FLAG_OVERFLOW; - } - if (host_bits & float_flag_underflow) { - target_bits |= UCF64_FPSCR_FLAG_UNDERFLOW; - } - if (host_bits & float_flag_inexact) { - target_bits |= UCF64_FPSCR_FLAG_INEXACT; - } - return target_bits; -} - -uint32_t HELPER(ucf64_get_fpscr)(CPUUniCore32State *env) -{ - int i; - uint32_t fpscr; - - fpscr = (env->ucf64.xregs[UC32_UCF64_FPSCR] & UCF64_FPSCR_MASK); - i = get_float_exception_flags(&env->ucf64.fp_status); - fpscr |= ucf64_exceptbits_from_host(i); - return fpscr; -} - -/* Convert ucf64 exception flags to target form. */ -static inline int ucf64_exceptbits_to_host(int target_bits) -{ - int host_bits = 0; - - if (target_bits & UCF64_FPSCR_FLAG_INVALID) { - host_bits |= float_flag_invalid; - } - if (target_bits & UCF64_FPSCR_FLAG_DIVZERO) { - host_bits |= float_flag_divbyzero; - } - if (target_bits & UCF64_FPSCR_FLAG_OVERFLOW) { - host_bits |= float_flag_overflow; - } - if (target_bits & UCF64_FPSCR_FLAG_UNDERFLOW) { - host_bits |= float_flag_underflow; - } - if (target_bits & UCF64_FPSCR_FLAG_INEXACT) { - host_bits |= float_flag_inexact; - } - return host_bits; -} - -void HELPER(ucf64_set_fpscr)(CPUUniCore32State *env, uint32_t val) -{ - UniCore32CPU *cpu = env_archcpu(env); - int i; - uint32_t changed; - - changed = env->ucf64.xregs[UC32_UCF64_FPSCR]; - env->ucf64.xregs[UC32_UCF64_FPSCR] = (val & UCF64_FPSCR_MASK); - - changed ^= val; - if (changed & (UCF64_FPSCR_RND_MASK)) { - i = UCF64_FPSCR_RND(val); - switch (i) { - case 0: - i = float_round_nearest_even; - break; - case 1: - i = float_round_to_zero; - break; - case 2: - i = float_round_up; - break; - case 3: - i = float_round_down; - break; - default: /* 100 and 101 not implement */ - cpu_abort(CPU(cpu), "Unsupported UniCore-F64 round mode"); - } - set_float_rounding_mode(i, &env->ucf64.fp_status); - } - - i = ucf64_exceptbits_to_host(UCF64_FPSCR_TRAPEN(val)); - set_float_exception_flags(i, &env->ucf64.fp_status); -} - -float32 HELPER(ucf64_adds)(float32 a, float32 b, CPUUniCore32State *env) -{ - return float32_add(a, b, &env->ucf64.fp_status); -} - -float64 HELPER(ucf64_addd)(float64 a, float64 b, CPUUniCore32State *env) -{ - return float64_add(a, b, &env->ucf64.fp_status); -} - -float32 HELPER(ucf64_subs)(float32 a, float32 b, CPUUniCore32State *env) -{ - return float32_sub(a, b, &env->ucf64.fp_status); -} - -float64 HELPER(ucf64_subd)(float64 a, float64 b, CPUUniCore32State *env) -{ - return float64_sub(a, b, &env->ucf64.fp_status); -} - -float32 HELPER(ucf64_muls)(float32 a, float32 b, CPUUniCore32State *env) -{ - return float32_mul(a, b, &env->ucf64.fp_status); -} - -float64 HELPER(ucf64_muld)(float64 a, float64 b, CPUUniCore32State *env) -{ - return float64_mul(a, b, &env->ucf64.fp_status); -} - -float32 HELPER(ucf64_divs)(float32 a, float32 b, CPUUniCore32State *env) -{ - return float32_div(a, b, &env->ucf64.fp_status); -} - -float64 HELPER(ucf64_divd)(float64 a, float64 b, CPUUniCore32State *env) -{ - return float64_div(a, b, &env->ucf64.fp_status); -} - -float32 HELPER(ucf64_negs)(float32 a) -{ - return float32_chs(a); -} - -float64 HELPER(ucf64_negd)(float64 a) -{ - return float64_chs(a); -} - -float32 
HELPER(ucf64_abss)(float32 a) -{ - return float32_abs(a); -} - -float64 HELPER(ucf64_absd)(float64 a) -{ - return float64_abs(a); -} - -void HELPER(ucf64_cmps)(float32 a, float32 b, uint32_t c, - CPUUniCore32State *env) -{ - FloatRelation flag = float32_compare_quiet(a, b, &env->ucf64.fp_status); - env->CF = 0; - switch (c & 0x7) { - case 0: /* F */ - break; - case 1: /* UN */ - if (flag == 2) { - env->CF = 1; - } - break; - case 2: /* EQ */ - if (flag == 0) { - env->CF = 1; - } - break; - case 3: /* UEQ */ - if ((flag == 0) || (flag == 2)) { - env->CF = 1; - } - break; - case 4: /* OLT */ - if (flag == -1) { - env->CF = 1; - } - break; - case 5: /* ULT */ - if ((flag == -1) || (flag == 2)) { - env->CF = 1; - } - break; - case 6: /* OLE */ - if ((flag == -1) || (flag == 0)) { - env->CF = 1; - } - break; - case 7: /* ULE */ - if (flag != 1) { - env->CF = 1; - } - break; - } - env->ucf64.xregs[UC32_UCF64_FPSCR] = (env->CF << 29) - | (env->ucf64.xregs[UC32_UCF64_FPSCR] & 0x0fffffff); -} - -void HELPER(ucf64_cmpd)(float64 a, float64 b, uint32_t c, - CPUUniCore32State *env) -{ - FloatRelation flag = float64_compare_quiet(a, b, &env->ucf64.fp_status); - env->CF = 0; - switch (c & 0x7) { - case 0: /* F */ - break; - case 1: /* UN */ - if (flag == 2) { - env->CF = 1; - } - break; - case 2: /* EQ */ - if (flag == 0) { - env->CF = 1; - } - break; - case 3: /* UEQ */ - if ((flag == 0) || (flag == 2)) { - env->CF = 1; - } - break; - case 4: /* OLT */ - if (flag == -1) { - env->CF = 1; - } - break; - case 5: /* ULT */ - if ((flag == -1) || (flag == 2)) { - env->CF = 1; - } - break; - case 6: /* OLE */ - if ((flag == -1) || (flag == 0)) { - env->CF = 1; - } - break; - case 7: /* ULE */ - if (flag != 1) { - env->CF = 1; - } - break; - } - env->ucf64.xregs[UC32_UCF64_FPSCR] = (env->CF << 29) - | (env->ucf64.xregs[UC32_UCF64_FPSCR] & 0x0fffffff); -} - -/* Helper routines to perform bitwise copies between float and int. */ -static inline float32 ucf64_itos(uint32_t i) -{ - union { - uint32_t i; - float32 s; - } v; - - v.i = i; - return v.s; -} - -static inline uint32_t ucf64_stoi(float32 s) -{ - union { - uint32_t i; - float32 s; - } v; - - v.s = s; - return v.i; -} - -/* Integer to float conversion. */ -float32 HELPER(ucf64_si2sf)(float32 x, CPUUniCore32State *env) -{ - return int32_to_float32(ucf64_stoi(x), &env->ucf64.fp_status); -} - -float64 HELPER(ucf64_si2df)(float32 x, CPUUniCore32State *env) -{ - return int32_to_float64(ucf64_stoi(x), &env->ucf64.fp_status); -} - -/* Float to integer conversion. 
*/ -float32 HELPER(ucf64_sf2si)(float32 x, CPUUniCore32State *env) -{ - return ucf64_itos(float32_to_int32(x, &env->ucf64.fp_status)); -} - -float32 HELPER(ucf64_df2si)(float64 x, CPUUniCore32State *env) -{ - return ucf64_itos(float64_to_int32(x, &env->ucf64.fp_status)); -} - -/* floating point conversion */ -float64 HELPER(ucf64_sf2df)(float32 x, CPUUniCore32State *env) -{ - return float32_to_float64(x, &env->ucf64.fp_status); -} - -float32 HELPER(ucf64_df2sf)(float64 x, CPUUniCore32State *env) -{ - return float64_to_float32(x, &env->ucf64.fp_status); -} diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c index 143476849f..d85d3516d6 100644 --- a/target/xtensa/op_helper.c +++ b/target/xtensa/op_helper.c @@ -32,7 +32,6 @@ #include "qemu/host-utils.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" -#include "exec/address-spaces.h" #include "qemu/timer.h" #ifndef CONFIG_USER_ONLY diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c index 0ae4efc48a..73584d9d60 100644 --- a/target/xtensa/translate.c +++ b/target/xtensa/translate.c @@ -917,6 +917,7 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) "unknown instruction length (pc = %08x)\n", dc->pc); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); + dc->base.pc_next = dc->pc + 1; return; } @@ -1274,11 +1275,13 @@ static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT) && (dc->base.tb->flags & XTENSA_TBFLAG_YIELD)) { gen_exception(dc, EXCP_YIELD); + dc->base.pc_next = dc->pc + 1; dc->base.is_jmp = DISAS_NORETURN; return; } if (dc->base.tb->flags & XTENSA_TBFLAG_EXCEPTION) { gen_exception(dc, EXCP_DEBUG); + dc->base.pc_next = dc->pc + 1; dc->base.is_jmp = DISAS_NORETURN; return; } |
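For reference, the deleted ucf64_cmps()/ucf64_cmpd() helpers above both reduce to the same mapping: a quiet float comparison result (-1 less, 0 equal, 1 greater, 2 unordered) combined with a 3-bit condition field selects the value written to CF, which is then mirrored into FPSCR bit 29. A minimal standalone sketch of that mapping follows; the names are illustrative and not taken from the tree, only the -1/0/1/2 result encoding and the condition table come from the removed code.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the -1/0/1/2 comparison results the removed helpers switch on. */
enum frel { FREL_LT = -1, FREL_EQ = 0, FREL_GT = 1, FREL_UNORDERED = 2 };

/*
 * Return the CF value for UCF64 condition 'cond' (low three bits:
 * F, UN, EQ, UEQ, OLT, ULT, OLE, ULE), matching the switch statements
 * in the removed ucf64_cmps()/ucf64_cmpd() helpers.
 */
static bool ucf64_cond_to_cf(enum frel flag, unsigned cond)
{
    switch (cond & 0x7) {
    case 0:  return false;                                     /* F   */
    case 1:  return flag == FREL_UNORDERED;                    /* UN  */
    case 2:  return flag == FREL_EQ;                           /* EQ  */
    case 3:  return flag == FREL_EQ || flag == FREL_UNORDERED; /* UEQ */
    case 4:  return flag == FREL_LT;                           /* OLT */
    case 5:  return flag == FREL_LT || flag == FREL_UNORDERED; /* ULT */
    case 6:  return flag == FREL_LT || flag == FREL_EQ;        /* OLE */
    default: return flag != FREL_GT;                           /* ULE */
    }
}

int main(void)
{
    /* ULE (7) holds for less, equal and unordered, but not for greater. */
    printf("%d %d\n", ucf64_cond_to_cf(FREL_LT, 7), ucf64_cond_to_cf(FREL_GT, 7));
    return 0;
}

Because the helpers use the quiet comparison variants (float32_compare_quiet()/float64_compare_quiet()), an unordered result simply leaves CF clear for the ordered conditions (EQ, OLT, OLE) rather than being treated specially.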