Diffstat (limited to 'target')

 target/riscv/cpu.h                         |  16
 target/riscv/cpu_helper.c                  |   6
 target/riscv/csr.c                         | 278
 target/riscv/insn32.decode                 |  18
 target/riscv/insn_trans/trans_rvbf16.c.inc |   9
 target/riscv/insn_trans/trans_rvv.c.inc    | 644
 target/riscv/internals.h                   |   5
 target/riscv/kvm/kvm-cpu.c                 | 333
 target/riscv/op_helper.c                   |  13
 target/riscv/pmp.c                         | 147
 target/riscv/translate.c                   |   5
 target/riscv/vector_helper.c               |  63
 12 files changed, 1040 insertions(+), 497 deletions(-)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index c66ac3bc27..b56d3afa69 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -75,6 +75,7 @@ const char *riscv_get_misa_ext_name(uint32_t bit);
 const char *riscv_get_misa_ext_description(uint32_t bit);
 
 #define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
+#define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr)
 
 typedef struct riscv_cpu_profile {
     struct riscv_cpu_profile *u_parent;
@@ -813,8 +814,8 @@ RISCVException riscv_csrr(CPURISCVState *env, int csrno,
                           target_ulong *ret_value);
 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
-                           target_ulong *ret_value,
-                           target_ulong new_value, target_ulong write_mask);
+                           target_ulong *ret_value, target_ulong new_value,
+                           target_ulong write_mask, uintptr_t ra);
 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                  target_ulong *ret_value,
                                  target_ulong new_value,
@@ -823,13 +824,13 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
 static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                    target_ulong val)
 {
-    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
+    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0);
 }
 
 static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
 {
     target_ulong val = 0;
-    riscv_csrrw(env, csrno, &val, 0, 0);
+    riscv_csrrw(env, csrno, &val, 0, 0, 0);
     return val;
 }
 
@@ -838,7 +839,8 @@ typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
 typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                             target_ulong *ret_value);
 typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
-                                             target_ulong new_value);
+                                             target_ulong new_value,
+                                             uintptr_t ra);
 typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                           target_ulong *ret_value,
                                           target_ulong new_value,
@@ -847,8 +849,8 @@ typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
 RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value);
 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
-                                Int128 *ret_value,
-                                Int128 new_value, Int128 write_mask);
+                                Int128 *ret_value, Int128 new_value,
+                                Int128 write_mask, uintptr_t ra);
 
 typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                                Int128 *ret_value);
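The new uintptr_t ra parameter threads the host return address of the calling
TCG helper down to the individual CSR write handlers; a value of 0 (as passed
by riscv_csr_write() and riscv_csrrw_debug()) means "not called from
translated code". A minimal sketch of what this enables -- the handler below
is hypothetical and not part of this patch:

    /* Recover the precise guest PC from the host return address. */
    static RISCVException write_example(CPURISCVState *env, int csrno,
                                        target_ulong val, uintptr_t ra)
    {
        uint64_t data[INSN_START_WORDS];

        if (ra != 0 && cpu_unwind_state_data(env_cpu(env), ra, data)) {
            /* data[0] is the guest PC of the writing instruction. */
        }
        env->mscratch = val;
        return RISCV_EXCP_NONE;
    }

write_misa() in csr.c below uses exactly this pattern to compute the address
of the next instruction before deciding whether clearing RVC is safe.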
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index d5039f69a9..2ed69d7c2d 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -1566,9 +1566,11 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
             target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
             target_ulong old_pte;
             if (riscv_cpu_sxl(env) == MXL_RV32) {
-                old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte);
+                old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, cpu_to_le32(pte), cpu_to_le32(updated_pte));
+                old_pte = le32_to_cpu(old_pte);
             } else {
-                old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
+                old_pte = qatomic_cmpxchg(pte_pa, cpu_to_le64(pte), cpu_to_le64(updated_pte));
+                old_pte = le64_to_cpu(old_pte);
             }
             if (old_pte != pte) {
                 goto restart;
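Guest page-table entries are architecturally little-endian, while
qatomic_cmpxchg() compares raw values in host byte order, so the compare and
replacement values must be converted with cpu_to_le32/64() and the result
converted back. On little-endian hosts these conversions are no-ops, so only
big-endian hosts change behavior. A self-contained model of the RV32 case,
with __builtin_bswap32 standing in for the conversion on a big-endian host:

    #include <stdatomic.h>
    #include <stdint.h>

    static uint32_t update_pte(_Atomic uint32_t *pte_pa, uint32_t pte,
                               uint32_t updated_pte)
    {
        /* cpu_to_le32(): the PTE is stored little-endian in guest RAM. */
        uint32_t expected = __builtin_bswap32(pte);
        atomic_compare_exchange_strong(pte_pa, &expected,
                                       __builtin_bswap32(updated_pte));
        /* le32_to_cpu(): convert the old value back for the race check. */
        return __builtin_bswap32(expected);
    }

If the returned value differs from pte, another agent updated the entry
concurrently and the walk is restarted, exactly as the goto restart above
does.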
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index a32e1455c9..288edeedea 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -30,6 +30,8 @@
 #include "accel/tcg/getpc.h"
 #include "qemu/guest-random.h"
 #include "qapi/error.h"
+#include "tcg/insn-start-words.h"
+#include "internals.h"
 #include <stdbool.h>
 
 /* CSR function table public API */
@@ -830,13 +832,15 @@ static RISCVException seed(CPURISCVState *env, int csrno)
 }
 
 /* zicfiss CSR_SSP read and write */
-static int read_ssp(CPURISCVState *env, int csrno, target_ulong *val)
+static RISCVException read_ssp(CPURISCVState *env, int csrno,
+                               target_ulong *val)
 {
     *val = env->ssp;
     return RISCV_EXCP_NONE;
 }
 
-static int write_ssp(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_ssp(CPURISCVState *env, int csrno,
+                                target_ulong val, uintptr_t ra)
 {
     env->ssp = val;
     return RISCV_EXCP_NONE;
@@ -851,7 +855,7 @@ static RISCVException read_fflags(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_fflags(CPURISCVState *env, int csrno,
-                                   target_ulong val)
+                                   target_ulong val, uintptr_t ra)
 {
 #if !defined(CONFIG_USER_ONLY)
     if (riscv_has_ext(env, RVF)) {
@@ -870,7 +874,7 @@ static RISCVException read_frm(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_frm(CPURISCVState *env, int csrno,
-                                target_ulong val)
+                                target_ulong val, uintptr_t ra)
 {
 #if !defined(CONFIG_USER_ONLY)
     if (riscv_has_ext(env, RVF)) {
@@ -890,7 +894,7 @@ static RISCVException read_fcsr(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
-                                 target_ulong val)
+                                 target_ulong val, uintptr_t ra)
 {
 #if !defined(CONFIG_USER_ONLY)
     if (riscv_has_ext(env, RVF)) {
@@ -942,7 +946,7 @@ static RISCVException read_vxrm(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vxrm(CPURISCVState *env, int csrno,
-                                 target_ulong val)
+                                 target_ulong val, uintptr_t ra)
 {
 #if !defined(CONFIG_USER_ONLY)
     env->mstatus |= MSTATUS_VS;
@@ -959,7 +963,7 @@ static RISCVException read_vxsat(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vxsat(CPURISCVState *env, int csrno,
-                                  target_ulong val)
+                                  target_ulong val, uintptr_t ra)
 {
 #if !defined(CONFIG_USER_ONLY)
     env->mstatus |= MSTATUS_VS;
@@ -976,7 +980,7 @@ static RISCVException read_vstart(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vstart(CPURISCVState *env, int csrno,
-                                   target_ulong val)
+                                   target_ulong val, uintptr_t ra)
 {
 #if !defined(CONFIG_USER_ONLY)
     env->mstatus |= MSTATUS_VS;
@@ -997,7 +1001,7 @@ static RISCVException read_vcsr(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vcsr(CPURISCVState *env, int csrno,
-                                 target_ulong val)
+                                 target_ulong val, uintptr_t ra)
 {
 #if !defined(CONFIG_USER_ONLY)
     env->mstatus |= MSTATUS_VS;
@@ -1055,7 +1059,7 @@ static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
-                                      target_ulong val)
+                                      target_ulong val, uintptr_t ra)
 {
     uint64_t inh_avail_mask;
 
@@ -1084,7 +1088,7 @@ static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
-                                       target_ulong val)
+                                       target_ulong val, uintptr_t ra)
 {
     target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                  MCYCLECFGH_BIT_MINH);
@@ -1109,7 +1113,7 @@ static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
-                                        target_ulong val)
+                                        target_ulong val, uintptr_t ra)
 {
     uint64_t inh_avail_mask;
 
@@ -1136,7 +1140,7 @@ static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
-                                         target_ulong val)
+                                         target_ulong val, uintptr_t ra)
 {
     target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                  MINSTRETCFGH_BIT_MINH);
@@ -1163,7 +1167,7 @@ static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
-                                      target_ulong val)
+                                      target_ulong val, uintptr_t ra)
 {
     int evt_index = csrno - CSR_MCOUNTINHIBIT;
     uint64_t mhpmevt_val = val;
@@ -1201,7 +1205,7 @@ static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
-                                       target_ulong val)
+                                       target_ulong val, uintptr_t ra)
 {
     int evt_index = csrno - CSR_MHPMEVENT3H + 3;
     uint64_t mhpmevth_val;
@@ -1343,14 +1347,16 @@ static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val,
     return RISCV_EXCP_NONE;
 }
 
-static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
+                                        target_ulong val, uintptr_t ra)
 {
     int ctr_idx = csrno - CSR_MCYCLE;
 
     return riscv_pmu_write_ctr(env, val, ctr_idx);
 }
 
-static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
+                                         target_ulong val, uintptr_t ra)
 {
     int ctr_idx = csrno - CSR_MCYCLEH;
 
@@ -1661,7 +1667,7 @@ static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
-                                      target_ulong val)
+                                      target_ulong val, uintptr_t ra)
 {
     if (riscv_cpu_mxl(env) == MXL_RV32) {
         env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
@@ -1676,7 +1682,7 @@ static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
-                                       target_ulong val)
+                                       target_ulong val, uintptr_t ra)
 {
     env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
     riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
@@ -1710,13 +1716,13 @@ static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
-                                     target_ulong val)
+                                     target_ulong val, uintptr_t ra)
 {
     if (env->virt_enabled) {
         if (env->hvictl & HVICTL_VTI) {
             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
         }
-        return write_vstimecmp(env, csrno, val);
+        return write_vstimecmp(env, csrno, val, ra);
     }
 
     if (riscv_cpu_mxl(env) == MXL_RV32) {
@@ -1731,13 +1737,13 @@ static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
-                                      target_ulong val)
+                                      target_ulong val, uintptr_t ra)
 {
     if (env->virt_enabled) {
         if (env->hvictl & HVICTL_VTI) {
             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
         }
-        return write_vstimecmph(env, csrno, val);
+        return write_vstimecmph(env, csrno, val, ra);
     }
 
     env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
@@ -1842,7 +1848,7 @@ static RISCVException read_zero(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_ignore(CPURISCVState *env, int csrno,
-                                   target_ulong val)
+                                   target_ulong val, uintptr_t ra)
 {
     return RISCV_EXCP_NONE;
 }
@@ -1963,7 +1969,7 @@ static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
 }
 
 static RISCVException write_mstatus(CPURISCVState *env, int csrno,
-                                    target_ulong val)
+                                    target_ulong val, uintptr_t ra)
 {
     uint64_t mstatus = env->mstatus;
     uint64_t mask = 0;
@@ -2042,7 +2048,7 @@ static RISCVException read_mstatush(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mstatush(CPURISCVState *env, int csrno,
-                                     target_ulong val)
+                                     target_ulong val, uintptr_t ra)
 {
     uint64_t valh = (uint64_t)val << 32;
     uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
@@ -2095,8 +2101,21 @@ static RISCVException read_misa(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
+static target_ulong get_next_pc(CPURISCVState *env, uintptr_t ra)
+{
+    uint64_t data[INSN_START_WORDS];
+
+    /* Outside of a running cpu, env contains the next pc. */
+    if (ra == 0 || !cpu_unwind_state_data(env_cpu(env), ra, data)) {
+        return env->pc;
+    }
+
+    /* Within unwind data, [0] is pc and [1] is the opcode. */
+    return data[0] + insn_len(data[1]);
+}
+
 static RISCVException write_misa(CPURISCVState *env, int csrno,
-                                 target_ulong val)
+                                 target_ulong val, uintptr_t ra)
 {
     RISCVCPU *cpu = env_archcpu(env);
     uint32_t orig_misa_ext = env->misa_ext;
@@ -2110,11 +2129,8 @@ static RISCVException write_misa(CPURISCVState *env, int csrno,
     /* Mask extensions that are not supported by this hart */
     val &= env->misa_ext_mask;
 
-    /*
-     * Suppress 'C' if next instruction is not aligned
-     * TODO: this should check next_pc
-     */
-    if ((val & RVC) && (GETPC() & ~3) != 0) {
+    /* Suppress 'C' if next instruction is not aligned. */
+    if ((val & RVC) && (get_next_pc(env, ra) & 3) != 0) {
         val &= ~RVC;
     }
 
@@ -2160,7 +2176,7 @@ static RISCVException read_medeleg(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
-                                    target_ulong val)
+                                    target_ulong val, uintptr_t ra)
 {
     env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
     return RISCV_EXCP_NONE;
@@ -2955,7 +2971,7 @@ static RISCVException read_mtvec(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
-                                  target_ulong val)
+                                  target_ulong val, uintptr_t ra)
 {
     /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
     if ((val & 3) < 2) {
@@ -2974,7 +2990,7 @@ static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
-                                          target_ulong val)
+                                          target_ulong val, uintptr_t ra)
 {
     int cidx;
     PMUCTRState *counter;
@@ -3049,10 +3065,9 @@ static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
-                                          target_ulong val)
+                                          target_ulong val, uintptr_t ra)
 {
-    write_mcountinhibit(env, csrno, val & env->mcounteren);
-    return RISCV_EXCP_NONE;
+    return write_mcountinhibit(env, csrno, val & env->mcounteren, ra);
 }
 
 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
@@ -3063,7 +3078,7 @@ static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
-                                       target_ulong val)
+                                       target_ulong val, uintptr_t ra)
 {
     RISCVCPU *cpu = env_archcpu(env);
 
@@ -3097,7 +3112,7 @@ static RISCVException read_mscratch(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
-                                     target_ulong val)
+                                     target_ulong val, uintptr_t ra)
 {
     env->mscratch = val;
     return RISCV_EXCP_NONE;
@@ -3111,7 +3126,7 @@ static RISCVException read_mepc(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mepc(CPURISCVState *env, int csrno,
-                                 target_ulong val)
+                                 target_ulong val, uintptr_t ra)
 {
     env->mepc = val;
     return RISCV_EXCP_NONE;
@@ -3125,7 +3140,7 @@ static RISCVException read_mcause(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mcause(CPURISCVState *env, int csrno,
-                                   target_ulong val)
+                                   target_ulong val, uintptr_t ra)
 {
     env->mcause = val;
     return RISCV_EXCP_NONE;
@@ -3139,7 +3154,7 @@ static RISCVException read_mtval(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mtval(CPURISCVState *env, int csrno,
-                                  target_ulong val)
+                                  target_ulong val, uintptr_t ra)
 {
     env->mtval = val;
     return RISCV_EXCP_NONE;
@@ -3154,9 +3169,9 @@ static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
-                                    target_ulong val);
+                                    target_ulong val, uintptr_t ra);
 static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
-                                    target_ulong val)
+                                    target_ulong val, uintptr_t ra)
 {
     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
     uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE |
@@ -3188,9 +3203,7 @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
         }
     }
     env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
-    write_henvcfg(env, CSR_HENVCFG, env->henvcfg);
-
-    return RISCV_EXCP_NONE;
+    return write_henvcfg(env, CSR_HENVCFG, env->henvcfg, ra);
 }
 
 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
@@ -3201,9 +3214,9 @@ static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
-                                     target_ulong val);
+                                     target_ulong val, uintptr_t ra);
 static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
-                                     target_ulong val)
+                                     target_ulong val, uintptr_t ra)
 {
     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
     uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
@@ -3218,9 +3231,7 @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
     }
 
     env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
-    write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32);
-
-    return RISCV_EXCP_NONE;
+    return write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32, ra);
 }
 
 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
@@ -3238,7 +3249,7 @@ static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
-                                    target_ulong val)
+                                    target_ulong val, uintptr_t ra)
 {
     uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
    RISCVException ret;
@@ -3295,7 +3306,7 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
-                                    target_ulong val)
+                                    target_ulong val, uintptr_t ra)
 {
     uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
     RISCVException ret;
@@ -3350,7 +3361,7 @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
-                                     target_ulong val)
+                                     target_ulong val, uintptr_t ra)
 {
     uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
                                     HENVCFG_ADUE | HENVCFG_DTE);
@@ -3388,7 +3399,7 @@ static RISCVException write_mstateen(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
-                                      target_ulong new_val)
+                                      target_ulong new_val, uintptr_t ra)
 {
     uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
     if (!riscv_has_ext(env, RVF)) {
@@ -3420,7 +3431,7 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
-                                         target_ulong new_val)
+                                         target_ulong new_val, uintptr_t ra)
 {
     return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
 }
@@ -3447,7 +3458,7 @@ static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
-                                       target_ulong new_val)
+                                       target_ulong new_val, uintptr_t ra)
 {
     uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
 
@@ -3463,7 +3474,7 @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
-                                          target_ulong new_val)
+                                          target_ulong new_val, uintptr_t ra)
 {
     return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
 }
@@ -3492,7 +3503,7 @@ static RISCVException write_hstateen(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
-                                      target_ulong new_val)
+                                      target_ulong new_val, uintptr_t ra)
 {
     uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
 
@@ -3521,7 +3532,7 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
-                                         target_ulong new_val)
+                                         target_ulong new_val, uintptr_t ra)
 {
     return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
 }
@@ -3552,7 +3563,7 @@ static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
-                                       target_ulong new_val)
+                                       target_ulong new_val, uintptr_t ra)
 {
     uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
 
@@ -3564,7 +3575,7 @@ static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
-                                          target_ulong new_val)
+                                          target_ulong new_val, uintptr_t ra)
 {
     return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
 }
@@ -3603,7 +3614,7 @@ static RISCVException write_sstateen(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
-                                      target_ulong new_val)
+                                      target_ulong new_val, uintptr_t ra)
 {
     uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
 
@@ -3615,7 +3626,7 @@ static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
-                                         target_ulong new_val)
+                                         target_ulong new_val, uintptr_t ra)
 {
     return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
 }
@@ -3866,7 +3877,7 @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
-                                    target_ulong val)
+                                    target_ulong val, uintptr_t ra)
 {
     target_ulong mask = (sstatus_v1_10_mask);
 
@@ -3883,7 +3894,7 @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno,
         mask |= SSTATUS_SDT;
     }
     target_ulong newval = (env->mstatus & ~mask) | (val & mask);
-    return write_mstatus(env, CSR_MSTATUS, newval);
+    return write_mstatus(env, CSR_MSTATUS, newval, ra);
 }
 
 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
@@ -4035,7 +4046,7 @@ static RISCVException read_stvec(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_stvec(CPURISCVState *env, int csrno,
-                                  target_ulong val)
+                                  target_ulong val, uintptr_t ra)
 {
     /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
     if ((val & 3) < 2) {
@@ -4054,7 +4065,7 @@ static RISCVException read_scounteren(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
-                                       target_ulong val)
+                                       target_ulong val, uintptr_t ra)
 {
     RISCVCPU *cpu = env_archcpu(env);
 
@@ -4088,7 +4099,7 @@ static RISCVException read_sscratch(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
-                                     target_ulong val)
+                                     target_ulong val, uintptr_t ra)
 {
     env->sscratch = val;
     return RISCV_EXCP_NONE;
@@ -4102,7 +4113,7 @@ static RISCVException read_sepc(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_sepc(CPURISCVState *env, int csrno,
-                                 target_ulong val)
+                                 target_ulong val, uintptr_t ra)
 {
     env->sepc = val;
     return RISCV_EXCP_NONE;
@@ -4116,7 +4127,7 @@ static RISCVException read_scause(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_scause(CPURISCVState *env, int csrno,
-                                   target_ulong val)
+                                   target_ulong val, uintptr_t ra)
 {
     env->scause = val;
     return RISCV_EXCP_NONE;
@@ -4130,7 +4141,7 @@ static RISCVException read_stval(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_stval(CPURISCVState *env, int csrno,
-                                  target_ulong val)
+                                  target_ulong val, uintptr_t ra)
 {
     env->stval = val;
     return RISCV_EXCP_NONE;
@@ -4270,7 +4281,7 @@ static RISCVException read_satp(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_satp(CPURISCVState *env, int csrno,
-                                 target_ulong val)
+                                 target_ulong val, uintptr_t ra)
 {
     if (!riscv_cpu_cfg(env)->mmu) {
         return RISCV_EXCP_NONE;
@@ -4492,7 +4503,7 @@ static RISCVException read_hstatus(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
-                                    target_ulong val)
+                                    target_ulong val, uintptr_t ra)
 {
     uint64_t mask = (target_ulong)-1;
     if (!env_archcpu(env)->cfg.ext_svukte) {
@@ -4524,7 +4535,7 @@ static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
-                                    target_ulong val)
+                                    target_ulong val, uintptr_t ra)
 {
     env->hedeleg = val & vs_delegable_excps;
     return RISCV_EXCP_NONE;
@@ -4545,7 +4556,7 @@ static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
-                                     target_ulong val)
+                                     target_ulong val, uintptr_t ra)
 {
     RISCVException ret;
     ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
@@ -4808,7 +4819,7 @@ static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
-                                       target_ulong val)
+                                       target_ulong val, uintptr_t ra)
 {
     RISCVCPU *cpu = env_archcpu(env);
 
@@ -4828,7 +4839,7 @@ static RISCVException read_hgeie(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
-                                  target_ulong val)
+                                  target_ulong val, uintptr_t ra)
 {
     /* Only GEILEN:1 bits implemented and BIT0 is never implemented */
     val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
@@ -4847,7 +4858,7 @@ static RISCVException read_htval(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_htval(CPURISCVState *env, int csrno,
-                                  target_ulong val)
+                                  target_ulong val, uintptr_t ra)
 {
     env->htval = val;
     return RISCV_EXCP_NONE;
@@ -4861,7 +4872,7 @@ static RISCVException read_htinst(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_htinst(CPURISCVState *env, int csrno,
-                                   target_ulong val)
+                                   target_ulong val, uintptr_t ra)
 {
     return RISCV_EXCP_NONE;
 }
@@ -4883,7 +4894,7 @@ static RISCVException read_hgatp(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
-                                  target_ulong val)
+                                  target_ulong val, uintptr_t ra)
 {
     env->hgatp = legalize_xatp(env, env->hgatp, val);
     return RISCV_EXCP_NONE;
@@ -4901,7 +4912,7 @@ static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
-                                       target_ulong val)
+                                       target_ulong val, uintptr_t ra)
 {
     if (!env->rdtime_fn) {
         return RISCV_EXCP_ILLEGAL_INST;
@@ -4933,7 +4944,7 @@ static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
-                                        target_ulong val)
+                                        target_ulong val, uintptr_t ra)
 {
     if (!env->rdtime_fn) {
         return RISCV_EXCP_ILLEGAL_INST;
@@ -4957,7 +4968,7 @@ static RISCVException read_hvictl(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hvictl(CPURISCVState *env, int csrno,
-                                   target_ulong val)
+                                   target_ulong val, uintptr_t ra)
 {
     env->hvictl = val & HVICTL_VALID_MASK;
     return RISCV_EXCP_NONE;
@@ -5022,7 +5033,7 @@ static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
-                                     target_ulong val)
+                                     target_ulong val, uintptr_t ra)
 {
     return write_hvipriox(env, 0, env->hviprio, val);
 }
@@ -5034,7 +5045,7 @@ static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
-                                      target_ulong val)
+                                      target_ulong val, uintptr_t ra)
 {
     return write_hvipriox(env, 4, env->hviprio, val);
 }
@@ -5046,7 +5057,7 @@ static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
-                                     target_ulong val)
+                                     target_ulong val, uintptr_t ra)
 {
     return write_hvipriox(env, 8, env->hviprio, val);
 }
@@ -5058,7 +5069,7 @@ static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
-                                      target_ulong val)
+                                      target_ulong val, uintptr_t ra)
 {
     return write_hvipriox(env, 12, env->hviprio, val);
 }
@@ -5072,7 +5083,7 @@ static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
-                                     target_ulong val)
+                                     target_ulong val, uintptr_t ra)
 {
     uint64_t mask = (target_ulong)-1;
     if ((val & VSSTATUS64_UXL) == 0) {
@@ -5097,7 +5108,7 @@ static RISCVException read_vstvec(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vstvec(CPURISCVState *env, int csrno,
-                                   target_ulong val)
+                                   target_ulong val, uintptr_t ra)
 {
     /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
     if ((val & 3) < 2) {
@@ -5116,7 +5127,7 @@ static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
-                                      target_ulong val)
+                                      target_ulong val, uintptr_t ra)
 {
     env->vsscratch = val;
     return RISCV_EXCP_NONE;
@@ -5130,7 +5141,7 @@ static RISCVException read_vsepc(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vsepc(CPURISCVState *env, int csrno,
-                                  target_ulong val)
+                                  target_ulong val, uintptr_t ra)
 {
     env->vsepc = val;
     return RISCV_EXCP_NONE;
@@ -5144,7 +5155,7 @@ static RISCVException read_vscause(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vscause(CPURISCVState *env, int csrno,
-                                    target_ulong val)
+                                    target_ulong val, uintptr_t ra)
 {
     env->vscause = val;
     return RISCV_EXCP_NONE;
@@ -5158,7 +5169,7 @@ static RISCVException read_vstval(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vstval(CPURISCVState *env, int csrno,
-                                   target_ulong val)
+                                   target_ulong val, uintptr_t ra)
 {
     env->vstval = val;
     return RISCV_EXCP_NONE;
@@ -5172,7 +5183,7 @@ static RISCVException read_vsatp(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_vsatp(CPURISCVState *env, int csrno,
-                                  target_ulong val)
+                                  target_ulong val, uintptr_t ra)
 {
     env->vsatp = legalize_xatp(env, env->vsatp, val);
     return RISCV_EXCP_NONE;
@@ -5186,7 +5197,7 @@ static RISCVException read_mtval2(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mtval2(CPURISCVState *env, int csrno,
-                                   target_ulong val)
+                                   target_ulong val, uintptr_t ra)
 {
     env->mtval2 = val;
     return RISCV_EXCP_NONE;
@@ -5200,7 +5211,7 @@ static RISCVException read_mtinst(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mtinst(CPURISCVState *env, int csrno,
-                                   target_ulong val)
+                                   target_ulong val, uintptr_t ra)
 {
     env->mtinst = val;
     return RISCV_EXCP_NONE;
@@ -5215,7 +5226,7 @@ static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
-                                    target_ulong val)
+                                    target_ulong val, uintptr_t ra)
 {
     mseccfg_csr_write(env, val);
     return RISCV_EXCP_NONE;
@@ -5231,7 +5242,7 @@ static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
-                                   target_ulong val)
+                                   target_ulong val, uintptr_t ra)
 {
     uint32_t reg_index = csrno - CSR_PMPCFG0;
 
@@ -5247,7 +5258,7 @@ static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
-                                    target_ulong val)
+                                    target_ulong val, uintptr_t ra)
 {
     pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
     return RISCV_EXCP_NONE;
@@ -5261,7 +5272,7 @@ static RISCVException read_tselect(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_tselect(CPURISCVState *env, int csrno,
-                                    target_ulong val)
+                                    target_ulong val, uintptr_t ra)
 {
     tselect_csr_write(env, val);
     return RISCV_EXCP_NONE;
@@ -5285,7 +5296,7 @@ static RISCVException read_tdata(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_tdata(CPURISCVState *env, int csrno,
-                                  target_ulong val)
+                                  target_ulong val, uintptr_t ra)
 {
     if (!tdata_available(env, csrno - CSR_TDATA1)) {
         return RISCV_EXCP_ILLEGAL_INST;
@@ -5310,7 +5321,7 @@ static RISCVException read_mcontext(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_mcontext(CPURISCVState *env, int csrno,
-                                     target_ulong val)
+                                     target_ulong val, uintptr_t ra)
 {
     bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
     int32_t mask;
@@ -5334,43 +5345,50 @@ static RISCVException read_mnscratch(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
-static int write_mnscratch(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_mnscratch(CPURISCVState *env, int csrno,
+                                      target_ulong val, uintptr_t ra)
 {
     env->mnscratch = val;
     return RISCV_EXCP_NONE;
 }
 
-static int read_mnepc(CPURISCVState *env, int csrno, target_ulong *val)
+static RISCVException read_mnepc(CPURISCVState *env, int csrno,
+                                 target_ulong *val)
 {
     *val = env->mnepc;
     return RISCV_EXCP_NONE;
 }
 
-static int write_mnepc(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_mnepc(CPURISCVState *env, int csrno,
+                                  target_ulong val, uintptr_t ra)
 {
     env->mnepc = val;
     return RISCV_EXCP_NONE;
 }
 
-static int read_mncause(CPURISCVState *env, int csrno, target_ulong *val)
+static RISCVException read_mncause(CPURISCVState *env, int csrno,
+                                   target_ulong *val)
 {
     *val = env->mncause;
     return RISCV_EXCP_NONE;
 }
 
-static int write_mncause(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_mncause(CPURISCVState *env, int csrno,
+                                    target_ulong val, uintptr_t ra)
 {
     env->mncause = val;
     return RISCV_EXCP_NONE;
 }
 
-static int read_mnstatus(CPURISCVState *env, int csrno, target_ulong *val)
+static RISCVException read_mnstatus(CPURISCVState *env, int csrno,
+                                    target_ulong *val)
 {
     *val = env->mnstatus;
     return RISCV_EXCP_NONE;
 }
 
-static int write_mnstatus(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_mnstatus(CPURISCVState *env, int csrno,
+                                     target_ulong val, uintptr_t ra)
 {
     target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP);
 
@@ -5510,7 +5528,8 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
                                        target_ulong *ret_value,
                                        target_ulong new_value,
-                                       target_ulong write_mask)
+                                       target_ulong write_mask,
+                                       uintptr_t ra)
 {
     RISCVException ret;
     target_ulong old_value = 0;
@@ -5540,7 +5559,7 @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
     if (write_mask) {
         new_value = (old_value & ~write_mask) | (new_value & write_mask);
         if (csr_ops[csrno].write) {
-            ret = csr_ops[csrno].write(env, csrno, new_value);
+            ret = csr_ops[csrno].write(env, csrno, new_value, ra);
             if (ret != RISCV_EXCP_NONE) {
                 return ret;
             }
@@ -5563,25 +5582,25 @@ RISCVException riscv_csrr(CPURISCVState *env, int csrno,
         return ret;
     }
 
-    return riscv_csrrw_do64(env, csrno, ret_value, 0, 0);
+    return riscv_csrrw_do64(env, csrno, ret_value, 0, 0, 0);
 }
 
 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
-                           target_ulong *ret_value,
-                           target_ulong new_value, target_ulong write_mask)
+                           target_ulong *ret_value, target_ulong new_value,
+                           target_ulong write_mask, uintptr_t ra)
 {
     RISCVException ret = riscv_csrrw_check(env, csrno, true);
     if (ret != RISCV_EXCP_NONE) {
         return ret;
     }
 
-    return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
+    return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask, ra);
 }
 
 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
                                         Int128 *ret_value,
                                         Int128 new_value,
-                                        Int128 write_mask)
+                                        Int128 write_mask, uintptr_t ra)
 {
     RISCVException ret;
     Int128 old_value;
@@ -5603,7 +5622,7 @@ static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
         }
     } else if (csr_ops[csrno].write) {
         /* avoids having to write wrappers for all registers */
-        ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
+        ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value), ra);
         if (ret != RISCV_EXCP_NONE) {
             return ret;
         }
@@ -5630,7 +5649,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
 
     if (csr_ops[csrno].read128) {
         return riscv_csrrw_do128(env, csrno, ret_value,
-                                 int128_zero(), int128_zero());
+                                 int128_zero(), int128_zero(), 0);
     }
 
     /*
@@ -5641,9 +5660,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
      * accesses
      */
     target_ulong old_value;
-    ret = riscv_csrrw_do64(env, csrno, &old_value,
-                           (target_ulong)0,
-                           (target_ulong)0);
+    ret = riscv_csrrw_do64(env, csrno, &old_value, 0, 0, 0);
     if (ret == RISCV_EXCP_NONE && ret_value) {
         *ret_value = int128_make64(old_value);
     }
@@ -5651,8 +5668,8 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
 }
 
 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
-                                Int128 *ret_value,
-                                Int128 new_value, Int128 write_mask)
+                                Int128 *ret_value, Int128 new_value,
+                                Int128 write_mask, uintptr_t ra)
 {
     RISCVException ret;
 
@@ -5662,7 +5679,8 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
     }
 
     if (csr_ops[csrno].read128) {
-        return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
+        return riscv_csrrw_do128(env, csrno, ret_value,
+                                 new_value, write_mask, ra);
     }
 
     /*
@@ -5675,7 +5693,7 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
     target_ulong old_value;
     ret = riscv_csrrw_do64(env, csrno, &old_value,
                            int128_getlo(new_value),
-                           int128_getlo(write_mask));
+                           int128_getlo(write_mask), ra);
     if (ret == RISCV_EXCP_NONE && ret_value) {
         *ret_value = int128_make64(old_value);
     }
@@ -5698,7 +5716,7 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
     if (!write_mask) {
         ret = riscv_csrr(env, csrno, ret_value);
     } else {
-        ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
+        ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask, 0);
     }
 #if !defined(CONFIG_USER_ONLY)
     env->debugger = false;
@@ -5714,7 +5732,7 @@ static RISCVException read_jvt(CPURISCVState *env, int csrno,
 }
 
 static RISCVException write_jvt(CPURISCVState *env, int csrno,
-                                target_ulong val)
+                                target_ulong val, uintptr_t ra)
 {
     env->jvt = val;
     return RISCV_EXCP_NONE;
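get_next_pc() relies on the unwind metadata TCG records for each guest
instruction: data[0] is the instruction's PC and data[1] its first opcode
word. The length then follows from the standard RISC-V encoding rule that
only 32-bit instructions have both low bits set. The insn_len() helper lives
in internals.h (which csr.c now includes); at the time of this commit it is
essentially:

    static inline int insn_len(uint16_t first_word)
    {
        return (first_word & 3) == 3 ? 4 : 2;
    }

This replaces the old GETPC()-based test, which inspected the host return
address rather than the guest PC -- a problem the removed TODO comment had
already flagged.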
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 6d1a13c826..cd23b1f3a9 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -703,14 +703,14 @@ vfredmax_vs     000111 . ..... ..... 001 ..... 1010111 @r_vm
 # Vector widening ordered and unordered float reduction sum
 vfwredusum_vs   110001 . ..... ..... 001 ..... 1010111 @r_vm
 vfwredosum_vs   110011 . ..... ..... 001 ..... 1010111 @r_vm
-vmand_mm        011001 - ..... ..... 010 ..... 1010111 @r
-vmnand_mm       011101 - ..... ..... 010 ..... 1010111 @r
-vmandn_mm       011000 - ..... ..... 010 ..... 1010111 @r
-vmxor_mm        011011 - ..... ..... 010 ..... 1010111 @r
-vmor_mm         011010 - ..... ..... 010 ..... 1010111 @r
-vmnor_mm        011110 - ..... ..... 010 ..... 1010111 @r
-vmorn_mm        011100 - ..... ..... 010 ..... 1010111 @r
-vmxnor_mm       011111 - ..... ..... 010 ..... 1010111 @r
+vmand_mm        011001 1 ..... ..... 010 ..... 1010111 @r
+vmnand_mm       011101 1 ..... ..... 010 ..... 1010111 @r
+vmandn_mm       011000 1 ..... ..... 010 ..... 1010111 @r
+vmxor_mm        011011 1 ..... ..... 010 ..... 1010111 @r
+vmor_mm         011010 1 ..... ..... 010 ..... 1010111 @r
+vmnor_mm        011110 1 ..... ..... 010 ..... 1010111 @r
+vmorn_mm        011100 1 ..... ..... 010 ..... 1010111 @r
+vmxnor_mm       011111 1 ..... ..... 010 ..... 1010111 @r
 vcpop_m         010000 . ..... 10000 010 ..... 1010111 @r2_vm
 vfirst_m        010000 . ..... 10001 010 ..... 1010111 @r2_vm
 vmsbf_m         010100 . ..... 00001 010 ..... 1010111 @r2_vm
@@ -732,7 +732,7 @@ vrgather_vv     001100 . ..... ..... 000 ..... 1010111 @r_vm
 vrgatherei16_vv 001110 . ..... ..... 000 ..... 1010111 @r_vm
 vrgather_vx     001100 . ..... ..... 100 ..... 1010111 @r_vm
 vrgather_vi     001100 . ..... ..... 011 ..... 1010111 @r_vm
-vcompress_vm    010111 - ..... ..... 010 ..... 1010111 @r
+vcompress_vm    010111 1 ..... ..... 010 ..... 1010111 @r
 vmv1r_v         100111 1 ..... 00000 011 ..... 1010111 @r2rd
 vmv2r_v         100111 1 ..... 00001 011 ..... 1010111 @r2rd
 vmv4r_v         100111 1 ..... 00011 011 ..... 1010111 @r2rd
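The mask-register logical instructions and vcompress.vm always execute
unmasked; the vector spec reserves their vm=0 encodings. Changing bit 25 in
the pattern from a don't-care to 1 makes the decoder reject those reserved
encodings, which then fall through to the illegal-instruction path. A
hand-written equivalent of the tightened vmand.mm match, for illustration
only (the mask keeps funct6, vm, funct3 and the major opcode):

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_vmand_mm(uint32_t insn)
    {
        /* funct6=011001, vm=1, funct3=010 (OPMVV), opcode=1010111 */
        return (insn & 0xfe00707f) == 0x66002057;
    }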
diff --git a/target/riscv/insn_trans/trans_rvbf16.c.inc b/target/riscv/insn_trans/trans_rvbf16.c.inc
index 0a9cd1ec31..066dc364c5 100644
--- a/target/riscv/insn_trans/trans_rvbf16.c.inc
+++ b/target/riscv/insn_trans/trans_rvbf16.c.inc
@@ -119,8 +119,11 @@ static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a)
     REQUIRE_FPU;
     REQUIRE_ZVFBFWMA(ctx);
 
+    uint8_t sew = ctx->sew;
     if (require_rvv(ctx) && vext_check_isa_ill(ctx) && (ctx->sew == MO_16) &&
-        vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm)) {
+        vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm) &&
+        vext_check_input_eew(ctx, a->rd, sew + 1, a->rs1, sew, a->vm) &&
+        vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) {
         uint32_t data = 0;
 
         gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN);
@@ -146,8 +149,10 @@ static bool trans_vfwmaccbf16_vf(DisasContext *ctx, arg_vfwmaccbf16_vf *a)
     REQUIRE_FPU;
     REQUIRE_ZVFBFWMA(ctx);
 
+    uint8_t sew = ctx->sew;
     if (require_rvv(ctx) && (ctx->sew == MO_16) && vext_check_isa_ill(ctx) &&
-        vext_check_ds(ctx, a->rd, a->rs2, a->vm)) {
+        vext_check_ds(ctx, a->rd, a->rs2, a->vm) &&
+        vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) {
         uint32_t data = 0;
 
         gen_set_rm(ctx, RISCV_FRM_DYN);
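vfwmaccbf16 is a widening fused multiply-add: the accumulator vd is read at
twice SEW while the sources are read at SEW, so any register-group overlap
between them would read one register at two different EEWs. A worked example
of the numbers involved, assuming SEW=16 (sew == MO_16 == 1) and LMUL=1
(lmul == 0):

    int sew = 1, lmul = 0;                  /* MO_16, LMUL=1 */
    int emul_vd = (sew + 1) - sew + lmul;   /* 1 -> 2^1 = 2-register group */
    int emul_vs = sew - sew + lmul;         /* 0 -> 2^0 = single register  */

With vd occupying a two-register group and each source a single register,
vext_check_input_eew() rejects any encoding where the groups overlap, which
is precisely the Section 5.2 constraint quoted in trans_rvv.c.inc below.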
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index b9883a5d32..2b6077ac06 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -100,10 +100,33 @@ static bool require_scale_rvfmin(DisasContext *s)
     }
 }
 
-/* Destination vector register group cannot overlap source mask register. */
-static bool require_vm(int vm, int vd)
+/*
+ * Source and destination vector register groups cannot overlap source mask
+ * register:
+ *
+ * A vector register cannot be used to provide source operands with more than
+ * one EEW for a single instruction. A mask register source is considered to
+ * have EEW=1 for this constraint. An encoding that would result in the same
+ * vector register being read with two or more different EEWs, including when
+ * the vector register appears at different positions within two or more vector
+ * register groups, is reserved.
+ * (Section 5.2)
+ *
+ * A destination vector register group can overlap a source vector
+ * register group only if one of the following holds:
+ *   1. The destination EEW equals the source EEW.
+ *   2. The destination EEW is smaller than the source EEW and the overlap
+ *      is in the lowest-numbered part of the source register group.
+ *   3. The destination EEW is greater than the source EEW, the source EMUL
+ *      is at least 1, and the overlap is in the highest-numbered part of
+ *      the destination register group.
+ * For the purpose of determining register group overlap constraints, mask
+ * elements have EEW=1.
+ * (Section 5.2)
+ */
+static bool require_vm(int vm, int v)
 {
-    return (vm != 0 || vd != 0);
+    return (vm != 0 || v != 0);
 }
 
 static bool require_nf(int vd, int nf, int lmul)
@@ -356,11 +379,41 @@ static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
     return ret;
 }
 
+/*
+ * Check whether a vector register is used to provide source operands with
+ * more than one EEW for the vector instruction.
+ * Returns true if the instruction has valid encoding
+ * Returns false if encoding violates the mismatched input EEWs constraint
+ */
+static bool vext_check_input_eew(DisasContext *s, int vs1, uint8_t eew_vs1,
+                                 int vs2, uint8_t eew_vs2, int vm)
+{
+    bool is_valid = true;
+    int8_t emul_vs1 = eew_vs1 - s->sew + s->lmul;
+    int8_t emul_vs2 = eew_vs2 - s->sew + s->lmul;
+
+    /* When vm is 0, vs1 & vs2(EEW!=1) group can't overlap v0 (EEW=1) */
+    if ((vs1 != -1 && !require_vm(vm, vs1)) ||
+        (vs2 != -1 && !require_vm(vm, vs2))) {
+        is_valid = false;
+    }
+
+    /* When eew_vs1 != eew_vs2, check whether vs1 and vs2 are overlapped */
+    if ((vs1 != -1 && vs2 != -1) && (eew_vs1 != eew_vs2) &&
+        is_overlapped(vs1, 1 << MAX(emul_vs1, 0),
+                      vs2, 1 << MAX(emul_vs2, 0))) {
+        is_valid = false;
+    }
+
+    return is_valid;
+}
+
 static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
 {
     return require_vm(vm, vd) &&
            require_align(vd, s->lmul) &&
-           require_align(vs, s->lmul);
+           require_align(vs, s->lmul) &&
+           vext_check_input_eew(s, vs, s->sew, -1, s->sew, vm);
 }
 
 /*
@@ -379,6 +432,7 @@ static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
 static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
 {
     return vext_check_ss(s, vd, vs2, vm) &&
+           vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) &&
           require_align(vs1, s->lmul);
 }
 
@@ -474,6 +528,7 @@ static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
 static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
 {
     return vext_wide_check_common(s, vd, vm) &&
+           vext_check_input_eew(s, vs, s->sew, -1, 0, vm) &&
            require_align(vs, s->lmul) &&
           require_noover(vd, s->lmul + 1, vs, s->lmul);
 }
@@ -481,6 +536,7 @@ static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
 static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
 {
     return vext_wide_check_common(s, vd, vm) &&
+           vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm) &&
            require_align(vs, s->lmul + 1);
 }
 
@@ -499,6 +555,7 @@ static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
 static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
 {
     return vext_check_ds(s, vd, vs2, vm) &&
+           vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) &&
            require_align(vs1, s->lmul) &&
            require_noover(vd, s->lmul + 1, vs1, s->lmul);
 }
@@ -521,12 +578,14 @@ static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
 static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
 {
     return vext_check_ds(s, vd, vs1, vm) &&
+           vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) &&
            require_align(vs2, s->lmul + 1);
 }
 
 static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
 {
-    bool ret = vext_narrow_check_common(s, vd, vs, vm);
+    bool ret = vext_narrow_check_common(s, vd, vs, vm) &&
+               vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm);
     if (vd != vs) {
         ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
     }
@@ -549,6 +608,7 @@ static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
 static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
 {
     return vext_check_sd(s, vd, vs2, vm) &&
+           vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) &&
            require_align(vs1, s->lmul);
 }
 
@@ -584,7 +644,9 @@ static bool vext_check_slide(DisasContext *s, int vd, int vs2,
 {
     bool ret = require_align(vs2, s->lmul) &&
                require_align(vd, s->lmul) &&
-               require_vm(vm, vd);
+               require_vm(vm, vd) &&
+               vext_check_input_eew(s, -1, 0, vs2, s->sew, vm);
+
     if (is_over) {
         ret &= (vd != vs2);
     }
@@ -802,32 +864,286 @@ GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
 GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)
 
 /*
- *** stride load and store
+ * MAXSZ returns the maximum vector size can be operated in bytes,
+ * which is used in GVEC IR when vl_eq_vlmax flag is set to true
+ * to accelerate vector operation.
+ */
+static inline uint32_t MAXSZ(DisasContext *s)
+{
+    int max_sz = s->cfg_ptr->vlenb << 3;
+    return max_sz >> (3 - s->lmul);
+}
+
+static inline uint32_t get_log2(uint32_t a)
+{
+    uint32_t i = 0;
+    for (; a > 0;) {
+        a >>= 1;
+        i++;
+    }
+    return i - 1;
+}
+
+typedef void gen_tl_ldst(TCGv, TCGv_ptr, tcg_target_long);
+
+/*
+ * Simulate the strided load/store main loop:
+ *
+ *  for (i = env->vstart; i < env->vl; env->vstart = ++i) {
+ *      k = 0;
+ *      while (k < nf) {
+ *          if (!vm && !vext_elem_mask(v0, i)) {
+ *              vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ *                                (i + k * max_elems + 1) * esz);
+ *              k++;
+ *              continue;
+ *          }
+ *          target_ulong addr = base + stride * i + (k << log2_esz);
+ *          ldst(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
+ *          k++;
+ *      }
+ *  }
  */
-typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
-                                    TCGv, TCGv_env, TCGv_i32);
+static void gen_ldst_stride_main_loop(DisasContext *s, TCGv dest, uint32_t rs1,
+                                      uint32_t rs2, uint32_t vm, uint32_t nf,
+                                      gen_tl_ldst *ld_fn, gen_tl_ldst *st_fn,
+                                      bool is_load)
+{
+    TCGv addr = tcg_temp_new();
+    TCGv base = get_gpr(s, rs1, EXT_NONE);
+    TCGv stride = get_gpr(s, rs2, EXT_NONE);
+
+    TCGv i = tcg_temp_new();
+    TCGv i_esz = tcg_temp_new();
+    TCGv k = tcg_temp_new();
+    TCGv k_esz = tcg_temp_new();
+    TCGv k_max = tcg_temp_new();
+    TCGv mask = tcg_temp_new();
+    TCGv mask_offs = tcg_temp_new();
+    TCGv mask_offs_64 = tcg_temp_new();
+    TCGv mask_elem = tcg_temp_new();
+    TCGv mask_offs_rem = tcg_temp_new();
+    TCGv vreg = tcg_temp_new();
+    TCGv dest_offs = tcg_temp_new();
+    TCGv stride_offs = tcg_temp_new();
+
+    uint32_t max_elems = MAXSZ(s) >> s->sew;
+
+    TCGLabel *start = gen_new_label();
+    TCGLabel *end = gen_new_label();
+    TCGLabel *start_k = gen_new_label();
+    TCGLabel *inc_k = gen_new_label();
+    TCGLabel *end_k = gen_new_label();
+
+    MemOp atomicity = MO_ATOM_NONE;
+    if (s->sew == 0) {
+        atomicity = MO_ATOM_NONE;
+    } else {
+        atomicity = MO_ATOM_IFALIGN_PAIR;
+    }
+
+    mark_vs_dirty(s);
+
+    tcg_gen_addi_tl(mask, (TCGv)tcg_env, vreg_ofs(s, 0));
+
+    /* Start of outer loop. */
+    tcg_gen_mov_tl(i, cpu_vstart);
+    gen_set_label(start);
+    tcg_gen_brcond_tl(TCG_COND_GE, i, cpu_vl, end);
+    tcg_gen_shli_tl(i_esz, i, s->sew);
+    /* Start of inner loop. */
+    tcg_gen_movi_tl(k, 0);
+    gen_set_label(start_k);
+    tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end_k);
+    /*
+     * If we are in mask agnostic regime and the operation is not unmasked we
+     * set the inactive elements to 1.
+     */
+    if (!vm && s->vma) {
+        TCGLabel *active_element = gen_new_label();
+        /* (i + k * max_elems) * esz */
+        tcg_gen_shli_tl(mask_offs, k, get_log2(max_elems << s->sew));
+        tcg_gen_add_tl(mask_offs, mask_offs, i_esz);
+
+        /*
+         * Check whether the i bit of the mask is 0 or 1.
+         *
+         * static inline int vext_elem_mask(void *v0, int index)
+         * {
+         *     int idx = index / 64;
+         *     int pos = index % 64;
+         *     return (((uint64_t *)v0)[idx] >> pos) & 1;
+         * }
+         */
+        tcg_gen_shri_tl(mask_offs_64, mask_offs, 3);
+        tcg_gen_add_tl(mask_offs_64, mask_offs_64, mask);
+        tcg_gen_ld_i64((TCGv_i64)mask_elem, (TCGv_ptr)mask_offs_64, 0);
+        tcg_gen_rem_tl(mask_offs_rem, mask_offs, tcg_constant_tl(8));
+        tcg_gen_shr_tl(mask_elem, mask_elem, mask_offs_rem);
+        tcg_gen_andi_tl(mask_elem, mask_elem, 1);
+        tcg_gen_brcond_tl(TCG_COND_NE, mask_elem, tcg_constant_tl(0),
+                          active_element);
+        /*
+         * Set masked-off elements in the destination vector register to 1s.
+         * Store instructions simply skip this bit as memory ops access memory
+         * only for active elements.
+         */
+        if (is_load) {
+            tcg_gen_shli_tl(mask_offs, mask_offs, s->sew);
+            tcg_gen_add_tl(mask_offs, mask_offs, dest);
+            st_fn(tcg_constant_tl(-1), (TCGv_ptr)mask_offs, 0);
+        }
+        tcg_gen_br(inc_k);
+        gen_set_label(active_element);
+    }
+    /*
+     * The element is active, calculate the address with stride:
+     * target_ulong addr = base + stride * i + (k << log2_esz);
+     */
+    tcg_gen_mul_tl(stride_offs, stride, i);
+    tcg_gen_shli_tl(k_esz, k, s->sew);
+    tcg_gen_add_tl(stride_offs, stride_offs, k_esz);
+    tcg_gen_add_tl(addr, base, stride_offs);
+    /* Calculate the offset in the dst/src vector register. */
+    tcg_gen_shli_tl(k_max, k, get_log2(max_elems));
+    tcg_gen_add_tl(dest_offs, i, k_max);
+    tcg_gen_shli_tl(dest_offs, dest_offs, s->sew);
+    tcg_gen_add_tl(dest_offs, dest_offs, dest);
+    if (is_load) {
+        tcg_gen_qemu_ld_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
+        st_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
+    } else {
+        ld_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
+        tcg_gen_qemu_st_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
+    }
+    /*
+     * We don't execute the load/store above if the element was inactive.
+     * We jump instead directly to incrementing k and continuing the loop.
+     */
+    if (!vm && s->vma) {
+        gen_set_label(inc_k);
+    }
+    tcg_gen_addi_tl(k, k, 1);
+    tcg_gen_br(start_k);
+    /* End of the inner loop. */
+    gen_set_label(end_k);
+
+    tcg_gen_addi_tl(i, i, 1);
+    tcg_gen_mov_tl(cpu_vstart, i);
+    tcg_gen_br(start);
+
+    /* End of the outer loop. */
+    gen_set_label(end);
+
+    return;
+}
+
+/*
+ * Set the tail bytes of the strided loads/stores to 1:
+ *
+ *  for (k = 0; k < nf; ++k) {
+ *      cnt = (k * max_elems + vl) * esz;
+ *      tot = (k * max_elems + max_elems) * esz;
+ *      for (i = cnt; i < tot; i += esz) {
+ *          store_1s(-1, vd[vl+i]);
+ *      }
+ *  }
+ */
+static void gen_ldst_stride_tail_loop(DisasContext *s, TCGv dest, uint32_t nf,
+                                      gen_tl_ldst *st_fn)
+{
+    TCGv i = tcg_temp_new();
+    TCGv k = tcg_temp_new();
+    TCGv tail_cnt = tcg_temp_new();
+    TCGv tail_tot = tcg_temp_new();
+    TCGv tail_addr = tcg_temp_new();
+
+    TCGLabel *start = gen_new_label();
+    TCGLabel *end = gen_new_label();
+    TCGLabel *start_i = gen_new_label();
+    TCGLabel *end_i = gen_new_label();
+
+    uint32_t max_elems_b = MAXSZ(s);
+    uint32_t esz = 1 << s->sew;
+
+    /* Start of the outer loop. */
+    tcg_gen_movi_tl(k, 0);
+    tcg_gen_shli_tl(tail_cnt, cpu_vl, s->sew);
+    tcg_gen_movi_tl(tail_tot, max_elems_b);
+    tcg_gen_add_tl(tail_addr, dest, tail_cnt);
+    gen_set_label(start);
+    tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end);
+    /* Start of the inner loop. */
+    tcg_gen_mov_tl(i, tail_cnt);
+    gen_set_label(start_i);
+    tcg_gen_brcond_tl(TCG_COND_GE, i, tail_tot, end_i);
+    /* store_1s(-1, vd[vl+i]); */
+    st_fn(tcg_constant_tl(-1), (TCGv_ptr)tail_addr, 0);
+    tcg_gen_addi_tl(tail_addr, tail_addr, esz);
+    tcg_gen_addi_tl(i, i, esz);
+    tcg_gen_br(start_i);
+    /* End of the inner loop. */
+    gen_set_label(end_i);
+    /* Update the counts */
+    tcg_gen_addi_tl(tail_cnt, tail_cnt, max_elems_b);
+    tcg_gen_addi_tl(tail_tot, tail_cnt, max_elems_b);
+    tcg_gen_addi_tl(k, k, 1);
+    tcg_gen_br(start);
+    /* End of the outer loop. */
+    gen_set_label(end);
+
+    return;
+}
 
 static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
-                              uint32_t data, gen_helper_ldst_stride *fn,
-                              DisasContext *s)
+                              uint32_t data, DisasContext *s, bool is_load)
 {
-    TCGv_ptr dest, mask;
-    TCGv base, stride;
-    TCGv_i32 desc;
+    if (!s->vstart_eq_zero) {
+        return false;
+    }
 
-    dest = tcg_temp_new_ptr();
-    mask = tcg_temp_new_ptr();
-    base = get_gpr(s, rs1, EXT_NONE);
-    stride = get_gpr(s, rs2, EXT_NONE);
-    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
-                                      s->cfg_ptr->vlenb, data));
+    TCGv dest = tcg_temp_new();
 
-    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
-    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
+    uint32_t nf = FIELD_EX32(data, VDATA, NF);
+    uint32_t vm = FIELD_EX32(data, VDATA, VM);
+
+    /* Destination register and mask register */
+    tcg_gen_addi_tl(dest, (TCGv)tcg_env, vreg_ofs(s, vd));
+
+    /*
+     * Select the appropriate load/store to retrieve data from the vector
+     * register given a specific sew.
+     */
+    static gen_tl_ldst * const ld_fns[4] = {
+        tcg_gen_ld8u_tl, tcg_gen_ld16u_tl,
+        tcg_gen_ld32u_tl, tcg_gen_ld_tl
+    };
+
+    static gen_tl_ldst * const st_fns[4] = {
+        tcg_gen_st8_tl, tcg_gen_st16_tl,
+        tcg_gen_st32_tl, tcg_gen_st_tl
+    };
+
+    gen_tl_ldst *ld_fn = ld_fns[s->sew];
+    gen_tl_ldst *st_fn = st_fns[s->sew];
+
+    if (ld_fn == NULL || st_fn == NULL) {
+        return false;
+    }
 
     mark_vs_dirty(s);
 
-    fn(dest, mask, base, stride, tcg_env, desc);
+    gen_ldst_stride_main_loop(s, dest, rs1, rs2, vm, nf, ld_fn, st_fn, is_load);
+
+    tcg_gen_movi_tl(cpu_vstart, 0);
+
+    /*
+     * Set the tail bytes to 1 if tail agnostic:
+     */
+    if (s->vta != 0 && is_load) {
+        gen_ldst_stride_tail_loop(s, dest, nf, st_fn);
+    }
 
     finalize_rvv_inst(s);
     return true;
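The generated TCG loop above replaces the former out-of-line helper call for
strided accesses. Ignoring masking and segments (nf = 1), the work it emits
per element is equivalent to this self-contained scalar model, where mem
stands in for guest memory and vd for the destination vector register:

    #include <stdint.h>
    #include <string.h>

    static void strided_load_model(uint8_t *vd, const uint8_t *mem,
                                   uintptr_t base, intptr_t stride,
                                   unsigned vstart, unsigned vl,
                                   unsigned sew /* log2(element size) */)
    {
        for (unsigned i = vstart; i < vl; i++) {
            /* One EEW-sized copy per active element. */
            memcpy(vd + ((size_t)i << sew), mem + base + stride * i,
                   (size_t)1 << sew);
            /* The TCG version updates cpu_vstart here, so a faulting
             * access restarts at the first unprocessed element. */
        }
    }

Keeping vstart in lock-step with the loop is what makes the inline version
safe to interrupt by a page fault part-way through.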
@@ -836,16 +1152,6 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
 static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
 {
     uint32_t data = 0;
-    gen_helper_ldst_stride *fn;
-    static gen_helper_ldst_stride * const fns[4] = {
-        gen_helper_vlse8_v, gen_helper_vlse16_v,
-        gen_helper_vlse32_v, gen_helper_vlse64_v
-    };
-
-    fn = fns[eew];
-    if (fn == NULL) {
-        return false;
-    }
 
     uint8_t emul = vext_get_emul(s, eew);
     data = FIELD_DP32(data, VDATA, VM, a->vm);
@@ -853,7 +1159,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
     data = FIELD_DP32(data, VDATA, NF, a->nf);
     data = FIELD_DP32(data, VDATA, VTA, s->vta);
     data = FIELD_DP32(data, VDATA, VMA, s->vma);
-    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, true);
 }
 
 static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -871,23 +1177,13 @@ GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
 static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
 {
     uint32_t data = 0;
-    gen_helper_ldst_stride *fn;
-    static gen_helper_ldst_stride * const fns[4] = {
-        /* masked stride store */
-        gen_helper_vsse8_v, gen_helper_vsse16_v,
-        gen_helper_vsse32_v, gen_helper_vsse64_v
-    };
 
     uint8_t emul = vext_get_emul(s, eew);
     data = FIELD_DP32(data, VDATA, VM, a->vm);
     data = FIELD_DP32(data, VDATA, LMUL, emul);
     data = FIELD_DP32(data, VDATA, NF, a->nf);
-    fn = fns[eew];
-    if (fn == NULL) {
-        return false;
-    }
 
-    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, false);
 }
 
 static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -981,7 +1277,8 @@ static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
 {
     return require_rvv(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
+           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew) &&
+           vext_check_input_eew(s, -1, 0, a->rs2, eew, a->vm);
 }
 
 GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check)
@@ -1033,7 +1330,8 @@ static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
 {
     return require_rvv(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
+           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew) &&
+           vext_check_input_eew(s, a->rd, s->sew, a->rs2, eew, a->vm);
 }
 
 GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check)
@@ -1100,25 +1398,86 @@ GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
 typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
 
 static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
-                             gen_helper_ldst_whole *fn,
-                             DisasContext *s)
+                             uint32_t log2_esz, gen_helper_ldst_whole *fn,
+                             DisasContext *s, bool is_load)
 {
-    TCGv_ptr dest;
-    TCGv base;
-    TCGv_i32 desc;
-
-    uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
-    data = FIELD_DP32(data, VDATA, VM, 1);
-    dest = tcg_temp_new_ptr();
-    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
-                                      s->cfg_ptr->vlenb, data));
-
-    base = get_gpr(s, rs1, EXT_NONE);
-    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
-
-    mark_vs_dirty(s);
-
-    fn(dest, base, tcg_env, desc);
+    /*
+     * Load/store multiple bytes per iteration.
+     * When possible do this atomically.
+     * Update vstart with the number of processed elements.
+     * Use the helper function if either:
+     * - vstart is not 0.
+     * - the target has 32 bit registers and we are loading/storing 64 bit long
+     *   elements. This is to ensure that we process every element with a
+     *   single memory instruction.
+     */
+
+    bool use_helper_fn = !(s->vstart_eq_zero) ||
+                         (TCG_TARGET_REG_BITS == 32 && log2_esz == 3);
+
+    if (!use_helper_fn) {
+        TCGv addr = tcg_temp_new();
+        uint32_t size = s->cfg_ptr->vlenb * nf;
+        TCGv_i64 t8 = tcg_temp_new_i64();
+        TCGv_i32 t4 = tcg_temp_new_i32();
+        MemOp atomicity = MO_ATOM_NONE;
+        if (log2_esz == 0) {
+            atomicity = MO_ATOM_NONE;
+        } else {
+            atomicity = MO_ATOM_IFALIGN_PAIR;
+        }
+        if (TCG_TARGET_REG_BITS == 64) {
+            for (int i = 0; i < size; i += 8) {
+                addr = get_address(s, rs1, i);
+                if (is_load) {
+                    tcg_gen_qemu_ld_i64(t8, addr, s->mem_idx,
+                                        MO_LE | MO_64 | atomicity);
+                    tcg_gen_st_i64(t8, tcg_env, vreg_ofs(s, vd) + i);
+                } else {
+                    tcg_gen_ld_i64(t8, tcg_env, vreg_ofs(s, vd) + i);
+                    tcg_gen_qemu_st_i64(t8, addr, s->mem_idx,
+                                        MO_LE | MO_64 | atomicity);
+                }
+                if (i == size - 8) {
+                    tcg_gen_movi_tl(cpu_vstart, 0);
+                } else {
+                    tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 8 >> log2_esz);
+                }
+            }
+        } else {
+            for (int i = 0; i < size; i += 4) {
+                addr = get_address(s, rs1, i);
+                if (is_load) {
+                    tcg_gen_qemu_ld_i32(t4, addr, s->mem_idx,
+                                        MO_LE | MO_32 | atomicity);
+                    tcg_gen_st_i32(t4, tcg_env, vreg_ofs(s, vd) + i);
+                } else {
+                    tcg_gen_ld_i32(t4, tcg_env, vreg_ofs(s, vd) + i);
+                    tcg_gen_qemu_st_i32(t4, addr, s->mem_idx,
+                                        MO_LE | MO_32 | atomicity);
+                }
+                if (i == size - 4) {
+                    tcg_gen_movi_tl(cpu_vstart, 0);
+                } else {
+                    tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 4 >> log2_esz);
+                }
+            }
+        }
+    } else {
+        TCGv_ptr dest;
+        TCGv base;
+        TCGv_i32 desc;
+        uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
+        data = FIELD_DP32(data, VDATA, VM, 1);
+        dest = tcg_temp_new_ptr();
+        desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
+                                          s->cfg_ptr->vlenb, data));
+        base = get_gpr(s, rs1, EXT_NONE);
+        tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
+        fn(dest, base, tcg_env, desc);
+    }
 
     finalize_rvv_inst(s);
     return true;
(Section 7.9) */ -#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF) \ -static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ -{ \ - if (require_rvv(s) && \ - QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \ - return ldst_whole_trans(a->rd, a->rs1, ARG_NF, \ - gen_helper_##NAME, s); \ - } \ - return false; \ -} - -GEN_LDST_WHOLE_TRANS(vl1re8_v, 1) -GEN_LDST_WHOLE_TRANS(vl1re16_v, 1) -GEN_LDST_WHOLE_TRANS(vl1re32_v, 1) -GEN_LDST_WHOLE_TRANS(vl1re64_v, 1) -GEN_LDST_WHOLE_TRANS(vl2re8_v, 2) -GEN_LDST_WHOLE_TRANS(vl2re16_v, 2) -GEN_LDST_WHOLE_TRANS(vl2re32_v, 2) -GEN_LDST_WHOLE_TRANS(vl2re64_v, 2) -GEN_LDST_WHOLE_TRANS(vl4re8_v, 4) -GEN_LDST_WHOLE_TRANS(vl4re16_v, 4) -GEN_LDST_WHOLE_TRANS(vl4re32_v, 4) -GEN_LDST_WHOLE_TRANS(vl4re64_v, 4) -GEN_LDST_WHOLE_TRANS(vl8re8_v, 8) -GEN_LDST_WHOLE_TRANS(vl8re16_v, 8) -GEN_LDST_WHOLE_TRANS(vl8re32_v, 8) -GEN_LDST_WHOLE_TRANS(vl8re64_v, 8) +#define GEN_LDST_WHOLE_TRANS(NAME, ETYPE, ARG_NF, IS_LOAD) \ +static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ +{ \ + if (require_rvv(s) && \ + QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \ + return ldst_whole_trans(a->rd, a->rs1, ARG_NF, ctzl(sizeof(ETYPE)), \ + gen_helper_##NAME, s, IS_LOAD); \ + } \ + return false; \ +} + +GEN_LDST_WHOLE_TRANS(vl1re8_v, int8_t, 1, true) +GEN_LDST_WHOLE_TRANS(vl1re16_v, int16_t, 1, true) +GEN_LDST_WHOLE_TRANS(vl1re32_v, int32_t, 1, true) +GEN_LDST_WHOLE_TRANS(vl1re64_v, int64_t, 1, true) +GEN_LDST_WHOLE_TRANS(vl2re8_v, int8_t, 2, true) +GEN_LDST_WHOLE_TRANS(vl2re16_v, int16_t, 2, true) +GEN_LDST_WHOLE_TRANS(vl2re32_v, int32_t, 2, true) +GEN_LDST_WHOLE_TRANS(vl2re64_v, int64_t, 2, true) +GEN_LDST_WHOLE_TRANS(vl4re8_v, int8_t, 4, true) +GEN_LDST_WHOLE_TRANS(vl4re16_v, int16_t, 4, true) +GEN_LDST_WHOLE_TRANS(vl4re32_v, int32_t, 4, true) +GEN_LDST_WHOLE_TRANS(vl4re64_v, int64_t, 4, true) +GEN_LDST_WHOLE_TRANS(vl8re8_v, int8_t, 8, true) +GEN_LDST_WHOLE_TRANS(vl8re16_v, int16_t, 8, true) +GEN_LDST_WHOLE_TRANS(vl8re32_v, int32_t, 8, true) +GEN_LDST_WHOLE_TRANS(vl8re64_v, int64_t, 8, true) /* * The vector whole register store instructions are encoded similar to * unmasked unit-stride store of elements with EEW=8. */ -GEN_LDST_WHOLE_TRANS(vs1r_v, 1) -GEN_LDST_WHOLE_TRANS(vs2r_v, 2) -GEN_LDST_WHOLE_TRANS(vs4r_v, 4) -GEN_LDST_WHOLE_TRANS(vs8r_v, 8) +GEN_LDST_WHOLE_TRANS(vs1r_v, int8_t, 1, false) +GEN_LDST_WHOLE_TRANS(vs2r_v, int8_t, 2, false) +GEN_LDST_WHOLE_TRANS(vs4r_v, int8_t, 4, false) +GEN_LDST_WHOLE_TRANS(vs8r_v, int8_t, 8, false) /* *** Vector Integer Arithmetic Instructions */ -/* - * MAXSZ returns the maximum vector size can be operated in bytes, - * which is used in GVEC IR when vl_eq_vlmax flag is set to true - * to accelerate vector operation. 
- */ -static inline uint32_t MAXSZ(DisasContext *s) -{ - int max_sz = s->cfg_ptr->vlenb * 8; - return max_sz >> (3 - s->lmul); -} - static bool opivv_check(DisasContext *s, arg_rmrr *a) { return require_rvv(s) && @@ -1475,6 +1823,16 @@ static bool opivv_widen_check(DisasContext *s, arg_rmrr *a) vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm); } +/* OPIVV with overwrite and WIDEN */ +static bool opivv_overwrite_widen_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm); +} + static bool do_opivv_widen(DisasContext *s, arg_rmrr *a, gen_helper_gvec_4_ptr *fn, bool (*checkfn)(DisasContext *, arg_rmrr *)) @@ -1522,6 +1880,14 @@ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a) vext_check_ds(s, a->rd, a->rs2, a->vm); } +static bool opivx_overwrite_widen_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_ds(s, a->rd, a->rs2, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm); +} + #define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ { \ @@ -1993,13 +2359,13 @@ GEN_OPIVX_TRANS(vmadd_vx, opivx_check) GEN_OPIVX_TRANS(vnmsub_vx, opivx_check) /* Vector Widening Integer Multiply-Add Instructions */ -GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check) -GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check) -GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_overwrite_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_overwrite_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_overwrite_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_overwrite_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_overwrite_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_overwrite_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_overwrite_widen_check) /* Vector Integer Merge and Move Instructions */ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a) @@ -2340,6 +2706,17 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a) vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm); } +static bool opfvv_overwrite_widen_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + require_rvf(s) && + require_scale_rvf(s) && + vext_check_isa_ill(s) && + vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm); +} + /* OPFVV with WIDEN */ #define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ @@ -2379,11 +2756,21 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a) vext_check_ds(s, a->rd, a->rs2, a->vm); } +static bool opfvf_overwrite_widen_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + require_rvf(s) && + require_scale_rvf(s) && + vext_check_isa_ill(s) && + vext_check_ds(s, a->rd, a->rs2, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm); +} + /* OPFVF with WIDEN */ -#define GEN_OPFVF_WIDEN_TRANS(NAME) \ +#define 
GEN_OPFVF_WIDEN_TRANS(NAME, CHECK) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ { \ - if (opfvf_widen_check(s, a)) { \ + if (CHECK(s, a)) { \ uint32_t data = 0; \ static gen_helper_opfvf *const fns[2] = { \ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ @@ -2399,8 +2786,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ return false; \ } -GEN_OPFVF_WIDEN_TRANS(vfwadd_vf) -GEN_OPFVF_WIDEN_TRANS(vfwsub_vf) +GEN_OPFVF_WIDEN_TRANS(vfwadd_vf, opfvf_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwsub_vf, opfvf_widen_check) static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a) { @@ -2482,7 +2869,7 @@ GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check) /* Vector Widening Floating-Point Multiply */ GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check) -GEN_OPFVF_WIDEN_TRANS(vfwmul_vf) +GEN_OPFVF_WIDEN_TRANS(vfwmul_vf, opfvf_widen_check) /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */ GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check) @@ -2503,14 +2890,14 @@ GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check) GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check) /* Vector Widening Floating-Point Fused Multiply-Add Instructions */ -GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check) -GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check) -GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check) -GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check) -GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf) -GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf) -GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf) -GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf) +GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_overwrite_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_overwrite_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_overwrite_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_overwrite_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf, opfvf_overwrite_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf, opfvf_overwrite_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf, opfvf_overwrite_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf, opfvf_overwrite_widen_check) /* Vector Floating-Point Square-Root Instruction */ @@ -3426,6 +3813,7 @@ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a) { return require_rvv(s) && vext_check_isa_ill(s) && + vext_check_input_eew(s, a->rs1, s->sew, a->rs2, s->sew, a->vm) && require_align(a->rd, s->lmul) && require_align(a->rs1, s->lmul) && require_align(a->rs2, s->lmul) && @@ -3438,6 +3826,7 @@ static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a) int8_t emul = MO_16 - s->sew + s->lmul; return require_rvv(s) && vext_check_isa_ill(s) && + vext_check_input_eew(s, a->rs1, MO_16, a->rs2, s->sew, a->vm) && (emul >= -3 && emul <= 3) && require_align(a->rd, s->lmul) && require_align(a->rs1, emul) && @@ -3457,6 +3846,7 @@ static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a) { return require_rvv(s) && vext_check_isa_ill(s) && + vext_check_input_eew(s, -1, MO_64, a->rs2, s->sew, a->vm) && require_align(a->rd, s->lmul) && require_align(a->rs2, s->lmul) && (a->rd != a->rs2) && @@ -3600,7 +3990,9 @@ static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div) require_align(a->rd, s->lmul) && require_align(a->rs2, s->lmul - div) && require_vm(a->vm, a->rd) && - require_noover(a->rd, s->lmul, a->rs2, s->lmul - div); + require_noover(a->rd, s->lmul, a->rs2, s->lmul - div) && + vext_check_input_eew(s, -1, 0, a->rs2, s->sew, a->vm); + return ret; } diff --git a/target/riscv/internals.h b/target/riscv/internals.h index 213aff31d8..4570bd50be 100644 --- a/target/riscv/internals.h +++ b/target/riscv/internals.h @@ -201,4 +201,9 @@ 
static inline target_ulong adjust_addr_virt(CPURISCVState *env, return adjust_addr_body(env, addr, true); } +static inline int insn_len(uint16_t first_word) +{ + return (first_word & 3) == 3 ? 4 : 2; +} + #endif diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c index 75724b6af4..82f9728636 100644 --- a/target/riscv/kvm/kvm-cpu.c +++ b/target/riscv/kvm/kvm-cpu.c @@ -58,33 +58,17 @@ void riscv_kvm_aplic_request(void *opaque, int irq, int level) static bool cap_has_mp_state; -static uint64_t kvm_riscv_reg_id_ulong(CPURISCVState *env, uint64_t type, - uint64_t idx) -{ - uint64_t id = KVM_REG_RISCV | type | idx; +#define KVM_RISCV_REG_ID_U32(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U32 | \ + type | idx) - switch (riscv_cpu_mxl(env)) { - case MXL_RV32: - id |= KVM_REG_SIZE_U32; - break; - case MXL_RV64: - id |= KVM_REG_SIZE_U64; - break; - default: - g_assert_not_reached(); - } - return id; -} - -static uint64_t kvm_riscv_reg_id_u32(uint64_t type, uint64_t idx) -{ - return KVM_REG_RISCV | KVM_REG_SIZE_U32 | type | idx; -} +#define KVM_RISCV_REG_ID_U64(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | \ + type | idx) -static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx) -{ - return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx; -} +#if defined(TARGET_RISCV64) +#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U64(type, idx) +#else +#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U32(type, idx) +#endif static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b) { @@ -107,45 +91,29 @@ static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu, return kvm_encode_reg_size_id(id, size_b); } -#define RISCV_CORE_REG(env, name) \ - kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \ +#define RISCV_CORE_REG(name) \ + KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, \ KVM_REG_RISCV_CORE_REG(name)) -#define RISCV_CSR_REG(env, name) \ - kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CSR, \ +#define RISCV_CSR_REG(name) \ + KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CSR, \ KVM_REG_RISCV_CSR_REG(name)) -#define RISCV_CONFIG_REG(env, name) \ - kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \ +#define RISCV_CONFIG_REG(name) \ + KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, \ KVM_REG_RISCV_CONFIG_REG(name)) -#define RISCV_TIMER_REG(name) kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER, \ +#define RISCV_TIMER_REG(name) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_TIMER, \ KVM_REG_RISCV_TIMER_REG(name)) -#define RISCV_FP_F_REG(idx) kvm_riscv_reg_id_u32(KVM_REG_RISCV_FP_F, idx) +#define RISCV_FP_F_REG(idx) KVM_RISCV_REG_ID_U32(KVM_REG_RISCV_FP_F, idx) -#define RISCV_FP_D_REG(idx) kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx) +#define RISCV_FP_D_REG(idx) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_FP_D, idx) -#define RISCV_VECTOR_CSR_REG(env, name) \ - kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_VECTOR, \ +#define RISCV_VECTOR_CSR_REG(name) \ + KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_VECTOR, \ KVM_REG_RISCV_VECTOR_CSR_REG(name)) -#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \ - do { \ - int _ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), ®); \ - if (_ret) { \ - return _ret; \ - } \ - } while (0) - -#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \ - do { \ - int _ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), ®); \ - if (_ret) { \ - return _ret; \ - } \ - } while (0) - #define KVM_RISCV_GET_TIMER(cs, name, reg) \ do { \ int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), ®); \ @@ -167,6 +135,7 @@ typedef struct KVMCPUConfig { const char *description; target_ulong offset; uint64_t kvm_reg_id; + uint32_t 
prop_size; bool user_set; bool supported; } KVMCPUConfig; @@ -248,7 +217,7 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs) /* If we're here we're going to disable the MISA bit */ reg = 0; - id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT, misa_cfg->kvm_reg_id); ret = kvm_set_one_reg(cs, id, ®); if (ret != 0) { @@ -267,6 +236,56 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs) } } +#define KVM_CSR_CFG(_name, _env_prop, reg_id) \ + {.name = _name, .offset = ENV_CSR_OFFSET(_env_prop), \ + .prop_size = sizeof(((CPURISCVState *)0)->_env_prop), \ + .kvm_reg_id = reg_id} + +static KVMCPUConfig kvm_csr_cfgs[] = { + KVM_CSR_CFG("sstatus", mstatus, RISCV_CSR_REG(sstatus)), + KVM_CSR_CFG("sie", mie, RISCV_CSR_REG(sie)), + KVM_CSR_CFG("stvec", stvec, RISCV_CSR_REG(stvec)), + KVM_CSR_CFG("sscratch", sscratch, RISCV_CSR_REG(sscratch)), + KVM_CSR_CFG("sepc", sepc, RISCV_CSR_REG(sepc)), + KVM_CSR_CFG("scause", scause, RISCV_CSR_REG(scause)), + KVM_CSR_CFG("stval", stval, RISCV_CSR_REG(stval)), + KVM_CSR_CFG("sip", mip, RISCV_CSR_REG(sip)), + KVM_CSR_CFG("satp", satp, RISCV_CSR_REG(satp)), + KVM_CSR_CFG("scounteren", scounteren, RISCV_CSR_REG(scounteren)), + KVM_CSR_CFG("senvcfg", senvcfg, RISCV_CSR_REG(senvcfg)), +}; + +static void *kvmconfig_get_env_addr(RISCVCPU *cpu, KVMCPUConfig *csr_cfg) +{ + return (void *)&cpu->env + csr_cfg->offset; +} + +static uint32_t kvm_cpu_csr_get_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg) +{ + uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg); + return *val32; +} + +static uint64_t kvm_cpu_csr_get_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg) +{ + uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg); + return *val64; +} + +static void kvm_cpu_csr_set_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg, + uint32_t val) +{ + uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg); + *val32 = val; +} + +static void kvm_cpu_csr_set_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg, + uint64_t val) +{ + uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg); + *val64 = val; +} + #define KVM_EXT_CFG(_name, _prop, _reg_id) \ {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ .kvm_reg_id = _reg_id} @@ -434,7 +453,6 @@ static KVMCPUConfig kvm_sbi_dbcn = { static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs) { - CPURISCVState *env = &cpu->env; uint64_t id, reg; int i, ret; @@ -445,7 +463,7 @@ static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs) continue; } - id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT, multi_ext_cfg->kvm_reg_id); reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg); ret = kvm_set_one_reg(cs, id, ®); @@ -570,14 +588,14 @@ static int kvm_riscv_get_regs_core(CPUState *cs) target_ulong reg; CPURISCVState *env = &RISCV_CPU(cs)->env; - ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), ®); + ret = kvm_get_one_reg(cs, RISCV_CORE_REG(regs.pc), ®); if (ret) { return ret; } env->pc = reg; for (i = 1; i < 32; i++) { - uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i); + uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i); ret = kvm_get_one_reg(cs, id, ®); if (ret) { return ret; @@ -596,13 +614,13 @@ static int kvm_riscv_put_regs_core(CPUState *cs) CPURISCVState *env = &RISCV_CPU(cs)->env; reg = env->pc; - ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), ®); + ret = kvm_set_one_reg(cs, RISCV_CORE_REG(regs.pc), ®); if (ret) { return ret; } for (i = 1; i < 32; 
i++) { - uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i); + uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i); reg = env->gpr[i]; ret = kvm_set_one_reg(cs, id, ®); if (ret) { @@ -613,53 +631,81 @@ static int kvm_riscv_put_regs_core(CPUState *cs) return ret; } -static void kvm_riscv_reset_regs_csr(CPURISCVState *env) -{ - env->mstatus = 0; - env->mie = 0; - env->stvec = 0; - env->sscratch = 0; - env->sepc = 0; - env->scause = 0; - env->stval = 0; - env->mip = 0; - env->satp = 0; -} - static int kvm_riscv_get_regs_csr(CPUState *cs) { - CPURISCVState *env = &RISCV_CPU(cs)->env; + RISCVCPU *cpu = RISCV_CPU(cs); + uint64_t reg; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) { + KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i]; + + if (!csr_cfg->supported) { + continue; + } + + ret = kvm_get_one_reg(cs, csr_cfg->kvm_reg_id, ®); + if (ret) { + return ret; + } - KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus); - KVM_RISCV_GET_CSR(cs, env, sie, env->mie); - KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec); - KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch); - KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc); - KVM_RISCV_GET_CSR(cs, env, scause, env->scause); - KVM_RISCV_GET_CSR(cs, env, stval, env->stval); - KVM_RISCV_GET_CSR(cs, env, sip, env->mip); - KVM_RISCV_GET_CSR(cs, env, satp, env->satp); + if (csr_cfg->prop_size == sizeof(uint32_t)) { + kvm_cpu_csr_set_u32(cpu, csr_cfg, (uint32_t)reg); + } else if (csr_cfg->prop_size == sizeof(uint64_t)) { + kvm_cpu_csr_set_u64(cpu, csr_cfg, reg); + } else { + g_assert_not_reached(); + } + } return 0; } static int kvm_riscv_put_regs_csr(CPUState *cs) { - CPURISCVState *env = &RISCV_CPU(cs)->env; + RISCVCPU *cpu = RISCV_CPU(cs); + uint64_t reg; + int i, ret; - KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus); - KVM_RISCV_SET_CSR(cs, env, sie, env->mie); - KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec); - KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch); - KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc); - KVM_RISCV_SET_CSR(cs, env, scause, env->scause); - KVM_RISCV_SET_CSR(cs, env, stval, env->stval); - KVM_RISCV_SET_CSR(cs, env, sip, env->mip); - KVM_RISCV_SET_CSR(cs, env, satp, env->satp); + for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) { + KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i]; + + if (!csr_cfg->supported) { + continue; + } + + if (csr_cfg->prop_size == sizeof(uint32_t)) { + reg = kvm_cpu_csr_get_u32(cpu, csr_cfg); + } else if (csr_cfg->prop_size == sizeof(uint64_t)) { + reg = kvm_cpu_csr_get_u64(cpu, csr_cfg); + } else { + g_assert_not_reached(); + } + + ret = kvm_set_one_reg(cs, csr_cfg->kvm_reg_id, ®); + if (ret) { + return ret; + } + } return 0; } +static void kvm_riscv_reset_regs_csr(CPURISCVState *env) +{ + env->mstatus = 0; + env->mie = 0; + env->stvec = 0; + env->sscratch = 0; + env->sepc = 0; + env->scause = 0; + env->stval = 0; + env->mip = 0; + env->satp = 0; + env->scounteren = 0; + env->senvcfg = 0; +} + static int kvm_riscv_get_regs_fp(CPUState *cs) { int ret = 0; @@ -800,26 +846,26 @@ static int kvm_riscv_get_regs_vector(CPUState *cs) return 0; } - ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), ®); + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), ®); if (ret) { return ret; } env->vstart = reg; - ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), ®); + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), ®); if (ret) { return ret; } env->vl = reg; - ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), ®); + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), ®); if 
(ret) { return ret; } env->vtype = reg; if (kvm_v_vlenb.supported) { - ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), ®); + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), ®); if (ret) { return ret; } @@ -857,26 +903,26 @@ static int kvm_riscv_put_regs_vector(CPUState *cs) } reg = env->vstart; - ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), ®); + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), ®); if (ret) { return ret; } reg = env->vl; - ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), ®); + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), ®); if (ret) { return ret; } reg = env->vtype; - ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), ®); + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), ®); if (ret) { return ret; } if (kvm_v_vlenb.supported) { reg = cpu->cfg.vlenb; - ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), ®); + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), ®); for (int i = 0; i < 32; i++) { /* @@ -955,25 +1001,24 @@ static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch) static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) { - CPURISCVState *env = &cpu->env; struct kvm_one_reg reg; int ret; - reg.id = RISCV_CONFIG_REG(env, mvendorid); + reg.id = RISCV_CONFIG_REG(mvendorid); reg.addr = (uint64_t)&cpu->cfg.mvendorid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { error_report("Unable to retrieve mvendorid from host, error %d", ret); } - reg.id = RISCV_CONFIG_REG(env, marchid); + reg.id = RISCV_CONFIG_REG(marchid); reg.addr = (uint64_t)&cpu->cfg.marchid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { error_report("Unable to retrieve marchid from host, error %d", ret); } - reg.id = RISCV_CONFIG_REG(env, mimpid); + reg.id = RISCV_CONFIG_REG(mimpid); reg.addr = (uint64_t)&cpu->cfg.mimpid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { @@ -988,7 +1033,7 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu, struct kvm_one_reg reg; int ret; - reg.id = RISCV_CONFIG_REG(env, isa); + reg.id = RISCV_CONFIG_REG(isa); reg.addr = (uint64_t)&env->misa_ext_mask; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); @@ -1005,11 +1050,10 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu, static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, KVMCPUConfig *cbomz_cfg) { - CPURISCVState *env = &cpu->env; struct kvm_one_reg reg; int ret; - reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, cbomz_cfg->kvm_reg_id); reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg); ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); @@ -1023,7 +1067,6 @@ static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) { - CPURISCVState *env = &cpu->env; uint64_t val; int i, ret; @@ -1031,7 +1074,7 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i]; struct kvm_one_reg reg; - reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT, multi_ext_cfg->kvm_reg_id); reg.addr = (uint64_t)&val; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); @@ -1061,6 +1104,32 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, } } +static void kvm_riscv_read_csr_cfg_legacy(KVMScratchCPU *kvmcpu) +{ + uint64_t val; + int i, ret; + + for (i = 0; i < 
ARRAY_SIZE(kvm_csr_cfgs); i++) { + KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i]; + struct kvm_one_reg reg; + + reg.id = csr_cfg->kvm_reg_id; + reg.addr = (uint64_t)&val; + ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); + if (ret != 0) { + if (errno == EINVAL) { + csr_cfg->supported = false; + } else { + error_report("Unable to read KVM CSR %s: %s", + csr_cfg->name, strerror(errno)); + exit(EXIT_FAILURE); + } + } else { + csr_cfg->supported = true; + } + } +} + static int uint64_cmp(const void *a, const void *b) { uint64_t val1 = *(const uint64_t *)a; @@ -1078,7 +1147,6 @@ static int uint64_cmp(const void *a, const void *b) } static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu, - KVMScratchCPU *kvmcpu, struct kvm_reg_list *reglist) { struct kvm_reg_list *reg_search; @@ -1118,12 +1186,31 @@ static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, } } -static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) +static void kvm_riscv_read_csr_cfg(struct kvm_reg_list *reglist) +{ + struct kvm_reg_list *reg_search; + uint64_t reg_id; + + for (int i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) { + KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i]; + + reg_id = csr_cfg->kvm_reg_id; + reg_search = bsearch(®_id, reglist->reg, reglist->n, + sizeof(uint64_t), uint64_cmp); + if (!reg_search) { + continue; + } + + csr_cfg->supported = true; + } +} + +static void kvm_riscv_init_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) { + g_autofree struct kvm_reg_list *reglist = NULL; KVMCPUConfig *multi_ext_cfg; struct kvm_one_reg reg; struct kvm_reg_list rl_struct; - struct kvm_reg_list *reglist; uint64_t val, reg_id, *reg_search; int i, ret; @@ -1135,7 +1222,9 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) * (EINVAL). Use read_legacy() in this case. */ if (errno == EINVAL) { - return kvm_riscv_read_multiext_legacy(cpu, kvmcpu); + kvm_riscv_read_multiext_legacy(cpu, kvmcpu); + kvm_riscv_read_csr_cfg_legacy(kvmcpu); + return; } else if (errno != E2BIG) { /* * E2BIG is an expected error message for the API since we @@ -1164,7 +1253,7 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) { multi_ext_cfg = &kvm_multi_ext_cfgs[i]; - reg_id = kvm_riscv_reg_id_ulong(&cpu->env, KVM_REG_RISCV_ISA_EXT, + reg_id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT, multi_ext_cfg->kvm_reg_id); reg_search = bsearch(®_id, reglist->reg, reglist->n, sizeof(uint64_t), uint64_cmp); @@ -1197,7 +1286,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) kvm_riscv_read_vlenb(cpu, kvmcpu, reglist); } - kvm_riscv_check_sbi_dbcn_support(cpu, kvmcpu, reglist); + kvm_riscv_check_sbi_dbcn_support(cpu, reglist); + kvm_riscv_read_csr_cfg(reglist); } static void riscv_init_kvm_registers(Object *cpu_obj) @@ -1211,7 +1301,7 @@ static void riscv_init_kvm_registers(Object *cpu_obj) kvm_riscv_init_machine_ids(cpu, &kvmcpu); kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu); - kvm_riscv_init_multiext_cfg(cpu, &kvmcpu); + kvm_riscv_init_cfg(cpu, &kvmcpu); kvm_riscv_destroy_scratch_vcpu(&kvmcpu); } @@ -1343,12 +1433,11 @@ void kvm_arch_init_irq_routing(KVMState *s) static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs) { - CPURISCVState *env = &cpu->env; target_ulong reg; uint64_t id; int ret; - id = RISCV_CONFIG_REG(env, mvendorid); + id = RISCV_CONFIG_REG(mvendorid); /* * cfg.mvendorid is an uint32 but a target_ulong will * be written. 
Assign it to a target_ulong var to avoid @@ -1360,13 +1449,13 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs) return ret; } - id = RISCV_CONFIG_REG(env, marchid); + id = RISCV_CONFIG_REG(marchid); ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid); if (ret != 0) { return ret; } - id = RISCV_CONFIG_REG(env, mimpid); + id = RISCV_CONFIG_REG(mimpid); ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid); return ret; @@ -1916,7 +2005,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp) if (cpu->cfg.ext_zicbom && riscv_cpu_option_set(kvm_cbom_blocksize.name)) { - reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, kvm_cbom_blocksize.kvm_reg_id); reg.addr = (uint64_t)&val; ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, ®); @@ -1935,7 +2024,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp) if (cpu->cfg.ext_zicboz && riscv_cpu_option_set(kvm_cboz_blocksize.name)) { - reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, kvm_cboz_blocksize.kvm_reg_id); reg.addr = (uint64_t)&val; ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, ®); diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index 05316f2088..557807ba4b 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -71,7 +71,7 @@ target_ulong helper_csrr(CPURISCVState *env, int csr) void helper_csrw(CPURISCVState *env, int csr, target_ulong src) { target_ulong mask = env->xl == MXL_RV32 ? UINT32_MAX : (target_ulong)-1; - RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask); + RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask, GETPC()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -82,7 +82,7 @@ target_ulong helper_csrrw(CPURISCVState *env, int csr, target_ulong src, target_ulong write_mask) { target_ulong val = 0; - RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask); + RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask, GETPC()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -108,7 +108,7 @@ void helper_csrw_i128(CPURISCVState *env, int csr, { RISCVException ret = riscv_csrrw_i128(env, csr, NULL, int128_make128(srcl, srch), - UINT128_MAX); + UINT128_MAX, GETPC()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -116,13 +116,14 @@ void helper_csrw_i128(CPURISCVState *env, int csr, } target_ulong helper_csrrw_i128(CPURISCVState *env, int csr, - target_ulong srcl, target_ulong srch, - target_ulong maskl, target_ulong maskh) + target_ulong srcl, target_ulong srch, + target_ulong maskl, target_ulong maskh) { Int128 rv = int128_zero(); RISCVException ret = riscv_csrrw_i128(env, csr, &rv, int128_make128(srcl, srch), - int128_make128(maskl, maskh)); + int128_make128(maskl, maskh), + GETPC()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c index c13a117e3f..5af295e410 100644 --- a/target/riscv/pmp.c +++ b/target/riscv/pmp.c @@ -33,6 +33,15 @@ static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index, static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index); /* + * Convert the PMP permissions to match the truth table in the Smepmp spec. 
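As a standalone check of the bit packing above (not part of the patch), assuming the usual pmp.h encodings PMP_READ=0x01, PMP_WRITE=0x02, PMP_EXEC=0x04 and PMP_LOCK=0x80, the helper yields the 4-bit L|R|W|X row index into the Smepmp truth table:

#include <assert.h>
#include <stdint.h>

#define PMP_READ  0x01   /* assumed to match target/riscv/pmp.h */
#define PMP_WRITE 0x02
#define PMP_EXEC  0x04
#define PMP_LOCK  0x80

static uint8_t smepmp_op(uint8_t cfg)
{
    return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) |
           (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2);
}

int main(void)
{
    /* Locked RW region: L=1, R=1, W=1, X=0 -> 0b1110 = row 14. */
    assert(smepmp_op(PMP_LOCK | PMP_READ | PMP_WRITE) == 14);
    /* Locked X-only region: L=1, X=1 -> 0b1001 = row 9, which the
       switch below rejects once mseccfg.MML is set. */
    assert(smepmp_op(PMP_LOCK | PMP_EXEC) == 9);
    return 0;
}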
+ */ +static inline uint8_t pmp_get_smepmp_operation(uint8_t cfg) +{ + return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) | + (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2); +} + +/* * Accessor method to extract address matching type 'a field' from cfg reg */ static inline uint8_t pmp_get_a_field(uint8_t cfg) @@ -46,21 +55,58 @@ static inline uint8_t pmp_get_a_field(uint8_t cfg) */ static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index) { - /* mseccfg.RLB is set */ - if (MSECCFG_RLB_ISSET(env)) { - return 0; - } - if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) { return 1; } - /* Top PMP has no 'next' to check */ - if ((pmp_index + 1u) >= MAX_RISCV_PMPS) { + return 0; +} + +/* + * Check whether a PMP is locked for writing or not. + * (i.e. has LOCK flag and mseccfg.RLB is unset) + */ +static int pmp_is_readonly(CPURISCVState *env, uint32_t pmp_index) +{ + return pmp_is_locked(env, pmp_index) && !MSECCFG_RLB_ISSET(env); +} + +/* + * Check whether `val` is an invalid Smepmp config value + */ +static int pmp_is_invalid_smepmp_cfg(CPURISCVState *env, uint8_t val) +{ + /* No check if mseccfg.MML is not set or if mseccfg.RLB is set */ + if (!MSECCFG_MML_ISSET(env) || MSECCFG_RLB_ISSET(env)) { return 0; } - return 0; + /* + * Adding a rule with executable privileges that either is M-mode-only + * or a locked Shared-Region is not possible + */ + switch (pmp_get_smepmp_operation(val)) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 12: + case 14: + case 15: + return 0; + case 9: + case 10: + case 11: + case 13: + return 1; + default: + g_assert_not_reached(); + } } /* @@ -91,45 +137,18 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index) static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val) { if (pmp_index < MAX_RISCV_PMPS) { - bool locked = true; - - if (riscv_cpu_cfg(env)->ext_smepmp) { - /* mseccfg.RLB is set */ - if (MSECCFG_RLB_ISSET(env)) { - locked = false; - } - - /* mseccfg.MML is not set */ - if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) { - locked = false; - } - - /* mseccfg.MML is set */ - if (MSECCFG_MML_ISSET(env)) { - /* not adding execute bit */ - if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) { - locked = false; - } - /* shared region and not adding X bit */ - if ((val & PMP_LOCK) != PMP_LOCK && - (val & 0x7) != (PMP_WRITE | PMP_EXEC)) { - locked = false; - } - } - } else { - if (!pmp_is_locked(env, pmp_index)) { - locked = false; - } + if (env->pmp_state.pmp[pmp_index].cfg_reg == val) { + /* no change */ + return false; } - if (locked) { - qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n"); - } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) { - /* If !mseccfg.MML then ignore writes with encoding RW=01 */ - if ((val & PMP_WRITE) && !(val & PMP_READ) && - !MSECCFG_MML_ISSET(env)) { - return false; - } + if (pmp_is_readonly(env, pmp_index)) { + qemu_log_mask(LOG_GUEST_ERROR, + "ignoring pmpcfg write - read only\n"); + } else if (pmp_is_invalid_smepmp_cfg(env, val)) { + qemu_log_mask(LOG_GUEST_ERROR, + "ignoring pmpcfg write - invalid\n"); + } else { env->pmp_state.pmp[pmp_index].cfg_reg = val; pmp_update_rule_addr(env, pmp_index); return true; @@ -353,16 +372,6 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, const uint8_t a_field = pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); - /* - * Convert the PMP permissions to match the truth table in the - * Smepmp spec. 
- */
-        const uint8_t smepmp_operation =
-            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
-            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
-            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
-            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
-
     if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
         /*
          * If the PMP entry is not off and the address is in range,
@@ -381,6 +390,9 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
             /*
              * If mseccfg.MML Bit set, do the enhanced pmp priv check
              */
+            const uint8_t smepmp_operation =
+                pmp_get_smepmp_operation(env->pmp_state.pmp[i].cfg_reg);
+
             if (mode == PRV_M) {
                 switch (smepmp_operation) {
                 case 0:
@@ -517,6 +529,11 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
     bool is_next_cfg_tor = false;

     if (addr_index < MAX_RISCV_PMPS) {
+        if (env->pmp_state.pmp[addr_index].addr_reg == val) {
+            /* no change */
+            return;
+        }
+
         /*
          * In TOR mode, need to check the lock bit of the next pmp
          * (if there is a next).
@@ -525,25 +542,23 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
             uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
             is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);

-            if (pmp_is_locked(env, addr_index + 1) && is_next_cfg_tor) {
+            if (pmp_is_readonly(env, addr_index + 1) && is_next_cfg_tor) {
                 qemu_log_mask(LOG_GUEST_ERROR,
-                              "ignoring pmpaddr write - pmpcfg + 1 locked\n");
+                              "ignoring pmpaddr write - pmpcfg+1 read only\n");
                 return;
             }
         }

-        if (!pmp_is_locked(env, addr_index)) {
-            if (env->pmp_state.pmp[addr_index].addr_reg != val) {
-                env->pmp_state.pmp[addr_index].addr_reg = val;
-                pmp_update_rule_addr(env, addr_index);
-                if (is_next_cfg_tor) {
-                    pmp_update_rule_addr(env, addr_index + 1);
-                }
-                tlb_flush(env_cpu(env));
+        if (!pmp_is_readonly(env, addr_index)) {
+            env->pmp_state.pmp[addr_index].addr_reg = val;
+            pmp_update_rule_addr(env, addr_index);
+            if (is_next_cfg_tor) {
+                pmp_update_rule_addr(env, addr_index + 1);
             }
+            tlb_flush(env_cpu(env));
         } else {
             qemu_log_mask(LOG_GUEST_ERROR,
-                          "ignoring pmpaddr write - locked\n");
+                          "ignoring pmpaddr write - read only\n");
         }
     } else {
         qemu_log_mask(LOG_GUEST_ERROR,
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 85128f997b..0d4f7d601c 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -1209,11 +1209,6 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
 /* The specification allows for longer insns, but not supported by qemu. */
 #define MAX_INSN_LEN  4

-static inline int insn_len(uint16_t first_word)
-{
-    return (first_word & 3) == 3 ? 4 : 2;
-}
-
 const RISCVDecoder decoder_table[] = {
     { always_true_p, decode_insn32 },
     { has_xthead_p, decode_xthead},
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 8eea3e6df0..5dc1c10012 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -117,25 +117,42 @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
  * It will trigger an exception if there is no mapping in TLB
  * and page table walk can't fill the TLB entry. Then the guest
  * software can return here after process the exception or never return.
+ *
+ * This function can also be used when direct access to probe_access_flags is
+ * needed in order to access the flags. If a pointer to a flags operand is
+ * provided, the function will call probe_access_flags instead, honouring
+ * nonfault, and will update host and flags.
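A sketch of the two calling conventions this comment describes (illustrative arguments, not taken from the patch):

    void *host;
    int flags;

    /* Faulting probe: flags == NULL keeps the old behaviour and goes
       through probe_access(), trapping like a real access would. */
    probe_pages(env, addr, len, ra, MMU_DATA_LOAD, mmu_index, NULL, NULL, false);

    /* Nonfaulting probe: a flags pointer switches to probe_access_flags(),
       filling *flags and host without raising an exception. */
    probe_pages(env, addr, len, ra, MMU_DATA_LOAD, mmu_index, &host, &flags, true);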
*/ -static void probe_pages(CPURISCVState *env, target_ulong addr, - target_ulong len, uintptr_t ra, - MMUAccessType access_type) +static void probe_pages(CPURISCVState *env, target_ulong addr, target_ulong len, + uintptr_t ra, MMUAccessType access_type, int mmu_index, + void **host, int *flags, bool nonfault) { target_ulong pagelen = -(addr | TARGET_PAGE_MASK); target_ulong curlen = MIN(pagelen, len); - int mmu_index = riscv_env_mmu_index(env, false); - probe_access(env, adjust_addr(env, addr), curlen, access_type, - mmu_index, ra); + if (flags != NULL) { + *flags = probe_access_flags(env, adjust_addr(env, addr), curlen, + access_type, mmu_index, nonfault, host, ra); + } else { + probe_access(env, adjust_addr(env, addr), curlen, access_type, + mmu_index, ra); + } + if (len > curlen) { addr += curlen; curlen = len - curlen; - probe_access(env, adjust_addr(env, addr), curlen, access_type, - mmu_index, ra); + if (flags != NULL) { + *flags = probe_access_flags(env, adjust_addr(env, addr), curlen, + access_type, mmu_index, nonfault, + host, ra); + } else { + probe_access(env, adjust_addr(env, addr), curlen, access_type, + mmu_index, ra); + } } } + static inline void vext_set_elem_mask(void *v0, int index, uint8_t value) { @@ -335,8 +352,8 @@ vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr, MMUAccessType access_type = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE; /* Check page permission/pmp/watchpoint/etc. */ - flags = probe_access_flags(env, adjust_addr(env, addr), size, access_type, - mmu_index, true, &host, ra); + probe_pages(env, addr, size, ra, access_type, mmu_index, &host, &flags, + true); if (flags == 0) { if (nf == 1) { @@ -635,7 +652,7 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env, uint32_t vma = vext_vma(desc); target_ulong addr, addr_probe, addr_i, offset, remain, page_split, elems; int mmu_index = riscv_env_mmu_index(env, false); - int flags; + int flags, probe_flags; void *host; VSTART_CHECK_EARLY_EXIT(env, env->vl); @@ -649,15 +666,15 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env, } /* Check page permission/pmp/watchpoint/etc. */ - flags = probe_access_flags(env, adjust_addr(env, addr), elems * msize, - MMU_DATA_LOAD, mmu_index, true, &host, ra); + probe_pages(env, addr, elems * msize, ra, MMU_DATA_LOAD, mmu_index, &host, + &flags, true); /* If we are crossing a page check also the second page. */ if (env->vl > elems) { addr_probe = addr + (elems << log2_esz); - flags |= probe_access_flags(env, adjust_addr(env, addr_probe), - elems * msize, MMU_DATA_LOAD, mmu_index, - true, &host, ra); + probe_pages(env, addr_probe, elems * msize, ra, MMU_DATA_LOAD, + mmu_index, &host, &probe_flags, true); + flags |= probe_flags; } if (flags & ~TLB_WATCHPOINT) { @@ -669,16 +686,16 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env, addr_i = adjust_addr(env, base + i * (nf << log2_esz)); if (i == 0) { /* Allow fault on first element. */ - probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD); + probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD, + mmu_index, &host, NULL, false); } else { remain = nf << log2_esz; while (remain > 0) { offset = -(addr_i | TARGET_PAGE_MASK); /* Probe nonfault on subsequent elements. 
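The fault-only-first behaviour this loop implements, reduced to a scalar sketch (not part of the patch); translate_ok(), elem_addr() and load_elem() are hypothetical stand-ins for the nonfault probe and the element access:

#include <stdbool.h>
#include <stdint.h>

extern bool translate_ok(void *env_, uint64_t addr);   /* hypothetical */
extern uint64_t elem_addr(void *env_, uint32_t i);     /* hypothetical */
extern void load_elem(void *env_, uint32_t i);         /* hypothetical */

static void ldff_model(void *env_, uint32_t vstart, uint32_t *vl)
{
    for (uint32_t i = vstart; i < *vl; i++) {
        if (i == 0) {
            /* The first element is allowed to fault normally. */
            load_elem(env_, 0);
        } else if (!translate_ok(env_, elem_addr(env_, i))) {
            /* Later faults must not trap: vl is truncated instead. */
            *vl = i;
            break;
        } else {
            load_elem(env_, i);
        }
    }
}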
*/
-            probe_access_flags(env, addr_i, offset,
-                               MMU_DATA_LOAD, mmu_index, true,
-                               &host, 0);
+            probe_pages(env, addr_i, offset, 0, MMU_DATA_LOAD,
+                        mmu_index, &host, &flags, true);

             /*
              * Stop if invalid (unmapped) or mmio (transaction may
@@ -5116,9 +5133,11 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     }                                                                 \
                                                                       \
     for (i = i_max; i < vl; ++i) {                                    \
-        if (vm || vext_elem_mask(v0, i)) {                            \
-            *((ETYPE *)vd + H(i)) = 0;                                \
+        if (!vm && !vext_elem_mask(v0, i)) {                          \
+            vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);       \
+            continue;                                                 \
+        }                                                             \
+        *((ETYPE *)vd + H(i)) = 0;                                    \
     }                                                                 \
                                                                       \
     env->vstart = 0;                                                  \
 }                                                                     \
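Finally, a scalar model of the masked-off handling introduced by the last hunk (not part of the patch), written for 8-bit elements with illustrative names: inactive elements in [i_max, vl) are filled with ones only under mask-agnostic (vma) and otherwise left undisturbed, while active elements are still zeroed:

#include <stdbool.h>
#include <stdint.h>

static void tail_mask_model(uint8_t *vd, const uint8_t *mask, bool vm,
                            bool vma, uint32_t i_max, uint32_t vl)
{
    for (uint32_t i = i_max; i < vl; i++) {
        bool active = vm || ((mask[i / 8] >> (i % 8)) & 1);
        if (!active) {
            if (vma) {
                vd[i] = 0xff;   /* mask-agnostic: write all ones */
            }
            continue;           /* mask-undisturbed: keep the old value */
        }
        vd[i] = 0;              /* active element beyond i_max reads as zero */
    }
}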