Diffstat (limited to 'target/riscv')
-rw-r--r--  target/riscv/cpu.c                         | 94
-rw-r--r--  target/riscv/cpu.h                         |  8
-rw-r--r--  target/riscv/cpu_cfg.h                     | 13
-rw-r--r--  target/riscv/cpu_helper.c                  | 21
-rw-r--r--  target/riscv/csr.c                         | 58
-rw-r--r--  target/riscv/insn_trans/trans_rva.c.inc    | 11
-rw-r--r--  target/riscv/insn_trans/trans_rvi.c.inc    | 16
-rw-r--r--  target/riscv/insn_trans/trans_rvv.c.inc    | 97
-rw-r--r--  target/riscv/insn_trans/trans_rvzce.c.inc  |  6
-rw-r--r--  target/riscv/kvm/kvm-cpu.c                 | 29
-rw-r--r--  target/riscv/machine.c                     | 16
-rw-r--r--  target/riscv/pmu.h                         |  5
-rw-r--r--  target/riscv/tcg/tcg-cpu.c                 | 34
-rw-r--r--  target/riscv/translate.c                   |  3
-rw-r--r--  target/riscv/vector_helper.c               |  5
15 files changed, 271 insertions(+), 145 deletions(-)
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 5ff0192c52..5a48d30828 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -98,9 +98,14 @@ bool riscv_cpu_option_set(const char *optname)
  * instead.
  */
 const RISCVIsaExtData isa_edata_arr[] = {
+    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
+    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, ext_always_enabled),
+    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, ext_always_enabled),
+    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, ext_always_enabled),
+    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_always_enabled),
     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
@@ -109,6 +114,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
+    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, ext_always_enabled),
     ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
     ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
@@ -143,6 +149,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
+    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
@@ -172,8 +179,13 @@ const RISCVIsaExtData isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
+    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, ext_always_enabled),
     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
+    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, ext_always_enabled),
     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
+    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, ext_always_enabled),
+    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, ext_always_enabled),
+    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
@@ -949,9 +961,9 @@ static void riscv_cpu_reset_hold(Object *obj)
         env->two_stage_lookup = false;
 
         env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
-                       (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
-        env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
-                       (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);
+                       (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
+                        MENVCFG_ADUE : 0);
+        env->henvcfg = 0;
 
         /* Initialized default priorities of local interrupts. */
         for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
@@ -1452,17 +1464,27 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
+    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
+    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
+    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
+    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
+    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
+    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
+    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
 
+    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
+    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
+    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
@@ -1488,6 +1510,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
+    MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
 
     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
@@ -1549,25 +1572,40 @@ const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
 
 /* These are experimental so mark with 'x-' */
 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
-    MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
-    MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),
-
-    MULTI_EXT_CFG_BOOL("x-zaamo", ext_zaamo, false),
-    MULTI_EXT_CFG_BOOL("x-zalrsc", ext_zalrsc, false),
-
-    MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
-    MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),
-
-    MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
-    MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
-    MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),
-
     DEFINE_PROP_END_OF_LIST(),
 };
 
+#define ALWAYS_ENABLED_FEATURE(_name) \
+    {.name = _name, \
+     .offset = CPU_CFG_OFFSET(ext_always_enabled), \
+     .enabled = true}
+
+/*
+ * 'Named features' is the name we give to extensions that we
+ * don't want to expose to users. They are either immutable
+ * (always enabled/disable) or they'll vary depending on
+ * the resulting CPU state. They have riscv,isa strings
+ * and priv_ver like regular extensions.
+ */
 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
-    MULTI_EXT_CFG_BOOL("svade", svade, true),
-    MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),
+    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
+
+    /*
+     * cache-related extensions that are always enabled
+     * in TCG since QEMU RISC-V does not have a cache
+     * model.
+     */
+    ALWAYS_ENABLED_FEATURE("za64rs"),
+    ALWAYS_ENABLED_FEATURE("ziccif"),
+    ALWAYS_ENABLED_FEATURE("ziccrse"),
+    ALWAYS_ENABLED_FEATURE("ziccamoa"),
+    ALWAYS_ENABLED_FEATURE("zicclsm"),
+    ALWAYS_ENABLED_FEATURE("ssccptr"),
+
+    /* Other named features that TCG always implements */
+    ALWAYS_ENABLED_FEATURE("sstvecd"),
+    ALWAYS_ENABLED_FEATURE("sstvala"),
+    ALWAYS_ENABLED_FEATURE("sscounterenw"),
 
     DEFINE_PROP_END_OF_LIST(),
 };
@@ -2162,13 +2200,10 @@ static const PropertyInfo prop_marchid = {
 };
 
 /*
- * RVA22U64 defines some 'named features' or 'synthetic extensions'
- * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
- * and Zicclsm. We do not implement caching in QEMU so we'll consider
- * all these named features as always enabled.
- *
- * There's no riscv,isa update for them (nor for zic64b, despite it
- * having a cfg offset) at this moment.
+ * RVA22U64 defines some 'named features' that are cache
+ * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
+ * and Zicclsm. They are always implemented in TCG and
+ * doesn't need to be manually enabled by the profile.
  */
 static RISCVCPUProfile RVA22U64 = {
     .parent = NULL,
@@ -2185,7 +2220,7 @@ static RISCVCPUProfile RVA22U64 = {
         CPU_CFG_OFFSET(ext_zicbop),
         CPU_CFG_OFFSET(ext_zicboz),
         /* mandatory named features for this profile */
-        CPU_CFG_OFFSET(zic64b),
+        CPU_CFG_OFFSET(ext_zic64b),
 
         RISCV_PROFILE_EXT_LIST_END
     }
@@ -2200,8 +2235,6 @@ static RISCVCPUProfile RVA22U64 = {
  * Other named features that we already implement: Sstvecd, Sstvala,
  * Sscounterenw
  *
- * Named features that we need to enable: svade
- *
  * The remaining features/extensions comes from RVA22U64.
  */
 static RISCVCPUProfile RVA22S64 = {
@@ -2213,10 +2246,7 @@ static RISCVCPUProfile RVA22S64 = {
     .ext_offsets = {
         /* rva22s64 exts */
         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
-        CPU_CFG_OFFSET(ext_svinval),
-
-        /* rva22s64 named features */
-        CPU_CFG_OFFSET(svade),
+        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
 
         RISCV_PROFILE_EXT_LIST_END
     }
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 5d291a7092..3b1a02b944 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -271,7 +271,7 @@ struct CPUArchState {
     target_ulong hstatus;
     target_ulong hedeleg;
     uint64_t hideleg;
-    target_ulong hcounteren;
+    uint32_t hcounteren;
     target_ulong htval;
     target_ulong htinst;
     target_ulong hgatp;
@@ -334,10 +334,10 @@ struct CPUArchState {
      */
     bool two_stage_indirect_lookup;
 
-    target_ulong scounteren;
-    target_ulong mcounteren;
+    uint32_t scounteren;
+    uint32_t mcounteren;
 
-    target_ulong mcountinhibit;
+    uint32_t mcountinhibit;
 
     /* PMU counter state */
     PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index 833bf58217..2040b90da0 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -71,6 +71,7 @@ struct RISCVCPUConfig {
     bool ext_zihintntl;
     bool ext_zihintpause;
    bool ext_zihpm;
+    bool ext_ztso;
     bool ext_smstateen;
     bool ext_sstc;
     bool ext_svadu;
@@ -119,13 +120,21 @@ struct RISCVCPUConfig {
     bool ext_smepmp;
     bool rvv_ta_all_1s;
     bool rvv_ma_all_1s;
-    bool svade;
-    bool zic64b;
 
     uint32_t mvendorid;
     uint64_t marchid;
     uint64_t mimpid;
 
+    /* Named features */
+    bool ext_svade;
+    bool ext_zic64b;
+
+    /*
+     * Always 'true' boolean for named features
+     * TCG always implement/can't be disabled.
+     */
+    bool ext_always_enabled;
+
     /* Vendor-specific custom extensions */
     bool ext_xtheadba;
     bool ext_xtheadbb;
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index d462d95ee1..ce7322011d 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -907,7 +907,9 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
     }
 
     bool pbmte = env->menvcfg & MENVCFG_PBMTE;
-    bool adue = env->menvcfg & MENVCFG_ADUE;
+    bool svade = riscv_cpu_cfg(env)->ext_svade;
+    bool svadu = riscv_cpu_cfg(env)->ext_svadu;
+    bool adue = svadu ? env->menvcfg & MENVCFG_ADUE : !svade;
 
     if (first_stage && two_stage && env->virt_enabled) {
         pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
@@ -1082,9 +1084,18 @@ restart:
             return TRANSLATE_FAIL;
         }
 
-        /* If necessary, set accessed and dirty bits. */
-        target_ulong updated_pte = pte | PTE_A |
-                (access_type == MMU_DATA_STORE ? PTE_D : 0);
+        target_ulong updated_pte = pte;
+
+        /*
+         * If ADUE is enabled, set accessed and dirty bits.
+         * Otherwise raise an exception if necessary.
+         */
+        if (adue) {
+            updated_pte |= PTE_A | (access_type == MMU_DATA_STORE ? PTE_D : 0);
+        } else if (!(pte & PTE_A) ||
+                   (access_type == MMU_DATA_STORE && !(pte & PTE_D))) {
+            return TRANSLATE_FAIL;
+        }
 
         /* Page table updates need to be atomic with MTTCG enabled */
         if (updated_pte != pte && !is_debug) {
@@ -1212,7 +1223,7 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
 
     if (env->virt_enabled) {
         if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
-                                 0, mmu_idx, false, true, true)) {
+                                 0, MMUIdx_U, false, true, true)) {
             return -1;
         }
     }
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index d4e8ac13b9..726096444f 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -1295,8 +1295,34 @@ static RISCVException read_mstatus(CPURISCVState *env, int csrno,
 
 static bool validate_vm(CPURISCVState *env, target_ulong vm)
 {
-    return (vm & 0xf) <=
-           satp_mode_max_from_map(riscv_cpu_cfg(env)->satp_mode.map);
+    uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map;
+    return get_field(mode_supported, (1 << vm));
+}
+
+static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
+                                  target_ulong val)
+{
+    target_ulong mask;
+    bool vm;
+
+    if (riscv_cpu_mxl(env) == MXL_RV32) {
+        vm = validate_vm(env, get_field(val, SATP32_MODE));
+        mask = (val ^ old_xatp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
+    } else {
+        vm = validate_vm(env, get_field(val, SATP64_MODE));
+        mask = (val ^ old_xatp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
+    }
+
+    if (vm && mask) {
+        /*
+         * The ISA defines SATP.MODE=Bare as "no translation", but we still
+         * pass these through QEMU's TLB emulation as it improves
+         * performance. Flushing the TLB on SATP writes with paging
+         * enabled avoids leaking those invalid cached mappings.
+         */
+        tlb_flush(env_cpu(env));
+        return val;
+    }
+    return old_xatp;
 }
 
 static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
                                  target_ulong val)
 {
@@ -2133,7 +2159,7 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
     /*
      * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
      * henvcfg.stce is read_only 0 when menvcfg.stce = 0
-     * henvcfg.hade is read_only 0 when menvcfg.hade = 0
+     * henvcfg.adue is read_only 0 when menvcfg.adue = 0
      */
     *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
                            env->menvcfg);
@@ -3021,31 +3047,11 @@ static RISCVException read_satp(CPURISCVState *env, int csrno,
 static RISCVException write_satp(CPURISCVState *env, int csrno,
                                  target_ulong val)
 {
-    target_ulong mask;
-    bool vm;
-
     if (!riscv_cpu_cfg(env)->mmu) {
         return RISCV_EXCP_NONE;
     }
 
-    if (riscv_cpu_mxl(env) == MXL_RV32) {
-        vm = validate_vm(env, get_field(val, SATP32_MODE));
-        mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
-    } else {
-        vm = validate_vm(env, get_field(val, SATP64_MODE));
-        mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
-    }
-
-    if (vm && mask) {
-        /*
-         * The ISA defines SATP.MODE=Bare as "no translation", but we still
-         * pass these through QEMU's TLB emulation as it improves
-         * performance. Flushing the TLB on SATP writes with paging
-         * enabled avoids leaking those invalid cached mappings.
-         */
-        tlb_flush(env_cpu(env));
-        env->satp = val;
-    }
+    env->satp = legalize_xatp(env, env->satp, val);
 
     return RISCV_EXCP_NONE;
 }
@@ -3532,7 +3538,7 @@ static RISCVException read_hgatp(CPURISCVState *env, int csrno,
 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
                                   target_ulong val)
 {
-    env->hgatp = val;
+    env->hgatp = legalize_xatp(env, env->hgatp, val);
 
     return RISCV_EXCP_NONE;
 }
@@ -3809,7 +3815,7 @@ static RISCVException read_vsatp(CPURISCVState *env, int csrno,
 static RISCVException write_vsatp(CPURISCVState *env, int csrno,
                                   target_ulong val)
 {
-    env->vsatp = val;
+    env->vsatp = legalize_xatp(env, env->vsatp, val);
 
     return RISCV_EXCP_NONE;
 }
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
index 267930e5bc..4a9e4591d1 100644
--- a/target/riscv/insn_trans/trans_rva.c.inc
+++ b/target/riscv/insn_trans/trans_rva.c.inc
@@ -40,7 +40,11 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
     }
     tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
-    if (a->aq) {
+    /*
+     * TSO defines AMOs as acquire+release-RCsc, but does not define LR/SC as
+     * AMOs. Instead treat them like loads.
+     */
+    if (a->aq || ctx->ztso) {
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
     }
 
@@ -76,9 +80,10 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
     gen_set_label(l1);
     /*
      * Address comparison failure. However, we still need to
-     * provide the memory barrier implied by AQ/RL.
+     * provide the memory barrier implied by AQ/RL/TSO.
     */
-    tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL);
+    TCGBar bar_strl = (ctx->ztso || a->rl) ? TCG_BAR_STRL : 0;
+    tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + bar_strl);
     gen_set_gpr(ctx, a->rd, tcg_constant_tl(1));
     gen_set_label(l2);
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
index faf6d65064..ad40d3e87f 100644
--- a/target/riscv/insn_trans/trans_rvi.c.inc
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -266,12 +266,20 @@ static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
 
 static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
 {
+    bool out;
+
     decode_save_opc(ctx);
     if (get_xl(ctx) == MXL_RV128) {
-        return gen_load_i128(ctx, a, memop);
+        out = gen_load_i128(ctx, a, memop);
     } else {
-        return gen_load_tl(ctx, a, memop);
+        out = gen_load_tl(ctx, a, memop);
+    }
+
+    if (ctx->ztso) {
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
     }
+
+    return out;
 }
 
 static bool trans_lb(DisasContext *ctx, arg_lb *a)
@@ -328,6 +336,10 @@ static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
     TCGv addr = get_address(ctx, a->rs1, a->imm);
     TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
 
+    if (ctx->ztso) {
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+    }
+
     tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
     return true;
 }
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 9e101ab434..e42728990e 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -636,10 +636,28 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
     tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
     tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
 
+    /*
+     * According to the specification
+     *
+     *   Additionally, if the Ztso extension is implemented, then vector memory
+     *   instructions in the V extension and Zve family of extensions follow
+     *   RVTSO at the instruction level. The Ztso extension does not
+     *   strengthen the ordering of intra-instruction element accesses.
+     *
+     * as a result neither ordered nor unordered accesses from the V
+     * instructions need ordering within the loop but we do still need barriers
+     * around the loop.
+     */
+    if (is_store && s->ztso) {
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+    }
+
+    mark_vs_dirty(s);
+
     fn(dest, mask, base, tcg_env, desc);
 
-    if (!is_store) {
-        mark_vs_dirty(s);
+    if (!is_store && s->ztso) {
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
     }
 
     gen_set_label(over);
@@ -778,7 +796,7 @@ typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
 
 static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
                               uint32_t data, gen_helper_ldst_stride *fn,
-                              DisasContext *s, bool is_store)
+                              DisasContext *s)
 {
     TCGv_ptr dest, mask;
     TCGv base, stride;
@@ -797,11 +815,9 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
     tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
     tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
 
-    fn(dest, mask, base, stride, tcg_env, desc);
+    mark_vs_dirty(s);
 
-    if (!is_store) {
-        mark_vs_dirty(s);
-    }
+    fn(dest, mask, base, stride, tcg_env, desc);
 
     gen_set_label(over);
     return true;
@@ -827,7 +843,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
     data = FIELD_DP32(data, VDATA, NF, a->nf);
     data = FIELD_DP32(data, VDATA, VTA, s->vta);
     data = FIELD_DP32(data, VDATA, VMA, s->vma);
-    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
+    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
 }
 
 static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -861,7 +877,7 @@ static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
         return false;
     }
 
-    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
+    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
 }
 
 static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -884,7 +900,7 @@ typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
 
 static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                              uint32_t data, gen_helper_ldst_index *fn,
-                             DisasContext *s, bool is_store)
+                             DisasContext *s)
 {
     TCGv_ptr dest, mask, index;
     TCGv base;
@@ -904,11 +920,9 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
     tcg_gen_addi_ptr(index, tcg_env, vreg_ofs(s, vs2));
     tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
 
-    fn(dest, mask, base, index, tcg_env, desc);
+    mark_vs_dirty(s);
 
-    if (!is_store) {
-        mark_vs_dirty(s);
-    }
+    fn(dest, mask, base, index, tcg_env, desc);
 
     gen_set_label(over);
     return true;
@@ -953,7 +967,7 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
     data = FIELD_DP32(data, VDATA, NF, a->nf);
     data = FIELD_DP32(data, VDATA, VTA, s->vta);
     data = FIELD_DP32(data, VDATA, VMA, s->vma);
-    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
+    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
 }
 
 static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -1005,7 +1019,7 @@ static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
     data = FIELD_DP32(data, VDATA, VM, a->vm);
     data = FIELD_DP32(data, VDATA, LMUL, emul);
     data = FIELD_DP32(data, VDATA, NF, a->nf);
-    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
+    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
 }
 
 static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -1084,7 +1098,7 @@ typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
 
 static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
                              uint32_t width, gen_helper_ldst_whole *fn,
-                             DisasContext *s, bool is_store)
+                             DisasContext *s)
 {
     uint32_t evl = s->cfg_ptr->vlenb * nf / width;
     TCGLabel *over = gen_new_label();
@@ -1102,11 +1116,10 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
     base = get_gpr(s, rs1, EXT_NONE);
     tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
 
+    mark_vs_dirty(s);
+
     fn(dest, base, tcg_env, desc);
 
-    if (!is_store) {
-        mark_vs_dirty(s);
-    }
-
     gen_set_label(over);
     return true;
@@ -1116,42 +1129,42 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
  * load and store whole register instructions ignore vtype and vl setting.
  * Thus, we don't need to check vill bit. (Section 7.9)
  */
-#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, WIDTH, IS_STORE)              \
+#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, WIDTH)                        \
 static bool trans_##NAME(DisasContext *s, arg_##NAME * a)                \
 {                                                                        \
     if (require_rvv(s) &&                                                \
         QEMU_IS_ALIGNED(a->rd, ARG_NF)) {                                \
         return ldst_whole_trans(a->rd, a->rs1, ARG_NF, WIDTH,            \
-                                gen_helper_##NAME, s, IS_STORE);         \
+                                gen_helper_##NAME, s);                   \
     }                                                                    \
     return false;                                                        \
 }
 
-GEN_LDST_WHOLE_TRANS(vl1re8_v, 1, 1, false)
-GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, 2, false)
-GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, 4, false)
-GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, 8, false)
-GEN_LDST_WHOLE_TRANS(vl2re8_v, 2, 1, false)
-GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, 2, false)
-GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, 4, false)
-GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, 8, false)
-GEN_LDST_WHOLE_TRANS(vl4re8_v, 4, 1, false)
-GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, 2, false)
-GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, 4, false)
-GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, 8, false)
-GEN_LDST_WHOLE_TRANS(vl8re8_v, 8, 1, false)
-GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, 2, false)
-GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, 4, false)
-GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, 8, false)
+GEN_LDST_WHOLE_TRANS(vl1re8_v, 1, 1)
+GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, 2)
+GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, 4)
+GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, 8)
+GEN_LDST_WHOLE_TRANS(vl2re8_v, 2, 1)
+GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, 2)
+GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, 4)
+GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, 8)
+GEN_LDST_WHOLE_TRANS(vl4re8_v, 4, 1)
+GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, 2)
+GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, 4)
+GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, 8)
+GEN_LDST_WHOLE_TRANS(vl8re8_v, 8, 1)
+GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, 2)
+GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, 4)
+GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, 8)
 
 /*
  * The vector whole register store instructions are encoded similar to
 * unmasked unit-stride store of elements with EEW=8.
 */
-GEN_LDST_WHOLE_TRANS(vs1r_v, 1, 1, true)
-GEN_LDST_WHOLE_TRANS(vs2r_v, 2, 1, true)
-GEN_LDST_WHOLE_TRANS(vs4r_v, 4, 1, true)
-GEN_LDST_WHOLE_TRANS(vs8r_v, 8, 1, true)
+GEN_LDST_WHOLE_TRANS(vs1r_v, 1, 1)
+GEN_LDST_WHOLE_TRANS(vs2r_v, 2, 1)
+GEN_LDST_WHOLE_TRANS(vs4r_v, 4, 1)
+GEN_LDST_WHOLE_TRANS(vs8r_v, 8, 1)
 
 /*
  *** Vector Integer Arithmetic Instructions
 */
diff --git a/target/riscv/insn_trans/trans_rvzce.c.inc b/target/riscv/insn_trans/trans_rvzce.c.inc
index 2d992e14c4..cd234ad960 100644
--- a/target/riscv/insn_trans/trans_rvzce.c.inc
+++ b/target/riscv/insn_trans/trans_rvzce.c.inc
@@ -293,12 +293,14 @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a)
 {
     REQUIRE_ZCMT(ctx);
 
+    TCGv addr = tcg_temp_new();
+
     /*
      * Update pc to current for the non-unwinding exception
      * that might come from cpu_ld*_code() in the helper.
     */
     gen_update_pc(ctx, 0);
-    gen_helper_cm_jalt(cpu_pc, tcg_env, tcg_constant_i32(a->index));
+    gen_helper_cm_jalt(addr, tcg_env, tcg_constant_i32(a->index));
 
     /* c.jt vs c.jalt depends on the index. */
     if (a->index >= 32) {
@@ -307,6 +309,8 @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a)
         gen_set_gpr(ctx, xRA, succ_pc);
     }
 
+    tcg_gen_mov_tl(cpu_pc, addr);
+
     tcg_gen_lookup_and_goto_ptr();
     ctx->base.is_jmp = DISAS_NORETURN;
     return true;
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index 422e4f121c..c7afdb1e81 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -275,13 +275,42 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
     KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
     KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
     KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
+    KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND),
     KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
     KVM_EXT_CFG("zifencei", ext_zifencei, KVM_RISCV_ISA_EXT_ZIFENCEI),
+    KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL),
     KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
     KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
+    KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
+    KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH),
+    KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN),
     KVM_EXT_CFG("zba", ext_zba, KVM_RISCV_ISA_EXT_ZBA),
     KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB),
+    KVM_EXT_CFG("zbc", ext_zbc, KVM_RISCV_ISA_EXT_ZBC),
+    KVM_EXT_CFG("zbkb", ext_zbkb, KVM_RISCV_ISA_EXT_ZBKB),
+    KVM_EXT_CFG("zbkc", ext_zbkc, KVM_RISCV_ISA_EXT_ZBKC),
+    KVM_EXT_CFG("zbkx", ext_zbkx, KVM_RISCV_ISA_EXT_ZBKX),
     KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS),
+    KVM_EXT_CFG("zknd", ext_zknd, KVM_RISCV_ISA_EXT_ZKND),
+    KVM_EXT_CFG("zkne", ext_zkne, KVM_RISCV_ISA_EXT_ZKNE),
+    KVM_EXT_CFG("zknh", ext_zknh, KVM_RISCV_ISA_EXT_ZKNH),
+    KVM_EXT_CFG("zkr", ext_zkr, KVM_RISCV_ISA_EXT_ZKR),
+    KVM_EXT_CFG("zksed", ext_zksed, KVM_RISCV_ISA_EXT_ZKSED),
+    KVM_EXT_CFG("zksh", ext_zksh, KVM_RISCV_ISA_EXT_ZKSH),
+    KVM_EXT_CFG("zkt", ext_zkt, KVM_RISCV_ISA_EXT_ZKT),
+    KVM_EXT_CFG("zvbb", ext_zvbb, KVM_RISCV_ISA_EXT_ZVBB),
+    KVM_EXT_CFG("zvbc", ext_zvbc, KVM_RISCV_ISA_EXT_ZVBC),
+    KVM_EXT_CFG("zvfh", ext_zvfh, KVM_RISCV_ISA_EXT_ZVFH),
+    KVM_EXT_CFG("zvfhmin", ext_zvfhmin, KVM_RISCV_ISA_EXT_ZVFHMIN),
+    KVM_EXT_CFG("zvkb", ext_zvkb, KVM_RISCV_ISA_EXT_ZVKB),
+    KVM_EXT_CFG("zvkg", ext_zvkg, KVM_RISCV_ISA_EXT_ZVKG),
+    KVM_EXT_CFG("zvkned", ext_zvkned, KVM_RISCV_ISA_EXT_ZVKNED),
+    KVM_EXT_CFG("zvknha", ext_zvknha, KVM_RISCV_ISA_EXT_ZVKNHA),
+    KVM_EXT_CFG("zvknhb", ext_zvknhb, KVM_RISCV_ISA_EXT_ZVKNHB),
+    KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED),
+    KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH),
+    KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT),
+    KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN),
     KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
     KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
     KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index 81cf22894e..76f2150f78 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -79,14 +79,14 @@ static bool hyper_needed(void *opaque)
 
 static const VMStateDescription vmstate_hyper = {
     .name = "cpu/hyper",
-    .version_id = 3,
-    .minimum_version_id = 3,
+    .version_id = 4,
+    .minimum_version_id = 4,
     .needed = hyper_needed,
     .fields = (const VMStateField[]) {
         VMSTATE_UINTTL(env.hstatus, RISCVCPU),
         VMSTATE_UINTTL(env.hedeleg, RISCVCPU),
         VMSTATE_UINT64(env.hideleg, RISCVCPU),
-        VMSTATE_UINTTL(env.hcounteren, RISCVCPU),
+        VMSTATE_UINT32(env.hcounteren, RISCVCPU),
         VMSTATE_UINTTL(env.htval, RISCVCPU),
         VMSTATE_UINTTL(env.htinst, RISCVCPU),
         VMSTATE_UINTTL(env.hgatp, RISCVCPU),
@@ -353,8 +353,8 @@ static const VMStateDescription vmstate_jvt = {
 
 const VMStateDescription vmstate_riscv_cpu = {
     .name = "cpu",
-    .version_id = 9,
-    .minimum_version_id = 9,
+    .version_id = 10,
+    .minimum_version_id = 10,
     .post_load = riscv_cpu_post_load,
     .fields = (const VMStateField[]) {
         VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
@@ -397,9 +397,9 @@ const VMStateDescription vmstate_riscv_cpu = {
         VMSTATE_UINTTL(env.mtval, RISCVCPU),
         VMSTATE_UINTTL(env.miselect, RISCVCPU),
         VMSTATE_UINTTL(env.siselect, RISCVCPU),
-        VMSTATE_UINTTL(env.scounteren, RISCVCPU),
-        VMSTATE_UINTTL(env.mcounteren, RISCVCPU),
-        VMSTATE_UINTTL(env.mcountinhibit, RISCVCPU),
+        VMSTATE_UINT32(env.scounteren, RISCVCPU),
+        VMSTATE_UINT32(env.mcounteren, RISCVCPU),
+        VMSTATE_UINT32(env.mcountinhibit, RISCVCPU),
         VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
                              vmstate_pmu_ctr_state, PMUCTRState),
         VMSTATE_UINTTL_ARRAY(env.mhpmevent_val, RISCVCPU, RV_MAX_MHPMEVENTS),
diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h
index 505fc850d3..7c0ad661e0 100644
--- a/target/riscv/pmu.h
+++ b/target/riscv/pmu.h
@@ -16,6 +16,9 @@
  * this program. If not, see <http://www.gnu.org/licenses/>.
 */
 
+#ifndef RISCV_PMU_H
+#define RISCV_PMU_H
+
 #include "cpu.h"
 #include "qapi/error.h"
 
@@ -31,3 +34,5 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx);
 void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name);
 int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
                           uint32_t ctr_idx);
+
+#endif /* RISCV_PMU_H */
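machine.c narrows the counter-enable CSRs on the wire and bumps both version_id and minimum_version_id. The vmstate stream is a positional dump of fields, so a field that changes size shifts every later field, and old streams must be rejected rather than misparsed. A toy little-endian serializer showing the failure mode; all names are invented and this is not QEMU's migration API:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* v9 writer: hcounteren was target_ulong, 8 bytes on RV64. */
    static void save_v9(unsigned char *buf, uint64_t hcounteren, uint64_t htval)
    {
        memcpy(buf, &hcounteren, 8);
        memcpy(buf + 8, &htval, 8);
    }

    /* v10 reader: hcounteren is now uint32_t, 4 bytes. */
    static void load_v10(const unsigned char *buf, uint32_t *hcounteren,
                         uint64_t *htval)
    {
        memcpy(hcounteren, buf, 4);
        memcpy(htval, buf + 4, 8);   /* reads into the old field's bytes */
    }

    int main(void)
    {
        unsigned char wire[16];
        uint32_t hcounteren;
        uint64_t htval;

        save_v9(wire, 0x7, 0xdead);
        load_v10(wire, &hcounteren, &htval);
        printf("htval=%#llx (garbage; expected 0xdead)\n",
               (unsigned long long)htval);
        return 0;
    }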
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index dd5228c288..ab6db817db 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -196,17 +196,14 @@ static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
 
 static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
 {
-    switch (feat_offset) {
-    case CPU_CFG_OFFSET(zic64b):
+    /*
+     * All other named features are already enabled
+     * in riscv_tcg_cpu_instance_init().
+     */
+    if (feat_offset == CPU_CFG_OFFSET(ext_zic64b)) {
         cpu->cfg.cbom_blocksize = 64;
         cpu->cfg.cbop_blocksize = 64;
         cpu->cfg.cboz_blocksize = 64;
-        break;
-    case CPU_CFG_OFFSET(svade):
-        cpu->cfg.ext_svadu = false;
-        break;
-    default:
-        g_assert_not_reached();
     }
 }
@@ -219,10 +216,6 @@ static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
         return;
     }
 
-    if (cpu_cfg_offset_is_named_feat(ext_offset)) {
-        return;
-    }
-
     ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);
 
     if (env->priv_ver < ext_priv_ver) {
@@ -322,11 +315,9 @@ static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
 
 static void riscv_cpu_update_named_features(RISCVCPU *cpu)
 {
-    cpu->cfg.zic64b = cpu->cfg.cbom_blocksize == 64 &&
-                      cpu->cfg.cbop_blocksize == 64 &&
-                      cpu->cfg.cboz_blocksize == 64;
-
-    cpu->cfg.svade = !cpu->cfg.ext_svadu;
+    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
+                          cpu->cfg.cbop_blocksize == 64 &&
+                          cpu->cfg.cboz_blocksize == 64;
 }
 
 static void riscv_cpu_validate_g(RISCVCPU *cpu)
@@ -1075,6 +1066,7 @@ static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
 
 #ifndef CONFIG_USER_ONLY
     if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
+        object_property_set_bool(obj, "mmu", true, NULL);
         const char *satp_prop = satp_mode_str(profile->satp_mode,
                                               riscv_cpu_is_32bit(cpu));
         object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
@@ -1290,6 +1282,12 @@ static void riscv_init_max_cpu_extensions(Object *obj)
         isa_ext_update_enabled(cpu, prop->offset, true);
     }
 
+    /*
+     * Some extensions can't be added without backward compatibilty concerns.
+     * Disable those, the user can still opt in to them on the command line.
+     */
+    cpu->cfg.ext_svade = false;
+
     /* set vector version */
     env->vext_ver = VEXT_VERSION_1_00_0;
 
@@ -1318,6 +1316,8 @@ static void riscv_tcg_cpu_instance_init(CPUState *cs)
     RISCVCPU *cpu = RISCV_CPU(cs);
     Object *obj = OBJECT(cpu);
 
+    cpu->cfg.ext_always_enabled = true;
+
     misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
     multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
     riscv_cpu_add_user_properties(obj);
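In tcg-cpu.c, zic64b is no longer a directly writable flag: riscv_cpu_update_named_features() re-derives it from the three cache-block sizes after user options are applied, so setting any blocksize away from 64 silently drops zic64b from riscv,isa. A standalone model of that derivation; the struct is illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cfg {
        uint16_t cbom_blocksize, cbop_blocksize, cboz_blocksize;
        bool ext_zic64b;
    };

    static void update_named_features(struct cfg *c)
    {
        c->ext_zic64b = c->cbom_blocksize == 64 &&
                        c->cbop_blocksize == 64 &&
                        c->cboz_blocksize == 64;
    }

    int main(void)
    {
        struct cfg c = { 64, 64, 64, false };

        update_named_features(&c);
        printf("zic64b=%d\n", (int)c.ext_zic64b);   /* 1 */

        c.cboz_blocksize = 128;       /* e.g. -cpu ...,cboz_blocksize=128 */
        update_named_features(&c);
        printf("zic64b=%d\n", (int)c.ext_zic64b);   /* 0: zic64b dropped */
        return 0;
    }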
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 177418b2b9..ea5d52b2ef 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -109,6 +109,8 @@ typedef struct DisasContext {
     /* PointerMasking extension */
     bool pm_mask_enabled;
     bool pm_base_enabled;
+    /* Ztso */
+    bool ztso;
     /* Use icount trigger for native debug */
     bool itrigger;
     /* FRM is known to contain a valid value. */
@@ -1196,6 +1198,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->cs = cs;
     ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
     ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
+    ctx->ztso = cpu->cfg.ext_ztso;
     ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
     ctx->zero = tcg_constant_tl(0);
     ctx->virt_inst_excp = false;
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 84cec73eb2..fe56c007d5 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -44,6 +44,7 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
     target_ulong reserved = s2 &
                             MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT,
                                             xlen - 1 - R_VTYPE_RESERVED_SHIFT);
+    uint16_t vlen = cpu->cfg.vlenb << 3;
     int8_t lmul;
 
     if (vlmul & 4) {
@@ -53,10 +54,8 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
         /*
          * VLEN * LMUL >= SEW
          * VLEN >> (8 - lmul) >= sew
          * (vlenb << 3) >> (8 - lmul) >= sew
-         * vlenb >> (8 - 3 - lmul) >= sew
          */
-        if (vlmul == 4 ||
-            cpu->cfg.vlenb >> (8 - 3 - vlmul) < sew) {
+        if (vlmul == 4 || (vlen >> (8 - vlmul)) < sew) {
             vill = true;
         }
     }
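The vector_helper.c change rewrites the fractional-LMUL legality check as VLEN >> (8 - vlmul) >= SEW, computing VLEN once instead of shifting vlenb. A worked standalone instance of the check; the encoding follows the V spec, where vlmul values 5..7 mean LMUL = 1/8, 1/4, 1/2 and 4 is reserved:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool frac_lmul_illegal(uint16_t vlenb, uint8_t vlmul, uint16_t sew)
    {
        uint16_t vlen = vlenb << 3;

        /* vlmul=4 is reserved; 5..7 encode LMUL = 1/8, 1/4, 1/2 */
        return vlmul == 4 || (vlen >> (8 - vlmul)) < sew;
    }

    int main(void)
    {
        /* VLEN=128, SEW=32, LMUL=1/4: 128/4 = 32 >= 32, legal */
        printf("%d\n", (int)frac_lmul_illegal(16, 6, 32));   /* 0 */
        /* VLEN=128, SEW=32, LMUL=1/8: 128/8 = 16 < 32, illegal */
        printf("%d\n", (int)frac_lmul_illegal(16, 5, 32));   /* 1 */
        return 0;
    }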