Diffstat (limited to 'target/riscv/insn_trans/trans_rvv.c.inc')

 target/riscv/insn_trans/trans_rvv.c.inc | 225 ++++++++++++++++++++++-------
 1 file changed, 187 insertions(+), 38 deletions(-)
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 6c285c958b..f85a9e83b4 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -66,6 +66,50 @@ static bool require_scale_rvf(DisasContext *s)
     }
 }
 
+static bool require_zve32f(DisasContext *s)
+{
+    /* RVV + Zve32f = RVV. */
+    if (has_ext(s, RVV)) {
+        return true;
+    }
+
+    /* Zve32f doesn't support FP64. (Section 18.2) */
+    return s->ext_zve32f ? s->sew <= MO_32 : true;
+}
+
+static bool require_scale_zve32f(DisasContext *s)
+{
+    /* RVV + Zve32f = RVV. */
+    if (has_ext(s, RVV)) {
+        return true;
+    }
+
+    /* Zve32f doesn't support FP64. (Section 18.2) */
+    return s->ext_zve32f ? s->sew <= MO_16 : true;
+}
+
+static bool require_zve64f(DisasContext *s)
+{
+    /* RVV + Zve64f = RVV. */
+    if (has_ext(s, RVV)) {
+        return true;
+    }
+
+    /* Zve64f doesn't support FP64. (Section 18.2) */
+    return s->ext_zve64f ? s->sew <= MO_32 : true;
+}
+
+static bool require_scale_zve64f(DisasContext *s)
+{
+    /* RVV + Zve64f = RVV. */
+    if (has_ext(s, RVV)) {
+        return true;
+    }
+
+    /* Zve64f doesn't support FP64. (Section 18.2) */
+    return s->ext_zve64f ? s->sew <= MO_16 : true;
+}
+
 /* Destination vector register group cannot overlap source mask register. */
 static bool require_vm(int vm, int vd)
 {
@@ -129,7 +173,8 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
 {
     TCGv s1, dst;
 
-    if (!require_rvv(s) || !has_ext(s, RVV)) {
+    if (!require_rvv(s) ||
+        !(has_ext(s, RVV) || s->ext_zve32f || s->ext_zve64f)) {
         return false;
     }
 
@@ -149,7 +194,7 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
     gen_set_gpr(s, rd, dst);
     mark_vs_dirty(s);
 
-    tcg_gen_movi_tl(cpu_pc, s->pc_succ_insn);
+    gen_set_pc_imm(s, s->pc_succ_insn);
     tcg_gen_lookup_and_goto_ptr();
     s->base.is_jmp = DISAS_NORETURN;
 
@@ -164,7 +209,8 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
 {
     TCGv dst;
 
-    if (!require_rvv(s) || !has_ext(s, RVV)) {
+    if (!require_rvv(s) ||
+        !(has_ext(s, RVV) || s->ext_zve32f || s->ext_zve64f)) {
         return false;
     }
 
@@ -173,7 +219,7 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
     gen_helper_vsetvl(dst, cpu_env, s1, s2);
     gen_set_gpr(s, rd, dst);
     mark_vs_dirty(s);
-    tcg_gen_movi_tl(cpu_pc, s->pc_succ_insn);
+    gen_set_pc_imm(s, s->pc_succ_insn);
     tcg_gen_lookup_and_goto_ptr();
     s->base.is_jmp = DISAS_NORETURN;
 
@@ -261,10 +307,21 @@ static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
                                 uint8_t eew)
 {
     int8_t emul = eew - s->sew + s->lmul;
-    return (emul >= -3 && emul <= 3) &&
-           require_align(vs2, emul) &&
-           require_align(vd, s->lmul) &&
-           require_nf(vd, nf, s->lmul);
+    bool ret = (emul >= -3 && emul <= 3) &&
+               require_align(vs2, emul) &&
+               require_align(vd, s->lmul) &&
+               require_nf(vd, nf, s->lmul);
+
+    /*
+     * All Zve* extensions support all vector load and store instructions,
+     * except Zve64* extensions do not support EEW=64 for index values
+     * when XLEN=32. (Section 18.2)
+     */
+    if (get_xl(s) == MXL_RV32) {
+        ret &= (!has_ext(s, RVV) && s->ext_zve64f ? eew != MO_64 : true);
+    }
+
+    return ret;
 }
 
 /*
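
The XLEN=32 restriction added above is easier to read as a standalone predicate: an indexed access is rejected only when Zve64f supplies the vector unit without full V and the index EEW is 64. A minimal sketch of that logic (the enum and struct are simplified stand-ins for QEMU's MemOp values and DisasContext, not the real API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for QEMU's element sizes and translator state. */
    enum { MO_8, MO_16, MO_32, MO_64 };

    struct state {
        bool rvv;      /* full V extension present */
        bool zve64f;   /* Zve64f present */
        bool rv32;     /* XLEN == 32 */
    };

    /*
     * Mirrors "ret &= (!has_ext(s, RVV) && s->ext_zve64f ?
     * eew != MO_64 : true)" under the get_xl(s) == MXL_RV32 guard.
     */
    static bool index_eew_ok(const struct state *s, int eew)
    {
        if (s->rv32 && !s->rvv && s->zve64f) {
            return eew != MO_64;
        }
        return true;
    }

    int main(void)
    {
        struct state s = { .rvv = false, .zve64f = true, .rv32 = true };
        printf("EEW=32 index allowed: %d\n", index_eew_ok(&s, MO_32)); /* 1 */
        printf("EEW=64 index allowed: %d\n", index_eew_ok(&s, MO_64)); /* 0 */
        return 0;
    }
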
@@ -1201,7 +1258,7 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
     dest = tcg_temp_new_ptr();
     mask = tcg_temp_new_ptr();
     src2 = tcg_temp_new_ptr();
-    src1 = get_gpr(s, rs1, EXT_NONE);
+    src1 = get_gpr(s, rs1, EXT_SIGN);
 
     data = FIELD_DP32(data, VDATA, VM, vm);
     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
@@ -1895,14 +1952,41 @@ GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
 GEN_OPIVX_TRANS(vmax_vx, opivx_check)
 
 /* Vector Single-Width Integer Multiply Instructions */
+
+static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a)
+{
+    /*
+     * All Zve* extensions support all vector integer instructions,
+     * except that the vmulh integer multiply variants
+     * that return the high word of the product
+     * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
+     * are not included for EEW=64 in Zve64*. (Section 18.2)
+     */
+    return opivv_check(s, a) &&
+           (!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true);
+}
+
+static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
+{
+    /*
+     * All Zve* extensions support all vector integer instructions,
+     * except that the vmulh integer multiply variants
+     * that return the high word of the product
+     * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
+     * are not included for EEW=64 in Zve64*. (Section 18.2)
+     */
+    return opivx_check(s, a) &&
+           (!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true);
+}
+
 GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
-GEN_OPIVV_TRANS(vmulh_vv, opivv_check)
-GEN_OPIVV_TRANS(vmulhu_vv, opivv_check)
-GEN_OPIVV_TRANS(vmulhsu_vv, opivv_check)
+GEN_OPIVV_TRANS(vmulh_vv, vmulh_vv_check)
+GEN_OPIVV_TRANS(vmulhu_vv, vmulh_vv_check)
+GEN_OPIVV_TRANS(vmulhsu_vv, vmulh_vv_check)
 GEN_OPIVX_GVEC_TRANS(vmul_vx, muls)
-GEN_OPIVX_TRANS(vmulh_vx, opivx_check)
-GEN_OPIVX_TRANS(vmulhu_vx, opivx_check)
-GEN_OPIVX_TRANS(vmulhsu_vx, opivx_check)
+GEN_OPIVX_TRANS(vmulh_vx, vmulh_vx_check)
+GEN_OPIVX_TRANS(vmulhu_vx, vmulh_vx_check)
+GEN_OPIVX_TRANS(vmulhsu_vx, vmulh_vx_check)
 
 /* Vector Integer Divide Instructions */
 GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
@@ -2083,8 +2167,31 @@ GEN_OPIVX_TRANS(vasub_vx, opivx_check)
 GEN_OPIVX_TRANS(vasubu_vx, opivx_check)
 
 /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
-GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
-GEN_OPIVX_TRANS(vsmul_vx, opivx_check)
+
+static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a)
+{
+    /*
+     * All Zve* extensions support all vector fixed-point arithmetic
+     * instructions, except that vsmul.vv and vsmul.vx are not supported
+     * for EEW=64 in Zve64*. (Section 18.2)
+     */
+    return opivv_check(s, a) &&
+           (!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true);
+}
+
+static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
+{
+    /*
+     * All Zve* extensions support all vector fixed-point arithmetic
+     * instructions, except that vsmul.vv and vsmul.vx are not supported
+     * for EEW=64 in Zve64*. (Section 18.2)
+     */
+    return opivx_check(s, a) &&
+           (!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true);
+}
+
+GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check)
+GEN_OPIVX_TRANS(vsmul_vx, vsmul_vx_check)
 
 /* Vector Single-Width Scaling Shift Instructions */
 GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
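
The `cond ? s->sew != MO_64 : true` ternary shared by vmulh_*_check and vsmul_*_check above is material implication: "if Zve64f is the vector provider, then SEW must not be 64". A self-contained check, with plain ints standing in for the translator flags, that the ternary and the equivalent disjunction agree everywhere:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        for (int rvv = 0; rvv <= 1; rvv++) {
            for (int zve64f = 0; zve64f <= 1; zve64f++) {
                for (int sew64 = 0; sew64 <= 1; sew64++) {
                    /* As written in the checks above. */
                    bool ternary = (!rvv && zve64f) ? !sew64 : true;
                    /* Equivalent "A implies B" form. */
                    bool implies = !(!rvv && zve64f) || !sew64;
                    assert(ternary == implies);
                }
            }
        }
        printf("ternary and implication forms agree on all inputs\n");
        return 0;
    }
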
@@ -2143,7 +2250,9 @@ static bool opfvv_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
+           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) &&
+           require_zve32f(s) &&
+           require_zve64f(s);
 }
 
 /* OPFVV without GVEC IR */
@@ -2223,7 +2332,9 @@ static bool opfvf_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_ss(s, a->rd, a->rs2, a->vm);
+           vext_check_ss(s, a->rd, a->rs2, a->vm) &&
+           require_zve32f(s) &&
+           require_zve64f(s);
 }
 
 /* OPFVF without GVEC IR */
@@ -2257,7 +2368,9 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
            require_scale_rvf(s) &&
            (s->sew != MO_8) &&
            vext_check_isa_ill(s) &&
-           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
+           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
+           require_scale_zve32f(s) &&
+           require_scale_zve64f(s);
 }
 
 /* OPFVV with WIDEN */
@@ -2296,7 +2409,9 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
            require_scale_rvf(s) &&
            (s->sew != MO_8) &&
            vext_check_isa_ill(s) &&
-           vext_check_ds(s, a->rd, a->rs2, a->vm);
+           vext_check_ds(s, a->rd, a->rs2, a->vm) &&
+           require_scale_zve32f(s) &&
+           require_scale_zve64f(s);
 }
 
 /* OPFVF with WIDEN */
@@ -2326,7 +2441,9 @@ static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
            require_scale_rvf(s) &&
            (s->sew != MO_8) &&
            vext_check_isa_ill(s) &&
-           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
+           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm) &&
+           require_scale_zve32f(s) &&
+           require_scale_zve64f(s);
 }
 
 /* WIDEN OPFVV with WIDEN */
@@ -2365,7 +2482,9 @@ static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
            require_scale_rvf(s) &&
            (s->sew != MO_8) &&
            vext_check_isa_ill(s) &&
-           vext_check_dd(s, a->rd, a->rs2, a->vm);
+           vext_check_dd(s, a->rd, a->rs2, a->vm) &&
+           require_scale_zve32f(s) &&
+           require_scale_zve64f(s);
 }
 
 /* WIDEN OPFVF with WIDEN */
@@ -2440,7 +2559,9 @@ static bool opfv_check(DisasContext *s, arg_rmr *a)
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
            /* OPFV instructions ignore vs1 check */
-           vext_check_ss(s, a->rd, a->rs2, a->vm);
+           vext_check_ss(s, a->rd, a->rs2, a->vm) &&
+           require_zve32f(s) &&
+           require_zve64f(s);
 }
 
 static bool do_opfv(DisasContext *s, arg_rmr *a,
@@ -2505,7 +2626,9 @@ static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_mss(s, a->rd, a->rs1, a->rs2);
+           vext_check_mss(s, a->rd, a->rs1, a->rs2) &&
+           require_zve32f(s) &&
+           require_zve64f(s);
 }
 
 GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
@@ -2518,7 +2641,9 @@ static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_ms(s, a->rd, a->rs2);
+           vext_check_ms(s, a->rd, a->rs2) &&
+           require_zve32f(s) &&
+           require_zve64f(s);
 }
 
 GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
@@ -2539,7 +2664,9 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
     if (require_rvv(s) &&
         require_rvf(s) &&
         vext_check_isa_ill(s) &&
-        require_align(a->rd, s->lmul)) {
+        require_align(a->rd, s->lmul) &&
+        require_zve32f(s) &&
+        require_zve64f(s)) {
         gen_set_rm(s, RISCV_FRM_DYN);
 
         TCGv_i64 t1;
@@ -2620,14 +2747,18 @@ static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
 static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
 {
     return opfv_widen_check(s, a) &&
-           require_rvf(s);
+           require_rvf(s) &&
+           require_zve32f(s) &&
+           require_zve64f(s);
 }
 
 static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
 {
     return opfv_widen_check(s, a) &&
            require_scale_rvf(s) &&
-           (s->sew != MO_8);
+           (s->sew != MO_8) &&
+           require_scale_zve32f(s) &&
+           require_scale_zve64f(s);
 }
 
 #define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \
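
Each require_zve*() helper is deliberately vacuous (returns true) unless its own extension is the one providing the vector unit, which is why the floating-point check functions above can AND both helpers unconditionally: only the active configuration imposes its SEW cap. A reduced sketch of that composition (fields and helpers are illustrative stand-ins for QEMU's state, not its API):

    #include <stdbool.h>
    #include <stdio.h>

    enum { MO_8, MO_16, MO_32, MO_64 };

    struct state {
        bool rvv, zve32f, zve64f;
        int sew;
    };

    /* Vacuously true unless Zve32f is the vector provider. */
    static bool req_zve32f(const struct state *s)
    {
        if (s->rvv) {
            return true;
        }
        return s->zve32f ? s->sew <= MO_32 : true;
    }

    /* Vacuously true unless Zve64f is the vector provider. */
    static bool req_zve64f(const struct state *s)
    {
        if (s->rvv) {
            return true;
        }
        return s->zve64f ? s->sew <= MO_32 : true;
    }

    int main(void)
    {
        /* Zve64f alone: FP ops are capped at SEW=32, so SEW=64 fails. */
        struct state s = { .rvv = false, .zve64f = true, .sew = MO_64 };
        printf("FP op, SEW=64, Zve64f: %d\n", req_zve32f(&s) && req_zve64f(&s));
        s.sew = MO_32;
        printf("FP op, SEW=32, Zve64f: %d\n", req_zve32f(&s) && req_zve64f(&s));
        return 0;
    }
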
@@ -2678,7 +2809,9 @@ static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
            require_scale_rvf(s) &&
            vext_check_isa_ill(s) &&
            /* OPFV widening instructions ignore vs1 check */
-           vext_check_ds(s, a->rd, a->rs2, a->vm);
+           vext_check_ds(s, a->rd, a->rs2, a->vm) &&
+           require_scale_zve32f(s) &&
+           require_scale_zve64f(s);
 }
 
 #define GEN_OPFXV_WIDEN_TRANS(NAME) \
@@ -2728,14 +2861,18 @@ static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
 {
     return opfv_narrow_check(s, a) &&
            require_rvf(s) &&
-           (s->sew != MO_64);
+           (s->sew != MO_64) &&
+           require_zve32f(s) &&
+           require_zve64f(s);
 }
 
 static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
 {
     return opfv_narrow_check(s, a) &&
            require_scale_rvf(s) &&
-           (s->sew != MO_8);
+           (s->sew != MO_8) &&
+           require_scale_zve32f(s) &&
+           require_scale_zve64f(s);
 }
 
 #define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \
@@ -2784,7 +2921,9 @@ static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
            require_scale_rvf(s) &&
            vext_check_isa_ill(s) &&
            /* OPFV narrowing instructions ignore vs1 check */
-           vext_check_sd(s, a->rd, a->rs2, a->vm);
+           vext_check_sd(s, a->rd, a->rs2, a->vm) &&
+           require_scale_zve32f(s) &&
+           require_scale_zve64f(s);
 }
 
 #define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \
@@ -2857,7 +2996,9 @@ GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
 static bool freduction_check(DisasContext *s, arg_rmrr *a)
 {
     return reduction_check(s, a) &&
-           require_rvf(s);
+           require_rvf(s) &&
+           require_zve32f(s) &&
+           require_zve64f(s);
 }
 
 GEN_OPFVV_TRANS(vfredsum_vs, freduction_check)
@@ -3265,7 +3406,9 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
 {
     if (require_rvv(s) &&
         require_rvf(s) &&
-        vext_check_isa_ill(s)) {
+        vext_check_isa_ill(s) &&
+        require_zve32f(s) &&
+        require_zve64f(s)) {
         gen_set_rm(s, RISCV_FRM_DYN);
 
         unsigned int ofs = (8 << s->sew);
@@ -3291,7 +3434,9 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
 {
     if (require_rvv(s) &&
         require_rvf(s) &&
-        vext_check_isa_ill(s)) {
+        vext_check_isa_ill(s) &&
+        require_zve32f(s) &&
+        require_zve64f(s)) {
         gen_set_rm(s, RISCV_FRM_DYN);
 
         /* The instructions ignore LMUL and vector register group. */
@@ -3342,13 +3487,17 @@ GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
 static bool fslideup_check(DisasContext *s, arg_rmrr *a)
 {
     return slideup_check(s, a) &&
-           require_rvf(s);
+           require_rvf(s) &&
+           require_zve32f(s) &&
+           require_zve64f(s);
 }
 
 static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
 {
     return slidedown_check(s, a) &&
-           require_rvf(s);
+           require_rvf(s) &&
+           require_zve32f(s) &&
+           require_zve64f(s);
 }
 
 GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
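
A behavioral change that is easy to miss in the middle of the patch is opivx_trans() now fetching rs1 with EXT_SIGN rather than EXT_NONE: the vector spec treats the scalar operand of .vx instructions as sign-extended to SEW, which becomes observable when SEW is wider than XLEN (e.g. SEW=64 under Zve64f on RV32). An illustrative demonstration (not QEMU code; EXT_NONE leaves the upper bits unspecified, modeled here as zeros) of the difference when a 32-bit register value feeds a 64-bit element:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t rs1 = -1;                           /* 0xffffffff in a GPR */
        uint64_t unextended = (uint32_t)rs1;        /* 0x00000000ffffffff */
        uint64_t sign_ext = (uint64_t)(int64_t)rs1; /* 0xffffffffffffffff */
        printf("without sign extension: %#llx\n",
               (unsigned long long)unextended);
        printf("with sign extension:    %#llx\n",
               (unsigned long long)sign_ext);
        return 0;
    }
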