Diffstat (limited to 'target/arm/tcg')
 target/arm/tcg/cpu64.c         |  11
 target/arm/tcg/helper-a64.c    |   8
 target/arm/tcg/hflags.c        |  30
 target/arm/tcg/m_helper.c      |   6
 target/arm/tcg/op_helper.c     |  95
 target/arm/tcg/psci.c          |   2
 target/arm/tcg/tlb_helper.c    |  27
 target/arm/tcg/translate-a64.c | 161
 target/arm/tcg/translate.h     |  16
 9 files changed, 309 insertions(+), 47 deletions(-)
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index fcda99e158..5fba2c0f04 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -1105,6 +1105,16 @@ void aarch64_max_tcg_initfn(Object *obj)
     u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0);
     cpu->clidr = u;
 
+    /*
+     * Set CTR_EL0.DIC and IDC to tell the guest it doesn't need to
+     * do any cache maintenance for data-to-instruction or
+     * instruction-to-data coherence. (Our cache ops are nops.)
+     */
+    t = cpu->ctr;
+    t = FIELD_DP64(t, CTR_EL0, IDC, 1);
+    t = FIELD_DP64(t, CTR_EL0, DIC, 1);
+    cpu->ctr = t;
+
     t = cpu->isar.id_aa64isar0;
     t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2);      /* FEAT_PMULL */
     t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);     /* FEAT_SHA1 */
@@ -1194,6 +1204,7 @@ void aarch64_max_tcg_initfn(Object *obj)
     t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);      /* FEAT_UAO */
     t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1);     /* FEAT_IESB */
     t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1);  /* FEAT_LVA */
+    t = FIELD_DP64(t, ID_AA64MMFR2, NV, 2);       /* FEAT_NV2 */
    t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1);       /* FEAT_TTST */
     t = FIELD_DP64(t, ID_AA64MMFR2, AT, 1);       /* FEAT_LSE2 */
     t = FIELD_DP64(t, ID_AA64MMFR2, IDS, 1);      /* FEAT_IDST */
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index 8ad84623d3..198b975f20 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -809,9 +809,9 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
         goto illegal_return;
     }
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     arm_call_pre_el_change_hook(env_archcpu(env));
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     if (!return_to_aa64) {
         env->aarch64 = false;
@@ -876,9 +876,9 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
      */
     aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     arm_call_el_change_hook(env_archcpu(env));
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return;
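The CTR_EL0 change above is a plain read-modify-write of two ID-register bits. As a rough stand-alone illustration (ordinary C rather than QEMU's generated FIELD_DP64() macros), using the architectural positions of CTR_EL0.IDC (bit 28) and CTR_EL0.DIC (bit 29); the starting value is an arbitrary assumption:

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Arm ARM: CTR_EL0.IDC is bit 28, CTR_EL0.DIC is bit 29 */
    #define CTR_IDC_SHIFT 28
    #define CTR_DIC_SHIFT 29

    /* Deposit a one-bit field: clear the old bit, then OR in the new value */
    static uint64_t deposit_bit(uint64_t reg, unsigned shift, uint64_t val)
    {
        return (reg & ~(1ULL << shift)) | ((val & 1) << shift);
    }

    int main(void)
    {
        uint64_t ctr = 0x8444c004;  /* made-up example value, not a real CPU's */

        ctr = deposit_bit(ctr, CTR_IDC_SHIFT, 1);
        ctr = deposit_bit(ctr, CTR_DIC_SHIFT, 1);
        assert(ctr & (1ULL << CTR_IDC_SHIFT));
        assert(ctr & (1ULL << CTR_DIC_SHIFT));
        printf("CTR_EL0 = 0x%" PRIx64 "\n", ctr);
        return 0;
    }

With both bits set, a guest that honours CTR_EL0 can skip DC CVAU and IC IVAU maintenance after writing code, which is why the patch can claim them: QEMU's cache maintenance ops are already no-ops.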
diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c
index a6ebd7571a..8e5d35d922 100644
--- a/target/arm/tcg/hflags.c
+++ b/target/arm/tcg/hflags.c
@@ -169,6 +169,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
     CPUARMTBFlags flags = {};
     ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
     uint64_t tcr = regime_tcr(env, mmu_idx);
+    uint64_t hcr = arm_hcr_el2_eff(env);
     uint64_t sctlr;
     int tbii, tbid;
 
@@ -260,8 +261,10 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
     switch (mmu_idx) {
     case ARMMMUIdx_E10_1:
     case ARMMMUIdx_E10_1_PAN:
-        /* TODO: ARMv8.3-NV */
-        DP_TBFLAG_A64(flags, UNPRIV, 1);
+        /* FEAT_NV: NV,NV1 == 1,1 means we don't do UNPRIV accesses */
+        if ((hcr & (HCR_NV | HCR_NV1)) != (HCR_NV | HCR_NV1)) {
+            DP_TBFLAG_A64(flags, UNPRIV, 1);
+        }
         break;
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
@@ -285,13 +288,34 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
     if (arm_fgt_active(env, el)) {
         DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
         if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
-            DP_TBFLAG_A64(flags, FGT_ERET, 1);
+            DP_TBFLAG_A64(flags, TRAP_ERET, 1);
         }
         if (fgt_svc(env, el)) {
             DP_TBFLAG_ANY(flags, FGT_SVC, 1);
         }
     }
 
+    /*
+     * ERET can also be trapped for FEAT_NV. arm_hcr_el2_eff() takes care
+     * of "is EL2 enabled" and the NV bit can only be set if FEAT_NV is present.
+     */
+    if (el == 1 && (hcr & HCR_NV)) {
+        DP_TBFLAG_A64(flags, TRAP_ERET, 1);
+        DP_TBFLAG_A64(flags, NV, 1);
+        if (hcr & HCR_NV1) {
+            DP_TBFLAG_A64(flags, NV1, 1);
+        }
+        if (hcr & HCR_NV2) {
+            DP_TBFLAG_A64(flags, NV2, 1);
+            if (hcr & HCR_E2H) {
+                DP_TBFLAG_A64(flags, NV2_MEM_E20, 1);
+            }
+            if (env->cp15.sctlr_el[2] & SCTLR_EE) {
+                DP_TBFLAG_A64(flags, NV2_MEM_BE, 1);
+            }
+        }
+    }
+
     if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
         /*
          * Set MTE_ACTIVE if any access may be Checked, and leave clear
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
index a26adb75aa..d1f1e02acc 100644
--- a/target/arm/tcg/m_helper.c
+++ b/target/arm/tcg/m_helper.c
@@ -373,8 +373,8 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
     bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
     bool take_exception;
 
-    /* Take the iothread lock as we are going to touch the NVIC */
-    qemu_mutex_lock_iothread();
+    /* Take the BQL as we are going to touch the NVIC */
+    bql_lock();
 
     /* Check the background context had access to the FPU */
     if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
@@ -428,7 +428,7 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
     take_exception = !stacked_ok &&
         armv7m_nvic_can_take_pending_exception(env->nvic);
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     if (take_exception) {
         raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
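The interesting part of the hflags change is that LDTR/STTR ("unprivileged") accesses stop being unprivileged when HCR_EL2.NV and NV1 are both 1. A minimal sketch of just that decision, assuming only the architectural HCR_EL2 bit positions (NV = bit 42, NV1 = bit 43):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Arm ARM: HCR_EL2.NV is bit 42, NV1 is bit 43 */
    #define HCR_NV  (1ULL << 42)
    #define HCR_NV1 (1ULL << 43)

    /* The UNPRIV TB flag is set unless NV,NV1 == 1,1 */
    static bool want_unpriv_flag(uint64_t hcr)
    {
        return (hcr & (HCR_NV | HCR_NV1)) != (HCR_NV | HCR_NV1);
    }

    int main(void)
    {
        assert(want_unpriv_flag(0));                 /* normal EL1 */
        assert(want_unpriv_flag(HCR_NV));            /* NV alone is not enough */
        assert(!want_unpriv_flag(HCR_NV | HCR_NV1)); /* NV,NV1 == 1,1 */
        return 0;
    }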
diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c
index ea08936a85..b5ac26061c 100644
--- a/target/arm/tcg/op_helper.c
+++ b/target/arm/tcg/op_helper.c
@@ -121,6 +121,61 @@ void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
     }
 }
 
+/* Sign/zero extend */
+uint32_t HELPER(sxtb16)(uint32_t x)
+{
+    uint32_t res;
+    res = (uint16_t)(int8_t)x;
+    res |= (uint32_t)(int8_t)(x >> 16) << 16;
+    return res;
+}
+
+static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
+{
+    /*
+     * Take a division-by-zero exception if necessary; otherwise return
+     * to get the usual non-trapping division behaviour (result of 0)
+     */
+    if (arm_feature(env, ARM_FEATURE_M)
+        && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
+        raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
+    }
+}
+
+uint32_t HELPER(uxtb16)(uint32_t x)
+{
+    uint32_t res;
+    res = (uint16_t)(uint8_t)x;
+    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
+    return res;
+}
+
+int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
+{
+    if (den == 0) {
+        handle_possible_div0_trap(env, GETPC());
+        return 0;
+    }
+    if (num == INT_MIN && den == -1) {
+        return INT_MIN;
+    }
+    return num / den;
+}
+
+uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
+{
+    if (den == 0) {
+        handle_possible_div0_trap(env, GETPC());
+        return 0;
+    }
+    return num / den;
+}
+
+uint32_t HELPER(rbit)(uint32_t x)
+{
+    return revbit32(x);
+}
+
 uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
 {
     uint32_t res = a + b;
@@ -427,9 +482,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
 {
     uint32_t mask;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     arm_call_pre_el_change_hook(env_archcpu(env));
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
     cpsr_write(env, val, mask, CPSRWriteExceptionReturn);
@@ -442,9 +497,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
     env->regs[15] &= (env->thumb ? ~1 : ~3);
     arm_rebuild_hflags(env);
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     arm_call_el_change_hook(env_archcpu(env));
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 }
 
 /* Access to user mode registers from privileged modes. */
@@ -803,9 +858,9 @@ void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
     const ARMCPRegInfo *ri = rip;
 
     if (ri->type & ARM_CP_IO) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
         ri->writefn(env, ri, value);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     } else {
         ri->writefn(env, ri, value);
     }
@@ -817,9 +872,9 @@ uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip)
     uint32_t res;
 
     if (ri->type & ARM_CP_IO) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
         res = ri->readfn(env, ri);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     } else {
         res = ri->readfn(env, ri);
     }
@@ -832,9 +887,9 @@ void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value)
     const ARMCPRegInfo *ri = rip;
 
     if (ri->type & ARM_CP_IO) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
         ri->writefn(env, ri, value);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     } else {
         ri->writefn(env, ri, value);
     }
@@ -846,9 +901,9 @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip)
     uint64_t res;
 
     if (ri->type & ARM_CP_IO) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
         res = ri->readfn(env, ri);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     } else {
         res = ri->readfn(env, ri);
     }
@@ -930,7 +985,14 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
      *
      *  Conduit SMC, valid call  Trap to EL2         PSCI Call
      *  Conduit SMC, inval call  Trap to EL2         Undef insn
-     *  Conduit not SMC          Undef insn          Undef insn
+     *  Conduit not SMC          Undef or trap[1]    Undef insn
+     *
+     * [1] In this case:
+     *  - if HCR_EL2.NV == 1 we must trap to EL2
+     *  - if HCR_EL2.NV == 0 then newer architecture revisions permit
+     *    AArch64 (but not AArch32) to trap to EL2 as an IMPDEF choice
+     *  - otherwise we must UNDEF
+     * We take the IMPDEF choice to always UNDEF if HCR_EL2.NV == 0.
      */
 
     /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
@@ -944,9 +1006,12 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
                                                      : smd_flag && !secure;
 
     if (!arm_feature(env, ARM_FEATURE_EL3) &&
+        !(arm_hcr_el2_eff(env) & HCR_NV) &&
         cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
-        /* If we have no EL3 then SMC always UNDEFs and can't be
-         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
+        /*
+         * If we have no EL3 then traditionally SMC always UNDEFs and can't be
+         * trapped to EL2. For nested virtualization, SMC can be trapped to
+         * the outer hypervisor. PSCI-via-SMC is a sort of ersatz EL3
          * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
diff --git a/target/arm/tcg/psci.c b/target/arm/tcg/psci.c
index 6c1239bb96..9080a91d9c 100644
--- a/target/arm/tcg/psci.c
+++ b/target/arm/tcg/psci.c
@@ -107,7 +107,7 @@ void arm_handle_psci_call(ARMCPU *cpu)
         }
         target_cpu = ARM_CPU(target_cpu_state);
 
-        g_assert(qemu_mutex_iothread_locked());
+        g_assert(bql_locked());
         ret = target_cpu->power_state;
         break;
     default:
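The sdiv/udiv helpers above exist because A-profile division has two corner cases that C's / operator does not guarantee: dividing by zero yields 0 (unless an M-profile CPU is configured to trap), and INT_MIN / -1 wraps to INT_MIN instead of being undefined behaviour. A host-runnable model of the result values:

    #include <assert.h>
    #include <stdint.h>

    /* Result of AArch32 SDIV, mirroring the helper's special cases */
    static int32_t model_sdiv(int32_t n, int32_t d)
    {
        if (d == 0) {
            return 0;            /* non-trapping divide-by-zero gives 0 */
        }
        if (n == INT32_MIN && d == -1) {
            return INT32_MIN;    /* the one overflow case wraps */
        }
        return n / d;            /* C99 division already rounds toward zero */
    }

    int main(void)
    {
        assert(model_sdiv(7, 0) == 0);
        assert(model_sdiv(INT32_MIN, -1) == INT32_MIN);
        assert(model_sdiv(-7, 2) == -3);
        return 0;
    }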
diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
index 4fdd85359e..dd5de74ffb 100644
--- a/target/arm/tcg/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -50,7 +50,15 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
      * ST64BV, or ST64BV0 insns report syndrome info even for stage-1
      * faults and regardless of the target EL.
      */
-    if (!(template_syn & ARM_EL_ISV) || target_el != 2
+    if (template_syn & ARM_EL_VNCR) {
+        /*
+         * FEAT_NV2 faults on accesses via VNCR_EL2 are a special case:
+         * they are always reported as "same EL", even though we are going
+         * from EL1 to EL2.
+         */
+        assert(!fi->stage2);
+        syn = syn_data_abort_vncr(fi->ea, is_write, fsc);
+    } else if (!(template_syn & ARM_EL_ISV) || target_el != 2
         || fi->s1ptw || !fi->stage2) {
         syn = syn_data_abort_no_iss(same_el, 0,
                                     fi->ea, 0, fi->s1ptw, is_write, fsc);
@@ -169,6 +177,20 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
     int current_el = arm_current_el(env);
     bool same_el;
     uint32_t syn, exc, fsr, fsc;
+    /*
+     * We know this must be a data or insn abort, and that
+     * env->exception.syndrome contains the template syndrome set
+     * up at translate time. So we can check only the VNCR bit
+     * (and indeed syndrome does not have the EC field in it,
+     * because we masked that out in disas_set_insn_syndrome())
+     */
+    bool is_vncr = (mmu_idx != MMU_INST_FETCH) &&
+        (env->exception.syndrome & ARM_EL_VNCR);
+
+    if (is_vncr) {
+        /* FEAT_NV2 faults on accesses via VNCR_EL2 go to EL2 */
+        target_el = 2;
+    }
 
     if (report_as_gpc_exception(cpu, current_el, fi)) {
         target_el = 3;
@@ -177,7 +199,8 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
 
         syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk,
                       access_type == MMU_INST_FETCH,
-                      encode_gpcsc(fi), 0, fi->s1ptw,
+                      encode_gpcsc(fi), is_vncr,
+                      0, fi->s1ptw,
                       access_type == MMU_DATA_STORE, fsc);
 
         env->cp15.mfar_el3 = fi->paddr;
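merge_syn_data_abort() assembles an ESR_ELx-format syndrome word, and the new branch sets the VNCR bit in it. A hedged sketch of what such a syndrome looks like; it approximates rather than reproduces QEMU's syn_data_abort_vncr(), using field positions from the Arm ARM (EC in bits [31:26], where 0x25 is a same-EL Data Abort; IL in bit 25; and in the Data Abort ISS, VNCR in bit 13, EA in bit 9, WnR in bit 6, FSC in bits [5:0]):

    #include <stdbool.h>
    #include <stdint.h>

    #define EC_DATAABORT_SAME_EL 0x25u
    #define ESR_IL   (1u << 25)
    #define ISS_VNCR (1u << 13)
    #define ISS_EA   (1u << 9)
    #define ISS_WNR  (1u << 6)

    static uint32_t sketch_syn_data_abort_vncr(bool ea, bool is_write, int fsc)
    {
        return (EC_DATAABORT_SAME_EL << 26) | ESR_IL | ISS_VNCR
               | (ea ? ISS_EA : 0) | (is_write ? ISS_WNR : 0) | (fsc & 0x3f);
    }

The "same EL" EC is the whole point of the special case: the fault is taken to EL2 but reported as if no exception-level change had happened.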
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index a2e49c39f9..27335e8540 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -18,6 +18,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "exec/exec-all.h"
 #include "translate.h"
 #include "translate-a64.h"
 #include "qemu/log.h"
@@ -1605,7 +1606,7 @@ static bool trans_ERET(DisasContext *s, arg_ERET *a)
     if (s->current_el == 0) {
         return false;
     }
-    if (s->fgt_eret) {
+    if (s->trap_eret) {
         gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(0), 2);
         return true;
     }
@@ -1632,7 +1633,7 @@ static bool trans_ERETA(DisasContext *s, arg_reta *a)
         return false;
     }
     /* The FGT trap takes precedence over an auth trap. */
-    if (s->fgt_eret) {
+    if (s->trap_eret) {
         gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(a->m ? 3 : 2), 2);
         return true;
     }
@@ -2131,16 +2132,19 @@ static void handle_sys(DisasContext *s, bool isread,
                                crn, crm, op0, op1, op2);
     const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
     bool need_exit_tb = false;
+    bool nv_trap_to_el2 = false;
+    bool nv_redirect_reg = false;
+    bool skip_fp_access_checks = false;
+    bool nv2_mem_redirect = false;
     TCGv_ptr tcg_ri = NULL;
     TCGv_i64 tcg_rt;
-    uint32_t syndrome;
+    uint32_t syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
 
     if (crn == 11 || crn == 15) {
         /*
          * Check for TIDCP trap, which must take precedence over
          * the UNDEF for "no such register" etc.
          */
-        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
         switch (s->current_el) {
         case 0:
             if (dc_isar_feature(aa64_tidcp1, s)) {
@@ -2164,17 +2168,65 @@ static void handle_sys(DisasContext *s, bool isread,
         return;
     }
 
+    if (s->nv2 && ri->nv2_redirect_offset) {
+        /*
+         * Some registers always redirect to memory; some only do so if
+         * HCR_EL2.NV1 is 0, and some only if NV1 is 1 (these come in
+         * pairs which share an offset; see the table in R_CSRPQ).
+         */
+        if (ri->nv2_redirect_offset & NV2_REDIR_NV1) {
+            nv2_mem_redirect = s->nv1;
+        } else if (ri->nv2_redirect_offset & NV2_REDIR_NO_NV1) {
+            nv2_mem_redirect = !s->nv1;
+        } else {
+            nv2_mem_redirect = true;
+        }
+    }
+
     /* Check access permissions */
     if (!cp_access_ok(s->current_el, ri, isread)) {
-        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
-        return;
+        /*
+         * FEAT_NV/NV2 handling does not do the usual FP access checks
+         * for registers only accessible at EL2 (though it *does* do them
+         * for registers accessible at EL1).
+         */
+        skip_fp_access_checks = true;
+        if (s->nv2 && (ri->type & ARM_CP_NV2_REDIRECT)) {
+            /*
+             * This is one of the few EL2 registers which should redirect
+             * to the equivalent EL1 register. We do that after running
+             * the EL2 register's accessfn.
+             */
+            nv_redirect_reg = true;
+            assert(!nv2_mem_redirect);
+        } else if (nv2_mem_redirect) {
+            /*
+             * NV2 redirect-to-memory takes precedence over trap to EL2 or
+             * UNDEF to EL1.
+             */
+        } else if (s->nv && arm_cpreg_traps_in_nv(ri)) {
+            /*
+             * This register / instruction exists and is an EL2 register, so
+             * we must trap to EL2 if accessed in nested virtualization EL1
+             * instead of UNDEFing. We'll do that after the usual access
+             * checks. (This makes a difference only for a couple of
+             * registers like VSTTBR_EL2 where the "UNDEF if NonSecure"
+             * should take priority over the trap-to-EL2. Most trapped-by-
+             * FEAT_NV registers have an accessfn which does nothing when
+             * called from EL1, because the trap-to-EL3 controls which would
+             * apply to that register at EL2 don't take priority over the
+             * FEAT_NV trap-to-EL2.)
+             */
+            nv_trap_to_el2 = true;
+        } else {
+            gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
+            return;
+        }
     }
 
     if (ri->accessfn || (ri->fgt && s->fgt_active)) {
         /* Emit code to perform further access permissions checks at
          * runtime; this may result in an exception.
          */
-        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
         gen_a64_update_pc(s, 0);
         tcg_ri = tcg_temp_new_ptr();
         gen_helper_access_check_cp_reg(tcg_ri, tcg_env,
@@ -2189,6 +2241,78 @@ static void handle_sys(DisasContext *s, bool isread,
         gen_a64_update_pc(s, 0);
     }
 
+    if (!skip_fp_access_checks) {
+        if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
+            return;
+        } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
+            return;
+        } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
+            return;
+        }
+    }
+
+    if (nv_trap_to_el2) {
+        gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
+        return;
+    }
+
+    if (nv_redirect_reg) {
+        /*
+         * FEAT_NV2 redirection of an EL2 register to an EL1 register.
+         * Conveniently in all cases the encoding of the EL1 register is
+         * identical to the EL2 register except that opc1 is 0.
+         * Get the reginfo for the EL1 register to use for the actual access.
+         * We don't use the EL1 register's access function, and
+         * fine-grained-traps on EL1 also do not apply here.
+         */
+        key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
+                                 crn, crm, op0, 0, op2);
+        ri = get_arm_cp_reginfo(s->cp_regs, key);
+        assert(ri);
+        assert(cp_access_ok(s->current_el, ri, isread));
+        /*
+         * We might not have done an update_pc earlier, so check we don't
+         * need it. We could support this in future if necessary.
+         */
+        assert(!(ri->type & ARM_CP_RAISES_EXC));
+    }
+
+    if (nv2_mem_redirect) {
+        /*
+         * This system register is being redirected into an EL2 memory
+         * access. This means it is not an IO operation, doesn't change
+         * hflags, and need not end the TB, because it has no side effects.
+         *
+         * The access is 64-bit single copy atomic, guaranteed aligned
+         * because of the definition of VNCR_EL2. Its endianness depends on
+         * SCTLR_EL2.EE, not on the data endianness of EL1.
+         * It is done under either the EL2 translation regime or the EL2&0
+         * translation regime, depending on HCR_EL2.E2H. It behaves as if
+         * PSTATE.PAN is 0.
+         */
+        TCGv_i64 ptr = tcg_temp_new_i64();
+        MemOp mop = MO_64 | MO_ALIGN | MO_ATOM_IFALIGN;
+        ARMMMUIdx armmemidx = s->nv2_mem_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
+        int memidx = arm_to_core_mmu_idx(armmemidx);
+        uint32_t syn;
+
+        mop |= (s->nv2_mem_be ? MO_BE : MO_LE);
+
+        tcg_gen_ld_i64(ptr, tcg_env, offsetof(CPUARMState, cp15.vncr_el2));
+        tcg_gen_addi_i64(ptr, ptr,
+                         (ri->nv2_redirect_offset & ~NV2_REDIR_FLAG_MASK));
+        tcg_rt = cpu_reg(s, rt);
+
+        syn = syn_data_abort_vncr(0, !isread, 0);
+        disas_set_insn_syndrome(s, syn);
+        if (isread) {
+            tcg_gen_qemu_ld_i64(tcg_rt, ptr, memidx, mop);
+        } else {
+            tcg_gen_qemu_st_i64(tcg_rt, ptr, memidx, mop);
+        }
+        return;
+    }
+
     /* Handle special cases first */
     switch (ri->type & ARM_CP_SPECIAL_MASK) {
     case 0:
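The code just above turns a system-register access into a 64-bit load or store at VNCR_EL2 plus an architected per-register offset, with QEMU's private flag bits masked off the stored offset. A sketch of only the address arithmetic; NV2_REDIR_FLAG_MASK here is an assumed stand-in for QEMU's NV1/NO_NV1 bookkeeping bits, and the example offsets (SPSR_EL1 at 0x160, ELR_EL1 at 0x230) are from the architected VNCR_EL2 memory map:

    #include <assert.h>
    #include <stdint.h>

    #define NV2_REDIR_FLAG_MASK 0xc0000000u  /* assumption, not QEMU's value */

    /* guest address = VNCR_EL2 page base + per-register offset */
    static uint64_t nv2_redirect_addr(uint64_t vncr_el2, uint32_t offset)
    {
        return vncr_el2 + (offset & ~NV2_REDIR_FLAG_MASK);
    }

    int main(void)
    {
        uint64_t vncr = 0x40000000;  /* hypothetical page the guest set up */

        assert(nv2_redirect_addr(vncr, 0x160) == 0x40000160); /* SPSR_EL1 */
        assert(nv2_redirect_addr(vncr, 0x230) == 0x40000230); /* ELR_EL1 */
        return 0;
    }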
@@ -2204,12 +2328,17 @@ static void handle_sys(DisasContext *s, bool isread,
         }
         return;
     case ARM_CP_CURRENTEL:
-        /* Reads as current EL value from pstate, which is
+    {
+        /*
+         * Reads as current EL value from pstate, which is
          * guaranteed to be constant by the tb flags.
+         * For nested virt we should report EL2.
          */
+        int el = s->nv ? 2 : s->current_el;
         tcg_rt = cpu_reg(s, rt);
-        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
+        tcg_gen_movi_i64(tcg_rt, el << 2);
         return;
+    }
     case ARM_CP_DC_ZVA:
         /* Writes clear the aligned block of memory which rt points into. */
         if (s->mte_active[0]) {
@@ -2267,13 +2396,6 @@ static void handle_sys(DisasContext *s, bool isread,
     default:
         g_assert_not_reached();
     }
-    if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
-        return;
-    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
-        return;
-    } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
-        return;
-    }
 
     if (ri->type & ARM_CP_IO) {
         /* I/O operations must end the TB here (whether read or write) */
@@ -13979,7 +14101,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
     dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
     dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
-    dc->fgt_eret = EX_TBFLAG_A64(tb_flags, FGT_ERET);
+    dc->trap_eret = EX_TBFLAG_A64(tb_flags, TRAP_ERET);
     dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
     dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
     dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
@@ -13996,6 +14118,11 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
     dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
     dc->naa = EX_TBFLAG_A64(tb_flags, NAA);
+    dc->nv = EX_TBFLAG_A64(tb_flags, NV);
+    dc->nv1 = EX_TBFLAG_A64(tb_flags, NV1);
+    dc->nv2 = EX_TBFLAG_A64(tb_flags, NV2);
+    dc->nv2_mem_e20 = EX_TBFLAG_A64(tb_flags, NV2_MEM_E20);
+    dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE);
     dc->vec_len = 0;
     dc->vec_stride = 0;
     dc->cp_regs = arm_cpu->cp_regs;
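The ARM_CP_CURRENTEL hunk above is the one place the emulation deliberately misreports state: with HCR_EL2.NV set, an EL1 guest reading CurrentEL must see EL2, so it believes it is the hypervisor. CurrentEL keeps the level in bits [3:2], so the returned values are:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* CurrentEL encodes the exception level in bits [3:2] */
    static uint64_t current_el_read(int real_el, bool nv)
    {
        int reported = nv ? 2 : real_el;  /* FEAT_NV: EL1 guest sees EL2 */
        return (uint64_t)reported << 2;
    }

    int main(void)
    {
        assert(current_el_read(1, false) == 0x4); /* EL1 reads 0b0100 */
        assert(current_el_read(1, true)  == 0x8); /* NV guest at EL1 sees EL2 */
        return 0;
    }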
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index 3c3bb3431a..93be745cf3 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -138,12 +138,22 @@ typedef struct DisasContext {
     bool mve_no_pred;
     /* True if fine-grained traps are active */
     bool fgt_active;
-    /* True if fine-grained trap on ERET is enabled */
-    bool fgt_eret;
     /* True if fine-grained trap on SVC is enabled */
     bool fgt_svc;
+    /* True if a trap on ERET is enabled (FGT or NV) */
+    bool trap_eret;
     /* True if FEAT_LSE2 SCTLR_ELx.nAA is set */
     bool naa;
+    /* True if FEAT_NV HCR_EL2.NV is enabled */
+    bool nv;
+    /* True if NV enabled and HCR_EL2.NV1 is set */
+    bool nv1;
+    /* True if NV enabled and HCR_EL2.NV2 is set */
+    bool nv2;
+    /* True if NV2 enabled and NV2 RAM accesses use EL2&0 translation regime */
+    bool nv2_mem_e20;
+    /* True if NV2 enabled and NV2 RAM accesses are big-endian */
+    bool nv2_mem_be;
     /*
      * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
      * < 0, set by the current instruction.
@@ -159,6 +169,8 @@ typedef struct DisasContext {
     int c15_cpar;
     /* TCG op of the current insn_start. */
     TCGOp *insn_start;
+    /* Offset from VNCR_EL2 when FEAT_NV2 redirects this reg to memory */
+    uint32_t nv2_redirect_offset;
 } DisasContext;
 
 typedef struct DisasCompare {
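The new DisasContext fields follow the usual TCG pattern: anything that changes how an instruction must be translated, such as the NV* state, has to be folded into the TB flags so a cached translation block is only reused when that state matches, then unpacked once when translation of a block starts. A toy sketch of the unpack step, with made-up bit positions (QEMU's real ones are generated from its TB-flag field definitions):

    #include <stdbool.h>
    #include <stdint.h>

    /* made-up flag bit positions, for illustration only */
    enum { TB_NV, TB_NV1, TB_NV2, TB_NV2_MEM_E20, TB_NV2_MEM_BE };

    struct toy_ctx {
        bool nv, nv1, nv2, nv2_mem_e20, nv2_mem_be;
    };

    /* Sampled once per translation block, then constant for the whole TB */
    static void unpack_nv_flags(struct toy_ctx *dc, uint32_t tb_flags)
    {
        dc->nv          = tb_flags & (1u << TB_NV);
        dc->nv1         = tb_flags & (1u << TB_NV1);
        dc->nv2         = tb_flags & (1u << TB_NV2);
        dc->nv2_mem_e20 = tb_flags & (1u << TB_NV2_MEM_E20);
        dc->nv2_mem_be  = tb_flags & (1u << TB_NV2_MEM_BE);
    }

This is also why hflags.c above must recompute the flags whenever HCR_EL2 changes: a TB translated with stale NV bits must not keep executing.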