Diffstat (limited to 'target/arm')
-rw-r--r--  target/arm/cpu-features.h        5
-rw-r--r--  target/arm/cpu-qom.h             5
-rw-r--r--  target/arm/cpu.c               151
-rw-r--r--  target/arm/cpu.h                 9
-rw-r--r--  target/arm/helper.c            101
-rw-r--r--  target/arm/internals.h          21
-rw-r--r--  target/arm/tcg/a64.decode        1
-rw-r--r--  target/arm/tcg/cpu64.c           1
-rw-r--r--  target/arm/tcg/helper-a64.c     16
-rw-r--r--  target/arm/tcg/helper-a64.h      1
-rw-r--r--  target/arm/tcg/translate-a64.c  19
11 files changed, 309 insertions(+), 21 deletions(-)
diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h
index e5758d9fbc..b300d0446d 100644
--- a/target/arm/cpu-features.h
+++ b/target/arm/cpu-features.h
@@ -681,6 +681,11 @@ static inline bool isar_feature_aa64_sme(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SME) != 0;
 }
 
+static inline bool isar_feature_aa64_nmi(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, NMI) != 0;
+}
+
 static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id)
 {
     return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 1;
diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
index 8e032691db..b497667d61 100644
--- a/target/arm/cpu-qom.h
+++ b/target/arm/cpu-qom.h
@@ -36,11 +36,14 @@ DECLARE_CLASS_CHECKERS(AArch64CPUClass, AARCH64_CPU,
 #define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
 #define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
 
-/* Meanings of the ARMCPU object's four inbound GPIO lines */
+/* Meanings of the ARMCPU object's seven inbound GPIO lines */
 #define ARM_CPU_IRQ 0
 #define ARM_CPU_FIQ 1
 #define ARM_CPU_VIRQ 2
 #define ARM_CPU_VFIQ 3
+#define ARM_CPU_NMI 4
+#define ARM_CPU_VINMI 5
+#define ARM_CPU_VFNMI 6
 
 /* For M profile, some registers are banked secure vs non-secure;
  * these are represented as a 2-element array where the first element
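The new GPIO line indexes in cpu-qom.h are driven like any other qemu_irq. As a rough sketch of how a board or interrupt-controller model might pulse the new NMI input (illustrative only, not part of this diff; the GIC-side FEAT_GICv3_NMI wiring is a separate change):

    /* Look up the CPU's new NMI input line by index ... */
    qemu_irq nmi_in = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_NMI);

    /*
     * ... then assert and deassert it like IRQ/FIQ; this lands in
     * arm_cpu_set_irq() below with irq == ARM_CPU_NMI.
     */
    qemu_set_irq(nmi_in, 1);
    qemu_set_irq(nmi_in, 0);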
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index ab8d007a86..a152def241 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -122,6 +122,13 @@ void arm_restore_state_to_opc(CPUState *cs,
 }
 #endif /* CONFIG_TCG */
 
+/*
+ * With SCTLR_ELx.NMI == 0, IRQ with Superpriority is masked identically with
+ * IRQ without Superpriority. Moreover, if the GIC is configured so that
+ * FEAT_GICv3_NMI is only set if FEAT_NMI is set, then we won't ever see
+ * CPU_INTERRUPT_*NMI anyway. So we might as well accept NMI here
+ * unconditionally.
+ */
 static bool arm_cpu_has_work(CPUState *cs)
 {
     ARMCPU *cpu = ARM_CPU(cs);
@@ -129,6 +136,7 @@ static bool arm_cpu_has_work(CPUState *cs)
     return (cpu->power_state != PSCI_OFF)
         && cs->interrupt_request &
         (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
+         | CPU_INTERRUPT_NMI | CPU_INTERRUPT_VINMI | CPU_INTERRUPT_VFNMI
          | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
          | CPU_INTERRUPT_VSERR | CPU_INTERRUPT_EXITTB);
 }
@@ -212,7 +220,7 @@ static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
     assert(oldvalue == newvalue);
 }
 
-static void arm_cpu_reset_hold(Object *obj)
+static void arm_cpu_reset_hold(Object *obj, ResetType type)
 {
     CPUState *cs = CPU(obj);
     ARMCPU *cpu = ARM_CPU(cs);
@@ -220,7 +228,7 @@ static void arm_cpu_reset_hold(Object *obj)
     CPUARMState *env = &cpu->env;
 
     if (acc->parent_phases.hold) {
-        acc->parent_phases.hold(obj);
+        acc->parent_phases.hold(obj, type);
     }
 
     memset(env, 0, offsetof(CPUARMState, end_reset_fields));
@@ -668,6 +676,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
     CPUARMState *env = cpu_env(cs);
     bool pstate_unmasked;
     bool unmasked = false;
+    bool allIntMask = false;
 
     /*
      * Don't take exceptions if they target a lower EL.
@@ -678,13 +687,36 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
         return false;
     }
 
+    if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) &&
+        env->cp15.sctlr_el[target_el] & SCTLR_NMI && cur_el == target_el) {
+        allIntMask = env->pstate & PSTATE_ALLINT ||
+                     ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) &&
+                      (env->pstate & PSTATE_SP));
+    }
+
     switch (excp_idx) {
+    case EXCP_NMI:
+        pstate_unmasked = !allIntMask;
+        break;
+
+    case EXCP_VINMI:
+        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
+            /* VINMIs are only taken when hypervized.  */
+            return false;
+        }
+        return !allIntMask;
+    case EXCP_VFNMI:
+        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
+            /* VFNMIs are only taken when hypervized.  */
+            return false;
+        }
+        return !allIntMask;
     case EXCP_FIQ:
-        pstate_unmasked = !(env->daif & PSTATE_F);
+        pstate_unmasked = (!(env->daif & PSTATE_F)) && (!allIntMask);
         break;
 
     case EXCP_IRQ:
-        pstate_unmasked = !(env->daif & PSTATE_I);
+        pstate_unmasked = (!(env->daif & PSTATE_I)) && (!allIntMask);
         break;
 
     case EXCP_VFIQ:
@@ -692,13 +724,13 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
             /* VFIQs are only taken when hypervized.  */
             return false;
         }
-        return !(env->daif & PSTATE_F);
+        return !(env->daif & PSTATE_F) && (!allIntMask);
     case EXCP_VIRQ:
         if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
             /* VIRQs are only taken when hypervized.  */
             return false;
         }
-        return !(env->daif & PSTATE_I);
+        return !(env->daif & PSTATE_I) && (!allIntMask);
     case EXCP_VSERR:
         if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
             /* VIRQs are only taken when hypervized.  */
@@ -804,6 +836,48 @@ static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 
     /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
 
+    if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) &&
+        (arm_sctlr(env, cur_el) & SCTLR_NMI)) {
+        if (interrupt_request & CPU_INTERRUPT_NMI) {
+            excp_idx = EXCP_NMI;
+            target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+            if (arm_excp_unmasked(cs, excp_idx, target_el,
+                                  cur_el, secure, hcr_el2)) {
+                goto found;
+            }
+        }
+        if (interrupt_request & CPU_INTERRUPT_VINMI) {
+            excp_idx = EXCP_VINMI;
+            target_el = 1;
+            if (arm_excp_unmasked(cs, excp_idx, target_el,
+                                  cur_el, secure, hcr_el2)) {
+                goto found;
+            }
+        }
+        if (interrupt_request & CPU_INTERRUPT_VFNMI) {
+            excp_idx = EXCP_VFNMI;
+            target_el = 1;
+            if (arm_excp_unmasked(cs, excp_idx, target_el,
+                                  cur_el, secure, hcr_el2)) {
+                goto found;
+            }
+        }
+    } else {
+        /*
+         * NMI disabled: interrupts with superpriority are handled
+         * as if they didn't have it
+         */
+        if (interrupt_request & CPU_INTERRUPT_NMI) {
+            interrupt_request |= CPU_INTERRUPT_HARD;
+        }
+        if (interrupt_request & CPU_INTERRUPT_VINMI) {
+            interrupt_request |= CPU_INTERRUPT_VIRQ;
+        }
+        if (interrupt_request & CPU_INTERRUPT_VFNMI) {
+            interrupt_request |= CPU_INTERRUPT_VFIQ;
+        }
+    }
+
     if (interrupt_request & CPU_INTERRUPT_FIQ) {
         excp_idx = EXCP_FIQ;
         target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
@@ -867,7 +941,8 @@ void arm_cpu_update_virq(ARMCPU *cpu)
     CPUARMState *env = &cpu->env;
     CPUState *cs = CPU(cpu);
 
-    bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
+    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) &&
+                      !(arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
         (env->irq_line_state & CPU_INTERRUPT_VIRQ);
 
     if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
@@ -888,7 +963,8 @@ void arm_cpu_update_vfiq(ARMCPU *cpu)
     CPUARMState *env = &cpu->env;
     CPUState *cs = CPU(cpu);
 
-    bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
+    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VF) &&
+                      !(arm_hcrx_el2_eff(env) & HCRX_VFNMI)) ||
         (env->irq_line_state & CPU_INTERRUPT_VFIQ);
 
     if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
@@ -900,6 +976,48 @@ void arm_cpu_update_vfiq(ARMCPU *cpu)
     }
 }
 
+void arm_cpu_update_vinmi(ARMCPU *cpu)
+{
+    /*
+     * Update the interrupt level for VINMI, which is the logical OR of
+     * the HCRX_EL2.VINMI bit and the input line level from the GIC.
+     */
+    CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
+    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) &&
+                      (arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
+        (env->irq_line_state & CPU_INTERRUPT_VINMI);
+
+    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VINMI) != 0)) {
+        if (new_state) {
+            cpu_interrupt(cs, CPU_INTERRUPT_VINMI);
+        } else {
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_VINMI);
+        }
+    }
+}
+
+void arm_cpu_update_vfnmi(ARMCPU *cpu)
+{
+    /*
+     * Update the interrupt level for VFNMI, which is the HCRX_EL2.VFNMI bit.
+     */
+    CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
+    bool new_state = (arm_hcr_el2_eff(env) & HCR_VF) &&
+                      (arm_hcrx_el2_eff(env) & HCRX_VFNMI);
+
+    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFNMI) != 0)) {
+        if (new_state) {
+            cpu_interrupt(cs, CPU_INTERRUPT_VFNMI);
+        } else {
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFNMI);
+        }
+    }
+}
+
 void arm_cpu_update_vserr(ARMCPU *cpu)
 {
     /*
@@ -929,7 +1047,9 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
         [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
         [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
         [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
-        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
+        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
+        [ARM_CPU_NMI] = CPU_INTERRUPT_NMI,
+        [ARM_CPU_VINMI] = CPU_INTERRUPT_VINMI,
     };
 
     if (!arm_feature(env, ARM_FEATURE_EL2) &&
@@ -955,8 +1075,12 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
     case ARM_CPU_VFIQ:
         arm_cpu_update_vfiq(cpu);
         break;
+    case ARM_CPU_VINMI:
+        arm_cpu_update_vinmi(cpu);
+        break;
     case ARM_CPU_IRQ:
     case ARM_CPU_FIQ:
+    case ARM_CPU_NMI:
         if (level) {
             cpu_interrupt(cs, mask[irq]);
         } else {
@@ -1350,12 +1474,13 @@ static void arm_cpu_initfn(Object *obj)
 #else
     /* Our inbound IRQ and FIQ lines */
     if (kvm_enabled()) {
-        /* VIRQ and VFIQ are unused with KVM but we add them to maintain
-         * the same interface as non-KVM CPUs.
+        /*
+         * VIRQ, VFIQ, NMI, VINMI are unused with KVM but we add
+         * them to maintain the same interface as non-KVM CPUs.
          */
-        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
+        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 6);
     } else {
-        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
+        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 6);
     }
 
     qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
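A condensed restatement of the new masking rule in arm_excp_unmasked(), as a hypothetical standalone helper (not in the diff; it omits the feature and cur_el == target_el checks for brevity): when SCTLR_ELx.NMI is set at the EL the interrupt targets, interrupts with superpriority are masked only by PSTATE.ALLINT, or by PSTATE.SP when SCTLR_ELx.SPINTMASK is also set, while ordinary IRQ/FIQ masking gains the same term on top of PSTATE.I/F.

    /* Hypothetical helper mirroring the allIntMask computation above. */
    static bool arm_allint_masked(CPUARMState *env, int target_el)
    {
        if (!(env->cp15.sctlr_el[target_el] & SCTLR_NMI)) {
            return false;   /* NMI disabled: normal DAIF rules apply */
        }
        return (env->pstate & PSTATE_ALLINT) ||
               ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) &&
                (env->pstate & PSTATE_SP));
    }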
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index bc0c84873f..97997dbd08 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -61,6 +61,9 @@
 #define EXCP_DIVBYZERO      23   /* v7M DIVBYZERO UsageFault */
 #define EXCP_VSERR          24
 #define EXCP_GPC            25   /* v9 Granule Protection Check Fault */
+#define EXCP_NMI            26
+#define EXCP_VINMI          27
+#define EXCP_VFNMI          28
 /* NB: add new EXCP_ defines to the array in arm_log_exception() too */
 
 #define ARMV7M_EXCP_RESET   1
@@ -80,6 +83,9 @@
 #define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
 #define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3
 #define CPU_INTERRUPT_VSERR CPU_INTERRUPT_TGT_INT_0
+#define CPU_INTERRUPT_NMI   CPU_INTERRUPT_TGT_EXT_4
+#define CPU_INTERRUPT_VINMI CPU_INTERRUPT_TGT_EXT_0
+#define CPU_INTERRUPT_VFNMI CPU_INTERRUPT_TGT_INT_1
 
 /* The usual mapping for an AArch64 system register to its AArch32
  * counterpart is for the 32 bit world to have access to the lower
@@ -1392,6 +1398,8 @@ void pmu_init(ARMCPU *cpu);
 #define CPSR_N (1U << 31)
 #define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
 #define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)
+#define ISR_FS (1U << 9)
+#define ISR_IS (1U << 10)
 
 #define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
 #define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
@@ -1430,6 +1438,7 @@ void pmu_init(ARMCPU *cpu);
 #define PSTATE_D (1U << 9)
 #define PSTATE_BTYPE (3U << 10)
 #define PSTATE_SSBS (1U << 12)
+#define PSTATE_ALLINT (1U << 13)
 #define PSTATE_IL (1U << 20)
 #define PSTATE_SS (1U << 21)
 #define PSTATE_PAN (1U << 22)
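The new ISR_FS/ISR_IS masks name bits 9 and 10 of ISR_EL1, next to the F (bit 6), I (bit 7) and A (bit 8) bits for which isr_read() already reuses the CPSR_F/CPSR_I/CPSR_A masks. An illustrative sketch (not from the diff) of how a reader of ISR_EL1 might combine them:

    /* ISR_EL1.IS/FS report a pending IRQ/FIQ that has superpriority. */
    static inline bool isr_irq_nmi_pending(uint64_t isr)
    {
        return (isr & CPSR_I) && (isr & ISR_IS);
    }

    static inline bool isr_fiq_nmi_pending(uint64_t isr)
    {
        return (isr & CPSR_F) && (isr & ISR_FS);
    }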
diff --git a/target/arm/helper.c b/target/arm/helper.c
index a620481d7c..6b224826fb 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -2021,16 +2021,29 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
         if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
             ret |= CPSR_I;
         }
+        if (cs->interrupt_request & CPU_INTERRUPT_VINMI) {
+            ret |= ISR_IS;
+            ret |= CPSR_I;
+        }
     } else {
         if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
             ret |= CPSR_I;
         }
+
+        if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
+            ret |= ISR_IS;
+            ret |= CPSR_I;
+        }
     }
 
     if (hcr_el2 & HCR_FMO) {
         if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
             ret |= CPSR_F;
         }
+        if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) {
+            ret |= ISR_FS;
+            ret |= CPSR_F;
+        }
     } else {
         if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
             ret |= CPSR_F;
@@ -6046,15 +6059,19 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
      * and the state of the input lines from the GIC. (This requires
      * that we have the BQL, which is done by marking the
      * reginfo structs as ARM_CP_IO.)
-     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
-     * possible for it to be taken immediately, because VIRQ and
-     * VFIQ are masked unless running at EL0 or EL1, and HCR
-     * can only be written at EL2.
+     * Note that if a write to HCR pends a VIRQ or VFIQ or VINMI or
+     * VFNMI, it is never possible for it to be taken immediately
+     * because VIRQ, VFIQ, VINMI and VFNMI are masked unless running
+     * at EL0 or EL1, and HCR can only be written at EL2.
      */
     g_assert(bql_locked());
     arm_cpu_update_virq(cpu);
     arm_cpu_update_vfiq(cpu);
     arm_cpu_update_vserr(cpu);
+    if (cpu_isar_feature(aa64_nmi, cpu)) {
+        arm_cpu_update_vinmi(cpu);
+        arm_cpu_update_vfnmi(cpu);
+    }
 }
 
 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
@@ -6187,15 +6204,38 @@ bool el_is_in_host(CPUARMState *env, int el)
 static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
 {
+    ARMCPU *cpu = env_archcpu(env);
     uint64_t valid_mask = 0;
 
     /* FEAT_MOPS adds MSCEn and MCE2 */
-    if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
+    if (cpu_isar_feature(aa64_mops, cpu)) {
         valid_mask |= HCRX_MSCEN | HCRX_MCE2;
     }
 
+    /* FEAT_NMI adds TALLINT, VINMI and VFNMI */
+    if (cpu_isar_feature(aa64_nmi, cpu)) {
+        valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI;
+    }
+
     /* Clear RES0 bits.  */
     env->cp15.hcrx_el2 = value & valid_mask;
+
+    /*
+     * Updates to VINMI and VFNMI require us to update the status of
+     * virtual NMI, which are the logical OR of these bits
+     * and the state of the input lines from the GIC. (This requires
+     * that we have the BQL, which is done by marking the
+     * reginfo structs as ARM_CP_IO.)
+     * Note that if a write to HCRX pends a VINMI or VFNMI it is never
+     * possible for it to be taken immediately, because VINMI and
+     * VFNMI are masked unless running at EL0 or EL1, and HCRX
+     * can only be written at EL2.
+     */
+    if (cpu_isar_feature(aa64_nmi, cpu)) {
+        g_assert(bql_locked());
+        arm_cpu_update_vinmi(cpu);
+        arm_cpu_update_vfnmi(cpu);
+    }
 }
 
 static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -6211,6 +6251,7 @@ static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
 
 static const ARMCPRegInfo hcrx_el2_reginfo = {
     .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
+    .type = ARM_CP_IO,
     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
     .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
     .nv2_redirect_offset = 0xa0,
@@ -7494,6 +7535,37 @@ static const ARMCPRegInfo rme_mte_reginfo[] = {
       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
       .access = PL3_W, .type = ARM_CP_NOP },
 };
+
+static void aa64_allint_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                              uint64_t value)
+{
+    env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT);
+}
+
+static uint64_t aa64_allint_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    return env->pstate & PSTATE_ALLINT;
+}
+
+static CPAccessResult aa64_allint_access(CPUARMState *env,
+                                         const ARMCPRegInfo *ri, bool isread)
+{
+    if (!isread && arm_current_el(env) == 1 &&
+        (arm_hcrx_el2_eff(env) & HCRX_TALLINT)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+    return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo nmi_reginfo[] = {
+    { .name = "ALLINT", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 4, .crm = 3,
+      .type = ARM_CP_NO_RAW,
+      .access = PL1_RW, .accessfn = aa64_allint_access,
+      .fieldoffset = offsetof(CPUARMState, pstate),
+      .writefn = aa64_allint_write, .readfn = aa64_allint_read,
+      .resetfn = arm_cp_reset_ignore },
+};
 #endif /* TARGET_AARCH64 */
 
 static void define_pmu_regs(ARMCPU *cpu)
@@ -9888,6 +9960,10 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         if (cpu_isar_feature(aa64_nv2, cpu)) {
             define_arm_cp_regs(cpu, nv2_reginfo);
         }
+
+        if (cpu_isar_feature(aa64_nmi, cpu)) {
+            define_arm_cp_regs(cpu, nmi_reginfo);
+        }
 #endif
 
     if (cpu_isar_feature(any_predinv, cpu)) {
@@ -10700,6 +10776,7 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
     hcr_el2 = arm_hcr_el2_eff(env);
     switch (excp_idx) {
     case EXCP_IRQ:
+    case EXCP_NMI:
         scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
         hcr = hcr_el2 & HCR_IMO;
         break;
@@ -10758,6 +10835,9 @@ void arm_log_exception(CPUState *cs)
             [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
             [EXCP_VSERR] = "Virtual SERR",
             [EXCP_GPC] = "Granule Protection Check",
+            [EXCP_NMI] = "NMI",
+            [EXCP_VINMI] = "Virtual IRQ NMI",
+            [EXCP_VFNMI] = "Virtual FIQ NMI",
         };
 
         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@@ -11573,10 +11653,13 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
         break;
     case EXCP_IRQ:
     case EXCP_VIRQ:
+    case EXCP_NMI:
+    case EXCP_VINMI:
         addr += 0x80;
         break;
     case EXCP_FIQ:
     case EXCP_VFIQ:
+    case EXCP_VFNMI:
         addr += 0x100;
         break;
     case EXCP_VSERR:
@@ -11653,6 +11736,14 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
         }
     }
 
+    if (cpu_isar_feature(aa64_nmi, cpu)) {
+        if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) {
+            new_mode |= PSTATE_ALLINT;
+        } else {
+            new_mode &= ~PSTATE_ALLINT;
+        }
+    }
+
     pstate_write(env, PSTATE_DAIF | new_mode);
     env->aarch64 = true;
     aarch64_restore_sp(env, new_el);
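With the ALLINT register in place, a guest can mask and unmask superpriority interrupts much as it masks DAIF. A guest-side usage sketch (hypothetical helper names, C with inline assembly; needs an assembler that understands FEAT_NMI, otherwise the raw encodings shown at the end of this series can be emitted with .inst):

    static inline void local_nmi_disable(void)
    {
        /*
         * MSR ALLINT, #1 sets PSTATE.ALLINT; executed at EL1 this traps
         * to EL2 when HCRX_EL2.TALLINT is set, which is what
         * msr_set_allint_el1() below models for TCG.
         */
        asm volatile("msr ALLINT, #1" ::: "memory");
    }

    static inline void local_nmi_enable(void)
    {
        /* MSR ALLINT, #0 clears PSTATE.ALLINT and is never trapped. */
        asm volatile("msr ALLINT, #0" ::: "memory");
    }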
diff --git a/target/arm/internals.h b/target/arm/internals.h
index dd3da211a3..b53f5e8ff2 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1110,6 +1110,24 @@ void arm_cpu_update_virq(ARMCPU *cpu);
 void arm_cpu_update_vfiq(ARMCPU *cpu);
 
 /**
+ * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
+ *
+ * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
+ * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI.
+ * Must be called with the BQL held.
+ */
+void arm_cpu_update_vinmi(ARMCPU *cpu);
+
+/**
+ * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
+ *
+ * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
+ * a change to the HCRX_EL2.VFNMI.
+ * Must be called with the BQL held.
+ */
+void arm_cpu_update_vfnmi(ARMCPU *cpu);
+
+/**
  * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
  *
  * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
@@ -1229,6 +1247,9 @@ static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
     if (isar_feature_aa64_mte(id)) {
         valid |= PSTATE_TCO;
     }
+    if (isar_feature_aa64_nmi(id)) {
+        valid |= PSTATE_ALLINT;
+    }
 
     return valid;
 }
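Because PSTATE_ALLINT is now part of aarch64_pstate_valid_mask(), an SPSR_ELx image may legitimately carry the bit across an exception return when FEAT_NMI is implemented. A sketch of the kind of validity test this mask supports (hypothetical helper, not from the diff):

    static bool spsr_bits_valid(uint32_t spsr, const ARMISARegisters *id)
    {
        /* any set bit outside the valid mask marks the SPSR as illegal */
        return (spsr & ~aarch64_pstate_valid_mask(id)) == 0;
    }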
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index 8a20dce3c8..0e7656fd15 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -207,6 +207,7 @@ MSR_i_DIT       1101 0101 0000 0 011 0100 .... 010 11111 @msr_i
 MSR_i_TCO       1101 0101 0000 0 011 0100 .... 100 11111 @msr_i
 MSR_i_DAIFSET   1101 0101 0000 0 011 0100 .... 110 11111 @msr_i
 MSR_i_DAIFCLEAR 1101 0101 0000 0 011 0100 .... 111 11111 @msr_i
+MSR_i_ALLINT    1101 0101 0000 0 001 0100 000 imm:1 000 11111
 MSR_i_SVCR      1101 0101 0000 0 011 0100 0 mask:2 imm:1 011 11111
 
 # MRS, MSR (register), SYS, SYSL. These are all essentially the
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index 9f7a9f3d2c..62c4663512 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -1175,6 +1175,7 @@ void aarch64_max_tcg_initfn(Object *obj)
     t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0);  /* FEAT_RASv1p1 + FEAT_DoubleFault */
     t = FIELD_DP64(t, ID_AA64PFR1, SME, 1);       /* FEAT_SME */
     t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */
+    t = FIELD_DP64(t, ID_AA64PFR1, NMI, 1);       /* FEAT_NMI */
     cpu->isar.id_aa64pfr1 = t;
 
     t = cpu->isar.id_aa64mmfr0;
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index ebaa7f00df..0ea8668ab4 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -66,6 +66,18 @@ void HELPER(msr_i_spsel)(CPUARMState *env, uint32_t imm)
     update_spsel(env, imm);
 }
 
+void HELPER(msr_set_allint_el1)(CPUARMState *env)
+{
+    /* ALLINT update to PSTATE. */
+    if (arm_hcrx_el2_eff(env) & HCRX_TALLINT) {
+        raise_exception_ra(env, EXCP_UDEF,
+                           syn_aa64_sysregtrap(0, 1, 0, 4, 1, 0x1f, 0), 2,
+                           GETPC());
+    }
+
+    env->pstate |= PSTATE_ALLINT;
+}
+
 static void daif_check(CPUARMState *env, uint32_t op,
                        uint32_t imm, uintptr_t ra)
 {
@@ -892,8 +904,8 @@ illegal_return:
      */
     env->pstate |= PSTATE_IL;
     env->pc = new_pc;
-    spsr &= PSTATE_NZCV | PSTATE_DAIF;
-    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
+    spsr &= PSTATE_NZCV | PSTATE_DAIF | PSTATE_ALLINT;
+    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF | PSTATE_ALLINT);
     pstate_write(env, spsr);
     if (!arm_singlestep_active(env)) {
         env->pstate &= ~PSTATE_SS;
diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h
index 575a5dab7d..0518165399 100644
--- a/target/arm/tcg/helper-a64.h
+++ b/target/arm/tcg/helper-a64.h
@@ -22,6 +22,7 @@ DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64)
 DEF_HELPER_2(msr_i_spsel, void, env, i32)
 DEF_HELPER_2(msr_i_daifset, void, env, i32)
 DEF_HELPER_2(msr_i_daifclear, void, env, i32)
+DEF_HELPER_1(msr_set_allint_el1, void, env)
 DEF_HELPER_3(vfp_cmph_a64, i64, f16, f16, ptr)
 DEF_HELPER_3(vfp_cmpeh_a64, i64, f16, f16, ptr)
 DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr)
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 2666d52711..976094a5c8 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -2036,6 +2036,25 @@ static bool trans_MSR_i_DAIFCLEAR(DisasContext *s, arg_i *a)
     return true;
 }
 
+static bool trans_MSR_i_ALLINT(DisasContext *s, arg_i *a)
+{
+    if (!dc_isar_feature(aa64_nmi, s) || s->current_el == 0) {
+        return false;
+    }
+
+    if (a->imm == 0) {
+        clear_pstate_bits(PSTATE_ALLINT);
+    } else if (s->current_el > 1) {
+        set_pstate_bits(PSTATE_ALLINT);
+    } else {
+        gen_helper_msr_set_allint_el1(tcg_env);
+    }
+
+    /* Exit the cpu loop to re-evaluate pending IRQs. */
+    s->base.is_jmp = DISAS_UPDATE_EXIT;
+    return true;
+}
+
 static bool trans_MSR_i_SVCR(DisasContext *s, arg_MSR_i_SVCR *a)
 {
     if (!dc_isar_feature(aa64_sme, s) || a->mask == 0) {
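A worked example derived from the MSR_i_ALLINT decode pattern above (computed here, not taken from the diff): grouping 1101 0101 0000 0 001 0100 000 imm 000 11111 into bytes yields the concrete instruction words, whose single imm bit selects set versus clear in trans_MSR_i_ALLINT():

    /* imm = 0: 11010101 00000001 01000000 00011111 */
    #define INSN_MSR_ALLINT_CLR 0xd501401fu   /* MSR ALLINT, #0 */
    /* imm = 1: 11010101 00000001 01000001 00011111 */
    #define INSN_MSR_ALLINT_SET 0xd501411fu   /* MSR ALLINT, #1 */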