Diffstat (limited to 'target')
27 files changed, 513 insertions, 437 deletions
diff --git a/target/arm/cpu-irq.c b/target/arm/cpu-irq.c
new file mode 100644
index 0000000000..fe514cc93a
--- /dev/null
+++ b/target/arm/cpu-irq.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/*
+ * QEMU ARM CPU - interrupt_request handling
+ *
+ * Copyright (c) 2003-2025 QEMU contributors
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "accel/tcg/cpu-ops.h"
+#include "internals.h"
+
+#ifdef CONFIG_TCG
+static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
+                                     unsigned int target_el,
+                                     unsigned int cur_el, bool secure,
+                                     uint64_t hcr_el2)
+{
+    CPUARMState *env = cpu_env(cs);
+    bool pstate_unmasked;
+    bool unmasked = false;
+    bool allIntMask = false;
+
+    /*
+     * Don't take exceptions if they target a lower EL.
+     * This check should catch any exceptions that would not be taken
+     * but left pending.
+     */
+    if (cur_el > target_el) {
+        return false;
+    }
+
+    if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) &&
+        env->cp15.sctlr_el[target_el] & SCTLR_NMI && cur_el == target_el) {
+        allIntMask = env->pstate & PSTATE_ALLINT ||
+                     ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) &&
+                      (env->pstate & PSTATE_SP));
+    }
+
+    switch (excp_idx) {
+    case EXCP_NMI:
+        pstate_unmasked = !allIntMask;
+        break;
+
+    case EXCP_VINMI:
+        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
+            /* VINMIs are only taken when hypervized. */
+            return false;
+        }
+        return !allIntMask;
+    case EXCP_VFNMI:
+        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
+            /* VFNMIs are only taken when hypervized. */
+            return false;
+        }
+        return !allIntMask;
+    case EXCP_FIQ:
+        pstate_unmasked = (!(env->daif & PSTATE_F)) && (!allIntMask);
+        break;
+
+    case EXCP_IRQ:
+        pstate_unmasked = (!(env->daif & PSTATE_I)) && (!allIntMask);
+        break;
+
+    case EXCP_VFIQ:
+        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
+            /* VFIQs are only taken when hypervized. */
+            return false;
+        }
+        return !(env->daif & PSTATE_F) && (!allIntMask);
+    case EXCP_VIRQ:
+        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
+            /* VIRQs are only taken when hypervized. */
+            return false;
+        }
+        return !(env->daif & PSTATE_I) && (!allIntMask);
+    case EXCP_VSERR:
+        if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
+            /* VSERRs are only taken when hypervized. */
+            return false;
+        }
+        return !(env->daif & PSTATE_A);
+    default:
+        g_assert_not_reached();
+    }
+
+    /*
+     * Use the target EL, current execution state and SCR/HCR settings to
+     * determine whether the corresponding CPSR bit is used to mask the
+     * interrupt.
+     */
+    if ((target_el > cur_el) && (target_el != 1)) {
+        /* Exceptions targeting a higher EL may not be maskable */
+        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+            switch (target_el) {
+            case 2:
+                /*
+                 * According to ARM DDI 0487H.a, an interrupt can be masked
+                 * when HCR_E2H and HCR_TGE are both set regardless of the
+                 * current Security state. Note that we need to revisit this
+                 * part again once we need to support NMI.
+                 */
+                if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
+                    unmasked = true;
+                }
+                break;
+            case 3:
+                /* Interrupt cannot be masked when the target EL is 3 */
+                unmasked = true;
+                break;
+            default:
+                g_assert_not_reached();
+            }
+        } else {
+            /*
+             * The old 32-bit-only environment has a more complicated
+             * masking setup. HCR and SCR bits not only affect interrupt
+             * routing but also change the behaviour of masking.
+             */
+            bool hcr, scr;
+
+            switch (excp_idx) {
+            case EXCP_FIQ:
+                /*
+                 * If FIQs are routed to EL3 or EL2 then there are cases where
+                 * we override the CPSR.F in determining if the exception is
+                 * masked or not. If neither of these are set then we fall back
+                 * to the CPSR.F setting otherwise we further assess the state
+                 * below.
+                 */
+                hcr = hcr_el2 & HCR_FMO;
+                scr = (env->cp15.scr_el3 & SCR_FIQ);
+
+                /*
+                 * When EL3 is 32-bit, the SCR.FW bit controls whether the
+                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
+                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
+                 * when non-secure but only when FIQs are only routed to EL3.
+                 */
+                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
+                break;
+            case EXCP_IRQ:
+                /*
+                 * When EL3 execution state is 32-bit, if HCR.IMO is set then
+                 * we may override the CPSR.I masking when in non-secure state.
+                 * The SCR.IRQ setting has already been taken into consideration
+                 * when setting the target EL, so it does not have a further
+                 * effect here.
+                 */
+                hcr = hcr_el2 & HCR_IMO;
+                scr = false;
+                break;
+            default:
+                g_assert_not_reached();
+            }
+
+            if ((scr || hcr) && !secure) {
+                unmasked = true;
+            }
+        }
+    }
+
+    /*
+     * The PSTATE bits only mask the interrupt if we have not overridden the
+     * ability above.
+     */
+    return unmasked || pstate_unmasked;
+}
+
+bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+    CPUARMState *env = cpu_env(cs);
+    uint32_t cur_el = arm_current_el(env);
+    bool secure = arm_is_secure(env);
+    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
+    uint32_t target_el;
+    uint32_t excp_idx;
+
+    /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
+
+    if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) &&
+        (arm_sctlr(env, cur_el) & SCTLR_NMI)) {
+        if (interrupt_request & CPU_INTERRUPT_NMI) {
+            excp_idx = EXCP_NMI;
+            target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+            if (arm_excp_unmasked(cs, excp_idx, target_el,
+                                  cur_el, secure, hcr_el2)) {
+                goto found;
+            }
+        }
+        if (interrupt_request & CPU_INTERRUPT_VINMI) {
+            excp_idx = EXCP_VINMI;
+            target_el = 1;
+            if (arm_excp_unmasked(cs, excp_idx, target_el,
+                                  cur_el, secure, hcr_el2)) {
+                goto found;
+            }
+        }
+        if (interrupt_request & CPU_INTERRUPT_VFNMI) {
+            excp_idx = EXCP_VFNMI;
+            target_el = 1;
+            if (arm_excp_unmasked(cs, excp_idx, target_el,
+                                  cur_el, secure, hcr_el2)) {
+                goto found;
+            }
+        }
+    } else {
+        /*
+         * NMI disabled: interrupts with superpriority are handled
+         * as if they didn't have it
+         */
+        if (interrupt_request & CPU_INTERRUPT_NMI) {
+            interrupt_request |= CPU_INTERRUPT_HARD;
+        }
+        if (interrupt_request & CPU_INTERRUPT_VINMI) {
+            interrupt_request |= CPU_INTERRUPT_VIRQ;
+        }
+        if (interrupt_request & CPU_INTERRUPT_VFNMI) {
+            interrupt_request |= CPU_INTERRUPT_VFIQ;
+        }
+    }
+
+    if (interrupt_request & CPU_INTERRUPT_FIQ) {
+        excp_idx = EXCP_FIQ;
+        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
+        }
+    }
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        excp_idx = EXCP_IRQ;
+        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
+        }
+    }
+    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
+        excp_idx = EXCP_VIRQ;
+        target_el = 1;
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
+        }
+    }
+    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
+        excp_idx = EXCP_VFIQ;
+        target_el = 1;
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
+        }
+    }
+    if (interrupt_request & CPU_INTERRUPT_VSERR) {
+        excp_idx = EXCP_VSERR;
+        target_el = 1;
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            /* Taking a virtual abort clears HCR_EL2.VSE */
+            env->cp15.hcr_el2 &= ~HCR_VSE;
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
+            goto found;
+        }
+    }
+    return false;
+
+ found:
+    cs->exception_index = excp_idx;
+    env->exception.target_el = target_el;
+    cs->cc->tcg_ops->do_interrupt(cs);
+    return true;
+}
+#endif /* CONFIG_TCG */
+
+void arm_cpu_update_virq(ARMCPU *cpu)
+{
+    /*
+     * Update the interrupt level for VIRQ, which is the logical OR of
+     * the HCR_EL2.VI bit and the input line level from the GIC.
+     */
+    CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
+    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) &&
+                      !(arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
+        (env->irq_line_state & CPU_INTERRUPT_VIRQ);
+
+    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VIRQ)) {
+        if (new_state) {
+            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
+        } else {
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
+        }
+    }
+}
+
+void arm_cpu_update_vfiq(ARMCPU *cpu)
+{
+    /*
+     * Update the interrupt level for VFIQ, which is the logical OR of
+     * the HCR_EL2.VF bit and the input line level from the GIC.
+     */
+    CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
+    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VF) &&
+                      !(arm_hcrx_el2_eff(env) & HCRX_VFNMI)) ||
+        (env->irq_line_state & CPU_INTERRUPT_VFIQ);
+
+    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VFIQ)) {
+        if (new_state) {
+            cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
+        } else {
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
+        }
+    }
+}
+
+void arm_cpu_update_vinmi(ARMCPU *cpu)
+{
+    /*
+     * Update the interrupt level for VINMI, which is the logical OR of
+     * the HCRX_EL2.VINMI bit and the input line level from the GIC.
+     */
+    CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
+    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) &&
+                      (arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
+        (env->irq_line_state & CPU_INTERRUPT_VINMI);
+
+    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VINMI)) {
+        if (new_state) {
+            cpu_interrupt(cs, CPU_INTERRUPT_VINMI);
+        } else {
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_VINMI);
+        }
+    }
+}
+
+void arm_cpu_update_vfnmi(ARMCPU *cpu)
+{
+    /*
+     * Update the interrupt level for VFNMI, which is the HCRX_EL2.VFNMI bit.
+     */
+    CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
+    bool new_state = (arm_hcr_el2_eff(env) & HCR_VF) &&
+        (arm_hcrx_el2_eff(env) & HCRX_VFNMI);
+
+    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VFNMI)) {
+        if (new_state) {
+            cpu_interrupt(cs, CPU_INTERRUPT_VFNMI);
+        } else {
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFNMI);
+        }
+    }
+}
+
+void arm_cpu_update_vserr(ARMCPU *cpu)
+{
+    /*
+     * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit.
+     */
+    CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
+    bool new_state = env->cp15.hcr_el2 & HCR_VSE;
+
+    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VSERR)) {
+        if (new_state) {
+            cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
+        } else {
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
+        }
+    }
+}
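The arm_cpu_update_*() helpers at the bottom of the new file are pure level recomputations: they fold the HCR_EL2/HCRX_EL2 virtual-interrupt bits together with the GIC input line state and then raise or clear the matching interrupt_request bit. A minimal sketch of a caller, assuming a simplified HCR_EL2 write path (hcr_write_sketch is hypothetical; the real register write handlers live in target/arm/helper.c):

    /* Sketch only: a trimmed HCR_EL2 write path driving the helpers above. */
    static void hcr_write_sketch(CPUARMState *env, uint64_t value)
    {
        ARMCPU *cpu = env_archcpu(env);

        env->cp15.hcr_el2 = value;

        /* Re-derive the virtual interrupt lines from HCR_EL2.{VI,VF,VSE}. */
        arm_cpu_update_virq(cpu);
        arm_cpu_update_vfiq(cpu);
        arm_cpu_update_vserr(cpu);
    }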
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 02e2a31a86..c65af7e761 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -677,376 +677,6 @@ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
 }
 
-#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
-
-static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
-                                     unsigned int target_el,
-                                     unsigned int cur_el, bool secure,
-                                     uint64_t hcr_el2)
-{
-    CPUARMState *env = cpu_env(cs);
-    bool pstate_unmasked;
-    bool unmasked = false;
-    bool allIntMask = false;
-
-    /*
-     * Don't take exceptions if they target a lower EL.
-     * This check should catch any exceptions that would not be taken
-     * but left pending.
-     */
-    if (cur_el > target_el) {
-        return false;
-    }
-
-    if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) &&
-        env->cp15.sctlr_el[target_el] & SCTLR_NMI && cur_el == target_el) {
-        allIntMask = env->pstate & PSTATE_ALLINT ||
-                     ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) &&
-                      (env->pstate & PSTATE_SP));
-    }
-
-    switch (excp_idx) {
-    case EXCP_NMI:
-        pstate_unmasked = !allIntMask;
-        break;
-
-    case EXCP_VINMI:
-        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
-            /* VINMIs are only taken when hypervized. */
-            return false;
-        }
-        return !allIntMask;
-    case EXCP_VFNMI:
-        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
-            /* VFNMIs are only taken when hypervized. */
-            return false;
-        }
-        return !allIntMask;
-    case EXCP_FIQ:
-        pstate_unmasked = (!(env->daif & PSTATE_F)) && (!allIntMask);
-        break;
-
-    case EXCP_IRQ:
-        pstate_unmasked = (!(env->daif & PSTATE_I)) && (!allIntMask);
-        break;
-
-    case EXCP_VFIQ:
-        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
-            /* VFIQs are only taken when hypervized. */
-            return false;
-        }
-        return !(env->daif & PSTATE_F) && (!allIntMask);
-    case EXCP_VIRQ:
-        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
-            /* VIRQs are only taken when hypervized. */
-            return false;
-        }
-        return !(env->daif & PSTATE_I) && (!allIntMask);
-    case EXCP_VSERR:
-        if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
-            /* VIRQs are only taken when hypervized. */
-            return false;
-        }
-        return !(env->daif & PSTATE_A);
-    default:
-        g_assert_not_reached();
-    }
-
-    /*
-     * Use the target EL, current execution state and SCR/HCR settings to
-     * determine whether the corresponding CPSR bit is used to mask the
-     * interrupt.
-     */
-    if ((target_el > cur_el) && (target_el != 1)) {
-        /* Exceptions targeting a higher EL may not be maskable */
-        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
-            switch (target_el) {
-            case 2:
-                /*
-                 * According to ARM DDI 0487H.a, an interrupt can be masked
-                 * when HCR_E2H and HCR_TGE are both set regardless of the
-                 * current Security state. Note that we need to revisit this
-                 * part again once we need to support NMI.
-                 */
-                if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
-                    unmasked = true;
-                }
-                break;
-            case 3:
-                /* Interrupt cannot be masked when the target EL is 3 */
-                unmasked = true;
-                break;
-            default:
-                g_assert_not_reached();
-            }
-        } else {
-            /*
-             * The old 32-bit-only environment has a more complicated
-             * masking setup. HCR and SCR bits not only affect interrupt
-             * routing but also change the behaviour of masking.
-             */
-            bool hcr, scr;
-
-            switch (excp_idx) {
-            case EXCP_FIQ:
-                /*
-                 * If FIQs are routed to EL3 or EL2 then there are cases where
-                 * we override the CPSR.F in determining if the exception is
-                 * masked or not. If neither of these are set then we fall back
-                 * to the CPSR.F setting otherwise we further assess the state
-                 * below.
-                 */
-                hcr = hcr_el2 & HCR_FMO;
-                scr = (env->cp15.scr_el3 & SCR_FIQ);
-
-                /*
-                 * When EL3 is 32-bit, the SCR.FW bit controls whether the
-                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
-                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
-                 * when non-secure but only when FIQs are only routed to EL3.
-                 */
-                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
-                break;
-            case EXCP_IRQ:
-                /*
-                 * When EL3 execution state is 32-bit, if HCR.IMO is set then
-                 * we may override the CPSR.I masking when in non-secure state.
-                 * The SCR.IRQ setting has already been taken into consideration
-                 * when setting the target EL, so it does not have a further
-                 * affect here.
-                 */
-                hcr = hcr_el2 & HCR_IMO;
-                scr = false;
-                break;
-            default:
-                g_assert_not_reached();
-            }
-
-            if ((scr || hcr) && !secure) {
-                unmasked = true;
-            }
-        }
-    }
-
-    /*
-     * The PSTATE bits only mask the interrupt if we have not overridden the
-     * ability above.
-     */
-    return unmasked || pstate_unmasked;
-}
-
-static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
-    CPUARMState *env = cpu_env(cs);
-    uint32_t cur_el = arm_current_el(env);
-    bool secure = arm_is_secure(env);
-    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
-    uint32_t target_el;
-    uint32_t excp_idx;
-
-    /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
-
-    if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) &&
-        (arm_sctlr(env, cur_el) & SCTLR_NMI)) {
-        if (interrupt_request & CPU_INTERRUPT_NMI) {
-            excp_idx = EXCP_NMI;
-            target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
-            if (arm_excp_unmasked(cs, excp_idx, target_el,
-                                  cur_el, secure, hcr_el2)) {
-                goto found;
-            }
-        }
-        if (interrupt_request & CPU_INTERRUPT_VINMI) {
-            excp_idx = EXCP_VINMI;
-            target_el = 1;
-            if (arm_excp_unmasked(cs, excp_idx, target_el,
-                                  cur_el, secure, hcr_el2)) {
-                goto found;
-            }
-        }
-        if (interrupt_request & CPU_INTERRUPT_VFNMI) {
-            excp_idx = EXCP_VFNMI;
-            target_el = 1;
-            if (arm_excp_unmasked(cs, excp_idx, target_el,
-                                  cur_el, secure, hcr_el2)) {
-                goto found;
-            }
-        }
-    } else {
-        /*
-         * NMI disabled: interrupts with superpriority are handled
-         * as if they didn't have it
-         */
-        if (interrupt_request & CPU_INTERRUPT_NMI) {
-            interrupt_request |= CPU_INTERRUPT_HARD;
-        }
-        if (interrupt_request & CPU_INTERRUPT_VINMI) {
-            interrupt_request |= CPU_INTERRUPT_VIRQ;
-        }
-        if (interrupt_request & CPU_INTERRUPT_VFNMI) {
-            interrupt_request |= CPU_INTERRUPT_VFIQ;
-        }
-    }
-
-    if (interrupt_request & CPU_INTERRUPT_FIQ) {
-        excp_idx = EXCP_FIQ;
-        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
-        if (arm_excp_unmasked(cs, excp_idx, target_el,
-                              cur_el, secure, hcr_el2)) {
-            goto found;
-        }
-    }
-    if (interrupt_request & CPU_INTERRUPT_HARD) {
-        excp_idx = EXCP_IRQ;
-        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
-        if (arm_excp_unmasked(cs, excp_idx, target_el,
-                              cur_el, secure, hcr_el2)) {
-            goto found;
-        }
-    }
-    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
-        excp_idx = EXCP_VIRQ;
-        target_el = 1;
-        if (arm_excp_unmasked(cs, excp_idx, target_el,
-                              cur_el, secure, hcr_el2)) {
-            goto found;
-        }
-    }
-    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
-        excp_idx = EXCP_VFIQ;
-        target_el = 1;
-        if (arm_excp_unmasked(cs, excp_idx, target_el,
-                              cur_el, secure, hcr_el2)) {
-            goto found;
-        }
-    }
-    if (interrupt_request & CPU_INTERRUPT_VSERR) {
-        excp_idx = EXCP_VSERR;
-        target_el = 1;
-        if (arm_excp_unmasked(cs, excp_idx, target_el,
-                              cur_el, secure, hcr_el2)) {
-            /* Taking a virtual abort clears HCR_EL2.VSE */
-            env->cp15.hcr_el2 &= ~HCR_VSE;
-            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
-            goto found;
-        }
-    }
-    return false;
-
- found:
-    cs->exception_index = excp_idx;
-    env->exception.target_el = target_el;
-    cs->cc->tcg_ops->do_interrupt(cs);
-    return true;
-}
-
-#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
-
-void arm_cpu_update_virq(ARMCPU *cpu)
-{
-    /*
-     * Update the interrupt level for VIRQ, which is the logical OR of
-     * the HCR_EL2.VI bit and the input line level from the GIC.
-     */
-    CPUARMState *env = &cpu->env;
-    CPUState *cs = CPU(cpu);
-
-    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) &&
-                      !(arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
-        (env->irq_line_state & CPU_INTERRUPT_VIRQ);
-
-    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VIRQ)) {
-        if (new_state) {
-            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
-        } else {
-            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
-        }
-    }
-}
-
-void arm_cpu_update_vfiq(ARMCPU *cpu)
-{
-    /*
-     * Update the interrupt level for VFIQ, which is the logical OR of
-     * the HCR_EL2.VF bit and the input line level from the GIC.
-     */
-    CPUARMState *env = &cpu->env;
-    CPUState *cs = CPU(cpu);
-
-    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VF) &&
-                      !(arm_hcrx_el2_eff(env) & HCRX_VFNMI)) ||
-        (env->irq_line_state & CPU_INTERRUPT_VFIQ);
-
-    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VFIQ)) {
-        if (new_state) {
-            cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
-        } else {
-            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
-        }
-    }
-}
-
-void arm_cpu_update_vinmi(ARMCPU *cpu)
-{
-    /*
-     * Update the interrupt level for VINMI, which is the logical OR of
-     * the HCRX_EL2.VINMI bit and the input line level from the GIC.
-     */
-    CPUARMState *env = &cpu->env;
-    CPUState *cs = CPU(cpu);
-
-    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) &&
-                      (arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
-        (env->irq_line_state & CPU_INTERRUPT_VINMI);
-
-    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VINMI)) {
-        if (new_state) {
-            cpu_interrupt(cs, CPU_INTERRUPT_VINMI);
-        } else {
-            cpu_reset_interrupt(cs, CPU_INTERRUPT_VINMI);
-        }
-    }
-}
-
-void arm_cpu_update_vfnmi(ARMCPU *cpu)
-{
-    /*
-     * Update the interrupt level for VFNMI, which is the HCRX_EL2.VFNMI bit.
-     */
-    CPUARMState *env = &cpu->env;
-    CPUState *cs = CPU(cpu);
-
-    bool new_state = (arm_hcr_el2_eff(env) & HCR_VF) &&
-        (arm_hcrx_el2_eff(env) & HCRX_VFNMI);
-
-    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VFNMI)) {
-        if (new_state) {
-            cpu_interrupt(cs, CPU_INTERRUPT_VFNMI);
-        } else {
-            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFNMI);
-        }
-    }
-}
-
-void arm_cpu_update_vserr(ARMCPU *cpu)
-{
-    /*
-     * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit.
-     */
-    CPUARMState *env = &cpu->env;
-    CPUState *cs = CPU(cpu);
-
-    bool new_state = env->cp15.hcr_el2 & HCR_VSE;
-
-    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VSERR)) {
-        if (new_state) {
-            cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
-        } else {
-            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
-        }
-    }
-}
-
 #ifndef CONFIG_USER_ONLY
 static void arm_cpu_set_irq(void *opaque, int irq, int level)
 {
diff --git a/target/arm/el2-stubs.c b/target/arm/el2-stubs.c
new file mode 100644
index 0000000000..972023c337
--- /dev/null
+++ b/target/arm/el2-stubs.c
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/* QEMU ARM CPU - user-mode emulation stubs for EL2 interrupts
+ *
+ * These should not really be needed, but CP registers for EL2
+ * are not elided by user-mode emulation and they call these
+ * functions. Leave them as stubs until it's cleaned up.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+
+void arm_cpu_update_virq(ARMCPU *cpu)
+{
+    g_assert_not_reached();
+}
+
+void arm_cpu_update_vfiq(ARMCPU *cpu)
+{
+    g_assert_not_reached();
+}
+
+void arm_cpu_update_vinmi(ARMCPU *cpu)
+{
+    g_assert_not_reached();
+}
+
+void arm_cpu_update_vfnmi(ARMCPU *cpu)
+{
+    g_assert_not_reached();
+}
+
+void arm_cpu_update_vserr(ARMCPU *cpu)
+{
+    g_assert_not_reached();
+}
diff --git a/target/arm/helper.c b/target/arm/helper.c
index fa8dfac299..c44294711f 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -2868,8 +2868,12 @@ static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
 {
+#ifdef CONFIG_USER_ONLY
+    g_assert_not_reached();
+#else
     /* Wait-for-interrupt (deprecated) */
     cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
+#endif
 }
 
 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 532fabcafc..0f7df97b99 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1293,6 +1293,11 @@ static inline const char *aarch32_mode_name(uint32_t psr)
 }
 
 /**
+ * arm_cpu_exec_interrupt(): Implementation of the cpu_exec_interrupt hook.
+ */
+bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
+
+/**
  * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
  *
  * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
diff --git a/target/arm/meson.build b/target/arm/meson.build
index 07d9271aa4..914f1498fc 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -26,6 +26,7 @@ arm_user_ss.add(files(
   'debug_helper.c',
   'helper.c',
   'vfp_fpscr.c',
+  'el2-stubs.c',
 ))
 
 arm_common_system_ss.add(files('cpu.c'))
@@ -38,6 +39,7 @@ arm_common_system_ss.add(files(
   'arm-powerctl.c',
   'cortex-regs.c',
   'cpregs-pmu.c',
+  'cpu-irq.c',
   'debug_helper.c',
   'helper.c',
   'machine.c',
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 0efc18a181..302e899287 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -591,7 +591,7 @@ static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
      * which is rather sooner than "normal".  But the alternative
      * is waiting until the next syscall.
      */
-    qemu_cpu_kick(env_cpu(env));
+    cpu_exit(env_cpu(env));
 #endif
 }
 
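The mte_helper.c hunk swaps qemu_cpu_kick() for cpu_exit(). My reading of the distinction, based on the comments elsewhere in this series: cpu_exit() publishes exit_request before kicking, so the execution loop is guaranteed to leave the fast path and notice the pending asynchronous tag-check fault, whereas a bare kick only wakes the thread. Illustrative shape only:

    /* Illustration: the check the execution loop is expected to perform. */
    static bool should_leave_fast_path(CPUState *cs)
    {
        /* cpu_exit() sets this flag (with release semantics) and kicks. */
        return qatomic_load_acquire(&cs->exit_request);
    }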
diff --git a/target/avr/helper.c b/target/avr/helper.c
index b9cd6d5ef2..4b29ab3526 100644
--- a/target/avr/helper.c
+++ b/target/avr/helper.c
@@ -47,7 +47,7 @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
             cs->exception_index = EXCP_RESET;
             avr_cpu_do_interrupt(cs);
 
-            cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_RESET);
             return true;
         }
     }
@@ -59,7 +59,7 @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
             avr_cpu_do_interrupt(cs);
 
             env->intsrc &= env->intsrc - 1; /* clear the interrupt */
             if (!env->intsrc) {
-                cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+                cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
             }
             return true;
         }
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index f977fc49a7..e0be7a7406 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -2574,6 +2574,11 @@ static inline bool x86_has_cpuid_0x1f(X86CPU *cpu)
 void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
 void cpu_sync_avx_hflag(CPUX86State *env);
 
+typedef enum X86ASIdx {
+    X86ASIdx_MEM = 0,
+    X86ASIdx_SMM = 1,
+} X86ASIdx;
+
 #ifndef CONFIG_USER_ONLY
 static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
 {
diff --git a/target/i386/helper.c b/target/i386/helper.c
index e0aaed3c4c..651041ccfa 100644
--- a/target/i386/helper.c
+++ b/target/i386/helper.c
@@ -110,6 +110,7 @@ int cpu_x86_support_mca_broadcast(CPUX86State *env)
 /* x86 mmu */
 /* XXX: add PGE support */
 
+#ifndef CONFIG_USER_ONLY
 void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
 {
     CPUX86State *env = &cpu->env;
@@ -129,6 +130,7 @@ void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
         env->a20_mask = ~(1 << 20) | (a20_state << 20);
     }
 }
+#endif
 
 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
 {
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index 9e05e0e576..a502437c30 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -397,7 +397,7 @@ bool hvf_inject_interrupts(CPUState *cs)
     if (cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
         if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
-            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_NMI);
             info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
             wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, info);
         } else {
@@ -409,7 +409,7 @@ bool hvf_inject_interrupts(CPUState *cs)
         cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) {
         int line = cpu_get_pic_interrupt(env);
-        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
         if (line >= 0) {
             wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, line |
                   VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
@@ -437,7 +437,7 @@ int hvf_process_events(CPUState *cs)
     }
 
     if (cpu_test_interrupt(cs, CPU_INTERRUPT_POLL)) {
-        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
         apic_poll_irq(cpu->apic_state);
     }
     if ((cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
@@ -450,7 +450,7 @@ int hvf_process_events(CPUState *cs)
         do_cpu_sipi(cpu);
     }
     if (cpu_test_interrupt(cs, CPU_INTERRUPT_TPR)) {
-        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_TPR);
         cpu_synchronize_state(cs);
         apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                       env->tpr_access_type);
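The conversions in the avr and hvf hunks above, and in most of the files below, all follow one pattern: an open-coded read-modify-write of cs->interrupt_request becomes a call to cpu_reset_interrupt(). A sketch of the idiom being adopted, assuming the helper clears the bits atomically under the BQL (the body below is illustrative, not the real implementation):

    /* Sketch of the idiom (illustrative body, not the real helper). */
    static void reset_interrupt_sketch(CPUState *cs, int mask)
    {
        assert(bql_locked());                       /* single-writer discipline */
        qatomic_and(&cs->interrupt_request, ~mask); /* no torn updates for readers */
    }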
diff --git a/target/i386/kvm/hyperv.c b/target/i386/kvm/hyperv.c
index 9865120cc4..f7a81bd270 100644
--- a/target/i386/kvm/hyperv.c
+++ b/target/i386/kvm/hyperv.c
@@ -81,7 +81,6 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
          * necessary because memory hierarchy is being changed
          */
         async_safe_run_on_cpu(CPU(cpu), async_synic_update, RUN_ON_CPU_NULL);
-        cpu_exit(CPU(cpu));
 
         return EXCP_INTERRUPT;
     case KVM_EXIT_HYPERV_HCALL: {
diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c
index 89a7953659..9c25b55839 100644
--- a/target/i386/kvm/kvm-cpu.c
+++ b/target/i386/kvm/kvm-cpu.c
@@ -13,6 +13,7 @@
 #include "qapi/error.h"
 #include "system/system.h"
 #include "hw/boards.h"
+#include "hw/i386/x86.h"
 #include "kvm_i386.h"
 #include "accel/accel-cpu-target.h"
 
@@ -91,6 +92,15 @@ static bool kvm_cpu_realizefn(CPUState *cs, Error **errp)
         kvm_set_guest_phys_bits(cs);
     }
 
+    /*
+     * When SMM is enabled, there are two address spaces; otherwise only one.
+     *
+     * Only initialize address space 0 here; the second one, for SMM, is
+     * initialized at register_smram_listener() after machine init is done.
+     */
+    cs->num_ases = x86_machine_is_smm_enabled(X86_MACHINE(current_machine)) ? 2 : 1;
+    cpu_address_space_init(cs, X86ASIdx_MEM, "cpu-memory", cs->memory);
+
     return true;
 }
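With the new X86ASIdx constants, index 0 is always the ordinary memory view and index 1, when it exists, is SMM. A hedged sketch of how a lookup by transaction attributes could map onto them (x86_asidx_sketch is hypothetical; the real mapping is x86_asidx_from_attrs() in cpu.h):

    /* Hypothetical mapping from transaction attributes to an address space. */
    static int x86_asidx_sketch(CPUState *cs, MemTxAttrs attrs)
    {
        /* Assumption: attrs.secure marks SMM-mode accesses on x86. */
        if (cs->num_ases > 1 && attrs.secure) {
            return X86ASIdx_SMM;
        }
        return X86ASIdx_MEM;
    }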
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 306430a052..6a3a1c1ed8 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -2704,6 +2704,7 @@ static MemoryRegion smram_as_mem;
 
 static void register_smram_listener(Notifier *n, void *unused)
 {
+    CPUState *cpu;
     MemoryRegion *smram =
         (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
 
@@ -2727,7 +2728,11 @@ static void register_smram_listener(Notifier *n, void *unused)
     address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
     kvm_memory_listener_register(kvm_state, &smram_listener,
-                                 &smram_address_space, 1, "kvm-smram");
+                                 &smram_address_space, X86ASIdx_SMM, "kvm-smram");
+
+    CPU_FOREACH(cpu) {
+        cpu_address_space_init(cpu, X86ASIdx_SMM, "cpu-smm", &smram_as_root);
+    }
 }
 
 static void *kvm_msr_energy_thread(void *data)
@@ -3310,8 +3315,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
         return ret;
     }
 
-    if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
-        object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
+    if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
         x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
         smram_machine_done.notify = register_smram_listener;
         qemu_add_machine_init_done_notifier(&smram_machine_done);
@@ -5066,7 +5070,7 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
          */
         events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
         events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
-        cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
     } else {
         /* Keep these in cs->interrupt_request. */
         events.smi.pending = 0;
@@ -5456,7 +5460,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
     if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
         if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
             bql_lock();
-            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
             bql_unlock();
             DPRINTF("injected NMI\n");
             ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
@@ -5467,7 +5471,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
         }
         if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
             bql_lock();
-            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
             bql_unlock();
             DPRINTF("injected SMI\n");
             ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
@@ -5486,10 +5490,10 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
     if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
         if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
             !(env->hflags & HF_SMM_MASK)) {
-            qatomic_set(&cpu->exit_request, 1);
+            qatomic_set(&cpu->exit_request, true);
         }
         if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
-            qatomic_set(&cpu->exit_request, 1);
+            qatomic_set(&cpu->exit_request, true);
         }
     }
 
@@ -5502,7 +5506,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
 
         bql_lock();
 
-        cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
         irq = cpu_get_pic_interrupt(env);
         if (irq >= 0) {
             struct kvm_interrupt intr;
@@ -5597,14 +5601,14 @@ int kvm_arch_process_async_events(CPUState *cs)
         /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
         assert(env->mcg_cap);
 
-        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_MCE);
 
         kvm_cpu_synchronize_state(cs);
 
         if (env->exception_nr == EXCP08_DBLE) {
             /* this means triple fault */
             qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
-            cs->exit_request = 1;
+            qatomic_set(&cs->exit_request, true);
             return 0;
         }
         kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
@@ -5627,7 +5631,7 @@ int kvm_arch_process_async_events(CPUState *cs)
     }
 
     if (cpu_test_interrupt(cs, CPU_INTERRUPT_POLL)) {
-        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
         apic_poll_irq(cpu->apic_state);
     }
     if ((cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
@@ -5640,7 +5644,7 @@ int kvm_arch_process_async_events(CPUState *cs)
         do_cpu_sipi(cpu);
     }
     if (cpu_test_interrupt(cs, CPU_INTERRUPT_TPR)) {
-        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_TPR);
         kvm_cpu_synchronize_state(cs);
         apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                       env->tpr_access_type);
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
index 3799260bbd..dd5d5428b1 100644
--- a/target/i386/nvmm/nvmm-accel-ops.c
+++ b/target/i386/nvmm/nvmm-accel-ops.c
@@ -42,16 +42,14 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)
     qemu_guest_random_seed_thread_part2(cpu->random_seed);
 
     do {
+        qemu_process_cpu_events(cpu);
+
         if (cpu_can_run(cpu)) {
             r = nvmm_vcpu_exec(cpu);
             if (r == EXCP_DEBUG) {
                 cpu_handle_guest_debug(cpu);
             }
         }
-        while (cpu_thread_is_idle(cpu)) {
-            qemu_cond_wait_bql(cpu->halt_cond);
-        }
-        qemu_wait_io_event_common(cpu);
     } while (!cpu->unplug || cpu_can_run(cpu));
 
     nvmm_destroy_vcpu(cpu);
@@ -77,7 +75,7 @@ static void nvmm_start_vcpu_thread(CPUState *cpu)
  */
 static void nvmm_kick_vcpu_thread(CPUState *cpu)
 {
-    cpu->exit_request = 1;
+    qatomic_set(&cpu->exit_request, true);
     cpus_kick_thread(cpu);
 }
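Both the NVMM change above and the WHPX change later in the series reduce the vCPU thread to the same shape: qemu_process_cpu_events() is taken to subsume the old halt_cond wait loop plus qemu_wait_io_event_common(). In outline (sketch; error handling and the accelerator entry are elided):

    /* Outline of the simplified vCPU thread body after this series. */
    static void vcpu_thread_body_sketch(CPUState *cpu)
    {
        do {
            qemu_process_cpu_events(cpu);   /* sleep while idle, service events */

            if (cpu_can_run(cpu)) {
                /* enter the accelerator, e.g. nvmm_vcpu_exec(cpu) */
            }
        } while (!cpu->unplug || cpu_can_run(cpu));
    }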
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index c1ac74c4f0..ed42425167 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -414,12 +414,12 @@ nvmm_vcpu_pre_run(CPUState *cpu)
      * or commit pending TPR access.
      */
     if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
-        cpu->exit_request = 1;
+        qatomic_set(&cpu->exit_request, true);
     }
 
     if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
         if (nvmm_can_take_nmi(cpu)) {
-            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
             event->type = NVMM_VCPU_EVENT_INTR;
             event->vector = 2;
             has_event = true;
@@ -428,7 +428,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
 
     if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
         if (nvmm_can_take_int(cpu)) {
-            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
             event->type = NVMM_VCPU_EVENT_INTR;
             event->vector = cpu_get_pic_interrupt(env);
             has_event = true;
@@ -437,7 +437,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
 
     /* Don't want SMIs. */
     if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
-        cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+        cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
     }
 
     if (sync_tpr) {
@@ -697,7 +697,7 @@ nvmm_vcpu_loop(CPUState *cpu)
             /* set int/nmi windows back to the reset state */
         }
         if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
-            cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
             apic_poll_irq(x86_cpu->apic_state);
         }
         if ((cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
@@ -710,7 +710,7 @@ nvmm_vcpu_loop(CPUState *cpu)
             do_cpu_sipi(x86_cpu);
         }
         if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
-            cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_TPR);
             nvmm_cpu_synchronize_state(cpu);
             apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
                                           env->tpr_access_type);
@@ -743,7 +743,8 @@ nvmm_vcpu_loop(CPUState *cpu)
 
         nvmm_vcpu_pre_run(cpu);
 
-        if (qatomic_read(&cpu->exit_request)) {
+        /* Corresponding store-release is in cpu_exit. */
+        if (qatomic_load_acquire(&cpu->exit_request)) {
 #if NVMM_USER_VERSION >= 2
             nvmm_vcpu_stop(vcpu);
 #else
@@ -751,8 +752,6 @@ nvmm_vcpu_loop(CPUState *cpu)
 #endif
         }
 
-        /* Read exit_request before the kernel reads the immediate exit flag */
-        smp_rmb();
         ret = nvmm_vcpu_run(mach, vcpu);
         if (ret == -1) {
             error_report("NVMM: Failed to exec a virtual processor,"
@@ -818,8 +817,6 @@ nvmm_vcpu_loop(CPUState *cpu)
     cpu_exec_end(cpu);
     bql_lock();
 
-    qatomic_set(&cpu->exit_request, false);
-
     return ret < 0;
 }
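The qatomic_read() to qatomic_load_acquire() change, together with the deleted smp_rmb() and the removal of the loop's own clearing of exit_request, suggests the series centralizes the flag's lifecycle: cpu_exit() publishes it with a store-release and the accelerator loop consumes it with a load-acquire. A minimal sketch of the pairing (the producer body is assumed from the in-diff comment, not quoted from cpu_exit() itself):

    /* Producer side: assumed shape of cpu_exit(), per the comments in the diff. */
    static void publish_exit_request_sketch(CPUState *cpu)
    {
        qatomic_store_release(&cpu->exit_request, true);
        /* ...then kick the vCPU thread... */
    }

    /* Consumer side: what nvmm_vcpu_loop() and whpx_vcpu_run() now do. */
    static bool consume_exit_request_sketch(CPUState *cpu)
    {
        /* Pairs with the store-release above. */
        return qatomic_load_acquire(&cpu->exit_request);
    }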
diff --git a/target/i386/tcg/system/seg_helper.c b/target/i386/tcg/system/seg_helper.c
index 794a23ddfc..38072e51d7 100644
--- a/target/i386/tcg/system/seg_helper.c
+++ b/target/i386/tcg/system/seg_helper.c
@@ -178,7 +178,7 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
      */
     switch (interrupt_request) {
     case CPU_INTERRUPT_POLL:
-        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
         apic_poll_irq(cpu->apic_state);
         break;
     case CPU_INTERRUPT_SIPI:
@@ -186,23 +186,22 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
         break;
     case CPU_INTERRUPT_SMI:
         cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
-        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_SMI);
         do_smm_enter(cpu);
         break;
     case CPU_INTERRUPT_NMI:
         cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
-        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_NMI);
         env->hflags2 |= HF2_NMI_MASK;
         do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
         break;
     case CPU_INTERRUPT_MCE:
-        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_MCE);
         do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
         break;
     case CPU_INTERRUPT_HARD:
         cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
-        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
-                                   CPU_INTERRUPT_VIRQ);
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
         intno = cpu_get_pic_interrupt(env);
         qemu_log_mask(CPU_LOG_INT,
                       "Servicing hardware INT=0x%02x\n", intno);
@@ -215,7 +214,7 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
         qemu_log_mask(CPU_LOG_INT,
                       "Servicing virtual hardware INT=0x%02x\n", intno);
         do_interrupt_x86_hardirq(env, intno, 1);
-        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
         env->int_ctl &= ~V_IRQ_MASK;
         break;
     }
diff --git a/target/i386/tcg/system/svm_helper.c b/target/i386/tcg/system/svm_helper.c
index 3569196bdd..505788b0e2 100644
--- a/target/i386/tcg/system/svm_helper.c
+++ b/target/i386/tcg/system/svm_helper.c
@@ -824,7 +824,7 @@ void do_vmexit(CPUX86State *env)
     env->intercept_exceptions = 0;
 
     /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
-    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+    cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
     env->int_ctl = 0;
 
     /* Clears the TSC_OFFSET inside the processor. */
diff --git a/target/i386/tcg/system/tcg-cpu.c b/target/i386/tcg/system/tcg-cpu.c
index 0538a4fd51..7255862c24 100644
--- a/target/i386/tcg/system/tcg-cpu.c
+++ b/target/i386/tcg/system/tcg-cpu.c
@@ -74,8 +74,8 @@ bool tcg_cpu_realizefn(CPUState *cs, Error **errp)
     memory_region_set_enabled(cpu->cpu_as_mem, true);
 
     cs->num_ases = 2;
-    cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
-    cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
+    cpu_address_space_init(cs, X86ASIdx_MEM, "cpu-memory", cs->memory);
+    cpu_address_space_init(cs, X86ASIdx_SMM, "cpu-smm", cpu->cpu_as_root);
 
     /* ... SMRAM with higher priority, linked from /machine/smram. */
     cpu->machine_done.notify = tcg_cpu_machine_done;
diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
index da58805b1a..f75886128d 100644
--- a/target/i386/whpx/whpx-accel-ops.c
+++ b/target/i386/whpx/whpx-accel-ops.c
@@ -42,16 +42,14 @@ static void *whpx_cpu_thread_fn(void *arg)
     qemu_guest_random_seed_thread_part2(cpu->random_seed);
 
     do {
+        qemu_process_cpu_events(cpu);
+
         if (cpu_can_run(cpu)) {
             r = whpx_vcpu_exec(cpu);
             if (r == EXCP_DEBUG) {
                 cpu_handle_guest_debug(cpu);
             }
         }
-        while (cpu_thread_is_idle(cpu)) {
-            qemu_cond_wait_bql(cpu->halt_cond);
-        }
-        qemu_wait_io_event_common(cpu);
     } while (!cpu->unplug || cpu_can_run(cpu));
 
     whpx_destroy_vcpu(cpu);
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index 878cdd1668..2a85168ed5 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -1471,14 +1471,14 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
     if (!vcpu->interruption_pending &&
         cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
         if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
-            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
             vcpu->interruptable = false;
             new_int.InterruptionType = WHvX64PendingNmi;
             new_int.InterruptionPending = 1;
             new_int.InterruptionVector = 2;
         }
         if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
-            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
         }
     }
 
@@ -1489,10 +1489,10 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
     if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
         if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
             !(env->hflags & HF_SMM_MASK)) {
-            cpu->exit_request = 1;
+            qatomic_set(&cpu->exit_request, true);
         }
         if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
-            cpu->exit_request = 1;
+            qatomic_set(&cpu->exit_request, true);
         }
     }
 
@@ -1502,7 +1502,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
         vcpu->interruptable && (env->eflags & IF_MASK)) {
         assert(!new_int.InterruptionPending);
         if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
-            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
             irq = cpu_get_pic_interrupt(env);
             if (irq >= 0) {
                 new_int.InterruptionType = WHvX64PendingInterrupt;
@@ -1520,7 +1520,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
         }
     } else if (vcpu->ready_for_pic_interrupt &&
                cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
-        cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
         irq = cpu_get_pic_interrupt(env);
         if (irq >= 0) {
             reg_names[reg_count] = WHvRegisterPendingEvent;
@@ -1539,7 +1539,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
     if (tpr != vcpu->tpr) {
         vcpu->tpr = tpr;
         reg_values[reg_count].Reg64 = tpr;
-        cpu->exit_request = 1;
+        qatomic_set(&cpu->exit_request, true);
         reg_names[reg_count] = WHvX64RegisterCr8;
         reg_count += 1;
     }
@@ -1607,7 +1607,7 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
     }
 
     if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
-        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
         apic_poll_irq(x86_cpu->apic_state);
     }
 
@@ -1623,7 +1623,7 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
     }
 
     if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
-        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
+        cpu_reset_interrupt(cpu, CPU_INTERRUPT_TPR);
         whpx_cpu_synchronize_state(cpu);
         apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
                                       env->tpr_access_type);
@@ -1714,7 +1714,8 @@ static int whpx_vcpu_run(CPUState *cpu)
         if (exclusive_step_mode == WHPX_STEP_NONE) {
             whpx_vcpu_pre_run(cpu);
 
-            if (qatomic_read(&cpu->exit_request)) {
+            /* Corresponding store-release is in cpu_exit. */
+            if (qatomic_load_acquire(&cpu->exit_request)) {
                 whpx_vcpu_kick(cpu);
             }
         }
@@ -2049,8 +2050,6 @@ static int whpx_vcpu_run(CPUState *cpu)
         whpx_last_vcpu_stopping(cpu);
     }
 
-    qatomic_set(&cpu->exit_request, false);
-
     return ret < 0;
 }
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
index d96b41a01c..b091a9c668 100644
--- a/target/openrisc/sys_helper.c
+++ b/target/openrisc/sys_helper.c
@@ -196,7 +196,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
             env->ttmr = (rb & ~TTMR_IP) | ip;
         } else {    /* Clear IP bit.  */
             env->ttmr = rb & ~TTMR_IP;
-            cs->interrupt_request &= ~CPU_INTERRUPT_TIMER;
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_TIMER);
         }
 
         cpu_openrisc_timer_update(cpu);
         bql_unlock();
diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c
index 7e5726871e..5f21739749 100644
--- a/target/ppc/helper_regs.c
+++ b/target/ppc/helper_regs.c
@@ -274,6 +274,7 @@ TCGTBCPUState ppc_get_tb_cpu_state(CPUState *cs)
     return (TCGTBCPUState){ .pc = env->nip, .flags = hflags_current };
 }
 
+#ifndef CONFIG_USER_ONLY
 void cpu_interrupt_exittb(CPUState *cs)
 {
     /*
@@ -285,6 +286,7 @@ void cpu_interrupt_exittb(CPUState *cs)
         cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
     }
 }
+#endif
 
 int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
 {
diff --git a/target/rx/helper.c b/target/rx/helper.c
index ce003af421..41c9606fd1 100644
--- a/target/rx/helper.c
+++ b/target/rx/helper.c
@@ -63,7 +63,7 @@ void rx_cpu_do_interrupt(CPUState *cs)
             env->bpsw = save_psw;
             env->pc = env->fintv;
             env->psw_ipl = 15;
-            cs->interrupt_request &= ~CPU_INTERRUPT_FIR;
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_FIR);
             qemu_set_irq(env->ack, env->ack_irq);
             qemu_log_mask(CPU_LOG_INT, "fast interrupt raised\n");
         } else if (do_irq & CPU_INTERRUPT_HARD) {
@@ -73,7 +73,7 @@ void rx_cpu_do_interrupt(CPUState *cs)
             cpu_stl_data(env, env->isp, env->pc);
             env->pc = cpu_ldl_data(env, env->intb + env->ack_irq * 4);
             env->psw_ipl = env->ack_ipl;
-            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
             qemu_set_irq(env->ack, env->ack_irq);
             qemu_log_mask(CPU_LOG_INT,
                           "interrupt 0x%02x raised\n", env->ack_irq);
diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c
index e4c75d0ce0..4c7faeee82 100644
--- a/target/s390x/tcg/excp_helper.c
+++ b/target/s390x/tcg/excp_helper.c
@@ -559,7 +559,7 @@ try_deliver:
 
     /* we might still have pending interrupts, but not deliverable */
     if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
-        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
     }
 
     /* WAIT PSW during interrupt injection or STOP interrupt */
diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c
index 39db4ffa70..fdcaa0a578 100644
--- a/target/sparc/int32_helper.c
+++ b/target/sparc/int32_helper.c
@@ -65,6 +65,7 @@ static const char *excp_name_str(int32_t exception_index)
     return excp_names[exception_index];
 }
 
+#if !defined(CONFIG_USER_ONLY)
 void cpu_check_irqs(CPUSPARCState *env)
 {
     CPUState *cs;
@@ -96,6 +97,7 @@ void cpu_check_irqs(CPUSPARCState *env)
         cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
     }
 }
+#endif
 
 void sparc_cpu_do_interrupt(CPUState *cs)
 {
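The ppc and sparc hunks share one more pattern: interrupt plumbing that only makes sense with a full machine model is now compiled out of user-mode emulation. In miniature (sketch; raise_hard_irq_sketch is a made-up example, not a function from the patch):

    /* The guard pattern, in miniature. */
    #ifndef CONFIG_USER_ONLY
    static void raise_hard_irq_sketch(CPUState *cs)
    {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);  /* needs a full machine model */
    }
    #endif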
diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c
index 49e4e51c6d..23adda4cad 100644
--- a/target/sparc/int64_helper.c
+++ b/target/sparc/int64_helper.c
@@ -62,6 +62,7 @@ static const char * const excp_names[0x80] = {
 };
 #endif
 
+#if !defined(CONFIG_USER_ONLY)
 void cpu_check_irqs(CPUSPARCState *env)
 {
     CPUState *cs;
@@ -127,6 +128,7 @@ void cpu_check_irqs(CPUSPARCState *env)
         cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
     }
 }
+#endif
 
 void sparc_cpu_do_interrupt(CPUState *cs)
 {