Diffstat (limited to 'target/arm')
| -rw-r--r-- | target/arm/arm-powerctl.c  | 202 |
| -rw-r--r-- | target/arm/arm-powerctl.h  |   2 |
| -rw-r--r-- | target/arm/arm_ldst.h      |  10 |
| -rw-r--r-- | target/arm/cpu.c           |  45 |
| -rw-r--r-- | target/arm/cpu.h           |  29 |
| -rw-r--r-- | target/arm/helper.c        | 293 |
| -rw-r--r-- | target/arm/internals.h     |   5 |
| -rw-r--r-- | target/arm/kvm.c           |   7 |
| -rw-r--r-- | target/arm/machine.c       |  41 |
| -rw-r--r-- | target/arm/op_helper.c     |  72 |
| -rw-r--r-- | target/arm/psci.c          |   4 |
| -rw-r--r-- | target/arm/translate-a64.c |  22 |
| -rw-r--r-- | target/arm/translate.c     | 213 |
| -rw-r--r-- | target/arm/translate.h     |  14 |
14 files changed, 681 insertions, 278 deletions
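The recurring shape in the arm-powerctl.c hunks below: a calling vCPU must no longer mutate another vCPU's registers directly (racy once vCPUs run in parallel under MTTCG), so the mutation is packaged into a heap-allocated info struct and queued with async_run_on_cpu() to run later in the target vCPU's own context, where the patch asserts the BQL is held. A condensed sketch of that pattern, assuming QEMU's run_on_cpu machinery as used in the diff; WorkInfo, do_state_change and request_state_change are hypothetical stand-ins for the patch's CpuOnInfo / arm_set_cpu_on_async_work / arm_set_cpu_on:

#include "qemu/osdep.h"
#include "qemu/main-loop.h"   /* qemu_mutex_iothread_locked() */
#include "qom/cpu.h"          /* CPUState, async_run_on_cpu() */

/* Hypothetical stand-in for the patch's CpuOnInfo: heap-allocated so it
 * survives until the work item runs in the target vCPU's thread. */
struct WorkInfo {
    uint64_t entry;
    uint64_t context_id;
};

/* Runs later, in the target vCPU's context; the patch asserts the BQL is
 * held here, so the state change cannot race with the vCPU itself. */
static void do_state_change(CPUState *cs, run_on_cpu_data data)
{
    struct WorkInfo *info = data.host_ptr;

    assert(qemu_mutex_iothread_locked());
    cpu_reset(cs);
    cpu_set_pc(cs, info->entry);   /* arch-specific setup elided */
    g_free(info);
}

/* Caller side: package the request and queue it instead of poking the
 * target CPU's registers directly. */
static int request_state_change(CPUState *target, uint64_t entry, uint64_t ctx)
{
    struct WorkInfo *info = g_new(struct WorkInfo, 1);

    info->entry = entry;
    info->context_id = ctx;
    async_run_on_cpu(target, do_state_change, RUN_ON_CPU_HOST_PTR(info));
    return 0;
}

The same queue-the-work shape recurs for CPU_OFF and reset in the hunks that follow.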
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c index fbb7a15daa..25207cb850 100644 --- a/target/arm/arm-powerctl.c +++ b/target/arm/arm-powerctl.c @@ -14,6 +14,7 @@ #include "internals.h" #include "arm-powerctl.h" #include "qemu/log.h" +#include "qemu/main-loop.h" #include "exec/exec-all.h" #ifndef DEBUG_ARM_POWERCTL @@ -48,11 +49,93 @@ CPUState *arm_get_cpu_by_id(uint64_t id) return NULL; } +struct CpuOnInfo { + uint64_t entry; + uint64_t context_id; + uint32_t target_el; + bool target_aa64; +}; + + +static void arm_set_cpu_on_async_work(CPUState *target_cpu_state, + run_on_cpu_data data) +{ + ARMCPU *target_cpu = ARM_CPU(target_cpu_state); + struct CpuOnInfo *info = (struct CpuOnInfo *) data.host_ptr; + + /* Initialize the cpu we are turning on */ + cpu_reset(target_cpu_state); + target_cpu_state->halted = 0; + + if (info->target_aa64) { + if ((info->target_el < 3) && arm_feature(&target_cpu->env, + ARM_FEATURE_EL3)) { + /* + * As target mode is AArch64, we need to set lower + * exception level (the requested level 2) to AArch64 + */ + target_cpu->env.cp15.scr_el3 |= SCR_RW; + } + + if ((info->target_el < 2) && arm_feature(&target_cpu->env, + ARM_FEATURE_EL2)) { + /* + * As target mode is AArch64, we need to set lower + * exception level (the requested level 1) to AArch64 + */ + target_cpu->env.cp15.hcr_el2 |= HCR_RW; + } + + target_cpu->env.pstate = aarch64_pstate_mode(info->target_el, true); + } else { + /* We are requested to boot in AArch32 mode */ + static const uint32_t mode_for_el[] = { 0, + ARM_CPU_MODE_SVC, + ARM_CPU_MODE_HYP, + ARM_CPU_MODE_SVC }; + + cpsr_write(&target_cpu->env, mode_for_el[info->target_el], CPSR_M, + CPSRWriteRaw); + } + + if (info->target_el == 3) { + /* Processor is in secure mode */ + target_cpu->env.cp15.scr_el3 &= ~SCR_NS; + } else { + /* Processor is not in secure mode */ + target_cpu->env.cp15.scr_el3 |= SCR_NS; + } + + /* We check if the started CPU is now at the correct level */ + assert(info->target_el == arm_current_el(&target_cpu->env)); + + if (info->target_aa64) { + target_cpu->env.xregs[0] = info->context_id; + target_cpu->env.thumb = false; + } else { + target_cpu->env.regs[0] = info->context_id; + target_cpu->env.thumb = info->entry & 1; + info->entry &= 0xfffffffe; + } + + /* Start the new CPU at the requested address */ + cpu_set_pc(target_cpu_state, info->entry); + + g_free(info); + + /* Finally set the power status */ + assert(qemu_mutex_iothread_locked()); + target_cpu->power_state = PSCI_ON; +} + int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, uint32_t target_el, bool target_aa64) { CPUState *target_cpu_state; ARMCPU *target_cpu; + struct CpuOnInfo *info; + + assert(qemu_mutex_iothread_locked()); DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64 "\n", cpuid, target_el, target_aa64 ? 
"aarch64" : "aarch32", entry, @@ -77,7 +160,7 @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, } target_cpu = ARM_CPU(target_cpu_state); - if (!target_cpu->powered_off) { + if (target_cpu->power_state == PSCI_ON) { qemu_log_mask(LOG_GUEST_ERROR, "[ARM]%s: CPU %" PRId64 " is already on\n", __func__, cpuid); @@ -109,74 +192,54 @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, return QEMU_ARM_POWERCTL_INVALID_PARAM; } - /* Initialize the cpu we are turning on */ - cpu_reset(target_cpu_state); - target_cpu->powered_off = false; - target_cpu_state->halted = 0; - - if (target_aa64) { - if ((target_el < 3) && arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) { - /* - * As target mode is AArch64, we need to set lower - * exception level (the requested level 2) to AArch64 - */ - target_cpu->env.cp15.scr_el3 |= SCR_RW; - } - - if ((target_el < 2) && arm_feature(&target_cpu->env, ARM_FEATURE_EL2)) { - /* - * As target mode is AArch64, we need to set lower - * exception level (the requested level 1) to AArch64 - */ - target_cpu->env.cp15.hcr_el2 |= HCR_RW; - } - - target_cpu->env.pstate = aarch64_pstate_mode(target_el, true); - } else { - /* We are requested to boot in AArch32 mode */ - static uint32_t mode_for_el[] = { 0, - ARM_CPU_MODE_SVC, - ARM_CPU_MODE_HYP, - ARM_CPU_MODE_SVC }; - - cpsr_write(&target_cpu->env, mode_for_el[target_el], CPSR_M, - CPSRWriteRaw); - } - - if (target_el == 3) { - /* Processor is in secure mode */ - target_cpu->env.cp15.scr_el3 &= ~SCR_NS; - } else { - /* Processor is not in secure mode */ - target_cpu->env.cp15.scr_el3 |= SCR_NS; - } - - /* We check if the started CPU is now at the correct level */ - assert(target_el == arm_current_el(&target_cpu->env)); - - if (target_aa64) { - target_cpu->env.xregs[0] = context_id; - target_cpu->env.thumb = false; - } else { - target_cpu->env.regs[0] = context_id; - target_cpu->env.thumb = entry & 1; - entry &= 0xfffffffe; + /* + * If another CPU has powered the target on we are in the state + * ON_PENDING and additional attempts to power on the CPU should + * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI + * spec) + */ + if (target_cpu->power_state == PSCI_ON_PENDING) { + qemu_log_mask(LOG_GUEST_ERROR, + "[ARM]%s: CPU %" PRId64 " is already powering on\n", + __func__, cpuid); + return QEMU_ARM_POWERCTL_ON_PENDING; } - /* Start the new CPU at the requested address */ - cpu_set_pc(target_cpu_state, entry); + /* To avoid racing with a CPU we are just kicking off we do the + * final bit of preparation for the work in the target CPUs + * context. 
+ */ + info = g_new(struct CpuOnInfo, 1); + info->entry = entry; + info->context_id = context_id; + info->target_el = target_el; + info->target_aa64 = target_aa64; - qemu_cpu_kick(target_cpu_state); + async_run_on_cpu(target_cpu_state, arm_set_cpu_on_async_work, + RUN_ON_CPU_HOST_PTR(info)); /* We are good to go */ return QEMU_ARM_POWERCTL_RET_SUCCESS; } +static void arm_set_cpu_off_async_work(CPUState *target_cpu_state, + run_on_cpu_data data) +{ + ARMCPU *target_cpu = ARM_CPU(target_cpu_state); + + assert(qemu_mutex_iothread_locked()); + target_cpu->power_state = PSCI_OFF; + target_cpu_state->halted = 1; + target_cpu_state->exception_index = EXCP_HLT; +} + int arm_set_cpu_off(uint64_t cpuid) { CPUState *target_cpu_state; ARMCPU *target_cpu; + assert(qemu_mutex_iothread_locked()); + DPRINTF("cpu %" PRId64 "\n", cpuid); /* change to the cpu we are powering up */ @@ -185,27 +248,34 @@ int arm_set_cpu_off(uint64_t cpuid) return QEMU_ARM_POWERCTL_INVALID_PARAM; } target_cpu = ARM_CPU(target_cpu_state); - if (target_cpu->powered_off) { + if (target_cpu->power_state == PSCI_OFF) { qemu_log_mask(LOG_GUEST_ERROR, "[ARM]%s: CPU %" PRId64 " is already off\n", __func__, cpuid); return QEMU_ARM_POWERCTL_IS_OFF; } - target_cpu->powered_off = true; - target_cpu_state->halted = 1; - target_cpu_state->exception_index = EXCP_HLT; - cpu_loop_exit(target_cpu_state); - /* notreached */ + /* Queue work to run under the target vCPUs context */ + async_run_on_cpu(target_cpu_state, arm_set_cpu_off_async_work, + RUN_ON_CPU_NULL); return QEMU_ARM_POWERCTL_RET_SUCCESS; } +static void arm_reset_cpu_async_work(CPUState *target_cpu_state, + run_on_cpu_data data) +{ + /* Reset the cpu */ + cpu_reset(target_cpu_state); +} + int arm_reset_cpu(uint64_t cpuid) { CPUState *target_cpu_state; ARMCPU *target_cpu; + assert(qemu_mutex_iothread_locked()); + DPRINTF("cpu %" PRId64 "\n", cpuid); /* change to the cpu we are resetting */ @@ -214,15 +284,17 @@ int arm_reset_cpu(uint64_t cpuid) return QEMU_ARM_POWERCTL_INVALID_PARAM; } target_cpu = ARM_CPU(target_cpu_state); - if (target_cpu->powered_off) { + + if (target_cpu->power_state == PSCI_OFF) { qemu_log_mask(LOG_GUEST_ERROR, "[ARM]%s: CPU %" PRId64 " is off\n", __func__, cpuid); return QEMU_ARM_POWERCTL_IS_OFF; } - /* Reset the cpu */ - cpu_reset(target_cpu_state); + /* Queue work to run under the target vCPUs context */ + async_run_on_cpu(target_cpu_state, arm_reset_cpu_async_work, + RUN_ON_CPU_NULL); return QEMU_ARM_POWERCTL_RET_SUCCESS; } diff --git a/target/arm/arm-powerctl.h b/target/arm/arm-powerctl.h index 98ee04989b..04353923c0 100644 --- a/target/arm/arm-powerctl.h +++ b/target/arm/arm-powerctl.h @@ -17,6 +17,7 @@ #define QEMU_ARM_POWERCTL_INVALID_PARAM QEMU_PSCI_RET_INVALID_PARAMS #define QEMU_ARM_POWERCTL_ALREADY_ON QEMU_PSCI_RET_ALREADY_ON #define QEMU_ARM_POWERCTL_IS_OFF QEMU_PSCI_RET_DENIED +#define QEMU_ARM_POWERCTL_ON_PENDING QEMU_PSCI_RET_ON_PENDING /* * arm_get_cpu_by_id: @@ -43,6 +44,7 @@ CPUState *arm_get_cpu_by_id(uint64_t cpuid); * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success. * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided. * QEMU_ARM_POWERCTL_ALREADY_ON if the CPU was already started. 
+ * QEMU_ARM_POWERCTL_ON_PENDING if the CPU is still powering up */ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, uint32_t target_el, bool target_aa64); diff --git a/target/arm/arm_ldst.h b/target/arm/arm_ldst.h index a76d89f62c..01587b3ebb 100644 --- a/target/arm/arm_ldst.h +++ b/target/arm/arm_ldst.h @@ -39,7 +39,15 @@ static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr, static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr, bool sctlr_b) { - uint16_t insn = cpu_lduw_code(env, addr); + uint16_t insn; +#ifndef CONFIG_USER_ONLY + /* In big-endian (BE32) mode, adjacent Thumb instructions have been swapped + within each word. Undo that now. */ + if (sctlr_b) { + addr ^= 2; + } +#endif + insn = cpu_lduw_code(env, addr); if (bswap_code(sctlr_b)) { return bswap16(insn); } diff --git a/target/arm/cpu.c b/target/arm/cpu.c index e9f10f7747..f7157dc0e5 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -45,7 +45,7 @@ static bool arm_cpu_has_work(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); - return !cpu->powered_off + return (cpu->power_state != PSCI_OFF) && cs->interrupt_request & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ @@ -132,7 +132,7 @@ static void arm_cpu_reset(CPUState *s) env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1; env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2; - cpu->powered_off = cpu->start_powered_off; + cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON; s->halted = cpu->start_powered_off; if (arm_feature(env, ARM_FEATURE_IWMMXT)) { @@ -446,6 +446,21 @@ print_insn_thumb1(bfd_vma pc, disassemble_info *info) return print_insn_arm(pc | 1, info); } +static int arm_read_memory_func(bfd_vma memaddr, bfd_byte *b, + int length, struct disassemble_info *info) +{ + assert(info->read_memory_inner_func); + assert((info->flags & INSN_ARM_BE32) == 0 || length == 2 || length == 4); + + if ((info->flags & INSN_ARM_BE32) != 0 && length == 2) { + assert(info->endian == BFD_ENDIAN_LITTLE); + return info->read_memory_inner_func(memaddr ^ 2, (bfd_byte *)b, 2, + info); + } else { + return info->read_memory_inner_func(memaddr, b, length, info); + } +} + static void arm_disas_set_info(CPUState *cpu, disassemble_info *info) { ARMCPU *ac = ARM_CPU(cpu); @@ -471,6 +486,14 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info) info->endian = BFD_ENDIAN_BIG; #endif } + if (info->read_memory_inner_func == NULL) { + info->read_memory_inner_func = info->read_memory_func; + info->read_memory_func = arm_read_memory_func; + } + info->flags &= ~INSN_ARM_BE32; + if (arm_sctlr_b(env)) { + info->flags |= INSN_ARM_BE32; + } } static void arm_cpu_initfn(Object *obj) @@ -541,6 +564,9 @@ static Property arm_cpu_has_el2_property = static Property arm_cpu_has_el3_property = DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true); +static Property arm_cpu_cfgend_property = + DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false); + /* use property name "pmu" to match other archs and virt tools */ static Property arm_cpu_has_pmu_property = DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true); @@ -608,6 +634,8 @@ static void arm_cpu_post_init(Object *obj) } } + qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property, + &error_abort); } static void arm_cpu_finalizefn(Object *obj) @@ -728,6 +756,14 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) cpu->reset_sctlr |= (1 << 13); } + if (cpu->cfgend) { + if (arm_feature(&cpu->env, ARM_FEATURE_V7)) { + cpu->reset_sctlr |= SCTLR_EE; + } else { + 
cpu->reset_sctlr |= SCTLR_B; + } + } + if (!cpu->has_el3) { /* If the has_el3 CPU property is disabled then we need to disable the * feature. @@ -745,7 +781,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) unset_feature(env, ARM_FEATURE_EL2); } - if (!cpu->has_pmu || !kvm_enabled()) { + if (!cpu->has_pmu) { cpu->has_pmu = false; unset_feature(env, ARM_FEATURE_PMU); } @@ -1639,6 +1675,9 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data) cc->gdb_stop_before_watchpoint = true; cc->debug_excp_handler = arm_debug_excp_handler; cc->debug_check_watchpoint = arm_debug_check_watchpoint; +#if !defined(CONFIG_USER_ONLY) + cc->adjust_watchpoint_address = arm_adjust_watchpoint_address; +#endif cc->disas_set_info = arm_disas_set_info; } diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 39bff86daf..38a8e00908 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -30,6 +30,9 @@ # define TARGET_LONG_BITS 32 #endif +/* ARM processors have a weak memory model */ +#define TCG_GUEST_DEFAULT_MO (0) + #define CPUArchState struct CPUARMState #include "qemu-common.h" @@ -307,9 +310,9 @@ typedef struct CPUARMState { uint64_t c9_pmcr; /* performance monitor control register */ uint64_t c9_pmcnten; /* perf monitor counter enables */ uint32_t c9_pmovsr; /* perf monitor overflow status */ - uint32_t c9_pmxevtyper; /* perf monitor event type */ uint32_t c9_pmuserenr; /* perf monitor user enable */ - uint32_t c9_pminten; /* perf monitor interrupt enables */ + uint64_t c9_pmselr; /* perf monitor counter selection register */ + uint64_t c9_pminten; /* perf monitor interrupt enables */ union { /* Memory attribute redirection */ struct { #ifdef HOST_WORDS_BIGENDIAN @@ -526,6 +529,15 @@ typedef struct CPUARMState { */ typedef void ARMELChangeHook(ARMCPU *cpu, void *opaque); + +/* These values map onto the return values for + * QEMU_PSCI_0_2_FN_AFFINITY_INFO */ +typedef enum ARMPSCIState { + PSCI_OFF = 0, + PSCI_ON = 1, + PSCI_ON_PENDING = 2 +} ARMPSCIState; + /** * ARMCPU: * @env: #CPUARMState @@ -582,8 +594,10 @@ struct ARMCPU { /* Should CPU start in PSCI powered-off state? */ bool start_powered_off; - /* CPU currently in PSCI powered-off state */ - bool powered_off; + + /* Current power state, access guarded by BQL */ + ARMPSCIState power_state; + /* CPU has virtualization extension */ bool has_el2; /* CPU has security extension */ @@ -676,6 +690,13 @@ struct ARMCPU { int gic_vpribits; /* number of virtual priority bits */ int gic_vprebits; /* number of virtual preemption bits */ + /* Whether the cfgend input is high (i.e. this CPU should reset into + * big-endian mode). This setting isn't used directly: instead it modifies + * the reset_sctlr value to have SCTLR_B or SCTLR_EE set, depending on the + * architecture version. 
+ */ + bool cfgend; + ARMELChangeHook *el_change_hook; void *el_change_hook_opaque; }; diff --git a/target/arm/helper.c b/target/arm/helper.c index c23df1b133..bcedb4a808 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -536,41 +536,33 @@ static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush(other_cs); - } + tlb_flush_all_cpus_synced(cs); } static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush(other_cs); - } + tlb_flush_all_cpus_synced(cs); } static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush_page(other_cs, value & TARGET_PAGE_MASK); - } + tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); } static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush_page(other_cs, value & TARGET_PAGE_MASK); - } + tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); } static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -578,19 +570,21 @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, { CPUState *cs = ENV_GET_CPU(env); - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, - ARMMMUIdx_S2NS, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0) | + (1 << ARMMMUIdx_S2NS)); } static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1, - ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1); - } + tlb_flush_by_mmuidx_all_cpus_synced(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0) | + (1 << ARMMMUIdx_S2NS)); } static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -611,13 +605,13 @@ static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri, pageaddr = sextract64(value << 12, 0, 40); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS)); } static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); uint64_t pageaddr; if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { @@ -626,9 +620,8 @@ static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri, pageaddr = sextract64(value << 12, 0, 40); - CPU_FOREACH(other_cs) { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1); - } + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S2NS)); } static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -636,17 +629,15 @@ static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, { CPUState *cs = ENV_GET_CPU(env); - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1); + tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2)); } static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - 
CPU_FOREACH(other_cs) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1); - } + tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2)); } static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -655,18 +646,17 @@ static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, CPUState *cs = ENV_GET_CPU(env); uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2)); } static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); - CPU_FOREACH(other_cs) { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1); - } + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S1E2)); } static const ARMCPRegInfo cp_reginfo[] = { @@ -975,6 +965,17 @@ static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) return total_ticks - env->cp15.c15_ccnt; } +static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and + * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the + * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are + * accessed. + */ + env->cp15.c9_pmselr = value & 0x1f; +} + static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -1043,7 +1044,25 @@ static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - env->cp15.c9_pmxevtyper = value & 0xff; + /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when + * PMSELR value is equal to or greater than the number of implemented + * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. + */ + if (env->cp15.c9_pmselr == 0x1f) { + pmccfiltr_write(env, ri, value); + } +} + +static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER + * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write(). + */ + if (env->cp15.c9_pmselr == 0x1f) { + return env->cp15.pmccfiltr_el0; + } else { + return 0; + } } static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -1194,13 +1213,17 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { /* Unimplemented so WI. */ { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP }, - /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE. - * We choose to RAZ/WI. 
- */ - { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, - .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0, - .accessfn = pmreg_access }, #ifndef CONFIG_USER_ONLY + { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, + .access = PL0_RW, .type = ARM_CP_ALIAS, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), + .accessfn = pmreg_access, .writefn = pmselr_write, + .raw_writefn = raw_write}, + { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, + .access = PL0_RW, .accessfn = pmreg_access, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), + .writefn = pmselr_write, .raw_writefn = raw_write, }, { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO, .readfn = pmccntr_read, .writefn = pmccntr_write32, @@ -1219,10 +1242,12 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), .resetvalue = 0, }, { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, - .access = PL0_RW, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper), - .accessfn = pmreg_access, .writefn = pmxevtyper_write, - .raw_writefn = raw_write }, + .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access, + .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, + { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, + .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access, + .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, /* Unimplemented, RAZ/WI. */ { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0, @@ -1240,9 +1265,17 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .writefn = pmuserenr_write, .raw_writefn = raw_write }, { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tpm, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), + .type = ARM_CP_ALIAS, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), .resetvalue = 0, .writefn = pmintenset_write, .raw_writefn = raw_write }, + { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tpm, + .type = ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), + .writefn = pmintenset_write, .raw_writefn = raw_write, + .resetvalue = 0x0 }, { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), @@ -2499,8 +2532,10 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, /* Accesses to VTTBR may change the VMID so we must flush the TLB. 
*/ if (raw_read(env, ri) != value) { - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, - ARMMMUIdx_S2NS, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0) | + (1 << ARMMMUIdx_S2NS)); raw_write(env, ri, value); } } @@ -2855,29 +2890,33 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env, static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - ARMCPU *cpu = arm_env_get_cpu(env); - CPUState *cs = CPU(cpu); + CPUState *cs = ENV_GET_CPU(env); if (arm_is_secure_below_el3(env)) { - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S1SE1) | + (1 << ARMMMUIdx_S1SE0)); } else { - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0)); } } static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { + CPUState *cs = ENV_GET_CPU(env); bool sec = arm_is_secure_below_el3(env); - CPUState *other_cs; - CPU_FOREACH(other_cs) { - if (sec) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1); - } else { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1, - ARMMMUIdx_S12NSE0, -1); - } + if (sec) { + tlb_flush_by_mmuidx_all_cpus_synced(cs, + (1 << ARMMMUIdx_S1SE1) | + (1 << ARMMMUIdx_S1SE0)); + } else { + tlb_flush_by_mmuidx_all_cpus_synced(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0)); } } @@ -2892,13 +2931,19 @@ static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, CPUState *cs = CPU(cpu); if (arm_is_secure_below_el3(env)) { - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S1SE1) | + (1 << ARMMMUIdx_S1SE0)); } else { if (arm_feature(env, ARM_FEATURE_EL2)) { - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, - ARMMMUIdx_S2NS, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0) | + (1 << ARMMMUIdx_S2NS)); } else { - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0)); } } } @@ -2909,7 +2954,7 @@ static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, ARMCPU *cpu = arm_env_get_cpu(env); CPUState *cs = CPU(cpu); - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1); + tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2)); } static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -2918,7 +2963,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, ARMCPU *cpu = arm_env_get_cpu(env); CPUState *cs = CPU(cpu); - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1); + tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E3)); } static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -2928,41 +2973,40 @@ static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, * stage 2 translations, whereas most other scopes only invalidate * stage 1 translations. 
*/ + CPUState *cs = ENV_GET_CPU(env); bool sec = arm_is_secure_below_el3(env); bool has_el2 = arm_feature(env, ARM_FEATURE_EL2); - CPUState *other_cs; - - CPU_FOREACH(other_cs) { - if (sec) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1); - } else if (has_el2) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1, - ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1); - } else { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1, - ARMMMUIdx_S12NSE0, -1); - } + + if (sec) { + tlb_flush_by_mmuidx_all_cpus_synced(cs, + (1 << ARMMMUIdx_S1SE1) | + (1 << ARMMMUIdx_S1SE0)); + } else if (has_el2) { + tlb_flush_by_mmuidx_all_cpus_synced(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0) | + (1 << ARMMMUIdx_S2NS)); + } else { + tlb_flush_by_mmuidx_all_cpus_synced(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0)); } } static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1); - } + tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2)); } static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1); - } + tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E3)); } static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -2978,11 +3022,13 @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t pageaddr = sextract64(value << 12, 0, 56); if (arm_is_secure_below_el3(env)) { - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1SE1, - ARMMMUIdx_S1SE0, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, + (1 << ARMMMUIdx_S1SE1) | + (1 << ARMMMUIdx_S1SE0)); } else { - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S12NSE1, - ARMMMUIdx_S12NSE0, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0)); } } @@ -2997,7 +3043,7 @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, CPUState *cs = CPU(cpu); uint64_t pageaddr = sextract64(value << 12, 0, 56); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2)); } static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -3011,47 +3057,46 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, CPUState *cs = CPU(cpu); uint64_t pageaddr = sextract64(value << 12, 0, 56); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E3, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E3)); } static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { + ARMCPU *cpu = arm_env_get_cpu(env); + CPUState *cs = CPU(cpu); bool sec = arm_is_secure_below_el3(env); - CPUState *other_cs; uint64_t pageaddr = sextract64(value << 12, 0, 56); - CPU_FOREACH(other_cs) { - if (sec) { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1SE1, - ARMMMUIdx_S1SE0, -1); - } else { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S12NSE1, - ARMMMUIdx_S12NSE0, -1); - } + if (sec) { + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S1SE1) | + (1 << ARMMMUIdx_S1SE0)); + } else { + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0)); } } static void 
tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); uint64_t pageaddr = sextract64(value << 12, 0, 56); - CPU_FOREACH(other_cs) { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1); - } + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S1E2)); } static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); uint64_t pageaddr = sextract64(value << 12, 0, 56); - CPU_FOREACH(other_cs) { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E3, -1); - } + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S1E3)); } static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -3073,13 +3118,13 @@ static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, pageaddr = sextract64(value << 12, 0, 48); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS)); } static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); uint64_t pageaddr; if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { @@ -3088,9 +3133,8 @@ static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, pageaddr = sextract64(value << 12, 0, 48); - CPU_FOREACH(other_cs) { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1); - } + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S2NS)); } static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, @@ -4590,12 +4634,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, - /* We mask out the PMUVer field, because we don't currently - * implement the PMU. Not advertising it prevents the guest - * from trying to use it and getting UNDEFs on registers we - * don't implement. - */ - .resetvalue = cpu->id_aa64dfr0 & ~0xf00 }, + .resetvalue = cpu->id_aa64dfr0 }, { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, @@ -6731,6 +6770,12 @@ void arm_cpu_do_interrupt(CPUState *cs) arm_cpu_do_interrupt_aarch32(cs); } + /* Hooks may change global state so BQL should be held, also the + * BQL needs to be held for any modification of + * cs->interrupt_request. + */ + g_assert(qemu_mutex_iothread_locked()); + arm_call_el_change_hook(cpu); if (!kvm_enabled()) { diff --git a/target/arm/internals.h b/target/arm/internals.h index 2e65bc12fa..f742a419ff 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -444,6 +444,11 @@ void hw_breakpoint_update_all(ARMCPU *cpu); /* Callback function for checking if a watchpoint should trigger. */ bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp); +/* Adjust addresses (in BE32 mode) before testing against watchpoint + * addresses. + */ +vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len); + /* Callback function for when a watchpoint or breakpoint triggers. 
*/ void arm_debug_excp_handler(CPUState *cs); diff --git a/target/arm/kvm.c b/target/arm/kvm.c index c00b94e42a..395e986973 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -488,8 +488,8 @@ int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu) { if (cap_has_mp_state) { struct kvm_mp_state mp_state = { - .mp_state = - cpu->powered_off ? KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE + .mp_state = (cpu->power_state == PSCI_OFF) ? + KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE }; int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); if (ret) { @@ -515,7 +515,8 @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu) __func__, ret, strerror(-ret)); abort(); } - cpu->powered_off = (mp_state.mp_state == KVM_MP_STATE_STOPPED); + cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ? + PSCI_OFF : PSCI_ON; } return 0; diff --git a/target/arm/machine.c b/target/arm/machine.c index fa5ec76090..d8094a840b 100644 --- a/target/arm/machine.c +++ b/target/arm/machine.c @@ -211,6 +211,38 @@ static const VMStateInfo vmstate_cpsr = { .put = put_cpsr, }; +static int get_power(QEMUFile *f, void *opaque, size_t size, + VMStateField *field) +{ + ARMCPU *cpu = opaque; + bool powered_off = qemu_get_byte(f); + cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON; + return 0; +} + +static int put_power(QEMUFile *f, void *opaque, size_t size, + VMStateField *field, QJSON *vmdesc) +{ + ARMCPU *cpu = opaque; + + /* Migration should never happen while we transition power states */ + + if (cpu->power_state == PSCI_ON || + cpu->power_state == PSCI_OFF) { + bool powered_off = (cpu->power_state == PSCI_OFF) ? true : false; + qemu_put_byte(f, powered_off); + return 0; + } else { + return 1; + } +} + +static const VMStateInfo vmstate_powered_off = { + .name = "powered_off", + .get = get_power, + .put = put_power, +}; + static void cpu_pre_save(void *opaque) { ARMCPU *cpu = opaque; @@ -329,7 +361,14 @@ const VMStateDescription vmstate_arm_cpu = { VMSTATE_UINT64(env.exception.vaddress, ARMCPU), VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU), VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU), - VMSTATE_BOOL(powered_off, ARMCPU), + { + .name = "power_state", + .version_id = 0, + .size = sizeof(bool), + .info = &vmstate_powered_off, + .flags = VMS_SINGLE, + .offset = 0, + }, VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription*[]) { diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c index ba796d898e..d64c8670fa 100644 --- a/target/arm/op_helper.c +++ b/target/arm/op_helper.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu/log.h" +#include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" #include "internals.h" @@ -435,6 +436,13 @@ void HELPER(yield)(CPUARMState *env) ARMCPU *cpu = arm_env_get_cpu(env); CPUState *cs = CPU(cpu); + /* When running in MTTCG we don't generate jumps to the yield and + * WFE helpers as it won't affect the scheduling of other vCPUs. + * If we wanted to more completely model WFE/SEV so we don't busy + * spin unnecessarily we would need to do something more involved. + */ + g_assert(!parallel_cpus); + /* This is a non-trappable hint instruction that generally indicates * that the guest is currently busy-looping. Yield control back to the * top level loop so that a more deserving VCPU has a chance to run. @@ -487,7 +495,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) */ env->regs[15] &= (env->thumb ? 
~1 : ~3); + qemu_mutex_lock_iothread(); arm_call_el_change_hook(arm_env_get_cpu(env)); + qemu_mutex_unlock_iothread(); } /* Access to user mode registers from privileged modes. */ @@ -735,28 +745,58 @@ void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value) { const ARMCPRegInfo *ri = rip; - ri->writefn(env, ri, value); + if (ri->type & ARM_CP_IO) { + qemu_mutex_lock_iothread(); + ri->writefn(env, ri, value); + qemu_mutex_unlock_iothread(); + } else { + ri->writefn(env, ri, value); + } } uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip) { const ARMCPRegInfo *ri = rip; + uint32_t res; - return ri->readfn(env, ri); + if (ri->type & ARM_CP_IO) { + qemu_mutex_lock_iothread(); + res = ri->readfn(env, ri); + qemu_mutex_unlock_iothread(); + } else { + res = ri->readfn(env, ri); + } + + return res; } void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value) { const ARMCPRegInfo *ri = rip; - ri->writefn(env, ri, value); + if (ri->type & ARM_CP_IO) { + qemu_mutex_lock_iothread(); + ri->writefn(env, ri, value); + qemu_mutex_unlock_iothread(); + } else { + ri->writefn(env, ri, value); + } } uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip) { const ARMCPRegInfo *ri = rip; + uint64_t res; - return ri->readfn(env, ri); + if (ri->type & ARM_CP_IO) { + qemu_mutex_lock_iothread(); + res = ri->readfn(env, ri); + qemu_mutex_unlock_iothread(); + } else { + res = ri->readfn(env, ri); + } + + return res; } void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm) @@ -989,7 +1029,9 @@ void HELPER(exception_return)(CPUARMState *env) cur_el, new_el, env->pc); } + qemu_mutex_lock_iothread(); arm_call_el_change_hook(arm_env_get_cpu(env)); + qemu_mutex_unlock_iothread(); return; @@ -1225,6 +1267,28 @@ bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) return check_watchpoints(cpu); } +vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + /* In BE32 system mode, target memory is stored byteswapped (on a + * little-endian host system), and by the time we reach here (via an + * opcode helper) the addresses of subword accesses have been adjusted + * to account for that, which means that watchpoints will not match. + * Undo the adjustment here. + */ + if (arm_sctlr_b(env)) { + if (len == 1) { + addr ^= 3; + } else if (len == 2) { + addr ^= 2; + } + } + + return addr; +} + void arm_debug_excp_handler(CPUState *cs) { /* Called by core code when a watchpoint or breakpoint fires; diff --git a/target/arm/psci.c b/target/arm/psci.c index 64bf82eea1..ade9fe2ede 100644 --- a/target/arm/psci.c +++ b/target/arm/psci.c @@ -127,7 +127,9 @@ void arm_handle_psci_call(ARMCPU *cpu) break; } target_cpu = ARM_CPU(target_cpu_state); - ret = target_cpu->powered_off ? 1 : 0; + + g_assert(qemu_mutex_iothread_locked()); + ret = target_cpu->power_state; break; default: /* Everything above affinity level 0 is always on. */ diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index d0352e2045..e15eae6d41 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -379,20 +379,6 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest) } } -static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn) -{ - /* We don't need to save all of the syndrome so we mask and shift - * out uneeded bits to help the sleb128 encoder do a better job. 
- */ - syn &= ARM_INSN_START_WORD2_MASK; - syn >>= ARM_INSN_START_WORD2_SHIFT; - - /* We check and clear insn_start_idx to catch multiple updates. */ - assert(s->insn_start_idx != 0); - tcg_set_insn_param(s->insn_start_idx, 2, syn); - s->insn_start_idx = 0; -} - static void unallocated_encoding(DisasContext *s) { /* Unallocated and reserved encodings are uncategorized */ @@ -1342,10 +1328,14 @@ static void handle_hint(DisasContext *s, uint32_t insn, s->is_jmp = DISAS_WFI; return; case 1: /* YIELD */ - s->is_jmp = DISAS_YIELD; + if (!parallel_cpus) { + s->is_jmp = DISAS_YIELD; + } return; case 2: /* WFE */ - s->is_jmp = DISAS_WFE; + if (!parallel_cpus) { + s->is_jmp = DISAS_WFE; + } return; case 4: /* SEV */ case 5: /* SEVL */ diff --git a/target/arm/translate.c b/target/arm/translate.c index 493c627bcf..abc1f77ee4 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -102,6 +102,49 @@ void arm_translate_init(void) a64_translate_init(); } +/* Flags for the disas_set_da_iss info argument: + * lower bits hold the Rt register number, higher bits are flags. + */ +typedef enum ISSInfo { + ISSNone = 0, + ISSRegMask = 0x1f, + ISSInvalid = (1 << 5), + ISSIsAcqRel = (1 << 6), + ISSIsWrite = (1 << 7), + ISSIs16Bit = (1 << 8), +} ISSInfo; + +/* Save the syndrome information for a Data Abort */ +static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo) +{ + uint32_t syn; + int sas = memop & MO_SIZE; + bool sse = memop & MO_SIGN; + bool is_acqrel = issinfo & ISSIsAcqRel; + bool is_write = issinfo & ISSIsWrite; + bool is_16bit = issinfo & ISSIs16Bit; + int srt = issinfo & ISSRegMask; + + if (issinfo & ISSInvalid) { + /* Some callsites want to conditionally provide ISS info, + * eg "only if this was not a writeback" + */ + return; + } + + if (srt == 15) { + /* For AArch32, insns where the src/dest is R15 never generate + * ISS information. Catching that here saves checking at all + * the call sites. + */ + return; + } + + syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel, + 0, 0, 0, is_write, 0, is_16bit); + disas_set_insn_syndrome(s, syn); +} + static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s) { /* Return the mmu_idx to use for A32/T32 "unprivileged load/store" @@ -933,6 +976,14 @@ static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \ TCGv_i32 a32, int index) \ { \ gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \ +} \ +static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \ + TCGv_i32 val, \ + TCGv_i32 a32, int index, \ + ISSInfo issinfo) \ +{ \ + gen_aa32_ld##SUFF(s, val, a32, index); \ + disas_set_da_iss(s, OPC, issinfo); \ } #define DO_GEN_ST(SUFF, OPC) \ @@ -940,6 +991,14 @@ static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \ TCGv_i32 a32, int index) \ { \ gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \ +} \ +static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \ + TCGv_i32 val, \ + TCGv_i32 a32, int index, \ + ISSInfo issinfo) \ +{ \ + gen_aa32_st##SUFF(s, val, a32, index); \ + disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \ } static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val) @@ -4345,20 +4404,32 @@ static void gen_exception_return(DisasContext *s, TCGv_i32 pc) gen_rfe(s, pc, load_cpu_field(spsr)); } +/* + * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we + * only call the helper when running single threaded TCG code to ensure + * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we + * just skip this instruction. 
Currently the SEV/SEVL instructions + * which are *one* of many ways to wake the CPU from WFE are not + * implemented so we can't sleep like WFI does. + */ static void gen_nop_hint(DisasContext *s, int val) { switch (val) { case 1: /* yield */ - gen_set_pc_im(s, s->pc); - s->is_jmp = DISAS_YIELD; + if (!parallel_cpus) { + gen_set_pc_im(s, s->pc); + s->is_jmp = DISAS_YIELD; + } break; case 3: /* wfi */ gen_set_pc_im(s, s->pc); s->is_jmp = DISAS_WFI; break; case 2: /* wfe */ - gen_set_pc_im(s, s->pc); - s->is_jmp = DISAS_WFE; + if (!parallel_cpus) { + gen_set_pc_im(s, s->pc); + s->is_jmp = DISAS_WFE; + } break; case 4: /* sev */ case 5: /* sevl */ @@ -8682,16 +8753,19 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) tmp = tcg_temp_new_i32(); switch (op1) { case 0: /* lda */ - gen_aa32_ld32u(s, tmp, addr, - get_mem_index(s)); + gen_aa32_ld32u_iss(s, tmp, addr, + get_mem_index(s), + rd | ISSIsAcqRel); break; case 2: /* ldab */ - gen_aa32_ld8u(s, tmp, addr, - get_mem_index(s)); + gen_aa32_ld8u_iss(s, tmp, addr, + get_mem_index(s), + rd | ISSIsAcqRel); break; case 3: /* ldah */ - gen_aa32_ld16u(s, tmp, addr, - get_mem_index(s)); + gen_aa32_ld16u_iss(s, tmp, addr, + get_mem_index(s), + rd | ISSIsAcqRel); break; default: abort(); @@ -8702,16 +8776,19 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) tmp = load_reg(s, rm); switch (op1) { case 0: /* stl */ - gen_aa32_st32(s, tmp, addr, - get_mem_index(s)); + gen_aa32_st32_iss(s, tmp, addr, + get_mem_index(s), + rm | ISSIsAcqRel); break; case 2: /* stlb */ - gen_aa32_st8(s, tmp, addr, - get_mem_index(s)); + gen_aa32_st8_iss(s, tmp, addr, + get_mem_index(s), + rm | ISSIsAcqRel); break; case 3: /* stlh */ - gen_aa32_st16(s, tmp, addr, - get_mem_index(s)); + gen_aa32_st16_iss(s, tmp, addr, + get_mem_index(s), + rm | ISSIsAcqRel); break; default: abort(); @@ -8782,11 +8859,18 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) } else { int address_offset; bool load = insn & (1 << 20); + bool wbit = insn & (1 << 21); + bool pbit = insn & (1 << 24); bool doubleword = false; + ISSInfo issinfo; + /* Misc load/store */ rn = (insn >> 16) & 0xf; rd = (insn >> 12) & 0xf; + /* ISS not valid if writeback */ + issinfo = (pbit & !wbit) ? rd : ISSInvalid; + if (!load && (sh & 2)) { /* doubleword */ ARCH(5TE); @@ -8799,8 +8883,9 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) } addr = load_reg(s, rn); - if (insn & (1 << 24)) + if (pbit) { gen_add_datah_offset(s, insn, 0, addr); + } address_offset = 0; if (doubleword) { @@ -8829,30 +8914,33 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) tmp = tcg_temp_new_i32(); switch (sh) { case 1: - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), + issinfo); break; case 2: - gen_aa32_ld8s(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), + issinfo); break; default: case 3: - gen_aa32_ld16s(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), + issinfo); break; } } else { /* store */ tmp = load_reg(s, rd); - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); + gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo); tcg_temp_free_i32(tmp); } /* Perform base writeback before the loaded value to ensure correct behavior with overlapping index registers. ldrd with base writeback is undefined if the destination and index registers overlap. 
*/ - if (!(insn & (1 << 24))) { + if (!pbit) { gen_add_datah_offset(s, insn, address_offset, addr); store_reg(s, rn, addr); - } else if (insn & (1 << 21)) { + } else if (wbit) { if (address_offset) tcg_gen_addi_i32(addr, addr, address_offset); store_reg(s, rn, addr); @@ -9195,17 +9283,17 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) /* load */ tmp = tcg_temp_new_i32(); if (insn & (1 << 22)) { - gen_aa32_ld8u(s, tmp, tmp2, i); + gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd); } else { - gen_aa32_ld32u(s, tmp, tmp2, i); + gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd); } } else { /* store */ tmp = load_reg(s, rd); if (insn & (1 << 22)) { - gen_aa32_st8(s, tmp, tmp2, i); + gen_aa32_st8_iss(s, tmp, tmp2, i, rd); } else { - gen_aa32_st32(s, tmp, tmp2, i); + gen_aa32_st32_iss(s, tmp, tmp2, i, rd); } tcg_temp_free_i32(tmp); } @@ -9666,13 +9754,16 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw tmp = tcg_temp_new_i32(); switch (op) { case 0: /* ldab */ - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), + rs | ISSIsAcqRel); break; case 1: /* ldah */ - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), + rs | ISSIsAcqRel); break; case 2: /* lda */ - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), + rs | ISSIsAcqRel); break; default: abort(); @@ -9682,13 +9773,16 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw tmp = load_reg(s, rs); switch (op) { case 0: /* stlb */ - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); + gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), + rs | ISSIsAcqRel); break; case 1: /* stlh */ - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); + gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), + rs | ISSIsAcqRel); break; case 2: /* stl */ - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), + rs | ISSIsAcqRel); break; default: abort(); @@ -10634,6 +10728,8 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw int postinc = 0; int writeback = 0; int memidx; + ISSInfo issinfo; + if ((insn & 0x01100000) == 0x01000000) { if (disas_neon_ls_insn(s, insn)) { goto illegal_op; @@ -10737,24 +10833,27 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw } } } + + issinfo = writeback ? ISSInvalid : rs; + if (insn & (1 << 20)) { /* Load. 
*/ tmp = tcg_temp_new_i32(); switch (op) { case 0: - gen_aa32_ld8u(s, tmp, addr, memidx); + gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo); break; case 4: - gen_aa32_ld8s(s, tmp, addr, memidx); + gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo); break; case 1: - gen_aa32_ld16u(s, tmp, addr, memidx); + gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo); break; case 5: - gen_aa32_ld16s(s, tmp, addr, memidx); + gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo); break; case 2: - gen_aa32_ld32u(s, tmp, addr, memidx); + gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo); break; default: tcg_temp_free_i32(tmp); @@ -10771,13 +10870,13 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw tmp = load_reg(s, rs); switch (op) { case 0: - gen_aa32_st8(s, tmp, addr, memidx); + gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo); break; case 1: - gen_aa32_st16(s, tmp, addr, memidx); + gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo); break; case 2: - gen_aa32_st32(s, tmp, addr, memidx); + gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo); break; default: tcg_temp_free_i32(tmp); @@ -10914,7 +11013,8 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, val); tmp = tcg_temp_new_i32(); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), + rd | ISSIs16Bit); tcg_temp_free_i32(addr); store_reg(s, rd, tmp); break; @@ -11117,28 +11217,28 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) switch (op) { case 0: /* str */ - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 1: /* strh */ - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); + gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 2: /* strb */ - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); + gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 3: /* ldrsb */ - gen_aa32_ld8s(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 4: /* ldr */ - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 5: /* ldrh */ - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 6: /* ldrb */ - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 7: /* ldrsh */ - gen_aa32_ld16s(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; } if (op >= 3) { /* load */ @@ -11182,12 +11282,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); store_reg(s, rd, tmp); } else { /* store */ tmp = load_reg(s, rd); - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); + gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); @@ -11204,12 +11304,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); store_reg(s, rd, tmp); } 
else { /* store */ tmp = load_reg(s, rd); - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); + gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); @@ -11225,12 +11325,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); store_reg(s, rd, tmp); } else { /* store */ tmp = load_reg(s, rd); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); @@ -11712,6 +11812,7 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb) store_cpu_field(tmp, condexec_bits); } do { + dc->insn_start_idx = tcg_op_buf_count(); tcg_gen_insn_start(dc->pc, (dc->condexec_cond << 4) | (dc->condexec_mask >> 1), 0); diff --git a/target/arm/translate.h b/target/arm/translate.h index 285e96f087..abb0760158 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -104,6 +104,20 @@ static inline int default_exception_el(DisasContext *s) ? 3 : MAX(1, s->current_el); } +static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn) +{ + /* We don't need to save all of the syndrome so we mask and shift + * out unneeded bits to help the sleb128 encoder do a better job. + */ + syn &= ARM_INSN_START_WORD2_MASK; + syn >>= ARM_INSN_START_WORD2_SHIFT; + + /* We check and clear insn_start_idx to catch multiple updates. */ + assert(s->insn_start_idx != 0); + tcg_set_insn_param(s->insn_start_idx, 2, syn); + s->insn_start_idx = 0; +} + /* target-specific extra values for is_jmp */ /* These instructions trap after executing, so the A32/T32 decoder must * defer them until after the conditional execution state has been updated. |
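A note on the ISSInfo encoding used throughout the translate.c hunks above: the Rt register number lives in the low five bits (ISSRegMask) with qualifier flags in the bits above it, which is why call sites can simply write rd | ISSIsAcqRel or rs | ISSIs16Bit. A self-contained illustration in plain C; the enum values are copied from the patch, while the main() harness is only a demo:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Copied from the ISSInfo enum the patch adds to translate.c: the low
 * five bits carry the Rt register number, the bits above are flags. */
typedef enum ISSInfo {
    ISSNone     = 0,
    ISSRegMask  = 0x1f,
    ISSInvalid  = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite  = (1 << 7),
    ISSIs16Bit  = (1 << 8),
} ISSInfo;

int main(void)
{
    int rd = 3;                                  /* example Rt register */
    int issinfo = rd | ISSIsAcqRel | ISSIs16Bit; /* as at the call sites */

    /* Recover the fields the way disas_set_da_iss() does. */
    int srt = issinfo & ISSRegMask;
    bool is_acqrel = (issinfo & ISSIsAcqRel) != 0;
    bool is_16bit = (issinfo & ISSIs16Bit) != 0;

    assert(srt == 3 && is_acqrel && is_16bit);
    printf("srt=%d acqrel=%d 16bit=%d\n", srt, is_acqrel, is_16bit);
    return 0;
}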