Diffstat (limited to 'target'): 39 files changed, 1864 insertions(+), 414 deletions(-)
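Orientation note: the cortex_r52_initfn() hunk below sets cpu->midr = 0x411fd133 with the comment /* r1p3 */. The following is a minimal standalone sketch (not QEMU code; it only assumes the architectural MIDR field layout, the same one the R_MIDR_EL1_* masks later in this diff describe) showing how that value decodes to "r1p3":

#include <stdint.h>
#include <stdio.h>

/*
 * Standalone sketch: decode an Arm MIDR value using the architectural
 * field layout: Implementer[31:24], Variant[23:20], Architecture[19:16],
 * PartNum[15:4], Revision[3:0].
 */
int main(void)
{
    uint32_t midr = 0x411fd133;  /* value set by cortex_r52_initfn() below */

    uint32_t implementer  = (midr >> 24) & 0xff; /* 0x41 = 'A', Arm Ltd */
    uint32_t variant      = (midr >> 20) & 0xf;  /* major revision: 1 */
    uint32_t architecture = (midr >> 16) & 0xf;  /* 0xf = see feature regs */
    uint32_t partnum      = (midr >> 4) & 0xfff; /* 0xd13 = Cortex-R52 */
    uint32_t revision     = midr & 0xf;          /* minor revision: 3 */

    (void)architecture;
    printf("implementer=%#x part=%#x -> r%up%u\n",
           implementer, partnum, variant, revision); /* prints ... r1p3 */
    return 0;
}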
diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 2fa022f62b..5f63316dbf 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -26,7 +26,6 @@ #include "target/arm/idau.h" #include "qemu/module.h" #include "qapi/error.h" -#include "qapi/visitor.h" #include "cpu.h" #ifdef CONFIG_TCG #include "hw/core/tcg-cpu-ops.h" @@ -309,6 +308,10 @@ static void arm_cpu_reset_hold(Object *obj) env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, CPACR, CP11, 3); #endif + if (arm_feature(env, ARM_FEATURE_V8)) { + env->cp15.rvbar = cpu->rvbar_prop; + env->regs[15] = cpu->rvbar_prop; + } } #if defined(CONFIG_USER_ONLY) @@ -487,6 +490,14 @@ static void arm_cpu_reset_hold(Object *obj) sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion); } } + + if (cpu->pmsav8r_hdregion > 0) { + memset(env->pmsav8.hprbar, 0, + sizeof(*env->pmsav8.hprbar) * cpu->pmsav8r_hdregion); + memset(env->pmsav8.hprlar, 0, + sizeof(*env->pmsav8.hprlar) * cpu->pmsav8r_hdregion); + } + env->pmsav7.rnr[M_REG_NS] = 0; env->pmsav7.rnr[M_REG_S] = 0; env->pmsav8.mair0[M_REG_NS] = 0; @@ -1345,7 +1356,7 @@ void arm_cpu_post_init(Object *obj) qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property); } - if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { object_property_add_uint64_ptr(obj, "rvbar", &cpu->rvbar_prop, OBJ_PROP_FLAG_READWRITE); @@ -1998,11 +2009,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) /* MPU can be configured out of a PMSA CPU either by setting has-mpu * to false or by setting pmsav7-dregion to 0. */ - if (!cpu->has_mpu) { - cpu->pmsav7_dregion = 0; - } - if (cpu->pmsav7_dregion == 0) { + if (!cpu->has_mpu || cpu->pmsav7_dregion == 0) { cpu->has_mpu = false; + cpu->pmsav7_dregion = 0; + cpu->pmsav8r_hdregion = 0; } if (arm_feature(env, ARM_FEATURE_PMSA) && @@ -2029,6 +2039,19 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) env->pmsav7.dracr = g_new0(uint32_t, nr); } } + + if (cpu->pmsav8r_hdregion > 0xff) { + error_setg(errp, "PMSAv8 MPU EL2 #regions invalid %" PRIu32, + cpu->pmsav8r_hdregion); + return; + } + + if (cpu->pmsav8r_hdregion) { + env->pmsav8.hprbar = g_new0(uint32_t, + cpu->pmsav8r_hdregion); + env->pmsav8.hprlar = g_new0(uint32_t, + cpu->pmsav8r_hdregion); + } } if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 2b4bd20f9d..bf2bce046d 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -309,6 +309,7 @@ typedef struct CPUArchState { }; uint64_t sctlr_el[4]; }; + uint64_t vsctlr; /* Virtualization System control register. */ uint64_t cpacr_el1; /* Architectural feature access control register */ uint64_t cptr_el[4]; /* ARMv8 feature trap registers */ uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. 
*/ @@ -745,8 +746,11 @@ typedef struct CPUArchState { */ uint32_t *rbar[M_REG_NUM_BANKS]; uint32_t *rlar[M_REG_NUM_BANKS]; + uint32_t *hprbar; + uint32_t *hprlar; uint32_t mair0[M_REG_NUM_BANKS]; uint32_t mair1[M_REG_NUM_BANKS]; + uint32_t hprselr; } pmsav8; /* v8M SAU */ @@ -906,6 +910,8 @@ struct ArchCPU { bool has_mpu; /* PMSAv7 MPU number of supported regions */ uint32_t pmsav7_dregion; + /* PMSAv8 MPU number of supported hyp regions */ + uint32_t pmsav8r_hdregion; /* v8M SAU number of supported regions */ uint32_t sau_sregion; diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 2cf2ca4ce5..0e021960fb 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -21,13 +21,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "cpu.h" -#ifdef CONFIG_TCG -#include "hw/core/tcg-cpu-ops.h" -#endif /* CONFIG_TCG */ #include "qemu/module.h" -#if !defined(CONFIG_USER_ONLY) -#include "hw/loader.h" -#endif #include "sysemu/kvm.h" #include "sysemu/hvf.h" #include "kvm_arm.h" diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c index 568cbcfc52..ccde5080eb 100644 --- a/target/arm/cpu_tcg.c +++ b/target/arm/cpu_tcg.c @@ -854,6 +854,47 @@ static void cortex_r5_initfn(Object *obj) define_arm_cp_regs(cpu, cortexr5_cp_reginfo); } +static void cortex_r52_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_PMSA); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + cpu->midr = 0x411fd133; /* r1p3 */ + cpu->revidr = 0x00000000; + cpu->reset_fpsid = 0x41034023; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; + cpu->ctr = 0x8144c004; + cpu->reset_sctlr = 0x30c50838; + cpu->isar.id_pfr0 = 0x00000131; + cpu->isar.id_pfr1 = 0x10111001; + cpu->isar.id_dfr0 = 0x03010006; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00211040; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01200000; + cpu->isar.id_mmfr3 = 0xf0102211; + cpu->isar.id_mmfr4 = 0x00000010; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232142; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00010142; + cpu->isar.id_isar5 = 0x00010001; + cpu->isar.dbgdidr = 0x77168000; + cpu->clidr = (1 << 27) | (1 << 24) | 0x3; + cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ + + cpu->pmsav7_dregion = 16; + cpu->pmsav8r_hdregion = 16; +} + static void cortex_r5f_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); @@ -1163,6 +1204,7 @@ static const ARMCPUInfo arm_tcg_cpus[] = { .class_init = arm_v7m_class_init }, { .name = "cortex-r5", .initfn = cortex_r5_initfn }, { .name = "cortex-r5f", .initfn = cortex_r5f_initfn }, + { .name = "cortex-r52", .initfn = cortex_r52_initfn }, { .name = "ti925t", .initfn = ti925t_initfn }, { .name = "sa1100", .initfn = sa1100_initfn }, { .name = "sa1110", .initfn = sa1110_initfn }, diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index c21739242c..2f6ddc0da5 100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -437,6 +437,9 @@ static uint32_t arm_debug_exception_fsr(CPUARMState *env) if (target_el == 2 || arm_el_is_aa64(env, target_el)) { using_lpae = true; + } else if (arm_feature(env, ARM_FEATURE_PMSA) && + arm_feature(env, ARM_FEATURE_V8)) { + using_lpae = true; } else { if (arm_feature(env, ARM_FEATURE_LPAE) && 
(env->cp15.tcr_el[target_el] & TTBCR_EAE)) { diff --git a/target/arm/helper.c b/target/arm/helper.c index bac2ea62c4..cee3804354 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -7,13 +7,11 @@ */ #include "qemu/osdep.h" -#include "qemu/units.h" #include "qemu/log.h" #include "trace.h" #include "cpu.h" #include "internals.h" #include "exec/helper-proto.h" -#include "qemu/host-utils.h" #include "qemu/main-loop.h" #include "qemu/timer.h" #include "qemu/bitops.h" @@ -22,17 +20,12 @@ #include "exec/exec-all.h" #include <zlib.h> /* For crc32 */ #include "hw/irq.h" -#include "semihosting/semihost.h" -#include "sysemu/cpus.h" #include "sysemu/cpu-timers.h" #include "sysemu/kvm.h" -#include "qemu/range.h" #include "qapi/qapi-commands-machine-target.h" #include "qapi/error.h" #include "qemu/guest-random.h" #ifdef CONFIG_TCG -#include "arm_ldst.h" -#include "exec/cpu_ldst.h" #include "semihosting/common-semi.h" #endif #include "cpregs.h" @@ -83,7 +76,8 @@ uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t v) { - /* Raw write of a coprocessor register (as needed for migration, etc). + /* + * Raw write of a coprocessor register (as needed for migration, etc). * Note that constant registers are treated as write-ignored; the * caller should check for success by whether a readback gives the * value written. @@ -101,7 +95,8 @@ static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, static bool raw_accessors_invalid(const ARMCPRegInfo *ri) { - /* Return true if the regdef would cause an assertion if you called + /* + * Return true if the regdef would cause an assertion if you called * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a * program bug for it not to have the NO_RAW flag). * NB that returning false here doesn't necessarily mean that calling @@ -184,7 +179,8 @@ bool write_list_to_cpustate(ARMCPU *cpu) if (ri->type & ARM_CP_NO_RAW) { continue; } - /* Write value and confirm it reads back as written + /* + * Write value and confirm it reads back as written * (to catch read-only registers and partially read-only * registers where the incoming migration value doesn't match) */ @@ -202,7 +198,7 @@ static void add_cpreg_to_list(gpointer key, gpointer opaque) uint32_t regidx = (uintptr_t)key; const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); - if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { + if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); /* The value array need not be initialized at this point */ cpu->cpreg_array_len++; @@ -216,7 +212,7 @@ static void count_cpreg(gpointer key, gpointer opaque) ri = g_hash_table_lookup(cpu->cp_regs, key); - if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { + if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { cpu->cpreg_array_len++; } } @@ -237,7 +233,8 @@ static gint cpreg_key_compare(gconstpointer a, gconstpointer b) void init_cpreg_list(ARMCPU *cpu) { - /* Initialise the cpreg_tuples[] array based on the cp_regs hash. + /* + * Initialise the cpreg_tuples[] array based on the cp_regs hash. * Note that we require cpreg_tuples[] to be sorted by key ID. 
*/ GList *keys; @@ -279,7 +276,8 @@ static CPAccessResult access_el3_aa32ns(CPUARMState *env, return CP_ACCESS_OK; } -/* Some secure-only AArch32 registers trap to EL3 if used from +/* + * Some secure-only AArch32 registers trap to EL3 if used from * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts). * Note that an access from Secure EL1 can only happen if EL3 is AArch64. * We assume that the .access field is set to PL1_RW. @@ -301,7 +299,8 @@ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, return CP_ACCESS_TRAP_UNCATEGORIZED; } -/* Check for traps to performance monitor registers, which are controlled +/* + * Check for traps to performance monitor registers, which are controlled * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. */ static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, @@ -399,7 +398,8 @@ static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) ARMCPU *cpu = env_archcpu(env); if (raw_read(env, ri) != value) { - /* Unlike real hardware the qemu TLB uses virtual addresses, + /* + * Unlike real hardware the qemu TLB uses virtual addresses, * not modified virtual addresses, so this causes a TLB flush. */ tlb_flush(CPU(cpu)); @@ -414,7 +414,8 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) && !extended_addresses_enabled(env)) { - /* For VMSA (when not using the LPAE long descriptor page table + /* + * For VMSA (when not using the LPAE long descriptor page table * format) this register includes the ASID, so do a TLB flush. * For PMSA it is purely a process ID and no action is needed. */ @@ -606,7 +607,8 @@ static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, } static const ARMCPRegInfo cp_reginfo[] = { - /* Define the secure and non-secure FCSE identifier CP registers + /* + * Define the secure and non-secure FCSE identifier CP registers * separately because there is no secure bank in V8 (no _EL3). This allows * the secure register to be properly reset and migrated. There is also no * v8 EL1 version of the register so the non-secure instance stands alone. @@ -621,7 +623,8 @@ static const ARMCPRegInfo cp_reginfo[] = { .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s), .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, - /* Define the secure and non-secure context identifier CP registers + /* + * Define the secure and non-secure context identifier CP registers * separately because there is no secure bank in V8 (no _EL3). This allows * the secure register to be properly reset and migrated. In the * non-secure case, the 32-bit register will have reset and migration @@ -642,7 +645,8 @@ static const ARMCPRegInfo cp_reginfo[] = { }; static const ARMCPRegInfo not_v8_cp_reginfo[] = { - /* NB: Some of these registers exist in v8 but with more precise + /* + * NB: Some of these registers exist in v8 but with more precise * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). */ /* MMU Domain access control / MPU write buffer control */ @@ -652,7 +656,8 @@ static const ARMCPRegInfo not_v8_cp_reginfo[] = { .writefn = dacr_write, .raw_writefn = raw_write, .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), offsetoflow32(CPUARMState, cp15.dacr_ns) } }, - /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs. + /* + * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs. 
* For v6 and v5, these mappings are overly broad. */ { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0, @@ -670,7 +675,8 @@ static const ARMCPRegInfo not_v8_cp_reginfo[] = { }; static const ARMCPRegInfo not_v6_cp_reginfo[] = { - /* Not all pre-v6 cores implemented this WFI, so this is slightly + /* + * Not all pre-v6 cores implemented this WFI, so this is slightly * over-broad. */ { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, @@ -678,12 +684,14 @@ static const ARMCPRegInfo not_v6_cp_reginfo[] = { }; static const ARMCPRegInfo not_v7_cp_reginfo[] = { - /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which + /* + * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which * is UNPREDICTABLE; we choose to NOP as most implementations do). */ { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, .access = PL1_W, .type = ARM_CP_WFI }, - /* L1 cache lockdown. Not architectural in v6 and earlier but in practice + /* + * L1 cache lockdown. Not architectural in v6 and earlier but in practice * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and * OMAPCP will override this space. */ @@ -697,14 +705,16 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = { { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, .resetvalue = 0 }, - /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; + /* + * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; * implementing it as RAZ means the "debug architecture version" bits * will read as a reserved value, which should cause Linux to not try * to use the debug hardware. */ { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, - /* MMU TLB control. Note that the wildcarding means we cover not just + /* + * MMU TLB control. Note that the wildcarding means we cover not just * the unified TLB ops but also the dside/iside/inner-shareable variants. */ { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, @@ -732,7 +742,8 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, /* In ARMv8 most bits of CPACR_EL1 are RES0. */ if (!arm_feature(env, ARM_FEATURE_V8)) { - /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. + /* + * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. */ @@ -748,7 +759,8 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, value |= R_CPACR_ASEDIS_MASK; } - /* VFPv3 and upwards with NEON implement 32 double precision + /* + * VFPv3 and upwards with NEON implement 32 double precision * registers (D0-D31). */ if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) { @@ -790,7 +802,8 @@ static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri) static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri) { - /* Call cpacr_write() so that we reset with the correct RAO bits set + /* + * Call cpacr_write() so that we reset with the correct RAO bits set * for our CPU features. 
*/ cpacr_write(env, ri, 0); @@ -831,7 +844,8 @@ static const ARMCPRegInfo v6_cp_reginfo[] = { { .name = "MVA_prefetch", .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NOP }, - /* We need to break the TB after ISB to execute self-modifying code + /* + * We need to break the TB after ISB to execute self-modifying code * correctly and also to take any pending interrupts immediately. * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag. */ @@ -846,7 +860,8 @@ static const ARMCPRegInfo v6_cp_reginfo[] = { .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s), offsetof(CPUARMState, cp15.ifar_ns) }, .resetvalue = 0, }, - /* Watchpoint Fault Address Register : should actually only be present + /* + * Watchpoint Fault Address Register : should actually only be present * for 1136, 1176, 11MPCore. */ { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, @@ -1051,7 +1066,8 @@ static bool event_supported(uint16_t number) static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { - /* Performance monitor registers user accessibility is controlled + /* + * Performance monitor registers user accessibility is controlled * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable * trapping to EL2 or EL3 for other accesses. */ @@ -1139,7 +1155,8 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env, (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP) #define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD) -/* Returns true if the counter (pass 31 for PMCCNTR) should count events using +/* + * Returns true if the counter (pass 31 for PMCCNTR) should count events using * the current EL, security state, and register configuration. */ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) @@ -1503,7 +1520,8 @@ static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and + /* + * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are * accessed. @@ -1614,7 +1632,8 @@ static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK; pmevcntr_op_finish(env, counter); } - /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when + /* + * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when * PMSELR value is equal to or greater than the number of implemented * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. */ @@ -1715,8 +1734,10 @@ static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri, } return ret; } else { - /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR - * are CONSTRAINED UNPREDICTABLE. */ + /* + * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR + * are CONSTRAINED UNPREDICTABLE. 
+ */ return 0; } } @@ -1791,7 +1812,8 @@ static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - /* Note that even though the AArch64 view of this register has bits + /* + * Note that even though the AArch64 view of this register has bits * [10:0] all RES0 we can only mask the bottom 5, to comply with the * architectural requirements for bits which are RES0 only in some * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 @@ -1854,7 +1876,8 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) if (!arm_feature(env, ARM_FEATURE_EL2)) { valid_mask &= ~SCR_HCE; - /* On ARMv7, SMD (or SCD as it is called in v7) is only + /* + * On ARMv7, SMD (or SCD as it is called in v7) is only * supported if EL2 exists. The bit is UNK/SBZP when * EL2 is unavailable. In QEMU ARMv7, we force it to always zero * when EL2 is unavailable. @@ -1911,7 +1934,8 @@ static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); - /* Acquire the CSSELR index from the bank corresponding to the CCSIDR + /* + * Acquire the CSSELR index from the bank corresponding to the CCSIDR * bank */ uint32_t index = A32_BANKED_REG_GET(env, csselr, @@ -1986,7 +2010,8 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, .access = PL1_W, .type = ARM_CP_NOP }, - /* Performance monitors are implementation defined in v7, + /* + * Performance monitors are implementation defined in v7, * but with an ARM recommended set of registers, which we * follow. * @@ -2140,7 +2165,8 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .writefn = csselr_write, .resetvalue = 0, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), offsetof(CPUARMState, cp15.csselr_ns) } }, - /* Auxiliary ID register: this actually has an IMPDEF value but for now + /* + * Auxiliary ID register: this actually has an IMPDEF value but for now * just RAZ for all cores: */ { .name = "AIDR", .state = ARM_CP_STATE_BOTH, @@ -2148,7 +2174,8 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid1, .resetvalue = 0 }, - /* Auxiliary fault status registers: these also are IMPDEF, and we + /* + * Auxiliary fault status registers: these also are IMPDEF, and we * choose to RAZ/WI for all cores. */ { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, @@ -2159,7 +2186,8 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_CONST, .resetvalue = 0 }, - /* MAIR can just read-as-written because we don't implement caches + /* + * MAIR can just read-as-written because we don't implement caches * and so don't need to care about memory attributes. */ { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, @@ -2171,10 +2199,12 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), .resetvalue = 0 }, - /* For non-long-descriptor page tables these are PRRR and NMRR; + /* + * For non-long-descriptor page tables these are PRRR and NMRR; * regardless they still act as reads-as-written for QEMU. 
*/ - /* MAIR0/1 are defined separately from their 64-bit counterpart which + /* + * MAIR0/1 are defined separately from their 64-bit counterpart which * allows them to assign the correct fieldoffset based on the endianness * handled in the field definitions. */ @@ -2313,11 +2343,11 @@ static const ARMCPRegInfo v6k_cp_reginfo[] = { .resetfn = arm_cp_reset_ignore }, { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, - .access = PL0_R|PL1_W, + .access = PL0_R | PL1_W, .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), .resetvalue = 0}, { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, - .access = PL0_R|PL1_W, + .access = PL0_R | PL1_W, .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, .resetfn = arm_cp_reset_ignore }, @@ -2337,7 +2367,8 @@ static const ARMCPRegInfo v6k_cp_reginfo[] = { static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { - /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. + /* + * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. * Writable only at the highest implemented exception level. */ int el = arm_current_el(env); @@ -2496,7 +2527,8 @@ static CPAccessResult gt_stimer_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { - /* The AArch64 register view of the secure physical timer is + /* + * The AArch64 register view of the secure physical timer is * always accessible from EL3, and configurably accessible from * Secure EL1. */ @@ -2531,7 +2563,8 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx) ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; if (gt->ctl & 1) { - /* Timer enabled: calculate and set current ISTATUS, irq, and + /* + * Timer enabled: calculate and set current ISTATUS, irq, and * reset timer to when ISTATUS next has to change */ uint64_t offset = timeridx == GTIMER_VIRT ? @@ -2554,7 +2587,8 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx) /* Next transition is when we hit cval */ nexttick = gt->cval + offset; } - /* Note that the desired next expiry time might be beyond the + /* + * Note that the desired next expiry time might be beyond the * signed-64-bit range of a QEMUTimer -- in this case we just * set the timer for as far in the future as possible. When the * timer expires we will reset the timer for any remaining period. @@ -2671,7 +2705,8 @@ static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, /* Enable toggled */ gt_recalc_timer(cpu, timeridx); } else if ((oldval ^ value) & 2) { - /* IMASK toggled: don't need to recalculate, + /* + * IMASK toggled: don't need to recalculate, * just set the interrupt line based on ISTATUS */ int irqstate = (oldval & 4) && !(value & 2); @@ -2982,7 +3017,8 @@ static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) } static const ARMCPRegInfo generic_timer_cp_reginfo[] = { - /* Note that CNTFRQ is purely reads-as-written for the benefit + /* + * Note that CNTFRQ is purely reads-as-written for the benefit * of software; writing it doesn't actually change the timer frequency. * Our reset value matches the fixed frequency we implement the timer at. 
*/ @@ -3145,7 +3181,8 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, }, - /* Secure timer -- this is actually restricted to only EL3 + /* + * Secure timer -- this is actually restricted to only EL3 * and configurably Secure-EL1 via the accessfn. */ { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, @@ -3184,7 +3221,8 @@ static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, #else -/* In user-mode most of the generic timer registers are inaccessible +/* + * In user-mode most of the generic timer registers are inaccessible * however modern kernels (4.12+) allow access to cntvct_el0 */ @@ -3192,7 +3230,8 @@ static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); - /* Currently we have no support for QEMUTimer in linux-user so we + /* + * Currently we have no support for QEMUTimer in linux-user so we * can't call gt_get_countervalue(env), instead we directly * call the lower level functions. */ @@ -3233,7 +3272,8 @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (ri->opc2 & 4) { - /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in + /* + * The ATS12NSO* operations must trap to EL3 or EL2 if executed in * Secure EL1 (which can only happen if EL3 is AArch64). * They are simply UNDEF if executed from NS EL1. * They function normally from EL2 or EL3. @@ -3394,7 +3434,8 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, } } } else { - /* fsr is a DFSR/IFSR value for the short descriptor + /* + * fsr is a DFSR/IFSR value for the short descriptor * translation table format (with WnR always clear). * Convert it to a 32-bit PAR. */ @@ -3682,8 +3723,225 @@ static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, raw_write(env, ri, value); } +static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + + tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ + env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value; +} + +static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]]; +} + +static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + + tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ + env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value; +} + +static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]]; +} + +static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + + /* + * Ignore writes that would select not implemented region. + * This is architecturally UNPREDICTABLE. + */ + if (value >= cpu->pmsav7_dregion) { + return; + } + + env->pmsav7.rnr[M_REG_NS] = value; +} + +static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + + tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! 
*/ + env->pmsav8.hprbar[env->pmsav8.hprselr] = value; +} + +static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return env->pmsav8.hprbar[env->pmsav8.hprselr]; +} + +static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + + tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ + env->pmsav8.hprlar[env->pmsav8.hprselr] = value; +} + +static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return env->pmsav8.hprlar[env->pmsav8.hprselr]; +} + +static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint32_t n; + uint32_t bit; + ARMCPU *cpu = env_archcpu(env); + + /* Ignore writes to unimplemented regions */ + int rmax = MIN(cpu->pmsav8r_hdregion, 32); + value &= MAKE_64BIT_MASK(0, rmax); + + tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ + + /* Register alias is only valid for first 32 indexes */ + for (n = 0; n < rmax; ++n) { + bit = extract32(value, n, 1); + env->pmsav8.hprlar[n] = deposit32( + env->pmsav8.hprlar[n], 0, 1, bit); + } +} + +static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint32_t n; + uint32_t result = 0x0; + ARMCPU *cpu = env_archcpu(env); + + /* Register alias is only valid for first 32 indexes */ + for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) { + if (env->pmsav8.hprlar[n] & 0x1) { + result |= (0x1 << n); + } + } + return result; +} + +static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + + /* + * Ignore writes that would select not implemented region. + * This is architecturally UNPREDICTABLE. + */ + if (value >= cpu->pmsav8r_hdregion) { + return; + } + + env->pmsav8.hprselr = value; +} + +static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + uint8_t index = (extract32(ri->opc0, 0, 1) << 4) | + (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1); + + tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! 
*/ + + if (ri->opc1 & 4) { + if (index >= cpu->pmsav8r_hdregion) { + return; + } + if (ri->opc2 & 0x1) { + env->pmsav8.hprlar[index] = value; + } else { + env->pmsav8.hprbar[index] = value; + } + } else { + if (index >= cpu->pmsav7_dregion) { + return; + } + if (ri->opc2 & 0x1) { + env->pmsav8.rlar[M_REG_NS][index] = value; + } else { + env->pmsav8.rbar[M_REG_NS][index] = value; + } + } +} + +static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + ARMCPU *cpu = env_archcpu(env); + uint8_t index = (extract32(ri->opc0, 0, 1) << 4) | + (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1); + + if (ri->opc1 & 4) { + if (index >= cpu->pmsav8r_hdregion) { + return 0x0; + } + if (ri->opc2 & 0x1) { + return env->pmsav8.hprlar[index]; + } else { + return env->pmsav8.hprbar[index]; + } + } else { + if (index >= cpu->pmsav7_dregion) { + return 0x0; + } + if (ri->opc2 & 0x1) { + return env->pmsav8.rlar[M_REG_NS][index]; + } else { + return env->pmsav8.rbar[M_REG_NS][index]; + } + } +} + +static const ARMCPRegInfo pmsav8r_cp_reginfo[] = { + { .name = "PRBAR", + .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_NO_RAW, + .accessfn = access_tvm_trvm, + .readfn = prbar_read, .writefn = prbar_write }, + { .name = "PRLAR", + .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_NO_RAW, + .accessfn = access_tvm_trvm, + .readfn = prlar_read, .writefn = prlar_write }, + { .name = "PRSELR", .resetvalue = 0, + .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .writefn = prselr_write, + .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) }, + { .name = "HPRBAR", .resetvalue = 0, + .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_NO_RAW, + .readfn = hprbar_read, .writefn = hprbar_write }, + { .name = "HPRLAR", + .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1, + .access = PL2_RW, .type = ARM_CP_NO_RAW, + .readfn = hprlar_read, .writefn = hprlar_write }, + { .name = "HPRSELR", .resetvalue = 0, + .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1, + .access = PL2_RW, + .writefn = hprselr_write, + .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) }, + { .name = "HPRENR", + .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1, + .access = PL2_RW, .type = ARM_CP_NO_RAW, + .readfn = hprenr_read, .writefn = hprenr_write }, +}; + static const ARMCPRegInfo pmsav7_cp_reginfo[] = { - /* Reset for all these registers is handled in arm_cpu_reset(), + /* + * Reset for all these registers is handled in arm_cpu_reset(), * because the PMSAv7 is also used by M-profile CPUs, which do * not register cpregs but still need the state to be reset. */ @@ -3784,7 +4042,8 @@ static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, } if (arm_feature(env, ARM_FEATURE_LPAE)) { - /* With LPAE the TTBCR could result in a change of ASID + /* + * With LPAE the TTBCR could result in a change of ASID * via the TTBCR.A1 bit, so do a TLB flush. */ tlb_flush(CPU(cpu)); @@ -3901,7 +4160,8 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = { offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, }; -/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing +/* + * Note that unlike TTBCR, writing to TTBCR2 does not require flushing * qemu tlbs nor adjusting cached masks. 
*/ static const ARMCPRegInfo ttbcr2_reginfo = { @@ -3939,7 +4199,8 @@ static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - /* On OMAP there are registers indicating the max/min index of dcache lines + /* + * On OMAP there are registers indicating the max/min index of dcache lines * containing a dirty line; cache flush operations have to reset these. */ env->cp15.c15_i_max = 0x000; @@ -3971,7 +4232,8 @@ static const ARMCPRegInfo omap_cp_reginfo[] = { .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NO_RAW, .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, - /* TODO: Peripheral port remap register: + /* + * TODO: Peripheral port remap register: * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), * when MMU is off. @@ -4000,7 +4262,8 @@ static const ARMCPRegInfo xscale_cp_reginfo[] = { .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), .resetvalue = 0, }, - /* XScale specific cache-lockdown: since we have no cache we NOP these + /* + * XScale specific cache-lockdown: since we have no cache we NOP these * and hope the guest does not really rely on cache behaviour. */ { .name = "XSCALE_LOCK_ICACHE_LINE", @@ -4018,7 +4281,8 @@ static const ARMCPRegInfo xscale_cp_reginfo[] = { }; static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { - /* RAZ/WI the whole crn=15 space, when we don't have a more specific + /* + * RAZ/WI the whole crn=15 space, when we don't have a more specific * implementation of this implementation-defined space. * Ideally this should eventually disappear in favour of actually * implementing the correct behaviour for all cores. @@ -4044,21 +4308,22 @@ static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { .resetvalue = 0 }, /* The cache ops themselves: these all NOP for QEMU */ { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, - .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, - .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, - .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, - .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, - .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, - .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, }; static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { - /* The cache test-and-clean instructions always return (1 << 30) + /* + * The cache test-and-clean instructions always return (1 << 30) * to indicate that there are no dirty cache lines. 
*/ { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, @@ -4094,7 +4359,8 @@ static uint64_t mpidr_read_val(CPUARMState *env) if (arm_feature(env, ARM_FEATURE_V7MP)) { mpidr |= (1U << 31); - /* Cores which are uniprocessor (non-coherent) + /* + * Cores which are uniprocessor (non-coherent) * but still implement the MP extensions set * bit 30. (For instance, Cortex-R5). */ @@ -4306,7 +4572,8 @@ static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri, return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU); } -/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions +/* + * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions * Page D4-1736 (DDI0487A.b) */ @@ -4439,7 +4706,8 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - /* Invalidate by VA, EL2 + /* + * Invalidate by VA, EL2 * Currently handles both VAE2 and VALE2, since we don't support * flush-last-level-only. */ @@ -4453,7 +4721,8 @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - /* Invalidate by VA, EL3 + /* + * Invalidate by VA, EL3 * Currently handles both VAE3 and VALE3, since we don't support * flush-last-level-only. */ @@ -4478,7 +4747,8 @@ static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - /* Invalidate by VA, EL1&0 (AArch64 version). + /* + * Invalidate by VA, EL1&0 (AArch64 version). * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, * since we don't support flush-for-specific-ASID-only or * flush-last-level-only. @@ -4799,7 +5069,8 @@ static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (!(env->pstate & PSTATE_SP)) { - /* Access to SP_EL0 is undefined if it's being used as + /* + * Access to SP_EL0 is undefined if it's being used as * the stack pointer. */ return CP_ACCESS_TRAP_UNCATEGORIZED; @@ -4839,7 +5110,8 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, } if (raw_read(env, ri) == value) { - /* Skip the TLB flush if nothing actually changed; Linux likes + /* + * Skip the TLB flush if nothing actually changed; Linux likes * to do a lot of pointless SCTLR writes. */ return; @@ -4907,7 +5179,8 @@ static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, } static const ARMCPRegInfo v8_cp_reginfo[] = { - /* Minimal set of EL0-visible registers. This will need to be expanded + /* + * Minimal set of EL0-visible registers. This will need to be expanded * significantly for system emulation of AArch64 CPUs. */ { .name = "NZCV", .state = ARM_CP_STATE_AA64, @@ -5190,7 +5463,8 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, - /* We rely on the access checks not allowing the guest to write to the + /* + * We rely on the access checks not allowing the guest to write to the * state field when SPSel indicates that it's being used as the stack * pointer. 
*/ @@ -5268,7 +5542,8 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) if (arm_feature(env, ARM_FEATURE_EL3)) { valid_mask &= ~HCR_HCD; } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { - /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. + /* + * Architecturally HCR.TSC is RES0 if EL3 is not implemented. * However, if we're using the SMC PSCI conduit then QEMU is * effectively acting like EL3 firmware and so the guest at * EL2 should retain the ability to prevent EL1 from being @@ -5698,7 +5973,8 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = tlbi_aa64_vae2is_write }, #ifndef CONFIG_USER_ONLY - /* Unlike the other EL2-related AT operations, these must + /* + * Unlike the other EL2-related AT operations, these must * UNDEF from EL3 if EL2 is not implemented, which is why we * define them here rather than with the rest of the AT ops. */ @@ -5712,7 +5988,8 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .access = PL2_W, .accessfn = at_s1e2_access, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF, .writefn = ats_write64 }, - /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE + /* + * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose * to behave as if SCR.NS was 1. @@ -5725,7 +6002,8 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, - /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the + /* + * ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the * reset values as IMPDEF. We choose to reset to 3 to comply with * both ARMv7 and ARMv8. */ @@ -5808,7 +6086,8 @@ static const ARMCPRegInfo el2_sec_cp_reginfo[] = { static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { - /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. + /* + * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. * At Secure EL1 it traps to EL3 or EL2. */ if (arm_current_el(env) == 3) { @@ -6612,7 +6891,8 @@ static void define_pmu_regs(ARMCPU *cpu) } } -/* We don't know until after realize whether there's a GICv3 +/* + * We don't know until after realize whether there's a GICv3 * attached, and that is what registers the gicv3 sysregs. * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 * at runtime. @@ -6641,7 +6921,8 @@ static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) } #endif -/* Shared logic between LORID and the rest of the LOR* registers. +/* + * Shared logic between LORID and the rest of the LOR* registers. * Secure state exclusion has already been dealt with. */ static CPAccessResult access_lor_ns(CPUARMState *env, @@ -7468,7 +7749,8 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, cp_reginfo); if (!arm_feature(env, ARM_FEATURE_V8)) { - /* Must go early as it is full of wildcards that may be + /* + * Must go early as it is full of wildcards that may be * overridden by later definitions. 
*/ define_arm_cp_regs(cpu, not_v8_cp_reginfo); @@ -7482,7 +7764,8 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_pfr0 }, - /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know + /* + * ID_PFR1 is not a plain ARM_CP_CONST because we don't know * the value of the GIC field until after we define these regs. */ { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, @@ -7864,31 +8147,89 @@ void register_cp_regs_for_features(ARMCPU *cpu) #ifdef CONFIG_USER_ONLY static const ARMCPRegUserSpaceInfo v8_user_idregs[] = { { .name = "ID_AA64PFR0_EL1", - .exported_bits = 0x000f000f00ff0000, - .fixed_bits = 0x0000000000000011 }, + .exported_bits = R_ID_AA64PFR0_FP_MASK | + R_ID_AA64PFR0_ADVSIMD_MASK | + R_ID_AA64PFR0_SVE_MASK | + R_ID_AA64PFR0_DIT_MASK, + .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) | + (0x1u << R_ID_AA64PFR0_EL1_SHIFT) }, { .name = "ID_AA64PFR1_EL1", - .exported_bits = 0x00000000000000f0 }, + .exported_bits = R_ID_AA64PFR1_BT_MASK | + R_ID_AA64PFR1_SSBS_MASK | + R_ID_AA64PFR1_MTE_MASK | + R_ID_AA64PFR1_SME_MASK }, { .name = "ID_AA64PFR*_EL1_RESERVED", - .is_glob = true }, - { .name = "ID_AA64ZFR0_EL1" }, + .is_glob = true }, + { .name = "ID_AA64ZFR0_EL1", + .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK | + R_ID_AA64ZFR0_AES_MASK | + R_ID_AA64ZFR0_BITPERM_MASK | + R_ID_AA64ZFR0_BFLOAT16_MASK | + R_ID_AA64ZFR0_SHA3_MASK | + R_ID_AA64ZFR0_SM4_MASK | + R_ID_AA64ZFR0_I8MM_MASK | + R_ID_AA64ZFR0_F32MM_MASK | + R_ID_AA64ZFR0_F64MM_MASK }, + { .name = "ID_AA64SMFR0_EL1", + .exported_bits = R_ID_AA64SMFR0_F32F32_MASK | + R_ID_AA64SMFR0_B16F32_MASK | + R_ID_AA64SMFR0_F16F32_MASK | + R_ID_AA64SMFR0_I8I32_MASK | + R_ID_AA64SMFR0_F64F64_MASK | + R_ID_AA64SMFR0_I16I64_MASK | + R_ID_AA64SMFR0_FA64_MASK }, { .name = "ID_AA64MMFR0_EL1", - .fixed_bits = 0x00000000ff000000 }, - { .name = "ID_AA64MMFR1_EL1" }, + .exported_bits = R_ID_AA64MMFR0_ECV_MASK, + .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) | + (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) }, + { .name = "ID_AA64MMFR1_EL1", + .exported_bits = R_ID_AA64MMFR1_AFP_MASK }, + { .name = "ID_AA64MMFR2_EL1", + .exported_bits = R_ID_AA64MMFR2_AT_MASK }, { .name = "ID_AA64MMFR*_EL1_RESERVED", - .is_glob = true }, + .is_glob = true }, { .name = "ID_AA64DFR0_EL1", - .fixed_bits = 0x0000000000000006 }, - { .name = "ID_AA64DFR1_EL1" }, + .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) }, + { .name = "ID_AA64DFR1_EL1" }, { .name = "ID_AA64DFR*_EL1_RESERVED", - .is_glob = true }, + .is_glob = true }, { .name = "ID_AA64AFR*", - .is_glob = true }, + .is_glob = true }, { .name = "ID_AA64ISAR0_EL1", - .exported_bits = 0x00fffffff0fffff0 }, + .exported_bits = R_ID_AA64ISAR0_AES_MASK | + R_ID_AA64ISAR0_SHA1_MASK | + R_ID_AA64ISAR0_SHA2_MASK | + R_ID_AA64ISAR0_CRC32_MASK | + R_ID_AA64ISAR0_ATOMIC_MASK | + R_ID_AA64ISAR0_RDM_MASK | + R_ID_AA64ISAR0_SHA3_MASK | + R_ID_AA64ISAR0_SM3_MASK | + R_ID_AA64ISAR0_SM4_MASK | + R_ID_AA64ISAR0_DP_MASK | + R_ID_AA64ISAR0_FHM_MASK | + R_ID_AA64ISAR0_TS_MASK | + R_ID_AA64ISAR0_RNDR_MASK }, { .name = "ID_AA64ISAR1_EL1", - .exported_bits = 0x000000f0ffffffff }, + .exported_bits = R_ID_AA64ISAR1_DPB_MASK | + R_ID_AA64ISAR1_APA_MASK | + R_ID_AA64ISAR1_API_MASK | + R_ID_AA64ISAR1_JSCVT_MASK | + R_ID_AA64ISAR1_FCMA_MASK | + R_ID_AA64ISAR1_LRCPC_MASK | + R_ID_AA64ISAR1_GPA_MASK | + R_ID_AA64ISAR1_GPI_MASK | + R_ID_AA64ISAR1_FRINTTS_MASK | + R_ID_AA64ISAR1_SB_MASK | + R_ID_AA64ISAR1_BF16_MASK | + R_ID_AA64ISAR1_DGH_MASK | + 
R_ID_AA64ISAR1_I8MM_MASK }, + { .name = "ID_AA64ISAR2_EL1", + .exported_bits = R_ID_AA64ISAR2_WFXT_MASK | + R_ID_AA64ISAR2_RPRES_MASK | + R_ID_AA64ISAR2_GPA3_MASK | + R_ID_AA64ISAR2_APA3_MASK }, { .name = "ID_AA64ISAR*_EL1_RESERVED", - .is_glob = true }, + .is_glob = true }, }; modify_arm_cp_regs(v8_idregs, v8_user_idregs); #endif @@ -7896,7 +8237,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (!arm_feature(env, ARM_FEATURE_EL3) && !arm_feature(env, ARM_FEATURE_EL2)) { ARMCPRegInfo rvbar = { - .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, + .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, .access = PL1_R, .fieldoffset = offsetof(CPUARMState, cp15.rvbar), @@ -7987,13 +8328,20 @@ void register_cp_regs_for_features(ARMCPU *cpu) } /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ if (!arm_feature(env, ARM_FEATURE_EL3)) { - ARMCPRegInfo rvbar = { - .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, - .access = PL2_R, - .fieldoffset = offsetof(CPUARMState, cp15.rvbar), + ARMCPRegInfo rvbar[] = { + { + .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, + .access = PL2_R, + .fieldoffset = offsetof(CPUARMState, cp15.rvbar), + }, + { .name = "RVBAR", .type = ARM_CP_ALIAS, + .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, + .access = PL2_R, + .fieldoffset = offsetof(CPUARMState, cp15.rvbar), + }, }; - define_one_arm_cp_reg(cpu, &rvbar); + define_arm_cp_regs(cpu, rvbar); } } @@ -8016,7 +8364,8 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, el3_regs); } - /* The behaviour of NSACR is sufficiently various that we don't + /* + * The behaviour of NSACR is sufficiently various that we don't * try to describe it in a single reginfo: * if EL3 is 64 bit, then trap to EL3 from S EL1, * reads as constant 0xc00 from NS EL1 and NS EL2 @@ -8108,13 +8457,15 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (cpu_isar_feature(aa32_jazelle, cpu)) { define_arm_cp_regs(cpu, jazelle_regs); } - /* Slightly awkwardly, the OMAP and StrongARM cores need all of + /* + * Slightly awkwardly, the OMAP and StrongARM cores need all of * cp15 crn=0 to be writes-ignored, whereas for other cores they should * be read-only (ie write causes UNDEF exception). */ { ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { - /* Pre-v8 MIDR space. + /* + * Pre-v8 MIDR space. * Note that the MIDR isn't a simple constant register because * of the TI925 behaviour where writes to another register can * cause the MIDR value to change. 
@@ -8153,10 +8504,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), .readfn = midr_read }, - /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ - { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, - .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, - .access = PL1_R, .resetvalue = cpu->midr }, + /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */ { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, .access = PL1_R, .resetvalue = cpu->midr }, @@ -8166,6 +8514,11 @@ void register_cp_regs_for_features(ARMCPU *cpu) .accessfn = access_aa64_tid1, .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, }; + ARMCPRegInfo id_v8_midr_alias_cp_reginfo = { + .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, + .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, + .access = PL1_R, .resetvalue = cpu->midr + }; ARMCPRegInfo id_cp_reginfo[] = { /* These are common to v8 and pre-v8 */ { .name = "CTR", @@ -8198,6 +8551,13 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->pmsav7_dregion << 8 }; + /* HMPUIR is specific to PMSA V8 */ + ARMCPRegInfo id_hmpuir_reginfo = { + .name = "HMPUIR", + .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4, + .access = PL2_R, .type = ARM_CP_CONST, + .resetvalue = cpu->pmsav8r_hdregion + }; static const ARMCPRegInfo crn0_wi_reginfo = { .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, @@ -8206,15 +8566,20 @@ void register_cp_regs_for_features(ARMCPU *cpu) #ifdef CONFIG_USER_ONLY static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { { .name = "MIDR_EL1", - .exported_bits = 0x00000000ffffffff }, - { .name = "REVIDR_EL1" }, + .exported_bits = R_MIDR_EL1_REVISION_MASK | + R_MIDR_EL1_PARTNUM_MASK | + R_MIDR_EL1_ARCHITECTURE_MASK | + R_MIDR_EL1_VARIANT_MASK | + R_MIDR_EL1_IMPLEMENTER_MASK }, + { .name = "REVIDR_EL1" }, }; modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo); #endif if (arm_feature(env, ARM_FEATURE_OMAPCP) || arm_feature(env, ARM_FEATURE_STRONGARM)) { size_t i; - /* Register the blanket "writes ignored" value first to cover the + /* + * Register the blanket "writes ignored" value first to cover the * whole space. Then update the specific ID registers to allow write * access, so that they ignore writes rather than causing them to * UNDEF. 
@@ -8231,12 +8596,83 @@ void register_cp_regs_for_features(ARMCPU *cpu) } if (arm_feature(env, ARM_FEATURE_V8)) { define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); + if (!arm_feature(env, ARM_FEATURE_PMSA)) { + define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo); + } } else { define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); } define_arm_cp_regs(cpu, id_cp_reginfo); if (!arm_feature(env, ARM_FEATURE_PMSA)) { define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); + } else if (arm_feature(env, ARM_FEATURE_PMSA) && + arm_feature(env, ARM_FEATURE_V8)) { + uint32_t i = 0; + char *tmp_string; + + define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); + define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo); + define_arm_cp_regs(cpu, pmsav8r_cp_reginfo); + + /* Register alias is only valid for first 32 indexes */ + for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) { + uint8_t crm = 0b1000 | extract32(i, 1, 3); + uint8_t opc1 = extract32(i, 4, 1); + uint8_t opc2 = extract32(i, 0, 1) << 2; + + tmp_string = g_strdup_printf("PRBAR%u", i); + ARMCPRegInfo tmp_prbarn_reginfo = { + .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW, + .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, + .access = PL1_RW, .resetvalue = 0, + .accessfn = access_tvm_trvm, + .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read + }; + define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo); + g_free(tmp_string); + + opc2 = extract32(i, 0, 1) << 2 | 0x1; + tmp_string = g_strdup_printf("PRLAR%u", i); + ARMCPRegInfo tmp_prlarn_reginfo = { + .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW, + .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, + .access = PL1_RW, .resetvalue = 0, + .accessfn = access_tvm_trvm, + .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read + }; + define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo); + g_free(tmp_string); + } + + /* Register alias is only valid for first 32 indexes */ + for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) { + uint8_t crm = 0b1000 | extract32(i, 1, 3); + uint8_t opc1 = 0b100 | extract32(i, 4, 1); + uint8_t opc2 = extract32(i, 0, 1) << 2; + + tmp_string = g_strdup_printf("HPRBAR%u", i); + ARMCPRegInfo tmp_hprbarn_reginfo = { + .name = tmp_string, + .type = ARM_CP_NO_RAW, + .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, + .access = PL2_RW, .resetvalue = 0, + .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read + }; + define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo); + g_free(tmp_string); + + opc2 = extract32(i, 0, 1) << 2 | 0x1; + tmp_string = g_strdup_printf("HPRLAR%u", i); + ARMCPRegInfo tmp_hprlarn_reginfo = { + .name = tmp_string, + .type = ARM_CP_NO_RAW, + .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, + .access = PL2_RW, .resetvalue = 0, + .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read + }; + define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo); + g_free(tmp_string); + } } else if (arm_feature(env, ARM_FEATURE_V7)) { define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); } @@ -8314,7 +8750,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) ARMCPRegInfo cbar = { .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, - .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, + .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar, .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address) }; @@ -8351,13 +8787,25 @@ void register_cp_regs_for_features(ARMCPU *cpu) .raw_writefn = raw_write, }; if (arm_feature(env, ARM_FEATURE_XSCALE)) { - /* Normally we would always end the TB on an SCTLR write, but Linux + /* 
+ * Normally we would always end the TB on an SCTLR write, but Linux * arch/arm/mach-pxa/sleep.S expects two instructions following * an MMU enable to execute from cache. Imitate this behaviour. */ sctlr.type |= ARM_CP_SUPPRESS_TB_END; } define_one_arm_cp_reg(cpu, &sctlr); + + if (arm_feature(env, ARM_FEATURE_PMSA) && + arm_feature(env, ARM_FEATURE_V8)) { + ARMCPRegInfo vsctlr = { + .name = "VSCTLR", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, + .access = PL2_RW, .resetvalue = 0x0, + .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr), + }; + define_one_arm_cp_reg(cpu, &vsctlr); + } } if (cpu_isar_feature(aa64_lor, cpu)) { @@ -8746,7 +9194,8 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, const ARMCPRegInfo *r, void *opaque) { - /* Define implementations of coprocessor registers. + /* + * Define implementations of coprocessor registers. * We store these in a hashtable because typically * there are less than 150 registers in a space which * is 16*16*16*8*8 = 262144 in size. @@ -8813,7 +9262,8 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, default: g_assert_not_reached(); } - /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 + /* + * The AArch64 pseudocode CheckSystemAccess() specifies that op1 * encodes a minimum access level for the register. We roll this * runtime check into our general permission check code, so check * here that the reginfo's specified permissions are strict enough @@ -8855,7 +9305,8 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, assert((r->access & ~mask) == 0); } - /* Check that the register definition has enough info to handle + /* + * Check that the register definition has enough info to handle * reads and writes if they are permitted. */ if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) { @@ -8880,7 +9331,8 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, continue; } if (state == ARM_CP_STATE_AA32) { - /* Under AArch32 CP registers can be common + /* + * Under AArch32 CP registers can be common * (same for secure and non-secure world) or banked. */ char *name; @@ -8906,8 +9358,10 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, g_assert_not_reached(); } } else { - /* AArch64 registers get mapped to non-secure instance - * of AArch32 */ + /* + * AArch64 registers get mapped to non-secure instance + * of AArch32 + */ add_cpreg_to_hashtable(cpu, r, opaque, state, ARM_CP_SECSTATE_NS, crm, opc1, opc2, r->name); @@ -8993,7 +9447,8 @@ void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) { - /* Return true if it is not valid for us to switch to + /* + * Return true if it is not valid for us to switch to * this CPU mode (ie all the UNPREDICTABLE cases in * the ARM ARM CPSRWriteByInstr pseudocode). */ @@ -9014,10 +9469,12 @@ static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) case ARM_CPU_MODE_UND: case ARM_CPU_MODE_IRQ: case ARM_CPU_MODE_FIQ: - /* Note that we don't implement the IMPDEF NSACR.RFR which in v7 + /* + * Note that we don't implement the IMPDEF NSACR.RFR which in v7 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) */ - /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR + /* + * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR * and CPS are treated as illegal mode changes. 
*/ if (write_type == CPSRWriteByInstr && @@ -9059,10 +9516,12 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, env->CF = (val >> 29) & 1; env->VF = (val << 3) & 0x80000000; } - if (mask & CPSR_Q) + if (mask & CPSR_Q) { env->QF = ((val & CPSR_Q) != 0); - if (mask & CPSR_T) + } + if (mask & CPSR_T) { env->thumb = ((val & CPSR_T) != 0); + } if (mask & CPSR_IT_0_1) { env->condexec_bits &= ~3; env->condexec_bits |= (val >> 25) & 3; @@ -9075,7 +9534,8 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, env->GE = (val >> 16) & 0xf; } - /* In a V7 implementation that includes the security extensions but does + /* + * In a V7 implementation that includes the security extensions but does * not include Virtualization Extensions the SCR.FW and SCR.AW bits control * whether non-secure software is allowed to change the CPSR_F and CPSR_A * bits respectively. @@ -9091,7 +9551,8 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, changed_daif = (env->daif ^ val) & mask; if (changed_daif & CPSR_A) { - /* Check to see if we are allowed to change the masking of async + /* + * Check to see if we are allowed to change the masking of async * abort exceptions from a non-secure state. */ if (!(env->cp15.scr_el3 & SCR_AW)) { @@ -9103,7 +9564,8 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, } if (changed_daif & CPSR_F) { - /* Check to see if we are allowed to change the masking of FIQ + /* + * Check to see if we are allowed to change the masking of FIQ * exceptions from a non-secure state. */ if (!(env->cp15.scr_el3 & SCR_FW)) { @@ -9113,7 +9575,8 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, mask &= ~CPSR_F; } - /* Check whether non-maskable FIQ (NMFI) support is enabled. + /* + * Check whether non-maskable FIQ (NMFI) support is enabled. * If this bit is set software is not allowed to mask * FIQs, but is allowed to set CPSR_F to 0. */ @@ -9133,7 +9596,8 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, if (write_type != CPSRWriteRaw && ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { - /* Note that we can only get here in USR mode if this is a + /* + * Note that we can only get here in USR mode if this is a * gdb stub write; for this case we follow the architectural * behaviour for guest writes in USR mode of ignoring an attempt * to switch mode. 
(Those are caught by translate.c for writes @@ -9141,7 +9605,8 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, */ mask &= ~CPSR_M; } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { - /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in + /* + * Attempt to switch to an invalid mode: this is UNPREDICTABLE in * v7, and has defined behaviour in v8: * + leave CPSR.M untouched * + allow changes to the other CPSR fields @@ -9261,15 +9726,16 @@ static void switch_mode(CPUARMState *env, int mode) int i; old_mode = env->uncached_cpsr & CPSR_M; - if (mode == old_mode) + if (mode == old_mode) { return; + } if (old_mode == ARM_CPU_MODE_FIQ) { - memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); - memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); + memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); + memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); } else if (mode == ARM_CPU_MODE_FIQ) { - memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); - memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); + memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); + memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); } i = bank_number(old_mode); @@ -9284,7 +9750,8 @@ static void switch_mode(CPUARMState *env, int mode) env->regs[14] = env->banked_r14[r14_bank_number(mode)]; } -/* Physical Interrupt Target EL Lookup Table +/* + * Physical Interrupt Target EL Lookup Table * * [ From ARM ARM section G1.13.4 (Table G1-15) ] * @@ -9358,7 +9825,8 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, if (arm_feature(env, ARM_FEATURE_EL3)) { rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); } else { - /* Either EL2 is the highest EL (and so the EL2 register width + /* + * Either EL2 is the highest EL (and so the EL2 register width * is given by is64); or there is no EL2 or EL3, in which case * the value of 'rw' does not affect the table lookup anyway. */ @@ -9633,7 +10101,8 @@ void aarch64_sync_64_to_32(CPUARMState *env) env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; } - /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ + /* + * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ * mode, then we can copy to r8-r14. Otherwise, we copy to the * FIQ bank for r8-r14. */ @@ -9865,10 +10334,11 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs) new_mode = ARM_CPU_MODE_UND; addr = 0x04; mask = CPSR_I; - if (env->thumb) + if (env->thumb) { offset = 2; - else + } else { offset = 4; + } break; case EXCP_SWI: new_mode = ARM_CPU_MODE_SVC; @@ -9979,7 +10449,8 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs) /* High vectors. When enabled, base address cannot be remapped. */ addr += 0xffff0000; } else { - /* ARM v7 architectures provide a vector base address register to remap + /* + * ARM v7 architectures provide a vector base address register to remap * the interrupt vector table. * This register is only followed in non-monitor mode, and is banked. * Note: only bits 31:5 are valid. 
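An illustrative aside, not part of the patch above: the hunk ending here touches arm_cpu_do_interrupt_aarch32(), whose comments describe how the AArch32 exception vector address is chosen. A minimal plain-C sketch of that rule, using hypothetical names rather than the real QEMU fields (fixed high vectors at 0xffff0000 when enabled, otherwise a remappable base of which only bits [31:5] are valid):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: pick an AArch32 exception vector address.
     * 'offset' is the per-exception offset into the table (0x04 for
     * UNDEF, 0x08 for SWI, ...); 'vbar' stands in for the banked
     * vector base register mentioned in the comment above. */
    static uint32_t aarch32_vector_addr(bool high_vectors, uint32_t vbar,
                                        uint32_t offset)
    {
        if (high_vectors) {
            /* High vectors: the base address cannot be remapped. */
            return 0xffff0000u + offset;
        }
        /* Remapped table: only bits [31:5] of the base are valid. */
        return (vbar & 0xffffffe0u) + offset;
    }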
@@ -10113,7 +10584,8 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); if (cur_el < new_el) { - /* Entry vector offset depends on whether the implemented EL + /* + * Entry vector offset depends on whether the implemented EL * immediately lower than the target level is using AArch32 or AArch64 */ bool is_aa64; @@ -10314,7 +10786,8 @@ static void handle_semihosting(CPUState *cs) } #endif -/* Handle a CPU exception for A and R profile CPUs. +/* + * Handle a CPU exception for A and R profile CPUs. * Do any appropriate logging, handle PSCI calls, and then hand off * to the AArch64-entry or AArch32-entry function depending on the * target exception level's register width. @@ -10359,7 +10832,8 @@ void arm_cpu_do_interrupt(CPUState *cs) } #endif - /* Hooks may change global state so BQL should be held, also the + /* + * Hooks may change global state so BQL should be held, also the * BQL needs to be held for any modification of * cs->interrupt_request. */ @@ -10640,9 +11114,11 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, }; } -/* Note that signed overflow is undefined in C. The following routines are - careful to use unsigned types where modulo arithmetic is required. - Failure to do so _will_ break on newer gcc. */ +/* + * Note that signed overflow is undefined in C. The following routines are + * careful to use unsigned types where modulo arithmetic is required. + * Failure to do so _will_ break on newer gcc. + */ /* Signed saturating arithmetic. */ @@ -10653,10 +11129,11 @@ static inline uint16_t add16_sat(uint16_t a, uint16_t b) res = a + b; if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { - if (a & 0x8000) + if (a & 0x8000) { res = 0x8000; - else + } else { res = 0x7fff; + } } return res; } @@ -10668,10 +11145,11 @@ static inline uint8_t add8_sat(uint8_t a, uint8_t b) res = a + b; if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { - if (a & 0x80) + if (a & 0x80) { res = 0x80; - else + } else { res = 0x7f; + } } return res; } @@ -10683,10 +11161,11 @@ static inline uint16_t sub16_sat(uint16_t a, uint16_t b) res = a - b; if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { - if (a & 0x8000) + if (a & 0x8000) { res = 0x8000; - else + } else { res = 0x7fff; + } } return res; } @@ -10698,10 +11177,11 @@ static inline uint8_t sub8_sat(uint8_t a, uint8_t b) res = a - b; if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { - if (a & 0x80) + if (a & 0x80) { res = 0x80; - else + } else { res = 0x7f; + } } return res; } @@ -10719,34 +11199,38 @@ static inline uint16_t add16_usat(uint16_t a, uint16_t b) { uint16_t res; res = a + b; - if (res < a) + if (res < a) { res = 0xffff; + } return res; } static inline uint16_t sub16_usat(uint16_t a, uint16_t b) { - if (a > b) + if (a > b) { return a - b; - else + } else { return 0; + } } static inline uint8_t add8_usat(uint8_t a, uint8_t b) { uint8_t res; res = a + b; - if (res < a) + if (res < a) { res = 0xff; + } return res; } static inline uint8_t sub8_usat(uint8_t a, uint8_t b) { - if (a > b) + if (a > b) { return a - b; - else + } else { return 0; + } } #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); @@ -10764,7 +11248,7 @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b) RESULT(sum, n, 16); \ if (sum >= 0) \ ge |= 3 << (n * 2); \ - } while(0) + } while (0) #define SARITH8(a, b, n, op) do { \ int32_t sum; \ @@ -10772,7 +11256,7 @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b) RESULT(sum, n, 8); \ if (sum >= 0) \ ge |= 1 << n; \ - } while(0) + } while (0) #define 
ADD16(a, b, n) SARITH16(a, b, n, +) @@ -10791,7 +11275,7 @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b) RESULT(sum, n, 16); \ if ((sum >> 16) == 1) \ ge |= 3 << (n * 2); \ - } while(0) + } while (0) #define ADD8(a, b, n) do { \ uint32_t sum; \ @@ -10799,7 +11283,7 @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b) RESULT(sum, n, 8); \ if ((sum >> 8) == 1) \ ge |= 1 << n; \ - } while(0) + } while (0) #define SUB16(a, b, n) do { \ uint32_t sum; \ @@ -10807,7 +11291,7 @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b) RESULT(sum, n, 16); \ if ((sum >> 16) == 0) \ ge |= 3 << (n * 2); \ - } while(0) + } while (0) #define SUB8(a, b, n) do { \ uint32_t sum; \ @@ -10815,7 +11299,7 @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b) RESULT(sum, n, 8); \ if ((sum >> 8) == 0) \ ge |= 1 << n; \ - } while(0) + } while (0) #define PFX u #define ARITH_GE @@ -10850,10 +11334,11 @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b) static inline uint8_t do_usad(uint8_t a, uint8_t b) { - if (a > b) + if (a > b) { return a - b; - else + } else { return b - a; + } } /* Unsigned sum of absolute byte differences. */ @@ -10873,18 +11358,23 @@ uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) uint32_t mask; mask = 0; - if (flags & 1) + if (flags & 1) { mask |= 0xff; - if (flags & 2) + } + if (flags & 2) { mask |= 0xff00; - if (flags & 4) + } + if (flags & 4) { mask |= 0xff0000; - if (flags & 8) + } + if (flags & 8) { mask |= 0xff000000; + } return (a & mask) | (b & ~mask); } -/* CRC helpers. +/* + * CRC helpers. * The upper bytes of val (above the number specified by 'bytes') must have * been zeroed out by the caller. */ @@ -10908,7 +11398,8 @@ uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) return crc32c(acc, buf, bytes) ^ 0xffffffff; } -/* Return the exception level to which FP-disabled exceptions should +/* + * Return the exception level to which FP-disabled exceptions should * be taken, or 0 if FP is enabled. */ int fp_exception_el(CPUARMState *env, int cur_el) @@ -10916,7 +11407,8 @@ int fp_exception_el(CPUARMState *env, int cur_el) #ifndef CONFIG_USER_ONLY uint64_t hcr_el2; - /* CPACR and the CPTR registers don't exist before v6, so FP is + /* + * CPACR and the CPTR registers don't exist before v6, so FP is * always accessible */ if (!arm_feature(env, ARM_FEATURE_V6)) { @@ -10941,7 +11433,8 @@ int fp_exception_el(CPUARMState *env, int cur_el) hcr_el2 = arm_hcr_el2_eff(env); - /* The CPACR controls traps to EL1, or PL1 if we're 32 bit: + /* + * The CPACR controls traps to EL1, or PL1 if we're 32 bit: * 0, 2 : trap EL0 and EL1/PL1 accesses * 1 : trap only EL0 accesses * 3 : trap no accesses diff --git a/target/arm/internals.h b/target/arm/internals.h index 161e42d50f..d9555309df 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -257,6 +257,10 @@ unsigned int arm_pamax(ARMCPU *cpu); static inline bool extended_addresses_enabled(CPUARMState *env) { uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 
3 : 1]; + if (arm_feature(env, ARM_FEATURE_PMSA) && + arm_feature(env, ARM_FEATURE_V8)) { + return true; + } return arm_el_is_aa64(env, 1) || (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE)); } diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c index 355cd4d60a..033a4d9261 100644 --- a/target/arm/m_helper.c +++ b/target/arm/m_helper.c @@ -7,30 +7,14 @@ */ #include "qemu/osdep.h" -#include "qemu/units.h" -#include "target/arm/idau.h" -#include "trace.h" #include "cpu.h" #include "internals.h" -#include "exec/gdbstub.h" #include "exec/helper-proto.h" -#include "qemu/host-utils.h" #include "qemu/main-loop.h" #include "qemu/bitops.h" -#include "qemu/crc32c.h" -#include "qemu/qemu-print.h" #include "qemu/log.h" #include "exec/exec-all.h" -#include <zlib.h> /* For crc32 */ -#include "semihosting/semihost.h" -#include "sysemu/cpus.h" -#include "sysemu/kvm.h" -#include "qemu/range.h" -#include "qapi/qapi-commands-machine-target.h" -#include "qapi/error.h" -#include "qemu/guest-random.h" #ifdef CONFIG_TCG -#include "arm_ldst.h" #include "exec/cpu_ldst.h" #include "semihosting/common-semi.h" #endif diff --git a/target/arm/machine.c b/target/arm/machine.c index 54c5c62433..5f26152652 100644 --- a/target/arm/machine.c +++ b/target/arm/machine.c @@ -487,6 +487,30 @@ static bool pmsav8_needed(void *opaque) arm_feature(env, ARM_FEATURE_V8); } +static bool pmsav8r_needed(void *opaque) +{ + ARMCPU *cpu = opaque; + CPUARMState *env = &cpu->env; + + return arm_feature(env, ARM_FEATURE_PMSA) && + arm_feature(env, ARM_FEATURE_V8) && + !arm_feature(env, ARM_FEATURE_M); +} + +static const VMStateDescription vmstate_pmsav8r = { + .name = "cpu/pmsav8/pmsav8r", + .version_id = 1, + .minimum_version_id = 1, + .needed = pmsav8r_needed, + .fields = (VMStateField[]) { + VMSTATE_VARRAY_UINT32(env.pmsav8.hprbar, ARMCPU, + pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(env.pmsav8.hprlar, ARMCPU, + pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t), + VMSTATE_END_OF_LIST() + }, +}; + static const VMStateDescription vmstate_pmsav8 = { .name = "cpu/pmsav8", .version_id = 1, @@ -500,6 +524,10 @@ static const VMStateDescription vmstate_pmsav8 = { VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU), VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU), VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_pmsav8r, + NULL } }; diff --git a/target/arm/ptw.c b/target/arm/ptw.c index f812734bfb..4bda0590c7 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -1758,9 +1758,13 @@ static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx, if (arm_feature(env, ARM_FEATURE_M)) { return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; - } else { - return regime_sctlr(env, mmu_idx) & SCTLR_BR; } + + if (mmu_idx == ARMMMUIdx_Stage2) { + return false; + } + + return regime_sctlr(env, mmu_idx) & SCTLR_BR; } static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, @@ -1952,6 +1956,26 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, return !(result->f.prot & (1 << access_type)); } +static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx, + uint32_t secure) +{ + if (regime_el(env, mmu_idx) == 2) { + return env->pmsav8.hprbar; + } else { + return env->pmsav8.rbar[secure]; + } +} + +static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx, + uint32_t secure) +{ + if (regime_el(env, mmu_idx) == 2) { + return env->pmsav8.hprlar; + } else { + return env->pmsav8.rlar[secure]; + } +} + bool 
pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, bool secure, GetPhysAddrResult *result, @@ -1974,6 +1998,13 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, bool hit = false; uint32_t addr_page_base = address & TARGET_PAGE_MASK; uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); + int region_counter; + + if (regime_el(env, mmu_idx) == 2) { + region_counter = cpu->pmsav8r_hdregion; + } else { + region_counter = cpu->pmsav7_dregion; + } result->f.lg_page_size = TARGET_PAGE_BITS; result->f.phys_addr = address; @@ -1982,6 +2013,10 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, *mregion = -1; } + if (mmu_idx == ARMMMUIdx_Stage2) { + fi->stage2 = true; + } + /* * Unlike the ARM ARM pseudocode, we don't need to check whether this * was an exception vector read from the vector table (which is always @@ -1998,17 +2033,26 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, hit = true; } - for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { + uint32_t bitmask; + if (arm_feature(env, ARM_FEATURE_M)) { + bitmask = 0x1f; + } else { + bitmask = 0x3f; + fi->level = 0; + } + + for (n = region_counter - 1; n >= 0; n--) { /* region search */ /* - * Note that the base address is bits [31:5] from the register - * with bits [4:0] all zeroes, but the limit address is bits - * [31:5] from the register with bits [4:0] all ones. + * Note that the base address is bits [31:x] from the register + * with bits [x-1:0] all zeroes, but the limit address is bits + * [31:x] from the register with bits [x:0] all ones. Where x is + * 5 for Cortex-M and 6 for Cortex-R */ - uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; - uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; + uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask; + uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask; - if (!(env->pmsav8.rlar[secure][n] & 0x1)) { + if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) { /* Region disabled */ continue; } @@ -2042,7 +2086,9 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, * PMSAv7 where highest-numbered-region wins) */ fi->type = ARMFault_Permission; - fi->level = 1; + if (arm_feature(env, ARM_FEATURE_M)) { + fi->level = 1; + } return true; } @@ -2052,8 +2098,11 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, } if (!hit) { - /* background fault */ - fi->type = ARMFault_Background; + if (arm_feature(env, ARM_FEATURE_M)) { + fi->type = ARMFault_Background; + } else { + fi->type = ARMFault_Permission; + } return true; } @@ -2061,12 +2110,14 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, /* hit using the background region */ get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot); } else { - uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); - uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); + uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion]; + uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion]; + uint32_t ap = extract32(matched_rbar, 1, 2); + uint32_t xn = extract32(matched_rbar, 0, 1); bool pxn = false; if (arm_feature(env, ARM_FEATURE_V8_1M)) { - pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1); + pxn = extract32(matched_rlar, 4, 1); } if (m_is_system_region(env, address)) { @@ -2074,21 +2125,46 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, xn = 1; } - result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap); + if 
(regime_el(env, mmu_idx) == 2) { + result->f.prot = simple_ap_to_rw_prot_is_user(ap, + mmu_idx != ARMMMUIdx_E2); + } else { + result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap); + } + + if (!arm_feature(env, ARM_FEATURE_M)) { + uint8_t attrindx = extract32(matched_rlar, 1, 3); + uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; + uint8_t sh = extract32(matched_rlar, 3, 2); + + if (regime_sctlr(env, mmu_idx) & SCTLR_WXN && + result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) { + xn = 0x1; + } + + if ((regime_el(env, mmu_idx) == 1) && + regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) { + pxn = 0x1; + } + + result->cacheattrs.is_s2_format = false; + result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); + result->cacheattrs.shareability = sh; + } + if (result->f.prot && !xn && !(pxn && !is_user)) { result->f.prot |= PAGE_EXEC; } - /* - * We don't need to look the attribute up in the MAIR0/MAIR1 - * registers because that only tells us about cacheability. - */ + if (mregion) { *mregion = matchregion; } } fi->type = ARMFault_Permission; - fi->level = 1; + if (arm_feature(env, ARM_FEATURE_M)) { + fi->level = 1; + } return !(result->f.prot & (1 << access_type)); } @@ -2361,7 +2437,11 @@ static uint8_t combined_attrs_nofwb(uint64_t hcr, { uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs; - s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs); + if (s2.is_s2_format) { + s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs); + } else { + s2_mair_attrs = s2.attrs; + } s1lo = extract32(s1.attrs, 0, 4); s2lo = extract32(s2_mair_attrs, 0, 4); @@ -2418,6 +2498,8 @@ static uint8_t force_cacheattr_nibble_wb(uint8_t attr) */ static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2) { + assert(s2.is_s2_format && !s1.is_s2_format); + switch (s2.attrs) { case 7: /* Use stage 1 attributes */ @@ -2467,7 +2549,7 @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr, ARMCacheAttrs ret; bool tagged = false; - assert(s2.is_s2_format && !s1.is_s2_format); + assert(!s1.is_s2_format); ret.is_s2_format = false; if (s1.attrs == 0xf0) { @@ -2643,7 +2725,13 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, cacheattrs1 = result->cacheattrs; memset(result, 0, sizeof(*result)); - ret = get_phys_addr_lpae(env, ptw, ipa, access_type, is_el0, result, fi); + if (arm_feature(env, ARM_FEATURE_PMSA)) { + ret = get_phys_addr_pmsav8(env, ipa, access_type, + ptw->in_mmu_idx, is_secure, result, fi); + } else { + ret = get_phys_addr_lpae(env, ptw, ipa, access_type, + is_el0, result, fi); + } fi->s2addr = ipa; /* Combine the S1 and S2 perms. */ @@ -2655,10 +2743,20 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, } /* - * Use the maximum of the S1 & S2 page size, so that invalidation - * of pages > TARGET_PAGE_SIZE works correctly. + * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE, + * this means "don't put this in the TLB"; in this case, return a + * result with lg_page_size == 0 to achieve that. Otherwise, + * use the maximum of the S1 & S2 page size, so that invalidation + * of pages > TARGET_PAGE_SIZE works correctly. (This works even though + * we know the combined result permissions etc only cover the minimum + * of the S1 and S2 page size, because we know that the common TLB code + * never actually creates TLB entries bigger than TARGET_PAGE_SIZE, + * and passing a larger page size value only affects invalidations.) 
*/ - if (result->f.lg_page_size < s1_lgpgsz) { + if (result->f.lg_page_size < TARGET_PAGE_BITS || + s1_lgpgsz < TARGET_PAGE_BITS) { + result->f.lg_page_size = 0; + } else if (result->f.lg_page_size < s1_lgpgsz) { result->f.lg_page_size = s1_lgpgsz; } diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c index 0f4f4fc809..60abcbebe6 100644 --- a/target/arm/tlb_helper.c +++ b/target/arm/tlb_helper.c @@ -19,6 +19,10 @@ bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) if (el == 2 || arm_el_is_aa64(env, el)) { return true; } + if (arm_feature(env, ARM_FEATURE_PMSA) && + arm_feature(env, ARM_FEATURE_V8)) { + return true; + } if (arm_feature(env, ARM_FEATURE_LPAE) && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) { return true; diff --git a/target/arm/translate.c b/target/arm/translate.c index 74a903072f..1dcaefb8e7 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1184,7 +1184,7 @@ static inline void gen_hlt(DisasContext *s, int imm) * semihosting, to provide some semblance of security * (and for consistency with our 32-bit semihosting). */ - if (semihosting_enabled(s->current_el != 0) && + if (semihosting_enabled(s->current_el == 0) && (imm == (s->thumb ? 0x3c : 0xf000))) { gen_exception_internal_insn(s, EXCP_SEMIHOST); return; diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index 658ca4ff78..807037c586 100644 --- a/target/hexagon/cpu.c +++ b/target/hexagon/cpu.c @@ -86,7 +86,7 @@ static target_ulong adjust_stack_ptrs(CPUHexagonState *env, target_ulong addr) return addr; } -/* HEX_REG_P3_0 (aka C4) is an alias for the predicate registers */ +/* HEX_REG_P3_0_ALIASED (aka C4) is an alias for the predicate registers */ static target_ulong read_p3_0(CPUHexagonState *env) { int32_t control_reg = 0; @@ -102,7 +102,7 @@ static void print_reg(FILE *f, CPUHexagonState *env, int regnum) { target_ulong value; - if (regnum == HEX_REG_P3_0) { + if (regnum == HEX_REG_P3_0_ALIASED) { value = read_p3_0(env); } else { value = regnum < 32 ? 
adjust_stack_ptrs(env, env->gpr[regnum]) @@ -198,7 +198,7 @@ static void hexagon_dump(CPUHexagonState *env, FILE *f, int flags) print_reg(f, env, HEX_REG_M0); print_reg(f, env, HEX_REG_M1); print_reg(f, env, HEX_REG_USR); - print_reg(f, env, HEX_REG_P3_0); + print_reg(f, env, HEX_REG_P3_0_ALIASED); print_reg(f, env, HEX_REG_GP); print_reg(f, env, HEX_REG_UGP); print_reg(f, env, HEX_REG_PC); diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c index 6cf2e0ed43..90db99024f 100644 --- a/target/hexagon/genptr.c +++ b/target/hexagon/genptr.c @@ -43,6 +43,33 @@ TCGv gen_read_preg(TCGv pred, uint8_t num) return pred; } +#define IMMUTABLE (~0) + +static const target_ulong reg_immut_masks[TOTAL_PER_THREAD_REGS] = { + [HEX_REG_USR] = 0xc13000c0, + [HEX_REG_PC] = IMMUTABLE, + [HEX_REG_GP] = 0x3f, + [HEX_REG_UPCYCLELO] = IMMUTABLE, + [HEX_REG_UPCYCLEHI] = IMMUTABLE, + [HEX_REG_UTIMERLO] = IMMUTABLE, + [HEX_REG_UTIMERHI] = IMMUTABLE, +}; + +static inline void gen_masked_reg_write(TCGv new_val, TCGv cur_val, + target_ulong reg_mask) +{ + if (reg_mask) { + TCGv tmp = tcg_temp_new(); + + /* new_val = (new_val & ~reg_mask) | (cur_val & reg_mask) */ + tcg_gen_andi_tl(new_val, new_val, ~reg_mask); + tcg_gen_andi_tl(tmp, cur_val, reg_mask); + tcg_gen_or_tl(new_val, new_val, tmp); + + tcg_temp_free(tmp); + } +} + static inline void gen_log_predicated_reg_write(int rnum, TCGv val, uint32_t slot) { @@ -69,6 +96,9 @@ static inline void gen_log_predicated_reg_write(int rnum, TCGv val, void gen_log_reg_write(int rnum, TCGv val) { + const target_ulong reg_mask = reg_immut_masks[rnum]; + + gen_masked_reg_write(val, hex_gpr[rnum], reg_mask); tcg_gen_mov_tl(hex_new_value[rnum], val); if (HEX_DEBUG) { /* Do this so HELPER(debug_commit_end) will know */ @@ -114,19 +144,29 @@ static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val, static void gen_log_reg_write_pair(int rnum, TCGv_i64 val) { + const target_ulong reg_mask_low = reg_immut_masks[rnum]; + const target_ulong reg_mask_high = reg_immut_masks[rnum + 1]; + TCGv val32 = tcg_temp_new(); + /* Low word */ - tcg_gen_extrl_i64_i32(hex_new_value[rnum], val); + tcg_gen_extrl_i64_i32(val32, val); + gen_masked_reg_write(val32, hex_gpr[rnum], reg_mask_low); + tcg_gen_mov_tl(hex_new_value[rnum], val32); if (HEX_DEBUG) { /* Do this so HELPER(debug_commit_end) will know */ tcg_gen_movi_tl(hex_reg_written[rnum], 1); } /* High word */ - tcg_gen_extrh_i64_i32(hex_new_value[rnum + 1], val); + tcg_gen_extrh_i64_i32(val32, val); + gen_masked_reg_write(val32, hex_gpr[rnum + 1], reg_mask_high); + tcg_gen_mov_tl(hex_new_value[rnum + 1], val32); if (HEX_DEBUG) { /* Do this so HELPER(debug_commit_end) will know */ tcg_gen_movi_tl(hex_reg_written[rnum + 1], 1); } + + tcg_temp_free(val32); } void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val) @@ -163,7 +203,7 @@ static inline void gen_read_p3_0(TCGv control_reg) /* * Certain control registers require special handling on read - * HEX_REG_P3_0 aliased to the predicate registers + * HEX_REG_P3_0_ALIASED aliased to the predicate registers * -> concat the 4 predicate registers together * HEX_REG_PC actual value stored in DisasContext * -> assign from ctx->base.pc_next @@ -173,7 +213,7 @@ static inline void gen_read_p3_0(TCGv control_reg) static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num, TCGv dest) { - if (reg_num == HEX_REG_P3_0) { + if (reg_num == HEX_REG_P3_0_ALIASED) { gen_read_p3_0(dest); } else if (reg_num == HEX_REG_PC) { tcg_gen_movi_tl(dest, ctx->base.pc_next); @@ -194,7 +234,7 
@@ static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num, static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num, TCGv_i64 dest) { - if (reg_num == HEX_REG_P3_0) { + if (reg_num == HEX_REG_P3_0_ALIASED) { TCGv p3_0 = tcg_temp_new(); gen_read_p3_0(p3_0); tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]); @@ -238,7 +278,7 @@ static void gen_write_p3_0(DisasContext *ctx, TCGv control_reg) /* * Certain control registers require special handling on write - * HEX_REG_P3_0 aliased to the predicate registers + * HEX_REG_P3_0_ALIASED aliased to the predicate registers * -> break the value across 4 predicate registers * HEX_REG_QEMU_*_CNT changes in current TB in DisasContext * -> clear the changes @@ -246,7 +286,7 @@ static void gen_write_p3_0(DisasContext *ctx, TCGv control_reg) static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num, TCGv val) { - if (reg_num == HEX_REG_P3_0) { + if (reg_num == HEX_REG_P3_0_ALIASED) { gen_write_p3_0(ctx, val); } else { gen_log_reg_write(reg_num, val); @@ -266,7 +306,7 @@ static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num, static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num, TCGv_i64 val) { - if (reg_num == HEX_REG_P3_0) { + if (reg_num == HEX_REG_P3_0_ALIASED) { TCGv val32 = tcg_temp_new(); tcg_gen_extrl_i64_i32(val32, val); gen_write_p3_0(ctx, val32); diff --git a/target/hexagon/hex_regs.h b/target/hexagon/hex_regs.h index a63c2c0fd5..bddfc28021 100644 --- a/target/hexagon/hex_regs.h +++ b/target/hexagon/hex_regs.h @@ -58,7 +58,7 @@ enum { HEX_REG_LC0 = 33, HEX_REG_SA1 = 34, HEX_REG_LC1 = 35, - HEX_REG_P3_0 = 36, + HEX_REG_P3_0_ALIASED = 36, HEX_REG_M0 = 38, HEX_REG_M1 = 39, HEX_REG_USR = 40, diff --git a/target/hexagon/idef-parser/README.rst b/target/hexagon/idef-parser/README.rst index 65e6bf4ee5..ff6d14150a 100644 --- a/target/hexagon/idef-parser/README.rst +++ b/target/hexagon/idef-parser/README.rst @@ -552,11 +552,11 @@ to compare our buggy CPU state against, running :: - meson configure -Dhexagon_idef_parser_enabled=false + meson configure -Dhexagon_idef_parser=false will disable the idef-parser for all instructions and fallback on manual tinycode generator overrides, or on helper function implementations. Recompiling -gives us ``qemu-hexagon`` which passes all tests. If ``qemu-heaxgon-buggy`` is +gives us ``qemu-hexagon`` which passes all tests. 
If ``qemu-hexagon-buggy`` is our binary with the incorrect tinycode generators, we can compare the CPU state between the two versions diff --git a/target/hexagon/idef-parser/idef-parser.y b/target/hexagon/idef-parser/idef-parser.y index 8be44a0ad1..c14cb39500 100644 --- a/target/hexagon/idef-parser/idef-parser.y +++ b/target/hexagon/idef-parser/idef-parser.y @@ -99,6 +99,8 @@ /* Input file containing the description of each hexagon instruction */ input : instructions { + /* Suppress warning about unused yynerrs */ + (void) yynerrs; YYACCEPT; } ; diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build index e8f250fcac..c9d31d095c 100644 --- a/target/hexagon/meson.build +++ b/target/hexagon/meson.build @@ -197,7 +197,6 @@ if idef_parser_enabled and 'hexagon-linux-user' in target_dirs idef_parser_dir / 'parser-helpers.c'], include_directories: ['idef-parser', '../../include/'], dependencies: [glib_dep], - c_args: ['-Wextra'], native: true ) diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index add4d54ae7..287659c74d 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -2163,22 +2163,13 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env) void ppc_maybe_interrupt(CPUPPCState *env) { CPUState *cs = env_cpu(env); - bool locked = false; - - if (!qemu_mutex_iothread_locked()) { - locked = true; - qemu_mutex_lock_iothread(); - } + QEMU_IOTHREAD_LOCK_GUARD(); if (ppc_next_unmasked_interrupt(env)) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } - - if (locked) { - qemu_mutex_unlock_iothread(); - } } #if defined(TARGET_PPC64) diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c index c0aee5855b..779e7db513 100644 --- a/target/ppc/helper_regs.c +++ b/target/ppc/helper_regs.c @@ -22,6 +22,7 @@ #include "qemu/main-loop.h" #include "exec/exec-all.h" #include "sysemu/kvm.h" +#include "sysemu/tcg.h" #include "helper_regs.h" #include "power8-pmu.h" #include "cpu-models.h" @@ -203,17 +204,10 @@ void cpu_interrupt_exittb(CPUState *cs) { /* * We don't need to worry about translation blocks - * when running with KVM. + * unless running with TCG. 
*/ - if (kvm_enabled()) { - return; - } - - if (!qemu_mutex_iothread_locked()) { - qemu_mutex_lock_iothread(); - cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); - qemu_mutex_unlock_iothread(); - } else { + if (tcg_enabled()) { + QEMU_IOTHREAD_LOCK_GUARD(); cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); } } diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 6fe176e483..cc75ca7667 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -76,6 +76,7 @@ static const struct isa_ext_data isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr), ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei), ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause), + ISA_EXT_DATA_ENTRY(zawrs, true, PRIV_VERSION_1_12_0, ext_zawrs), ISA_EXT_DATA_ENTRY(zfh, true, PRIV_VERSION_1_12_0, ext_zfh), ISA_EXT_DATA_ENTRY(zfhmin, true, PRIV_VERSION_1_12_0, ext_zfhmin), ISA_EXT_DATA_ENTRY(zfinx, true, PRIV_VERSION_1_12_0, ext_zfinx), @@ -382,6 +383,10 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) CSR_MHARTID, CSR_MSTATUS, CSR_MSTATUSH, + /* + * CSR_SSTATUS is intentionally omitted here as its value + * can be figured out by looking at CSR_MSTATUS + */ CSR_HSTATUS, CSR_VSSTATUS, CSR_MIP, @@ -762,6 +767,11 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) return; } + if ((cpu->cfg.ext_zawrs) && !cpu->cfg.ext_a) { + error_setg(errp, "Zawrs extension requires A extension"); + return; + } + if ((cpu->cfg.ext_zfh || cpu->cfg.ext_zfhmin) && !cpu->cfg.ext_f) { error_setg(errp, "Zfh/Zfhmin extensions require F extension"); return; @@ -1017,6 +1027,7 @@ static Property riscv_cpu_extensions[] = { DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true), DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true), DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true), + DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true), DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false), DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false), DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false), diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 443d15a47c..f5609b62a2 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -329,6 +329,9 @@ struct CPUArchState { target_ulong tdata3[RV_MAX_TRIGGERS]; struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS]; struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS]; + QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS]; + int64_t last_icount; + bool itrigger_enabled; /* machine specific rdtime callback */ uint64_t (*rdtime_fn)(void *); @@ -366,6 +369,9 @@ struct CPUArchState { /* CSRs for execution enviornment configuration */ uint64_t menvcfg; + uint64_t mstateen[SMSTATEEN_MAX_COUNT]; + uint64_t hstateen[SMSTATEEN_MAX_COUNT]; + uint64_t sstateen[SMSTATEEN_MAX_COUNT]; target_ulong senvcfg; uint64_t henvcfg; #endif @@ -441,11 +447,13 @@ struct RISCVCPUConfig { bool ext_ifencei; bool ext_icsr; bool ext_zihintpause; + bool ext_smstateen; bool ext_sstc; bool ext_svinval; bool ext_svnapot; bool ext_svpbmt; bool ext_zdinx; + bool ext_zawrs; bool ext_zfh; bool ext_zfhmin; bool ext_zfinx; @@ -621,6 +629,8 @@ FIELD(TB_FLAGS, PM_MASK_ENABLED, 22, 1) FIELD(TB_FLAGS, PM_BASE_ENABLED, 23, 1) FIELD(TB_FLAGS, VTA, 24, 1) FIELD(TB_FLAGS, VMA, 25, 1) +/* Native debug itrigger */ +FIELD(TB_FLAGS, ITRIGGER, 26, 1) #ifdef TARGET_RISCV32 #define riscv_cpu_mxl(env) ((void)(env), MXL_RV32) diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index d8f5f0abed..8b0d7e20ea 100644 --- 
a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -197,6 +197,12 @@ /* Supervisor Configuration CSRs */ #define CSR_SENVCFG 0x10A +/* Supervisor state CSRs */ +#define CSR_SSTATEEN0 0x10C +#define CSR_SSTATEEN1 0x10D +#define CSR_SSTATEEN2 0x10E +#define CSR_SSTATEEN3 0x10F + /* Supervisor Trap Handling */ #define CSR_SSCRATCH 0x140 #define CSR_SEPC 0x141 @@ -244,6 +250,16 @@ #define CSR_HENVCFG 0x60A #define CSR_HENVCFGH 0x61A +/* Hypervisor state CSRs */ +#define CSR_HSTATEEN0 0x60C +#define CSR_HSTATEEN0H 0x61C +#define CSR_HSTATEEN1 0x60D +#define CSR_HSTATEEN1H 0x61D +#define CSR_HSTATEEN2 0x60E +#define CSR_HSTATEEN2H 0x61E +#define CSR_HSTATEEN3 0x60F +#define CSR_HSTATEEN3H 0x61F + /* Virtual CSRs */ #define CSR_VSSTATUS 0x200 #define CSR_VSIE 0x204 @@ -289,6 +305,27 @@ #define CSR_MENVCFG 0x30A #define CSR_MENVCFGH 0x31A +/* Machine state CSRs */ +#define CSR_MSTATEEN0 0x30C +#define CSR_MSTATEEN0H 0x31C +#define CSR_MSTATEEN1 0x30D +#define CSR_MSTATEEN1H 0x31D +#define CSR_MSTATEEN2 0x30E +#define CSR_MSTATEEN2H 0x31E +#define CSR_MSTATEEN3 0x30F +#define CSR_MSTATEEN3H 0x31F + +/* Common defines for all smstateen */ +#define SMSTATEEN_MAX_COUNT 4 +#define SMSTATEEN0_CS (1ULL << 0) +#define SMSTATEEN0_FCSR (1ULL << 1) +#define SMSTATEEN0_HSCONTXT (1ULL << 57) +#define SMSTATEEN0_IMSIC (1ULL << 58) +#define SMSTATEEN0_AIA (1ULL << 59) +#define SMSTATEEN0_SVSLCT (1ULL << 60) +#define SMSTATEEN0_HSENVCFG (1ULL << 62) +#define SMSTATEEN_STATEEN (1ULL << 63) + /* Enhanced Physical Memory Protection (ePMP) */ #define CSR_MSECCFG 0x747 #define CSR_MSECCFGH 0x757 diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index 278d163803..8ea3442b4a 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -27,7 +27,9 @@ #include "tcg/tcg-op.h" #include "trace.h" #include "semihosting/common-semi.h" +#include "sysemu/cpu-timers.h" #include "cpu_bits.h" +#include "debug.h" int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch) { @@ -103,6 +105,9 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS, get_field(env->mstatus_hs, MSTATUS_VS)); } + if (riscv_feature(env, RISCV_FEATURE_DEBUG) && !icount_enabled()) { + flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled); + } #endif flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl); @@ -610,7 +615,6 @@ uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value) CPURISCVState *env = &cpu->env; CPUState *cs = CPU(cpu); uint64_t gein, vsgein = 0, vstip = 0, old = env->mip; - bool locked = false; if (riscv_cpu_virt_enabled(env)) { gein = get_field(env->hstatus, HSTATUS_VGEIN); @@ -621,10 +625,7 @@ uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value) mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask; vstip = env->vstime_irq ? 
MIP_VSTIP : 0; - if (!qemu_mutex_iothread_locked()) { - locked = true; - qemu_mutex_lock_iothread(); - } + QEMU_IOTHREAD_LOCK_GUARD(); env->mip = (env->mip & ~mask) | (value & mask); @@ -634,10 +635,6 @@ uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value) cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } - if (locked) { - qemu_mutex_unlock_iothread(); - } - return old; } @@ -670,6 +667,9 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) if (newpriv == PRV_H) { newpriv = PRV_U; } + if (icount_enabled() && newpriv != env->priv) { + riscv_itrigger_update_priv(env); + } /* tlb_flush is unnecessary as mode is contained in mmu_idx */ env->priv = newpriv; env->xl = cpu_recompute_xl(env); @@ -706,24 +706,26 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot, int mode) { pmp_priv_t pmp_priv; - target_ulong tlb_size_pmp = 0; + int pmp_index = -1; if (!riscv_feature(env, RISCV_FEATURE_PMP)) { *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TRANSLATE_SUCCESS; } - if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv, - mode)) { + pmp_index = pmp_hart_has_privs(env, addr, size, 1 << access_type, + &pmp_priv, mode); + if (pmp_index < 0) { *prot = 0; return TRANSLATE_PMP_FAIL; } *prot = pmp_priv_to_page_prot(pmp_priv); - if (tlb_size != NULL) { - if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) { - *tlb_size = tlb_size_pmp; - } + if ((tlb_size != NULL) && pmp_index != MAX_RISCV_PMPS) { + target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1); + target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1; + + *tlb_size = pmp_get_tlb_size(env, pmp_index, tlb_sa, tlb_ea); } return TRANSLATE_SUCCESS; @@ -1248,6 +1250,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } } + pmu_tlb_fill_incr_ctr(cpu, access_type); if (riscv_cpu_virt_enabled(env) || ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) && access_type != MMU_INST_FETCH)) { @@ -1311,7 +1314,6 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } } } else { - pmu_tlb_fill_incr_ctr(cpu, access_type); /* Single stage lookup */ ret = get_physical_address(env, &pa, &prot, address, NULL, access_type, mmu_idx, true, false, false); diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 5c9a7ee287..0db2c233e5 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -41,6 +41,42 @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops) } /* Predicates */ +#if !defined(CONFIG_USER_ONLY) +static RISCVException smstateen_acc_ok(CPURISCVState *env, int index, + uint64_t bit) +{ + bool virt = riscv_cpu_virt_enabled(env); + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); + + if (env->priv == PRV_M || !cpu->cfg.ext_smstateen) { + return RISCV_EXCP_NONE; + } + + if (!(env->mstateen[index] & bit)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if (virt) { + if (!(env->hstateen[index] & bit)) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + + if (env->priv == PRV_U && !(env->sstateen[index] & bit)) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + } + + if (env->priv == PRV_U && riscv_has_ext(env, RVS)) { + if (!(env->sstateen[index] & bit)) { + return RISCV_EXCP_ILLEGAL_INST; + } + } + + return RISCV_EXCP_NONE; +} +#endif + static RISCVException fs(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) @@ -283,6 +319,72 @@ static RISCVException umode32(CPURISCVState *env, int csrno) return umode(env, csrno); } +static RISCVException mstateen(CPURISCVState *env, int csrno) +{ + CPUState *cs = env_cpu(env); 
+ RISCVCPU *cpu = RISCV_CPU(cs); + + if (!cpu->cfg.ext_smstateen) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return any(env, csrno); +} + +static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base) +{ + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); + + if (!cpu->cfg.ext_smstateen) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if (env->priv < PRV_M) { + if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) { + return RISCV_EXCP_ILLEGAL_INST; + } + } + + return hmode(env, csrno); +} + +static RISCVException hstateen(CPURISCVState *env, int csrno) +{ + return hstateen_pred(env, csrno, CSR_HSTATEEN0); +} + +static RISCVException hstateenh(CPURISCVState *env, int csrno) +{ + return hstateen_pred(env, csrno, CSR_HSTATEEN0H); +} + +static RISCVException sstateen(CPURISCVState *env, int csrno) +{ + bool virt = riscv_cpu_virt_enabled(env); + int index = csrno - CSR_SSTATEEN0; + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); + + if (!cpu->cfg.ext_smstateen) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if (env->priv < PRV_M) { + if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if (virt) { + if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + } + } + + return smode(env, csrno); +} + /* Checks if PointerMasking registers could be accessed */ static RISCVException pointer_masking(CPURISCVState *env, int csrno) { @@ -838,7 +940,7 @@ static RISCVException sstc(CPURISCVState *env, int csrno) } if (riscv_cpu_virt_enabled(env)) { - if (!(get_field(env->hcounteren, COUNTEREN_TM) & + if (!(get_field(env->hcounteren, COUNTEREN_TM) && get_field(env->henvcfg, HENVCFG_STCE))) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } @@ -1808,6 +1910,13 @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, static RISCVException read_senvcfg(CPURISCVState *env, int csrno, target_ulong *val) { + RISCVException ret; + + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + *val = env->senvcfg; return RISCV_EXCP_NONE; } @@ -1816,15 +1925,27 @@ static RISCVException write_senvcfg(CPURISCVState *env, int csrno, target_ulong val) { uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE; + RISCVException ret; - env->senvcfg = (env->senvcfg & ~mask) | (val & mask); + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + env->senvcfg = (env->senvcfg & ~mask) | (val & mask); return RISCV_EXCP_NONE; } static RISCVException read_henvcfg(CPURISCVState *env, int csrno, target_ulong *val) { + RISCVException ret; + + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + *val = env->henvcfg; return RISCV_EXCP_NONE; } @@ -1833,6 +1954,12 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno, target_ulong val) { uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE; + RISCVException ret; + + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); + if (ret != RISCV_EXCP_NONE) { + return ret; + } if (riscv_cpu_mxl(env) == MXL_RV64) { mask |= HENVCFG_PBMTE | HENVCFG_STCE; @@ -1846,6 +1973,13 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno, static RISCVException read_henvcfgh(CPURISCVState *env, int csrno, target_ulong *val) { + RISCVException ret; + + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + *val = 
env->henvcfg >> 32; return RISCV_EXCP_NONE; } @@ -1855,12 +1989,208 @@ static RISCVException write_henvcfgh(CPURISCVState *env, int csrno, { uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE; uint64_t valh = (uint64_t)val << 32; + RISCVException ret; + + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); + if (ret != RISCV_EXCP_NONE) { + return ret; + } env->henvcfg = (env->henvcfg & ~mask) | (valh & mask); + return RISCV_EXCP_NONE; +} + +static RISCVException read_mstateen(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mstateen[csrno - CSR_MSTATEEN0]; + + return RISCV_EXCP_NONE; +} + +static RISCVException write_mstateen(CPURISCVState *env, int csrno, + uint64_t wr_mask, target_ulong new_val) +{ + uint64_t *reg; + + reg = &env->mstateen[csrno - CSR_MSTATEEN0]; + *reg = (*reg & ~wr_mask) | (new_val & wr_mask); + + return RISCV_EXCP_NONE; +} + +static RISCVException write_mstateen0(CPURISCVState *env, int csrno, + target_ulong new_val) +{ + uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; + + return write_mstateen(env, csrno, wr_mask, new_val); +} + +static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno, + target_ulong new_val) +{ + return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val); +} + +static RISCVException read_mstateenh(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32; + + return RISCV_EXCP_NONE; +} + +static RISCVException write_mstateenh(CPURISCVState *env, int csrno, + uint64_t wr_mask, target_ulong new_val) +{ + uint64_t *reg, val; + + reg = &env->mstateen[csrno - CSR_MSTATEEN0H]; + val = (uint64_t)new_val << 32; + val |= *reg & 0xFFFFFFFF; + *reg = (*reg & ~wr_mask) | (val & wr_mask); + + return RISCV_EXCP_NONE; +} + +static RISCVException write_mstateen0h(CPURISCVState *env, int csrno, + target_ulong new_val) +{ + uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; + + return write_mstateenh(env, csrno, wr_mask, new_val); +} + +static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno, + target_ulong new_val) +{ + return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); +} + +static RISCVException read_hstateen(CPURISCVState *env, int csrno, + target_ulong *val) +{ + int index = csrno - CSR_HSTATEEN0; + + *val = env->hstateen[index] & env->mstateen[index]; + + return RISCV_EXCP_NONE; +} + +static RISCVException write_hstateen(CPURISCVState *env, int csrno, + uint64_t mask, target_ulong new_val) +{ + int index = csrno - CSR_HSTATEEN0; + uint64_t *reg, wr_mask; + + reg = &env->hstateen[index]; + wr_mask = env->mstateen[index] & mask; + *reg = (*reg & ~wr_mask) | (new_val & wr_mask); + + return RISCV_EXCP_NONE; +} + +static RISCVException write_hstateen0(CPURISCVState *env, int csrno, + target_ulong new_val) +{ + uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; + + return write_hstateen(env, csrno, wr_mask, new_val); +} + +static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno, + target_ulong new_val) +{ + return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val); +} + +static RISCVException read_hstateenh(CPURISCVState *env, int csrno, + target_ulong *val) +{ + int index = csrno - CSR_HSTATEEN0H; + + *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32); return RISCV_EXCP_NONE; } +static RISCVException write_hstateenh(CPURISCVState *env, int csrno, + uint64_t mask, target_ulong new_val) +{ + int index = csrno - CSR_HSTATEEN0H; + uint64_t *reg, wr_mask, val; + + reg = &env->hstateen[index]; + 
val = (uint64_t)new_val << 32; + val |= *reg & 0xFFFFFFFF; + wr_mask = env->mstateen[index] & mask; + *reg = (*reg & ~wr_mask) | (val & wr_mask); + + return RISCV_EXCP_NONE; +} + +static RISCVException write_hstateen0h(CPURISCVState *env, int csrno, + target_ulong new_val) +{ + uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; + + return write_hstateenh(env, csrno, wr_mask, new_val); +} + +static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno, + target_ulong new_val) +{ + return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); +} + +static RISCVException read_sstateen(CPURISCVState *env, int csrno, + target_ulong *val) +{ + bool virt = riscv_cpu_virt_enabled(env); + int index = csrno - CSR_SSTATEEN0; + + *val = env->sstateen[index] & env->mstateen[index]; + if (virt) { + *val &= env->hstateen[index]; + } + + return RISCV_EXCP_NONE; +} + +static RISCVException write_sstateen(CPURISCVState *env, int csrno, + uint64_t mask, target_ulong new_val) +{ + bool virt = riscv_cpu_virt_enabled(env); + int index = csrno - CSR_SSTATEEN0; + uint64_t wr_mask; + uint64_t *reg; + + wr_mask = env->mstateen[index] & mask; + if (virt) { + wr_mask &= env->hstateen[index]; + } + + reg = &env->sstateen[index]; + *reg = (*reg & ~wr_mask) | (new_val & wr_mask); + + return RISCV_EXCP_NONE; +} + +static RISCVException write_sstateen0(CPURISCVState *env, int csrno, + target_ulong new_val) +{ + uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; + + return write_sstateen(env, csrno, wr_mask, new_val); +} + +static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno, + target_ulong new_val) +{ + return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val); +} + static RISCVException rmw_mip64(CPURISCVState *env, int csrno, uint64_t *ret_val, uint64_t new_val, uint64_t wr_mask) @@ -3744,6 +4074,65 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh, .min_priv_ver = PRIV_VERSION_1_12_0 }, + /* Smstateen extension CSRs */ + [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh, + write_mstateen0h, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen, + write_mstateen_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh, + write_mstateenh_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen, + write_mstateen_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh, + write_mstateenh_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen, + write_mstateen_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh, + write_mstateenh_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh, + write_hstateen0h, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen, + write_hstateen_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh, + write_hstateenh_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HSTATEEN2] = 
{ "hstateen2", hstateen, read_hstateen, + write_hstateen_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh, + write_hstateenh_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen, + write_hstateen_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh, + write_hstateenh_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen, + write_sstateen_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen, + write_sstateen_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen, + write_sstateen_1_3, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + /* Supervisor Trap Setup */ [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, NULL, read_sstatus_i128 }, diff --git a/target/riscv/debug.c b/target/riscv/debug.c index e44848d0d7..bf4840a6a3 100644 --- a/target/riscv/debug.c +++ b/target/riscv/debug.c @@ -29,6 +29,8 @@ #include "cpu.h" #include "trace.h" #include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "sysemu/cpu-timers.h" /* * The following M-mode trigger CSRs are implemented: @@ -496,10 +498,209 @@ static void type6_reg_write(CPURISCVState *env, target_ulong index, return; } +/* icount trigger type */ +static inline int +itrigger_get_count(CPURISCVState *env, int index) +{ + return get_field(env->tdata1[index], ITRIGGER_COUNT); +} + +static inline void +itrigger_set_count(CPURISCVState *env, int index, int value) +{ + env->tdata1[index] = set_field(env->tdata1[index], + ITRIGGER_COUNT, value); +} + +static bool check_itrigger_priv(CPURISCVState *env, int index) +{ + target_ulong tdata1 = env->tdata1[index]; + if (riscv_cpu_virt_enabled(env)) { + /* check VU/VS bit against current privilege level */ + return (get_field(tdata1, ITRIGGER_VS) == env->priv) || + (get_field(tdata1, ITRIGGER_VU) == env->priv); + } else { + /* check U/S/M bit against current privilege level */ + return (get_field(tdata1, ITRIGGER_M) == env->priv) || + (get_field(tdata1, ITRIGGER_S) == env->priv) || + (get_field(tdata1, ITRIGGER_U) == env->priv); + } +} + +bool riscv_itrigger_enabled(CPURISCVState *env) +{ + int count; + for (int i = 0; i < RV_MAX_TRIGGERS; i++) { + if (get_trigger_type(env, i) != TRIGGER_TYPE_INST_CNT) { + continue; + } + if (check_itrigger_priv(env, i)) { + continue; + } + count = itrigger_get_count(env, i); + if (!count) { + continue; + } + return true; + } + + return false; +} + +void helper_itrigger_match(CPURISCVState *env) +{ + int count; + for (int i = 0; i < RV_MAX_TRIGGERS; i++) { + if (get_trigger_type(env, i) != TRIGGER_TYPE_INST_CNT) { + continue; + } + if (check_itrigger_priv(env, i)) { + continue; + } + count = itrigger_get_count(env, i); + if (!count) { + continue; + } + itrigger_set_count(env, i, count--); + if (!count) { + env->itrigger_enabled = riscv_itrigger_enabled(env); + do_trigger_action(env, i); + } + } +} + +static void riscv_itrigger_update_count(CPURISCVState *env) +{ + int count, executed; + /* + * Record last icount, so that we can evaluate the executed instructions + * since last priviledge mode change or timer expire. 
+ +static void riscv_itrigger_update_count(CPURISCVState *env) +{ + int count, executed; + /* + * Record the last icount, so that we can evaluate the instructions + * executed since the last privilege mode change or timer expiry. + */ + int64_t last_icount = env->last_icount, current_icount; + current_icount = env->last_icount = icount_get_raw(); + + for (int i = 0; i < RV_MAX_TRIGGERS; i++) { + if (get_trigger_type(env, i) != TRIGGER_TYPE_INST_CNT) { + continue; + } + count = itrigger_get_count(env, i); + if (!count) { + continue; + } + /* + * Only when the privilege level changes or the itrigger timer expires + * is the count field in the itrigger tdata1 register updated, + * and the count field in itrigger only holds the remaining value. + */ + if (check_itrigger_priv(env, i)) { + /* + * If itrigger is enabled in this privilege mode, the number of + * instructions executed since the last privilege change + * should be subtracted from the current itrigger count. + */ + executed = current_icount - last_icount; + itrigger_set_count(env, i, count - executed); + if (count == executed) { + do_trigger_action(env, i); + } + } else { + /* + * If itrigger is not enabled in this privilege mode, + * the number of executed instructions is discarded and + * the count field in itrigger does not change. + */ + timer_mod(env->itrigger_timer[i], + current_icount + count); + } + } +} + +static void riscv_itrigger_timer_cb(void *opaque) +{ + riscv_itrigger_update_count((CPURISCVState *)opaque); +} + +void riscv_itrigger_update_priv(CPURISCVState *env) +{ + riscv_itrigger_update_count(env); +} + +static target_ulong itrigger_validate(CPURISCVState *env, + target_ulong ctrl) +{ + target_ulong val; + + /* validate the generic part first */ + val = tdata1_validate(env, ctrl, TRIGGER_TYPE_INST_CNT); + + /* validate unimplemented (always zero) bits */ + warn_always_zero_bit(ctrl, ITRIGGER_ACTION, "action"); + warn_always_zero_bit(ctrl, ITRIGGER_HIT, "hit"); + warn_always_zero_bit(ctrl, ITRIGGER_PENDING, "pending"); + + /* keep the mode and attribute bits */ + val |= ctrl & (ITRIGGER_VU | ITRIGGER_VS | ITRIGGER_U | ITRIGGER_S | + ITRIGGER_M | ITRIGGER_COUNT); + + return val; +} + +static void itrigger_reg_write(CPURISCVState *env, target_ulong index, + int tdata_index, target_ulong val) +{ + target_ulong new_val; + + switch (tdata_index) { + case TDATA1: + /* set timer for icount */ + new_val = itrigger_validate(env, val); + if (new_val != env->tdata1[index]) { + env->tdata1[index] = new_val; + if (icount_enabled()) { + env->last_icount = icount_get_raw(); + /* set the count to timer */ + timer_mod(env->itrigger_timer[index], + env->last_icount + itrigger_get_count(env, index)); + } else { + env->itrigger_enabled = riscv_itrigger_enabled(env); + } + } + break; + case TDATA2: + qemu_log_mask(LOG_UNIMP, + "tdata2 is not supported for icount trigger\n"); + break; + case TDATA3: + qemu_log_mask(LOG_UNIMP, + "tdata3 is not supported for icount trigger\n"); + break; + default: + g_assert_not_reached(); + } + + return; +} + +static int itrigger_get_adjust_count(CPURISCVState *env) +{ + int count = itrigger_get_count(env, env->trigger_cur), executed; + if ((count != 0) && check_itrigger_priv(env, env->trigger_cur)) { + executed = icount_get_raw() - env->last_icount; + count += executed; + } + return count; +} + target_ulong tdata_csr_read(CPURISCVState *env, int tdata_index) { + int trigger_type; switch (tdata_index) { case TDATA1: + trigger_type = extract_trigger_type(env, env->tdata1[env->trigger_cur]); + if ((trigger_type == TRIGGER_TYPE_INST_CNT) && icount_enabled()) { + return deposit64(env->tdata1[env->trigger_cur], 10, 14, + itrigger_get_adjust_count(env)); + } return env->tdata1[env->trigger_cur]; case TDATA2: return env->tdata2[env->trigger_cur];
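The TDATA1 read path reconstitutes the live count into the 14-bit field at bit 10, which is what the deposit64(..., 10, 14, ...) call above does. A standalone sketch of that packing, using plain shifts against the same 0x3fff << 10 layout that ITRIGGER_COUNT encodes (the helper names here are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define COUNT_SHIFT 10
#define COUNT_MASK  (0x3fffull << COUNT_SHIFT)  /* layout of ITRIGGER_COUNT */

/* Equivalent of deposit64(tdata1, 10, 14, count). */
static uint64_t put_count(uint64_t tdata1, uint64_t count)
{
    return (tdata1 & ~COUNT_MASK) | ((count << COUNT_SHIFT) & COUNT_MASK);
}

static uint64_t get_count(uint64_t tdata1)
{
    return (tdata1 & COUNT_MASK) >> COUNT_SHIFT;
}

int main(void)
{
    uint64_t tdata1 = put_count(0, 100);     /* program a count of 100 */
    printf("count = %llu\n", (unsigned long long)get_count(tdata1));
    return 0;
}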
@@ -528,6 +729,8 @@ void tdata_csr_write(CPURISCVState *env, int tdata_index, target_ulong val) type6_reg_write(env, env->trigger_cur, tdata_index, val); break; case TRIGGER_TYPE_INST_CNT: + itrigger_reg_write(env, env->trigger_cur, tdata_index, val); + break; case TRIGGER_TYPE_INT: case TRIGGER_TYPE_EXCP: case TRIGGER_TYPE_EXT_SRC: @@ -725,5 +928,7 @@ void riscv_trigger_init(CPURISCVState *env) env->tdata3[i] = 0; env->cpu_breakpoint[i] = NULL; env->cpu_watchpoint[i] = NULL; + env->itrigger_timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL, + riscv_itrigger_timer_cb, env); } } diff --git a/target/riscv/debug.h b/target/riscv/debug.h index a1226b4d29..c471748d5a 100644 --- a/target/riscv/debug.h +++ b/target/riscv/debug.h @@ -118,6 +118,17 @@ enum { SIZE_NUM = 16 }; +/* itrigger field masks */ +#define ITRIGGER_ACTION 0x3f +#define ITRIGGER_U BIT(6) +#define ITRIGGER_S BIT(7) +#define ITRIGGER_PENDING BIT(8) +#define ITRIGGER_M BIT(9) +#define ITRIGGER_COUNT (0x3fff << 10) +#define ITRIGGER_HIT BIT(24) +#define ITRIGGER_VU BIT(25) +#define ITRIGGER_VS BIT(26) + bool tdata_available(CPURISCVState *env, int tdata_index); target_ulong tselect_csr_read(CPURISCVState *env); @@ -134,4 +145,6 @@ bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp); void riscv_trigger_init(CPURISCVState *env); +bool riscv_itrigger_enabled(CPURISCVState *env); +void riscv_itrigger_update_priv(CPURISCVState *env); #endif /* RISCV_DEBUG_H */ diff --git a/target/riscv/helper.h b/target/riscv/helper.h index a03014fe67..227c7122ef 100644 --- a/target/riscv/helper.h +++ b/target/riscv/helper.h @@ -109,6 +109,8 @@ DEF_HELPER_1(sret, tl, env) DEF_HELPER_1(mret, tl, env) DEF_HELPER_1(wfi, void, env) DEF_HELPER_1(tlb_flush, void, env) +/* Native Debug */ +DEF_HELPER_1(itrigger_match, void, env) #endif /* Hypervisor functions */ diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index d0253b8104..b7e7613ea2 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -718,6 +718,10 @@ vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm11 vsetivli 11 .......... ..... 111 ..... 1010111 @r2_zimm10 vsetvl 1000000 ..... ..... 111 ..... 1010111 @r +# *** Zawrs Standard Extension *** +wrs_nto 000000001101 00000 000 00000 1110011 +wrs_sto 000000011101 00000 000 00000 1110011 + # *** RV32 Zba Standard Extension *** sh1add 0010000 .......... 010 ..... 0110011 @r sh2add 0010000 .......... 100 .....
0110011 @r diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc index 3281408a87..59501b2780 100644 --- a/target/riscv/insn_trans/trans_privileged.c.inc +++ b/target/riscv/insn_trans/trans_privileged.c.inc @@ -78,7 +78,7 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a) if (has_ext(ctx, RVS)) { decode_save_opc(ctx); gen_helper_sret(cpu_pc, cpu_env); - tcg_gen_exit_tb(NULL, 0); /* no chaining */ + exit_tb(ctx); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; } else { return false; @@ -94,7 +94,7 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a) #ifndef CONFIG_USER_ONLY decode_save_opc(ctx); gen_helper_mret(cpu_pc, cpu_env); - tcg_gen_exit_tb(NULL, 0); /* no chaining */ + exit_tb(ctx); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; return true; #else diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc index c49dbec0eb..5c69b88d1e 100644 --- a/target/riscv/insn_trans/trans_rvi.c.inc +++ b/target/riscv/insn_trans/trans_rvi.c.inc @@ -66,7 +66,7 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a) } gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn); - tcg_gen_lookup_and_goto_ptr(); + lookup_and_goto_ptr(ctx); if (misaligned) { gen_set_label(misaligned); @@ -803,7 +803,7 @@ static bool trans_pause(DisasContext *ctx, arg_pause *a) * end the TB and return to main loop */ gen_set_pc_imm(ctx, ctx->pc_succ_insn); - tcg_gen_exit_tb(NULL, 0); + exit_tb(ctx); ctx->base.is_jmp = DISAS_NORETURN; return true; @@ -827,7 +827,7 @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a) * however we need to end the translation block */ gen_set_pc_imm(ctx, ctx->pc_succ_insn); - tcg_gen_exit_tb(NULL, 0); + exit_tb(ctx); ctx->base.is_jmp = DISAS_NORETURN; return true; } @@ -838,7 +838,7 @@ static bool do_csr_post(DisasContext *ctx) decode_save_opc(ctx); /* We may have changed important cpu state -- exit to main loop. */ gen_set_pc_imm(ctx, ctx->pc_succ_insn); - tcg_gen_exit_tb(NULL, 0); + exit_tb(ctx); ctx->base.is_jmp = DISAS_NORETURN; return true; } diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index 4dea4413ae..d455acedbf 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -196,7 +196,7 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2) mark_vs_dirty(s); gen_set_pc_imm(s, s->pc_succ_insn); - tcg_gen_lookup_and_goto_ptr(); + lookup_and_goto_ptr(s); s->base.is_jmp = DISAS_NORETURN; if (rd == 0 && rs1 == 0) { @@ -222,7 +222,7 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2) gen_set_gpr(s, rd, dst); mark_vs_dirty(s); gen_set_pc_imm(s, s->pc_succ_insn); - tcg_gen_lookup_and_goto_ptr(); + lookup_and_goto_ptr(s); s->base.is_jmp = DISAS_NORETURN; return true; diff --git a/target/riscv/insn_trans/trans_rvzawrs.c.inc b/target/riscv/insn_trans/trans_rvzawrs.c.inc new file mode 100644 index 0000000000..8254e7dfe2 --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzawrs.c.inc @@ -0,0 +1,51 @@ +/* + * RISC-V translation routines for the RISC-V Zawrs Extension. + * + * Copyright (c) 2022 Christoph Muellner, christoph.muellner@vrull.io + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +static bool trans_wrs(DisasContext *ctx) +{ + if (!ctx->cfg_ptr->ext_zawrs) { + return false; + } + + /* + * The specification says: + * While stalled, an implementation is permitted to occasionally + * terminate the stall and complete execution for any reason. + * + * So let's just exit TB and return to the main loop. + */ + + /* Clear the load reservation (if any). */ + tcg_gen_movi_tl(load_res, -1); + + gen_set_pc_imm(ctx, ctx->pc_succ_insn); + tcg_gen_exit_tb(NULL, 0); + ctx->base.is_jmp = DISAS_NORETURN; + + return true; +} + +#define GEN_TRANS_WRS(insn) \ +static bool trans_ ## insn(DisasContext *ctx, arg_ ## insn *a) \ +{ \ + (void)a; \ + return trans_wrs(ctx); \ +} + +GEN_TRANS_WRS(wrs_nto) +GEN_TRANS_WRS(wrs_sto) diff --git a/target/riscv/machine.c b/target/riscv/machine.c index c2a94a82b3..65a8549ec2 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -21,6 +21,8 @@ #include "qemu/error-report.h" #include "sysemu/kvm.h" #include "migration/cpu.h" +#include "sysemu/cpu-timers.h" +#include "debug.h" static bool pmp_needed(void *opaque) { @@ -229,11 +231,24 @@ static bool debug_needed(void *opaque) return riscv_feature(env, RISCV_FEATURE_DEBUG); } +static int debug_post_load(void *opaque, int version_id) +{ + RISCVCPU *cpu = opaque; + CPURISCVState *env = &cpu->env; + + if (icount_enabled()) { + env->itrigger_enabled = riscv_itrigger_enabled(env); + } + + return 0; +} + static const VMStateDescription vmstate_debug = { .name = "cpu/debug", .version_id = 2, .minimum_version_id = 2, .needed = debug_needed, + .post_load = debug_post_load, .fields = (VMStateField[]) { VMSTATE_UINTTL(env.trigger_cur, RISCVCPU), VMSTATE_UINTTL_ARRAY(env.tdata1, RISCVCPU, RV_MAX_TRIGGERS), @@ -253,6 +268,26 @@ static int riscv_cpu_post_load(void *opaque, int version_id) return 0; } +static bool smstateen_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + + return cpu->cfg.ext_smstateen; +} + +static const VMStateDescription vmstate_smstateen = { + .name = "cpu/smtateen", + .version_id = 1, + .minimum_version_id = 1, + .needed = smstateen_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64_ARRAY(env.mstateen, RISCVCPU, 4), + VMSTATE_UINT64_ARRAY(env.hstateen, RISCVCPU, 4), + VMSTATE_UINT64_ARRAY(env.sstateen, RISCVCPU, 4), + VMSTATE_END_OF_LIST() + } +}; + static bool envcfg_needed(void *opaque) { RISCVCPU *cpu = opaque; @@ -364,6 +399,7 @@ const VMStateDescription vmstate_riscv_cpu = { &vmstate_kvmtimer, &vmstate_envcfg, &vmstate_debug, + &vmstate_smstateen, NULL } }; diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index 09f1f5185d..878bcb03b8 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -149,21 +149,24 @@ target_ulong helper_sret(CPURISCVState *env) } mstatus = env->mstatus; + prev_priv = get_field(mstatus, MSTATUS_SPP); + mstatus = set_field(mstatus, MSTATUS_SIE, + get_field(mstatus, MSTATUS_SPIE)); + mstatus = set_field(mstatus, MSTATUS_SPIE, 1); + mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U); + if (env->priv_ver >= PRIV_VERSION_1_12_0) { + mstatus = set_field(mstatus, MSTATUS_MPRV, 0); + } + env->mstatus = mstatus; if (riscv_has_ext(env, 
RVH) && !riscv_cpu_virt_enabled(env)) { /* We support Hypervisor extensions and virtualisation is disabled */ target_ulong hstatus = env->hstatus; - prev_priv = get_field(mstatus, MSTATUS_SPP); prev_virt = get_field(hstatus, HSTATUS_SPV); hstatus = set_field(hstatus, HSTATUS_SPV, 0); - mstatus = set_field(mstatus, MSTATUS_SPP, 0); - mstatus = set_field(mstatus, SSTATUS_SIE, - get_field(mstatus, SSTATUS_SPIE)); - mstatus = set_field(mstatus, SSTATUS_SPIE, 1); - env->mstatus = mstatus; env->hstatus = hstatus; if (prev_virt) { @@ -171,14 +174,6 @@ target_ulong helper_sret(CPURISCVState *env) } riscv_cpu_set_virt_enabled(env, prev_virt); - } else { - prev_priv = get_field(mstatus, MSTATUS_SPP); - - mstatus = set_field(mstatus, MSTATUS_SIE, - get_field(mstatus, MSTATUS_SPIE)); - mstatus = set_field(mstatus, MSTATUS_SPIE, 1); - mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U); - env->mstatus = mstatus; } riscv_cpu_set_mode(env, prev_priv); @@ -202,7 +197,7 @@ target_ulong helper_mret(CPURISCVState *env) if (riscv_feature(env, RISCV_FEATURE_PMP) && !pmp_get_num_rules(env) && (prev_priv != PRV_M)) { - riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC()); } target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV); @@ -211,6 +206,9 @@ target_ulong helper_mret(CPURISCVState *env) mstatus = set_field(mstatus, MSTATUS_MPIE, 1); mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U); mstatus = set_field(mstatus, MSTATUS_MPV, 0); + if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) { + mstatus = set_field(mstatus, MSTATUS_MPRV, 0); + } env->mstatus = mstatus; riscv_cpu_set_mode(env, prev_priv); diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c index 2b43e399b8..d1126a6066 100644 --- a/target/riscv/pmp.c +++ b/target/riscv/pmp.c @@ -292,8 +292,11 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr, /* * Check if the address has required RWX privs to complete desired operation + * Return the PMP rule index if a PMP rule matches + * Return MAX_RISCV_PMPS if the default permissions match + * Return a negative value if no rule matches */ -bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, +int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs, target_ulong mode) { @@ -305,8 +308,10 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, /* Short cut if no rules */ if (0 == pmp_get_num_rules(env)) { - return pmp_hart_has_privs_default(env, addr, size, privs, - allowed_privs, mode); + if (pmp_hart_has_privs_default(env, addr, size, privs, + allowed_privs, mode)) { + ret = MAX_RISCV_PMPS; + } } if (size == 0) { @@ -333,7 +338,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, if ((s + e) == 1) { qemu_log_mask(LOG_GUEST_ERROR, "pmp violation - access is partially inside\n"); - ret = -1; + break; } @@ -436,18 +441,22 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, } } - ret = ((privs & *allowed_privs) == privs); + if ((privs & *allowed_privs) == privs) { + ret = i; + } break; } } /* No rule matched */ if (ret == -1) { - return pmp_hart_has_privs_default(env, addr, size, privs, - allowed_privs, mode); + if (pmp_hart_has_privs_default(env, addr, size, privs, - allowed_privs, mode)) { + ret = MAX_RISCV_PMPS; + } } - return ret == 1 ? true : false; + return ret; }
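pmp_hart_has_privs() now returns three kinds of values instead of a bool. A standalone sketch of how a caller distinguishes them; the toy matcher and address ranges are invented purely for illustration:

#include <stdio.h>

#define MAX_RISCV_PMPS 16

/* Toy stand-in for pmp_hart_has_privs(): rule 3 covers 0x1000..0x1fff,
 * low addresses fault, everything else hits only the default rule. */
static int pmp_match(unsigned long addr)
{
    if (addr >= 0x1000 && addr < 0x2000) {
        return 3;                 /* index of the matching rule */
    }
    if (addr < 0x100) {
        return -1;                /* denied / partially inside a region */
    }
    return MAX_RISCV_PMPS;        /* only the default permissions apply */
}

int main(void)
{
    unsigned long addrs[] = { 0x1800, 0x50, 0x9000 };
    for (int i = 0; i < 3; i++) {
        int idx = pmp_match(addrs[i]);
        if (idx < 0) {
            printf("0x%lx: access fault\n", addrs[i]);
        } else if (idx == MAX_RISCV_PMPS) {
            printf("0x%lx: default match\n", addrs[i]);
        } else {
            printf("0x%lx: rule %d\n", addrs[i], idx);
        }
    }
    return 0;
}

Returning the rule index rather than collapsing to a bool lets the caller feed it straight into pmp_get_tlb_size() below.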
/* @@ -586,64 +595,25 @@ target_ulong mseccfg_csr_read(CPURISCVState *env) * Calculate the TLB size if the start address or the end address of * PMP entry is presented in the TLB page. */ -static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index, - target_ulong tlb_sa, target_ulong tlb_ea) +target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index, + target_ulong tlb_sa, target_ulong tlb_ea) { target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa; target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea; - if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) { - return pmp_ea - pmp_sa + 1; - } - - if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) { - return tlb_ea - pmp_sa + 1; - } - - if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) { - return pmp_ea - tlb_sa + 1; - } - - return 0; -} - -/* - * Check is there a PMP entry which range covers this page. If so, - * try to find the minimum granularity for the TLB size. - */ -bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa, - target_ulong *tlb_size) -{ - int i; - target_ulong val; - target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1); - - for (i = 0; i < MAX_RISCV_PMPS; i++) { - val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea); - if (val) { - if (*tlb_size == 0 || *tlb_size > val) { - *tlb_size = val; - } - } - } - - if (*tlb_size != 0) { + if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) { + return TARGET_PAGE_SIZE; + } else { /* - * At this point we have a tlb_size that is the smallest possible size - * That fits within a TARGET_PAGE_SIZE and the PMP region. - * - * If the size is less then TARGET_PAGE_SIZE we drop the size to 1. - * This means the result isn't cached in the TLB and is only used for - * a single translation. - */ - if (*tlb_size < TARGET_PAGE_SIZE) { - *tlb_size = 1; - } - - return true; + * At this point we have a tlb_size that is the smallest possible size + * that fits within a TARGET_PAGE_SIZE and the PMP region. + * + * If the size is less than TARGET_PAGE_SIZE we drop the size to 1. + * This means the result isn't cached in the TLB and is only used for + * a single translation. + */ + return 1; } - - return false; } /* diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h index a8dd797476..da32c61c85 100644 --- a/target/riscv/pmp.h +++ b/target/riscv/pmp.h @@ -72,11 +72,11 @@ target_ulong mseccfg_csr_read(CPURISCVState *env); void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, target_ulong val); target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index); -bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, +int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs, target_ulong mode); -bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa, - target_ulong *tlb_size); +target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index, + target_ulong tlb_sa, target_ulong tlb_ea); void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index); void pmp_update_rule_nums(CPURISCVState *env); uint32_t pmp_get_num_rules(CPURISCVState *env);
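The simplified pmp_get_tlb_size() is now all-or-nothing: either the PMP region covers the whole target page and the full page size can be cached in the TLB, or the entry is shrunk to one byte so the translation is never cached. A standalone model (the TARGET_PAGE_SIZE value and sample ranges are illustrative only):

#include <stdio.h>

#define TARGET_PAGE_SIZE 4096ul

static unsigned long tlb_size(unsigned long pmp_sa, unsigned long pmp_ea,
                              unsigned long tlb_sa)
{
    unsigned long tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;

    if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
        return TARGET_PAGE_SIZE;  /* region covers the page entirely */
    }
    return 1;                     /* partial overlap: do not cache */
}

int main(void)
{
    printf("%lu\n", tlb_size(0x0000, 0xffff, 0x2000));  /* 4096 */
    printf("%lu\n", tlb_size(0x2100, 0x21ff, 0x2000));  /* 1 */
    return 0;
}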
diff --git a/target/riscv/translate.c b/target/riscv/translate.c index db123da5ec..df38db7553 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -76,6 +76,7 @@ typedef struct DisasContext { to reset this known value. */ int frm; RISCVMXL ol; + bool virt_inst_excp; bool virt_enabled; const RISCVCPUConfig *cfg_ptr; bool hlsx; @@ -111,6 +112,8 @@ typedef struct DisasContext { /* PointerMasking extension */ bool pm_mask_enabled; bool pm_base_enabled; + /* Use icount trigger for native debug */ + bool itrigger; /* TCG of the current insn_start */ TCGOp *insn_start; } DisasContext; @@ -243,7 +246,11 @@ static void gen_exception_illegal(DisasContext *ctx) { tcg_gen_st_i32(tcg_constant_i32(ctx->opcode), cpu_env, offsetof(CPURISCVState, bins)); - generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST); + if (ctx->virt_inst_excp) { + generate_exception(ctx, RISCV_EXCP_VIRT_INSTRUCTION_FAULT); + } else { + generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST); + } } static void gen_exception_inst_addr_mis(DisasContext *ctx) @@ -252,15 +259,39 @@ static void gen_exception_inst_addr_mis(DisasContext *ctx) generate_exception(ctx, RISCV_EXCP_INST_ADDR_MIS); } +static void lookup_and_goto_ptr(DisasContext *ctx) +{ +#ifndef CONFIG_USER_ONLY + if (ctx->itrigger) { + gen_helper_itrigger_match(cpu_env); + } +#endif + tcg_gen_lookup_and_goto_ptr(); +} + +static void exit_tb(DisasContext *ctx) +{ +#ifndef CONFIG_USER_ONLY + if (ctx->itrigger) { + gen_helper_itrigger_match(cpu_env); + } +#endif + tcg_gen_exit_tb(NULL, 0); +} + static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) { - if (translator_use_goto_tb(&ctx->base, dest)) { + /* + * Under itrigger, instructions execute one by one like singlestep, + * so direct block chaining brings little benefit. + */ + if (translator_use_goto_tb(&ctx->base, dest) && !ctx->itrigger) { tcg_gen_goto_tb(n); gen_set_pc_imm(ctx, dest); tcg_gen_exit_tb(ctx->base.tb, n); } else { gen_set_pc_imm(ctx, dest); - tcg_gen_lookup_and_goto_ptr(); + lookup_and_goto_ptr(ctx); } } @@ -1029,6 +1060,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) #include "insn_trans/trans_rvh.c.inc" #include "insn_trans/trans_rvv.c.inc" #include "insn_trans/trans_rvb.c.inc" +#include "insn_trans/trans_rvzawrs.c.inc" #include "insn_trans/trans_rvzfh.c.inc" #include "insn_trans/trans_rvk.c.inc" #include "insn_trans/trans_privileged.c.inc" @@ -1062,16 +1094,13 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) { has_XVentanaCondOps_p, decode_XVentanaCodeOps }, }; + ctx->virt_inst_excp = false; /* Check for compressed insn */ if (insn_len(opcode) == 2) { - if (!has_ext(ctx, RVC)) { - gen_exception_illegal(ctx); - } else { - ctx->opcode = opcode; - ctx->pc_succ_insn = ctx->base.pc_next + 2; - if (decode_insn16(ctx, opcode)) { - return; - } + ctx->opcode = opcode; + ctx->pc_succ_insn = ctx->base.pc_next + 2; + if (has_ext(ctx, RVC) && decode_insn16(ctx, opcode)) { + return; } } else { uint32_t opcode32 = opcode; @@ -1136,6 +1165,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) memset(ctx->ftemp, 0, sizeof(ctx->ftemp)); ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED); ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED); + ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER); ctx->zero = tcg_constant_tl(0); } @@ -1175,7 +1205,7 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) /* Only the first insn within a TB is allowed to cross a page boundary.
*/ if (ctx->base.is_jmp == DISAS_NEXT) { - if (!is_same_page(&ctx->base, ctx->base.pc_next)) { + if (ctx->itrigger || !is_same_page(&ctx->base, ctx->base.pc_next)) { ctx->base.is_jmp = DISAS_TOO_MANY; } else { unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK; diff --git a/target/sparc/translate.c b/target/sparc/translate.c index 34858eb95f..150aeecd14 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -163,13 +163,6 @@ static inline void gen_update_fprs_dirty(DisasContext *dc, int rd) /* floating point registers moves */ static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src) { -#if TCG_TARGET_REG_BITS == 32 - if (src & 1) { - return TCGV_LOW(cpu_fpr[src / 2]); - } else { - return TCGV_HIGH(cpu_fpr[src / 2]); - } -#else TCGv_i32 ret = get_temp_i32(dc); if (src & 1) { tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]); @@ -177,22 +170,16 @@ static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src) } else { tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]); } return ret; -#endif } static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v) { -#if TCG_TARGET_REG_BITS == 32 - if (dst & 1) { - tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v); - } else { - tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v); - } -#else - TCGv_i64 t = (TCGv_i64)v; + TCGv_i64 t = tcg_temp_new_i64(); + + tcg_gen_extu_i32_i64(t, v); tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t, (dst & 1 ? 0 : 32), 32); -#endif + tcg_temp_free_i64(t); gen_update_fprs_dirty(dc, dst); }
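For reference, the even/odd half packing that gen_store_fpr_F() now expresses with tcg_gen_extu_i32_i64() plus tcg_gen_deposit_i64() can be modelled in plain C; this is a sketch of the data flow, not the TCG code itself:

#include <stdint.h>
#include <stdio.h>

/* Write 32-bit float register 'dst' into its 64-bit backing register:
 * even registers land in the high half, odd registers in the low half. */
static uint64_t store_fpr_f(uint64_t pair, unsigned dst, uint32_t v)
{
    int ofs = (dst & 1) ? 0 : 32;
    uint64_t mask = 0xffffffffull << ofs;

    return (pair & ~mask) | ((uint64_t)v << ofs);
}

int main(void)
{
    uint64_t pair = 0;
    pair = store_fpr_f(pair, 0, 0x11111111);   /* even: high half */
    pair = store_fpr_f(pair, 1, 0x22222222);   /* odd: low half */
    printf("%016llx\n", (unsigned long long)pair); /* 1111111122222222 */
    return 0;
}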