Diffstat (limited to 'target')
37 files changed, 851 insertions, 157 deletions
diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 1320fd8c8f..5b751439bd 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -2390,14 +2390,22 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) #ifndef CONFIG_USER_ONLY /* - * If we do not have tag-memory provided by the machine, - * reduce MTE support to instructions enabled at EL0. + * If we run with TCG and do not have tag-memory provided by + * the machine, then reduce MTE support to instructions enabled at EL0. * This matches Cortex-A710 BROADCASTMTE input being LOW. */ - if (cpu->tag_memory == NULL) { + if (tcg_enabled() && cpu->tag_memory == NULL) { cpu->isar.id_aa64pfr1 = FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1); } + + /* + * If MTE is supported by the host but should not be + * enabled on the guest (i.e. mte=off), clear the guest's MTE bits. + */ + if (kvm_enabled() && !cpu->kvm_mte) { + cpu->isar.id_aa64pfr1 = FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0); + } #endif } diff --git a/target/arm/cpu.h b/target/arm/cpu.h index f065756c5c..8fc8b6398f 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -922,6 +922,8 @@ struct ArchCPU { /* CPU has memory protection unit */ bool has_mpu; + /* CPU has MTE enabled in KVM mode */ + bool kvm_mte; /* PMSAv7 MPU number of supported regions */ uint32_t pmsav7_dregion; /* PMSAv8 MPU number of supported hyp regions */ diff --git a/target/arm/internals.h b/target/arm/internals.h index 299a96a81a..fd8f7c82aa 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -963,6 +963,7 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) { switch (mmu_idx) { + case ARMMMUIdx_E10_0: case ARMMMUIdx_E20_0: case ARMMMUIdx_Stage1_E0: case ARMMMUIdx_MUser: @@ -972,10 +973,6 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) return true; default: return false; - case ARMMMUIdx_E10_0: - case ARMMMUIdx_E10_1: - case ARMMMUIdx_E10_1_PAN: - g_assert_not_reached(); } }
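A note on the arm_cpu_realizefn() hunk above: FIELD_DP64() (from QEMU's registerfields.h) is pure — it returns the updated value rather than modifying its argument in place — which is why the result has to be assigned back to cpu->isar.id_aa64pfr1. A minimal sketch of the read-modify-write, using only macros that already appear in this diff:

/* Deposit 0 into the MTE bitfield of ID_AA64PFR1, leaving all other
 * fields untouched; the update is carried in the return value. */
static uint64_t clear_mte_field(uint64_t id_aa64pfr1)
{
    return FIELD_DP64(id_aa64pfr1, ID_AA64PFR1, MTE, 0);
}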
diff --git a/target/arm/kvm.c b/target/arm/kvm.c index f1f1b5b375..7b6812c0de 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -39,8 +39,10 @@ #include "hw/acpi/acpi.h" #include "hw/acpi/ghes.h" #include "target/arm/gtimer.h" +#include "migration/blocker.h" const KVMCapabilityInfo kvm_arch_required_capabilities[] = { + KVM_CAP_INFO(DEVICE_CTRL), KVM_CAP_LAST_INFO }; @@ -119,6 +121,21 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, if (vmfd < 0) { goto err; } + + /* + * The MTE capability must be enabled by the VMM before creating + * any VCPUs in order to allow the MTE bits of the ID_AA64PFR1 + * register to be probed correctly, as they are masked if MTE + * is not enabled. + */ + if (kvm_arm_mte_supported()) { + KVMState kvm_state; + + kvm_state.fd = kvmfd; + kvm_state.vmfd = vmfd; + kvm_vm_enable_cap(&kvm_state, KVM_CAP_ARM_MTE, 0); + } + cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0); if (cpufd < 0) { goto err; } @@ -675,19 +692,11 @@ static void kvm_arm_set_device_addr(KVMDevice *kd) { struct kvm_device_attr *attr = &kd->kdattr; int ret; + uint64_t addr = kd->kda.addr; - /* If the device control API is available and we have a device fd on the - * KVMDevice struct, let's use the newer API - */ - if (kd->dev_fd >= 0) { - uint64_t addr = kd->kda.addr; - - addr |= kd->kda_addr_ormask; - attr->addr = (uintptr_t)&addr; - ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr); - } else { - ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda); - } + addr |= kd->kda_addr_ormask; + attr->addr = (uintptr_t)&addr; + ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr); if (ret < 0) { fprintf(stderr, "Failed to set device address: %s\n", @@ -1793,6 +1802,11 @@ bool kvm_arm_sve_supported(void) return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE); } +bool kvm_arm_mte_supported(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE); +} + QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1); uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) @@ -2417,3 +2431,40 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) } return 0; } + +void kvm_arm_enable_mte(Object *cpuobj, Error **errp) +{ + static bool tried_to_enable; + static bool succeeded_to_enable; + Error *mte_migration_blocker = NULL; + ARMCPU *cpu = ARM_CPU(cpuobj); + int ret; + + if (!tried_to_enable) { + /* + * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make + * sense), and we only want a single migration blocker as well. + */ + tried_to_enable = true; + + ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0); + if (ret) { + error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE"); + return; + } + + /* TODO: Add migration support with MTE enabled */ + error_setg(&mte_migration_blocker, + "Live migration disabled due to MTE enabled"); + if (migrate_add_blocker(&mte_migration_blocker, errp)) { + error_free(mte_migration_blocker); + return; + } + + succeeded_to_enable = true; + } + + if (succeeded_to_enable) { + cpu->kvm_mte = true; + } +} diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index cfaa0d9bc7..2e6b49bf13 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -22,17 +22,15 @@ * @devid: the KVM device ID * @group: device control API group for setting addresses * @attr: device control API address type - * @dev_fd: device control device file descriptor (or -1 if not supported) + * @dev_fd: device control device file descriptor * @addr_ormask: value to be OR'ed with resolved address * - * Remember the memory region @mr, and when it is mapped by the - * machine model, tell the kernel that base address using the - * KVM_ARM_SET_DEVICE_ADDRESS ioctl or the newer device control API. @devid - * should be the ID of the device as defined by KVM_ARM_SET_DEVICE_ADDRESS or - * the arm-vgic device in the device control API. - * The machine model may map - * and unmap the device multiple times; the kernel will only be told the final - * address at the point where machine init is complete. + * Remember the memory region @mr, and when it is mapped by the machine + * model, tell the kernel the base address using the device control API.
+ * @devid should be the ID of the device as defined by the arm-vgic device + * in the device control API. The machine model may map and unmap the device + * multiple times; the kernel will only be told the final address at the + * point where machine init is complete. */ void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group, uint64_t attr, int dev_fd, uint64_t addr_ormask); @@ -189,6 +187,13 @@ bool kvm_arm_pmu_supported(void); bool kvm_arm_sve_supported(void); /** + * kvm_arm_mte_supported: + * + * Returns: true if KVM can enable MTE, and false otherwise. + */ +bool kvm_arm_mte_supported(void); + +/** * kvm_arm_get_max_vm_ipa_size: * @ms: Machine state handle * @fixed_ipa: True when the IPA limit is fixed at 40. This is the case @@ -214,6 +219,8 @@ void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa); int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level); +void kvm_arm_enable_mte(Object *cpuobj, Error **errp); + #else /* @@ -235,6 +242,11 @@ static inline bool kvm_arm_sve_supported(void) return false; } +static inline bool kvm_arm_mte_supported(void) +{ + return false; +} + /* * These functions should never actually be called without KVM support. */ @@ -283,6 +295,11 @@ static inline uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) g_assert_not_reached(); } +static inline void kvm_arm_enable_mte(Object *cpuobj, Error **errp) +{ + g_assert_not_reached(); +} + #endif #endif diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index 56b431faf5..8f42a28d07 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -1348,7 +1348,7 @@ static void do_setm(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, /* Do the actual memset: we leave the last partial page to SETE */ stagesetsize = setsize & TARGET_PAGE_MASK; while (stagesetsize > 0) { - step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra); + step = stepfn(env, toaddr, stagesetsize, data, memidx, &mtedesc, ra); toaddr += step; setsize -= step; stagesetsize -= step; diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 203d37303b..62638d2b1f 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -59,32 +59,6 @@ static inline int vfp_exceptbits_from_host(int host_bits) return target_bits; } -/* Convert vfp exception flags to target form. */ -static inline int vfp_exceptbits_to_host(int target_bits) -{ - int host_bits = 0; - - if (target_bits & 1) { - host_bits |= float_flag_invalid; - } - if (target_bits & 2) { - host_bits |= float_flag_divbyzero; - } - if (target_bits & 4) { - host_bits |= float_flag_overflow; - } - if (target_bits & 8) { - host_bits |= float_flag_underflow; - } - if (target_bits & 0x10) { - host_bits |= float_flag_inexact; - } - if (target_bits & 0x80) { - host_bits |= float_flag_input_denormal; - } - return host_bits; -} - static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) { uint32_t i; @@ -99,15 +73,14 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) return vfp_exceptbits_from_host(i); } -static void vfp_set_fpsr_to_host(CPUARMState *env, uint32_t val) +static void vfp_clear_float_status_exc_flags(CPUARMState *env) { /* - * The exception flags are ORed together when we read fpscr so we - * only need to preserve the current state in one of our - * float_status values. + * Clear out all the exception-flag information in the float_status + * values. The caller should have arranged for env->vfp.fpsr to + * be the architecturally up-to-date exception flag information first. 
*/ - int i = vfp_exceptbits_to_host(val); - set_float_exception_flags(i, &env->vfp.fp_status); + set_float_exception_flags(0, &env->vfp.fp_status); set_float_exception_flags(0, &env->vfp.fp_status_f16); set_float_exception_flags(0, &env->vfp.standard_fp_status); set_float_exception_flags(0, &env->vfp.standard_fp_status_f16); @@ -164,7 +137,7 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) return 0; } -static void vfp_set_fpsr_to_host(CPUARMState *env, uint32_t val) +static void vfp_clear_float_status_exc_flags(CPUARMState *env) { } @@ -216,8 +189,6 @@ void vfp_set_fpsr(CPUARMState *env, uint32_t val) { ARMCPU *cpu = env_archcpu(env); - vfp_set_fpsr_to_host(env, val); - if (arm_feature(env, ARM_FEATURE_NEON) || cpu_isar_feature(aa32_mve, cpu)) { /* @@ -231,13 +202,18 @@ void vfp_set_fpsr(CPUARMState *env, uint32_t val) } /* - * The only FPSR bits we keep in vfp.fpsr are NZCV: - * the exception flags IOC|DZC|OFC|UFC|IXC|IDC are stored in - * fp_status, and QC is in vfp.qc[]. Store the NZCV bits there, - * and zero any of the other FPSR bits. + * NZCV lives only in env->vfp.fpsr. The cumulative exception flags + * IOC|DZC|OFC|UFC|IXC|IDC also live in env->vfp.fpsr, with possible + * extra pending exception information that hasn't yet been folded in + * living in the float_status values (for TCG). + * Since this FPSR write gives us the up to date values of the exception + * flags, we want to store into vfp.fpsr the NZCV and CEXC bits, zeroing + * anything else. We also need to clear out the float_status exception + * information so that the next vfp_get_fpsr does not fold in stale data. */ - val &= FPSR_NZCV_MASK; + val &= FPSR_NZCV_MASK | FPSR_CEXC_MASK; env->vfp.fpsr = val; + vfp_clear_float_status_exc_flags(env); } static void vfp_set_fpcr_masked(CPUARMState *env, uint32_t val, uint32_t mask) diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h index 4464c0fd7a..62115375cd 100644 --- a/target/riscv/cpu-qom.h +++ b/target/riscv/cpu-qom.h @@ -30,6 +30,7 @@ #define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX) #define TYPE_RISCV_CPU_MAX RISCV_CPU_TYPE_NAME("max") +#define TYPE_RISCV_CPU_MAX32 RISCV_CPU_TYPE_NAME("max32") #define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32") #define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64") #define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128") diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 658bdb4ae1..f219f0c3b5 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -106,6 +106,8 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11), ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11), ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11), + ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp), + ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss), ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr), @@ -449,11 +451,9 @@ static void riscv_max_cpu_init(Object *obj) env->priv_ver = PRIV_VERSION_LATEST; #ifndef CONFIG_USER_ONLY -#ifdef TARGET_RISCV32 - set_satp_mode_max_supported(cpu, VM_1_10_SV32); -#else - set_satp_mode_max_supported(cpu, VM_1_10_SV57); -#endif + set_satp_mode_max_supported(RISCV_CPU(obj), + riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 
+ VM_1_10_SV32 : VM_1_10_SV57); #endif } @@ -615,7 +615,10 @@ static void rv64e_bare_cpu_init(Object *obj) riscv_cpu_set_misa_ext(env, RVE); } -#else /* !TARGET_RISCV64 */ +#endif /* !TARGET_RISCV64 */ + +#if defined(TARGET_RISCV32) || \ + (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) static void rv32_base_cpu_init(Object *obj) { @@ -1003,12 +1006,23 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type) } pmp_unlock_entries(env); +#else + env->priv = PRV_U; + env->senvcfg = 0; + env->menvcfg = 0; #endif + + /* on reset elp is clear */ + env->elp = false; + /* on reset ssp is set to 0 */ + env->ssp = 0; + env->xl = riscv_cpu_mxl(env); riscv_cpu_update_mask(env); cs->exception_index = RISCV_EXCP_NONE; env->load_res = -1; set_default_nan_mode(1, &env->fp_status); + env->vill = true; #ifndef CONFIG_USER_ONLY if (cpu->cfg.debug) { @@ -1460,6 +1474,8 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false), MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true), + MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false), + MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false), MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), @@ -2941,6 +2957,12 @@ static const TypeInfo riscv_cpu_type_infos[] = { }, #if defined(TARGET_RISCV32) DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), +#elif defined(TARGET_RISCV64) + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), +#endif + +#if defined(TARGET_RISCV32) || \ + (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), @@ -2948,8 +2970,13 @@ static const TypeInfo riscv_cpu_type_infos[] = { DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), -#elif defined(TARGET_RISCV64) - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), +#endif + +#if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), +#endif + +#if defined(TARGET_RISCV64) DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
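A hedged usage note on the CPU type registrations above: TYPE_RISCV_CPU_MAX32 makes an RV32 "max" model selectable from the 64-bit system-mode binary (it is deliberately not registered for user mode). Assuming the usual virt machine setup, and with the property name derived from RISCV_CPU_TYPE_NAME("max32"), selecting it should look something like:

qemu-system-riscv64 -machine virt -cpu max32 -nographic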
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 1619c3acb6..284b112821 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -46,8 +46,13 @@ typedef struct CPUArchState CPURISCVState; /* * RISC-V-specific extra insn start words: * 1: Original instruction opcode + * 2: more information about the instruction */ -#define TARGET_INSN_START_EXTRA_WORDS 1 +#define TARGET_INSN_START_EXTRA_WORDS 2 +/* + * b0: Whether an instruction always raises a store/AMO fault or not. + */ +#define RISCV_UW2_ALWAYS_STORE_AMO 1 #define RV(x) ((target_ulong)1 << (x - 'A')) @@ -230,12 +235,24 @@ struct CPUArchState { target_ulong jvt; + /* elp state for zicfilp extension */ + bool elp; + /* shadow stack register for zicfiss extension */ + target_ulong ssp; + /* env placeholder for extra word 2 during unwind */ + target_ulong excp_uw2; + /* sw check code for sw check exception */ + target_ulong sw_check_code; #ifdef CONFIG_USER_ONLY uint32_t elf_flags; #endif -#ifndef CONFIG_USER_ONLY target_ulong priv; + /* CSRs for execution environment configuration */ + uint64_t menvcfg; + target_ulong senvcfg; + +#ifndef CONFIG_USER_ONLY /* This contains QEMU specific information about the virt state. */ bool virt_enabled; target_ulong geilen; @@ -445,12 +462,9 @@ struct CPUArchState { target_ulong upmmask; target_ulong upmbase; - /* CSRs for execution environment configuration */ - uint64_t menvcfg; uint64_t mstateen[SMSTATEEN_MAX_COUNT]; uint64_t hstateen[SMSTATEEN_MAX_COUNT]; uint64_t sstateen[SMSTATEEN_MAX_COUNT]; - target_ulong senvcfg; uint64_t henvcfg; #endif target_ulong cur_pmmask; @@ -544,6 +558,8 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen); bool riscv_cpu_vector_enabled(CPURISCVState *env); void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable); int riscv_env_mmu_index(CPURISCVState *env, bool ifetch); +bool cpu_get_fcfien(CPURISCVState *env); +bool cpu_get_bcfien(CPURISCVState *env); G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); @@ -616,6 +632,11 @@ FIELD(TB_FLAGS, ITRIGGER, 22, 1) FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1) FIELD(TB_FLAGS, PRIV, 24, 2) FIELD(TB_FLAGS, AXL, 26, 2) +/* zicfilp needs a TB flag to track indirect branches */ +FIELD(TB_FLAGS, FCFI_ENABLED, 28, 1) +FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 29, 1) +/* zicfiss needs a TB flag so that the correct TB is located based on tb flags */ +FIELD(TB_FLAGS, BCFI_ENABLED, 30, 1) #ifdef TARGET_RISCV32 #define riscv_cpu_mxl(env) ((void)(env), MXL_RV32) @@ -709,8 +730,11 @@ static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env) #ifdef CONFIG_USER_ONLY return env->misa_mxl; #else - return get_field(env->mstatus, MSTATUS64_SXL); + if (env->misa_mxl != MXL_RV32) { + return get_field(env->mstatus, MSTATUS64_SXL); + } #endif + return MXL_RV32; } #endif
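The cpu_bits.h hunk below adds the software-check exception's tval encodings. For orientation, a minimal sketch of how the translator raises this trap — it mirrors the exact pattern used later in trans_lpad() and trans_sspopchk(); the helper name here is hypothetical:

/* Record the tval code for the pending software-check trap, then raise
 * RISCV_EXCP_SW_CHECK; sw_check_code is consumed when the trap is
 * delivered in riscv_cpu_do_interrupt(). */
static void gen_sw_check_excp(target_ulong tval_code)
{
    tcg_gen_st_tl(tcg_constant_tl(tval_code), tcg_env,
                  offsetof(CPURISCVState, sw_check_code));
    gen_helper_raise_exception(tcg_env,
                               tcg_constant_i32(RISCV_EXCP_SW_CHECK));
}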
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index 7e3f629356..385a2c67c2 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -34,6 +34,9 @@ /* Control and Status Registers */ +/* zicfiss user ssp csr */ +#define CSR_SSP 0x011 + /* User Trap Setup */ #define CSR_USTATUS 0x000 #define CSR_UIE 0x004 @@ -552,6 +555,8 @@ #define MSTATUS_TVM 0x00100000 /* since: priv-1.10 */ #define MSTATUS_TW 0x00200000 /* since: priv-1.10 */ #define MSTATUS_TSR 0x00400000 /* since: priv-1.10 */ +#define MSTATUS_SPELP 0x00800000 /* zicfilp */ +#define MSTATUS_MPELP 0x020000000000 /* zicfilp */ #define MSTATUS_GVA 0x4000000000ULL #define MSTATUS_MPV 0x8000000000ULL @@ -582,6 +587,7 @@ typedef enum { #define SSTATUS_XS 0x00018000 #define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */ #define SSTATUS_MXR 0x00080000 +#define SSTATUS_SPELP MSTATUS_SPELP /* zicfilp */ #define SSTATUS64_UXL 0x0000000300000000ULL @@ -689,6 +695,11 @@ typedef enum RISCVException { RISCV_EXCP_SEMIHOST = 0x3f, } RISCVException; +/* zicfilp: an lp violation results in a sw check exception with tval = 2 */ +#define RISCV_EXCP_SW_CHECK_FCFI_TVAL 2 +/* zicfiss: an ss violation results in a sw check exception with tval = 3 */ +#define RISCV_EXCP_SW_CHECK_BCFI_TVAL 3 + #define RISCV_EXCP_INT_FLAG 0x80000000 #define RISCV_EXCP_INT_MASK 0x7fffffff @@ -754,6 +765,8 @@ typedef enum RISCVException { /* Execution environment configuration bits */ #define MENVCFG_FIOM BIT(0) +#define MENVCFG_LPE BIT(2) /* zicfilp */ +#define MENVCFG_SSE BIT(3) /* zicfiss */ #define MENVCFG_CBIE (3UL << 4) #define MENVCFG_CBCFE BIT(6) #define MENVCFG_CBZE BIT(7) @@ -767,11 +780,15 @@ typedef enum RISCVException { #define MENVCFGH_STCE BIT(31) #define SENVCFG_FIOM MENVCFG_FIOM +#define SENVCFG_LPE MENVCFG_LPE +#define SENVCFG_SSE MENVCFG_SSE #define SENVCFG_CBIE MENVCFG_CBIE #define SENVCFG_CBCFE MENVCFG_CBCFE #define SENVCFG_CBZE MENVCFG_CBZE #define HENVCFG_FIOM MENVCFG_FIOM +#define HENVCFG_LPE MENVCFG_LPE +#define HENVCFG_SSE MENVCFG_SSE #define HENVCFG_CBIE MENVCFG_CBIE #define HENVCFG_CBCFE MENVCFG_CBCFE #define HENVCFG_CBZE MENVCFG_CBZE diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h index 355afedfd3..59d6fc445d 100644 --- a/target/riscv/cpu_cfg.h +++ b/target/riscv/cpu_cfg.h @@ -67,6 +67,8 @@ struct RISCVCPUConfig { bool ext_zicbom; bool ext_zicbop; bool ext_zicboz; + bool ext_zicfilp; + bool ext_zicfiss; bool ext_zicond; bool ext_zihintntl; bool ext_zihintpause; diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index a935377b4a..0a3ead69ea 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -33,6 +33,7 @@ #include "cpu_bits.h" #include "debug.h" #include "tcg/oversized-guest.h" +#include "pmp.h" int riscv_env_mmu_index(CPURISCVState *env, bool ifetch) { @@ -63,6 +64,62 @@ #endif } +bool cpu_get_fcfien(CPURISCVState *env) +{ + /* no cfi extension, return false */ + if (!env_archcpu(env)->cfg.ext_zicfilp) { + return false; + } + + switch (env->priv) { + case PRV_U: + if (riscv_has_ext(env, RVS)) { + return env->senvcfg & SENVCFG_LPE; + } + return env->menvcfg & MENVCFG_LPE; +#ifndef CONFIG_USER_ONLY + case PRV_S: + if (env->virt_enabled) { + return env->henvcfg & HENVCFG_LPE; + } + return env->menvcfg & MENVCFG_LPE; + case PRV_M: + return env->mseccfg & MSECCFG_MLPE; +#endif + default: + g_assert_not_reached(); + } +} + +bool cpu_get_bcfien(CPURISCVState *env) +{ + /* no cfi extension, return false */ + if (!env_archcpu(env)->cfg.ext_zicfiss) { + return false; + } + + switch (env->priv) { + case PRV_U: + /* + * If S is not implemented then the shadow stack for U can't be turned + * on. That is checked in `riscv_cpu_validate_set_extensions`, so there + * is no need to check or assert it here. + */ + return env->senvcfg & SENVCFG_SSE; +#ifndef CONFIG_USER_ONLY + case PRV_S: + if (env->virt_enabled) { + return env->henvcfg & HENVCFG_SSE; + } + return env->menvcfg & MENVCFG_SSE; + case PRV_M: /* M-mode shadow stack is always off */ + return false; +#endif + default: + g_assert_not_reached(); + } +} + void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, uint64_t *cs_base, uint32_t *pflags) { @@ -104,6 +161,20 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); } + if (cpu_get_fcfien(env)) { + /* + * For Forward CFI, only the expectation of an lpad at + * the start of the block is tracked via env->elp. env->elp + * is turned on during jalr translation.
+ */ + flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp); + flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1); + } + + if (cpu_get_bcfien(env)) { + flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1); + } + #ifdef CONFIG_USER_ONLY fs = EXT_STATUS_DIRTY; vs = EXT_STATUS_DIRTY; @@ -546,6 +617,15 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) } bool current_virt = env->virt_enabled; + /* + * If the zicfilp extension is available and henvcfg.LPE = 1, + * then apply the SPELP mask on mstatus + */ + if (env_archcpu(env)->cfg.ext_zicfilp && + get_field(env->henvcfg, HENVCFG_LPE)) { + mstatus_mask |= SSTATUS_SPELP; + } + g_assert(riscv_has_ext(env, RVH)); if (current_virt) { @@ -804,7 +884,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, target_ulong *fault_pte_addr, int access_type, int mmu_idx, bool first_stage, bool two_stage, - bool is_debug) + bool is_debug, bool is_probe) { /* * NOTE: the env->pc value visible here will not be * correct, but the value visible to the exception handler (riscv_cpu_do_interrupt) is correct */ hwaddr ppn; int napot_bits = 0; target_ulong napot_mask; + bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE); + bool sstack_page = false; /* * Check if we should use the background registers for the two @@ -890,12 +972,14 @@ CPUState *cs = env_cpu(env); int va_bits = PGSHIFT + levels * ptidxbits + widened; + int sxlen = 16 << riscv_cpu_sxl(env); + int sxlen_bytes = sxlen / 8; if (first_stage == true) { target_ulong mask, masked_msbs; - if (TARGET_LONG_BITS > (va_bits - 1)) { - mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1; + if (sxlen > (va_bits - 1)) { + mask = (1L << (sxlen - (va_bits - 1))) - 1; } else { mask = 0; } @@ -948,7 +1032,7 @@ restart: int vbase_ret = get_physical_address(env, &vbase, &vbase_prot, base, NULL, MMU_DATA_LOAD, MMUIdx_U, false, true, - is_debug); + is_debug, false); if (vbase_ret != TRANSLATE_SUCCESS) { if (fault_pte_addr) { @@ -964,7 +1048,7 @@ restart: int pmp_prot; int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr, - sizeof(target_ulong), + sxlen_bytes, MMU_DATA_LOAD, PRV_S); if (pmp_ret != TRANSLATE_SUCCESS) { return TRANSLATE_PMP_FAIL; } @@ -1026,21 +1110,43 @@ restart: return TRANSLATE_FAIL; } + target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X); /* Check for reserved combinations of RWX flags. */ - switch (pte & (PTE_R | PTE_W | PTE_X)) { - case PTE_W: case PTE_W | PTE_X: return TRANSLATE_FAIL; + case PTE_W: + /* if bcfi is enabled, PTE_W is not reserved and this is a shadow stack page */ + if (cpu_get_bcfien(env) && first_stage) { + sstack_page = true; + /* + * if ss index, read and write are allowed; else, if not a probe, + * only read is allowed + */ + rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 : PTE_R); + break; + } + return TRANSLATE_FAIL; + case PTE_R: + /* + * no matter what the `access_type` is, shadow stack accesses to + * read-only memory are always store page faults. During unwind, + * loads will be promoted to store faults.
+ */ + if (is_sstack_idx) { + return TRANSLATE_FAIL; + } + break; + } int prot = 0; - if (pte & PTE_R) { + if (rwx & PTE_R) { prot |= PAGE_READ; } - if (pte & PTE_W) { + if (rwx & PTE_W) { prot |= PAGE_WRITE; } - if (pte & PTE_X) { + if (rwx & PTE_X) { bool mxr = false; /* @@ -1084,8 +1190,11 @@ } if (!((prot >> access_type) & 1)) { - /* Access check failed */ - return TRANSLATE_FAIL; + /* + * Access check failed; access check failures for the shadow stack + * are access faults. + */ + return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL; } target_ulong updated_pte = pte; @@ -1116,7 +1225,7 @@ * it is no longer valid and we must re-walk the page table. */ MemoryRegion *mr; - hwaddr l = sizeof(target_ulong), addr1; + hwaddr l = sxlen_bytes, addr1; mr = address_space_translate(cs->as, pte_addr, &addr1, &l, false, MEMTXATTRS_UNSPECIFIED); if (memory_region_is_ram(mr)) { @@ -1128,7 +1237,12 @@ */ *pte_pa = pte = updated_pte; #else - target_ulong old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte); + target_ulong old_pte; + if (riscv_cpu_sxl(env) == MXL_RV32) { + old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte); + } else { + old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte); + } if (old_pte != pte) { goto restart; } @@ -1223,13 +1337,13 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) int mmu_idx = riscv_env_mmu_index(&cpu->env, false); if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx, - true, env->virt_enabled, true)) { + true, env->virt_enabled, true, false)) { return -1; } if (env->virt_enabled) { if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL, - 0, MMUIdx_U, false, true, true)) { + 0, MMUIdx_U, false, true, true, false)) { return -1; } } @@ -1272,9 +1386,17 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, break; case MMU_DATA_LOAD: cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS; + /* shadow stack misaligned accesses are access faults */ + if (mmu_idx & MMU_IDX_SS_WRITE) { + cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT; + } break; case MMU_DATA_STORE: cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS; + /* shadow stack misaligned accesses are access faults */ + if (mmu_idx & MMU_IDX_SS_WRITE) { + cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT; + } break; default: g_assert_not_reached(); } @@ -1335,7 +1457,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, /* Two stage lookup */ ret = get_physical_address(env, &pa, &prot, address, &env->guest_phys_fault_addr, access_type, - mmu_idx, true, true, false); + mmu_idx, true, true, false, probe); /* * A G-stage exception may be triggered during two-stage lookup.
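Stepping back from the page-walker changes above: the Zicfiss leaf-PTE rule they implement can be restated as a stand-alone predicate (a sketch for illustration; no such helper exists in the tree):

/* A leaf PTE with W=1, R=0, X=0 marks a shadow stack page when
 * backward-edge CFI is enabled for the first translation stage;
 * otherwise that encoding remains reserved and the walk fails. */
static bool pte_is_shadow_stack(target_ulong pte, bool bcfi, bool first_stage)
{
    target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
    return bcfi && first_stage && (rwx == PTE_W);
}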
@@ -1358,7 +1480,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, ret = get_physical_address(env, &pa, &prot2, im_address, NULL, access_type, MMUIdx_U, false, true, - false); + false, probe); qemu_log_mask(CPU_LOG_MMU, "%s 2nd-stage address=%" VADDR_PRIx @@ -1395,7 +1517,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } else { /* Single stage lookup */ ret = get_physical_address(env, &pa, &prot, address, NULL, - access_type, mmu_idx, true, false, false); + access_type, mmu_idx, true, false, false, + probe); qemu_log_mask(CPU_LOG_MMU, "%s address=%" VADDR_PRIx " ret %d physical " @@ -1641,6 +1764,22 @@ static target_ulong riscv_transformed_insn(CPURISCVState *env, return xinsn; } +static target_ulong promote_load_fault(target_ulong orig_cause) +{ + switch (orig_cause) { + case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT: + return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT; + + case RISCV_EXCP_LOAD_ACCESS_FAULT: + return RISCV_EXCP_STORE_AMO_ACCESS_FAULT; + + case RISCV_EXCP_LOAD_PAGE_FAULT: + return RISCV_EXCP_STORE_PAGE_FAULT; + } + + /* if no promotion, return original cause */ + return orig_cause; +} /* * Handle Traps * @@ -1653,6 +1792,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) CPURISCVState *env = &cpu->env; bool virt = env->virt_enabled; bool write_gva = false; + bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO); uint64_t s; /* @@ -1670,6 +1810,8 @@ void riscv_cpu_do_interrupt(CPUState *cs) target_ulong tinst = 0; target_ulong htval = 0; target_ulong mtval2 = 0; + int sxlen = 0; + int mxlen = 0; if (!async) { /* set tval to badaddr for traps with address information */ @@ -1688,6 +1830,9 @@ void riscv_cpu_do_interrupt(CPUState *cs) case RISCV_EXCP_STORE_AMO_ACCESS_FAULT: case RISCV_EXCP_LOAD_PAGE_FAULT: case RISCV_EXCP_STORE_PAGE_FAULT: + if (always_storeamo) { + cause = promote_load_fault(cause); + } write_gva = env->two_stage_lookup; tval = env->badaddr; if (env->two_stage_indirect_lookup) { @@ -1729,6 +1874,9 @@ void riscv_cpu_do_interrupt(CPUState *cs) cs->watchpoint_hit = NULL; } break; + case RISCV_EXCP_SW_CHECK: + tval = env->sw_check_code; + break; default: break; } @@ -1760,6 +1908,11 @@ void riscv_cpu_do_interrupt(CPUState *cs) if (env->priv <= PRV_S && cause < 64 && (((deleg >> cause) & 1) || s_injected || vs_injected)) { /* handle the trap in S-mode */ + /* save elp status */ + if (cpu_get_fcfien(env)) { + env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp); + } + if (riscv_has_ext(env, RVH)) { uint64_t hdeleg = async ? 
env->hideleg : env->hedeleg; @@ -1798,7 +1951,8 @@ s = set_field(s, MSTATUS_SPP, env->priv); s = set_field(s, MSTATUS_SIE, 0); env->mstatus = s; - env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1)); + sxlen = 16 << riscv_cpu_sxl(env); + env->scause = cause | ((target_ulong)async << (sxlen - 1)); env->sepc = env->pc; env->stval = tval; env->htval = htval; @@ -1808,6 +1962,11 @@ riscv_cpu_set_mode(env, PRV_S, virt); } else { /* handle the trap in M-mode */ + /* save elp status */ + if (cpu_get_fcfien(env)) { + env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp); + } + if (riscv_has_ext(env, RVH)) { if (env->virt_enabled) { riscv_cpu_swap_hypervisor_regs(env); } @@ -1829,7 +1988,8 @@ s = set_field(s, MSTATUS_MPP, env->priv); s = set_field(s, MSTATUS_MIE, 0); env->mstatus = s; - env->mcause = cause | ~(((target_ulong)-1) >> async); + mxlen = 16 << riscv_cpu_mxl(env); + env->mcause = cause | ((target_ulong)async << (mxlen - 1)); env->mepc = env->pc; env->mtval = tval; env->mtval2 = mtval2; @@ -1840,6 +2000,13 @@ } /* + * Interrupt/exception/trap delivery is an asynchronous event and as per + * the zicfilp spec the CPU should clear the ELP state. No harm in clearing + * unconditionally. + */ + env->elp = false; + + /* * NOTE: it is not necessary to yield load reservations here. It is only * necessary for an SC from "another hart" to cause a load reservation * to be yielded. Refer to the memory consistency model section of the diff --git a/target/riscv/cpu_user.h b/target/riscv/cpu_user.h index 02afad608b..e6927ff847 100644 --- a/target/riscv/cpu_user.h +++ b/target/riscv/cpu_user.h @@ -15,5 +15,6 @@ #define xA6 16 #define xA7 17 /* syscall number for RVI ABI */ #define xT0 5 /* syscall number for RVE ABI */ +#define xT2 7 #endif diff --git a/target/riscv/csr.c b/target/riscv/csr.c index ea3560342c..9846770820 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -184,6 +184,25 @@ static RISCVException zcmt(CPURISCVState *env, int csrno) return RISCV_EXCP_NONE; } +static RISCVException cfi_ss(CPURISCVState *env, int csrno) +{ + if (!env_archcpu(env)->cfg.ext_zicfiss) { + return RISCV_EXCP_ILLEGAL_INST; + } + + /* if bcfi is not active for the current env, access to the csr is illegal */ + if (!cpu_get_bcfien(env)) { +#if !defined(CONFIG_USER_ONLY) + if (env->debugger) { + return RISCV_EXCP_NONE; + } +#endif + return RISCV_EXCP_ILLEGAL_INST; + } + + return RISCV_EXCP_NONE; +} + #if !defined(CONFIG_USER_ONLY) static RISCVException mctr(CPURISCVState *env, int csrno) { @@ -622,6 +641,19 @@ static RISCVException seed(CPURISCVState *env, int csrno) #endif } +/* zicfiss CSR_SSP read and write */ +static int read_ssp(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->ssp; + return RISCV_EXCP_NONE; +} + +static int write_ssp(CPURISCVState *env, int csrno, target_ulong val) +{ + env->ssp = val; + return RISCV_EXCP_NONE; +} + /* User Floating-Point CSRs */ static RISCVException read_fflags(CPURISCVState *env, int csrno, target_ulong *val) @@ -734,7 +766,7 @@ static RISCVException write_vxrm(CPURISCVState *env, int csrno, static RISCVException read_vxsat(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env->vxsat; + *val = env->vxsat & BIT(0); return RISCV_EXCP_NONE; } @@ -744,7 +776,7 @@ static RISCVException write_vxsat(CPURISCVState *env, int csrno, target_ulong val) { #if !defined(CONFIG_USER_ONLY) env->mstatus |=
MSTATUS_VS; #endif - env->vxsat = val; + env->vxsat = val & BIT(0); return RISCV_EXCP_NONE; } @@ -1377,6 +1409,7 @@ static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS | (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \ (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \ (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \ + (1ULL << (RISCV_EXCP_SW_CHECK)) | \ (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \ (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \ (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \ @@ -1598,6 +1631,11 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, } } + /* If cfi lp extension is available, then apply cfi lp mask */ + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= (MSTATUS_MPELP | MSTATUS_SPELP); + } + mstatus = (mstatus & ~mask) | (val & mask); env->mstatus = mstatus; @@ -2344,6 +2382,14 @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno, mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | (cfg->ext_sstc ? MENVCFG_STCE : 0) | (cfg->ext_svadu ? MENVCFG_ADUE : 0); + + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= MENVCFG_LPE; + } + + if (env_archcpu(env)->cfg.ext_zicfiss) { + mask |= MENVCFG_SSE; + } } env->menvcfg = (env->menvcfg & ~mask) | (val & mask); @@ -2396,6 +2442,17 @@ static RISCVException write_senvcfg(CPURISCVState *env, int csrno, return ret; } + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= SENVCFG_LPE; + } + + /* Higher mode SSE must be ON for next-less mode SSE to be ON */ + if (env_archcpu(env)->cfg.ext_zicfiss && + get_field(env->menvcfg, MENVCFG_SSE) && + (env->virt_enabled ? get_field(env->henvcfg, HENVCFG_SSE) : true)) { + mask |= SENVCFG_SSE; + } + env->senvcfg = (env->senvcfg & ~mask) | (val & mask); return RISCV_EXCP_NONE; } @@ -2433,6 +2490,16 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno, if (riscv_cpu_mxl(env) == MXL_RV64) { mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE); + + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= HENVCFG_LPE; + } + + /* H can light up SSE for VS only if HS had it from menvcfg */ + if (env_archcpu(env)->cfg.ext_zicfiss && + get_field(env->menvcfg, MENVCFG_SSE)) { + mask |= HENVCFG_SSE; + } } env->henvcfg = (env->henvcfg & ~mask) | (val & mask); @@ -2897,6 +2964,10 @@ static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno, mask |= SSTATUS64_UXL; } + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= SSTATUS_SPELP; + } + *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus)); return RISCV_EXCP_NONE; } @@ -2908,6 +2979,11 @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno, if (env->xl != MXL_RV32 || env->debugger) { mask |= SSTATUS64_UXL; } + + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= SSTATUS_SPELP; + } + /* TODO: Use SXL not MXL. 
*/ *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask); return RISCV_EXCP_NONE; @@ -2923,6 +2999,11 @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno, mask |= SSTATUS64_UXL; } } + + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= SSTATUS_SPELP; + } + target_ulong newval = (env->mstatus & ~mask) | (val & mask); return write_mstatus(env, CSR_MSTATUS, newval); } @@ -4934,6 +5015,9 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { /* Zcmt Extension */ [CSR_JVT] = {"jvt", zcmt, read_jvt, write_jvt}, + /* zicfiss Extension, shadow stack register */ + [CSR_SSP] = { "ssp", cfi_ss, read_ssp, write_ssp }, + #if !defined(CONFIG_USER_ONLY) /* Machine Timers and Counters */ [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter, diff --git a/target/riscv/insn16.decode b/target/riscv/insn16.decode index 3953bcf82d..bf893d1c2e 100644 --- a/target/riscv/insn16.decode +++ b/target/riscv/insn16.decode @@ -140,6 +140,10 @@ sw 110 ... ... .. ... 00 @cs_w addi 000 . ..... ..... 01 @ci addi 010 . ..... ..... 01 @c_li { + # c.sspush x1 carving out of zcmops + sspush 011 0 00001 00000 01 &r2_s rs2=1 rs1=0 + # c.sspopchk x5 carving out of zcmops + sspopchk 011 0 00101 00000 01 &r2 rs1=5 rd=0 c_mop_n 011 0 0 n:3 1 00000 01 illegal 011 0 ----- 00000 01 # c.addi16sp and c.lui, RES nzimm=0 addi 011 . 00010 ..... 01 @c_addi16sp diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index c45b8fa1d8..e9139ec1b9 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -123,7 +123,10 @@ sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm # *** RV32I Base Instruction Set *** lui .................... ..... 0110111 @u -auipc .................... ..... 0010111 @u +{ + lpad label:20 00000 0010111 + auipc .................... ..... 0010111 @u +} jal .................... ..... 1101111 @j jalr ............ ..... 000 ..... 1100111 @i beq ....... ..... ..... 000 ..... 1100011 @b @@ -243,6 +246,7 @@ remud 0000001 ..... ..... 111 ..... 1111011 @r lr_w 00010 . . 00000 ..... 010 ..... 0101111 @atom_ld sc_w 00011 . . ..... ..... 010 ..... 0101111 @atom_st amoswap_w 00001 . . ..... ..... 010 ..... 0101111 @atom_st +ssamoswap_w 01001 . . ..... ..... 010 ..... 0101111 @atom_st amoadd_w 00000 . . ..... ..... 010 ..... 0101111 @atom_st amoxor_w 00100 . . ..... ..... 010 ..... 0101111 @atom_st amoand_w 01100 . . ..... ..... 010 ..... 0101111 @atom_st @@ -256,6 +260,7 @@ amomaxu_w 11100 . . ..... ..... 010 ..... 0101111 @atom_st lr_d 00010 . . 00000 ..... 011 ..... 0101111 @atom_ld sc_d 00011 . . ..... ..... 011 ..... 0101111 @atom_st amoswap_d 00001 . . ..... ..... 011 ..... 0101111 @atom_st +ssamoswap_d 01001 . . ..... ..... 011 ..... 0101111 @atom_st amoadd_d 00000 . . ..... ..... 011 ..... 0101111 @atom_st amoxor_d 00100 . . ..... ..... 011 ..... 0101111 @atom_st amoand_d 01100 . . ..... ..... 011 ..... 0101111 @atom_st @@ -1019,8 +1024,23 @@ amocas_d 00101 . . ..... ..... 011 ..... 0101111 @atom_st amocas_q 00101 . . ..... ..... 100 ..... 0101111 @atom_st # *** Zimop may-be-operation extension *** -mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 1110011 @mop5 -mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3 +{ + # zicfiss instructions carved out of mop.r + [ + ssrdp 1100110 11100 00000 100 rd:5 1110011 + sspopchk 1100110 11100 00001 100 00000 1110011 &r2 rs1=1 rd=0 + sspopchk 1100110 11100 00101 100 00000 1110011 &r2 rs1=5 rd=0 + ] + mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 
1110011 @mop5 +} +{ + # zicfiss instruction carved out of mop.rr + [ + sspush 1100111 00001 00000 100 00000 1110011 &r2_s rs2=1 rs1=0 + sspush 1100111 00101 00000 100 00000 1110011 &r2_s rs2=5 rs1=0 + ] + mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3 +} # *** Zabha Standard Extension *** amoswap_b 00001 . . ..... ..... 000 ..... 0101111 @atom_st diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc index bc5263a4e0..ecd3b8b2c9 100644 --- a/target/riscv/insn_trans/trans_privileged.c.inc +++ b/target/riscv/insn_trans/trans_privileged.c.inc @@ -78,7 +78,7 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a) { #ifndef CONFIG_USER_ONLY if (has_ext(ctx, RVS)) { - decode_save_opc(ctx); + decode_save_opc(ctx, 0); translator_io_start(&ctx->base); gen_helper_sret(cpu_pc, tcg_env); exit_tb(ctx); /* no chaining */ @@ -95,7 +95,7 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a) static bool trans_mret(DisasContext *ctx, arg_mret *a) { #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); translator_io_start(&ctx->base); gen_helper_mret(cpu_pc, tcg_env); exit_tb(ctx); /* no chaining */ @@ -109,7 +109,7 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a) static bool trans_wfi(DisasContext *ctx, arg_wfi *a) { #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_update_pc(ctx, ctx->cur_insn_len); gen_helper_wfi(tcg_env); return true; @@ -121,7 +121,7 @@ static bool trans_wfi(DisasContext *ctx, arg_wfi *a) static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a) { #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_tlb_flush(tcg_env); return true; #endif diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc index 39bbf60f3c..9cf3ae8019 100644 --- a/target/riscv/insn_trans/trans_rva.c.inc +++ b/target/riscv/insn_trans/trans_rva.c.inc @@ -34,7 +34,7 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) { TCGv src1; - decode_save_opc(ctx); + decode_save_opc(ctx, 0); src1 = get_address(ctx, a->rs1, 0); if (a->rl) { tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); @@ -61,7 +61,7 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) TCGLabel *l1 = gen_new_label(); TCGLabel *l2 = gen_new_label(); - decode_save_opc(ctx); + decode_save_opc(ctx, 0); src1 = get_address(ctx, a->rs1, 0); tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1); diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc index 8a46124f98..30883ea37c 100644 --- a/target/riscv/insn_trans/trans_rvd.c.inc +++ b/target/riscv/insn_trans/trans_rvd.c.inc @@ -61,7 +61,7 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a) memop |= MO_ATOM_IFALIGN; } - decode_save_opc(ctx); + decode_save_opc(ctx, 0); addr = get_address(ctx, a->rs1, a->imm); tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop); @@ -85,7 +85,7 @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a) memop |= MO_ATOM_IFALIGN; } - decode_save_opc(ctx); + decode_save_opc(ctx, 0); addr = get_address(ctx, a->rs1, a->imm); tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop); return true; }
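All the decode_save_opc() call sites in these translation files gain a second argument. A plausible shape of the widened helper (an assumption — its actual definition lives in translate.c and is not part of this diff) is that it stores the flags word in the new third insn_start slot so riscv_restore_state_to_opc() can recover it as data[2] and stash it in env->excp_uw2:

static void decode_save_opc(DisasContext *ctx, target_ulong excp_uw2)
{
    assert(!ctx->insn_start_updated);
    ctx->insn_start_updated = true;
    tcg_set_insn_start_param(ctx->base.insn_start, 1, ctx->opcode);
    tcg_set_insn_start_param(ctx->base.insn_start, 2, excp_uw2);
}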
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc index 0222a728df..ed73afe089 100644 --- a/target/riscv/insn_trans/trans_rvf.c.inc +++ b/target/riscv/insn_trans/trans_rvf.c.inc @@ -52,7 +52,7 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a) memop |= MO_ATOM_WITHIN16; } - decode_save_opc(ctx); + decode_save_opc(ctx, 0); addr = get_address(ctx, a->rs1, a->imm); dest = cpu_fpr[a->rd]; tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop); @@ -74,7 +74,7 @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a) memop |= MO_ATOM_WITHIN16; } - decode_save_opc(ctx); + decode_save_opc(ctx, 0); addr = get_address(ctx, a->rs1, a->imm); tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop); return true; diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc index aa9d41c18c..03c6694430 100644 --- a/target/riscv/insn_trans/trans_rvh.c.inc +++ b/target/riscv/insn_trans/trans_rvh.c.inc @@ -44,7 +44,7 @@ static bool do_hlv(DisasContext *ctx, arg_r2 *a, TCGv dest = dest_gpr(ctx, a->rd); TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE); - decode_save_opc(ctx); + decode_save_opc(ctx, 0); func(dest, tcg_env, addr); gen_set_gpr(ctx, a->rd, dest); return true; @@ -56,7 +56,7 @@ static bool do_hsv(DisasContext *ctx, arg_r2_s *a, TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE); TCGv data = get_gpr(ctx, a->rs2, EXT_NONE); - decode_save_opc(ctx); + decode_save_opc(ctx, 0); func(tcg_env, addr, data); return true; } @@ -147,7 +147,7 @@ static bool trans_hfence_gvma(DisasContext *ctx, arg_sfence_vma *a) { REQUIRE_EXT(ctx, RVH); #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_hyp_gvma_tlb_flush(tcg_env); return true; #endif @@ -158,7 +158,7 @@ static bool trans_hfence_vvma(DisasContext *ctx, arg_sfence_vma *a) { REQUIRE_EXT(ctx, RVH); #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_hyp_tlb_flush(tcg_env); return true; #endif
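The trans_rvi.c.inc hunk below implements the Zicfilp landing pad. As a plain-C model of the check it encodes (a sketch only; the real code emits TCG ops and raises RISCV_EXCP_SW_CHECK rather than returning a bool):

/* Execution may continue past an expected lpad only if the pc is
 * 4-byte aligned and a non-zero 20-bit label matches bits [31:12]
 * of x7 (t2); a label of 0 skips the label comparison. */
static bool lpad_check(uint64_t pc, uint32_t label, uint64_t x7)
{
    if (pc & 0x3) {
        return false;       /* misaligned lpad: software-check trap */
    }
    return label == 0 || label == ((x7 >> 12) & 0xfffff);
}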
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc index fab5c06719..96c218a9d7 100644 --- a/target/riscv/insn_trans/trans_rvi.c.inc +++ b/target/riscv/insn_trans/trans_rvi.c.inc @@ -36,6 +36,49 @@ static bool trans_lui(DisasContext *ctx, arg_lui *a) return true; } +static bool trans_lpad(DisasContext *ctx, arg_lpad *a) +{ + /* + * fcfi_lp_expected can be set only if fcfi was enabled. + * translate further only if fcfi_lp_expected is set. + * lpad comes from NOP space anyway, so return true if + * fcfi_lp_expected is false. + */ + if (!ctx->fcfi_lp_expected) { + return true; + } + + ctx->fcfi_lp_expected = false; + if ((ctx->base.pc_next) & 0x3) { + /* + * misaligned; according to the spec we should raise a sw check exception + */ + tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL), + tcg_env, offsetof(CPURISCVState, sw_check_code)); + gen_helper_raise_exception(tcg_env, + tcg_constant_i32(RISCV_EXCP_SW_CHECK)); + return true; + } + + /* per spec, label check performed only when embedded label non-zero */ + if (a->label != 0) { + TCGLabel *skip = gen_new_label(); + TCGv tmp = tcg_temp_new(); + tcg_gen_extract_tl(tmp, get_gpr(ctx, xT2, EXT_NONE), 12, 20); + tcg_gen_brcondi_tl(TCG_COND_EQ, tmp, a->label, skip); + tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL), + tcg_env, offsetof(CPURISCVState, sw_check_code)); + gen_helper_raise_exception(tcg_env, + tcg_constant_i32(RISCV_EXCP_SW_CHECK)); + gen_set_label(skip); + } + + tcg_gen_st8_tl(tcg_constant_tl(0), tcg_env, + offsetof(CPURISCVState, elp)); + + return true; +} + static bool trans_auipc(DisasContext *ctx, arg_auipc *a) { TCGv target_pc = dest_gpr(ctx, a->rd); @@ -75,6 +118,18 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a) gen_set_gpr(ctx, a->rd, succ_pc); tcg_gen_mov_tl(cpu_pc, target_pc); + if (ctx->fcfi_enabled) { + /* + * returns from functions (i.e. rs1 == xRA || rs1 == xT0) are not + * tracked. zicfilp introduces sw guarded branches as well; sw guarded + * branches are not tracked either. rs1 == xT2 is a sw guarded branch. + */ + if (a->rs1 != xRA && a->rs1 != xT0 && a->rs1 != xT2) { + tcg_gen_st8_tl(tcg_constant_tl(1), + tcg_env, offsetof(CPURISCVState, elp)); + } + } + lookup_and_goto_ptr(ctx); if (misaligned) { @@ -271,7 +326,7 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) if (ctx->cfg_ptr->ext_zama16b) { memop |= MO_ATOM_WITHIN16; } - decode_save_opc(ctx); + decode_save_opc(ctx, 0); if (get_xl(ctx) == MXL_RV128) { out = gen_load_i128(ctx, a, memop); } else { @@ -372,7 +427,7 @@ static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) if (ctx->cfg_ptr->ext_zama16b) { memop |= MO_ATOM_WITHIN16; } - decode_save_opc(ctx); + decode_save_opc(ctx, 0); if (get_xl(ctx) == MXL_RV128) { return gen_store_i128(ctx, a, memop); } else { @@ -834,7 +889,7 @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a) static bool do_csr_post(DisasContext *ctx) { /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */ - decode_save_opc(ctx); + decode_save_opc(ctx, 0); /* We may have changed important cpu state -- exit to main loop.
*/ gen_update_pc(ctx, ctx->cur_insn_len); exit_tb(ctx); diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc index ae1f40174a..27bf3f0b68 100644 --- a/target/riscv/insn_trans/trans_rvvk.c.inc +++ b/target/riscv/insn_trans/trans_rvvk.c.inc @@ -249,7 +249,7 @@ GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check) \ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \ /* save opcode for unwinding in case we throw an exception */ \ - decode_save_opc(s); \ + decode_save_opc(s, 0); \ egs = tcg_constant_i32(EGS); \ gen_helper_egs_check(egs, tcg_env); \ } \ @@ -322,7 +322,7 @@ GEN_V_UNMASKED_TRANS(vaesem_vs, vaes_check_vs, ZVKNED_EGS) \ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \ /* save opcode for unwinding in case we throw an exception */ \ - decode_save_opc(s); \ + decode_save_opc(s, 0); \ egs = tcg_constant_i32(EGS); \ gen_helper_egs_check(egs, tcg_env); \ } \ @@ -389,7 +389,7 @@ GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS) \ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \ /* save opcode for unwinding in case we throw an exception */ \ - decode_save_opc(s); \ + decode_save_opc(s, 0); \ egs = tcg_constant_i32(EGS); \ gen_helper_egs_check(egs, tcg_env); \ } \ @@ -440,7 +440,7 @@ static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a) if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { /* save opcode for unwinding in case we throw an exception */ - decode_save_opc(s); + decode_save_opc(s, 0); egs = tcg_constant_i32(ZVKNH_EGS); gen_helper_egs_check(egs, tcg_env); } @@ -471,7 +471,7 @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a) if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { /* save opcode for unwinding in case we throw an exception */ - decode_save_opc(s); + decode_save_opc(s, 0); egs = tcg_constant_i32(ZVKNH_EGS); gen_helper_egs_check(egs, tcg_env); } diff --git a/target/riscv/insn_trans/trans_rvzacas.c.inc b/target/riscv/insn_trans/trans_rvzacas.c.inc index fcced99fc7..15e688a033 100644 --- a/target/riscv/insn_trans/trans_rvzacas.c.inc +++ b/target/riscv/insn_trans/trans_rvzacas.c.inc @@ -76,7 +76,7 @@ static bool gen_cmpxchg64(DisasContext *ctx, arg_atomic *a, MemOp mop) TCGv src1 = get_address(ctx, a->rs1, 0); TCGv_i64 src2 = get_gpr_pair(ctx, a->rs2); - decode_save_opc(ctx); + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); tcg_gen_atomic_cmpxchg_i64(dest, src1, dest, src2, ctx->mem_idx, mop); gen_set_gpr_pair(ctx, a->rd, dest); @@ -121,7 +121,7 @@ static bool trans_amocas_q(DisasContext *ctx, arg_amocas_q *a) tcg_gen_concat_i64_i128(src2, src2l, src2h); tcg_gen_concat_i64_i128(dest, destl, desth); - decode_save_opc(ctx); + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); tcg_gen_atomic_cmpxchg_i128(dest, src1, dest, src2, ctx->mem_idx, (MO_ALIGN | MO_TEUO)); diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc index 1eb458b491..bece48e600 100644 --- a/target/riscv/insn_trans/trans_rvzfh.c.inc +++ b/target/riscv/insn_trans/trans_rvzfh.c.inc @@ -48,7 +48,7 @@ static bool trans_flh(DisasContext *ctx, arg_flh *a) REQUIRE_FPU; REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx); - decode_save_opc(ctx); + decode_save_opc(ctx, 0); t0 = get_gpr(ctx, a->rs1, EXT_NONE); if (a->imm) { TCGv temp = tcg_temp_new(); @@ -71,7 +71,7 @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a) REQUIRE_FPU; REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx); - decode_save_opc(ctx); + decode_save_opc(ctx, 0); t0 = get_gpr(ctx, a->rs1, EXT_NONE); if (a->imm) { TCGv temp = tcg_temp_new(); diff --git 
a/target/riscv/insn_trans/trans_rvzicfiss.c.inc b/target/riscv/insn_trans/trans_rvzicfiss.c.inc new file mode 100644 index 0000000000..e3ebc4977c --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzicfiss.c.inc @@ -0,0 +1,114 @@ +/* + * RISC-V translation routines for the Control-Flow Integrity Extension + * + * Copyright (c) 2024 Rivos Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ +static bool trans_sspopchk(DisasContext *ctx, arg_sspopchk *a) +{ + if (!ctx->bcfi_enabled) { + return false; + } + + TCGv addr = tcg_temp_new(); + TCGLabel *skip = gen_new_label(); + uint32_t tmp = (get_xl(ctx) == MXL_RV64) ? 8 : 4; + TCGv data = tcg_temp_new(); + tcg_gen_ld_tl(addr, tcg_env, offsetof(CPURISCVState, ssp)); + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); + tcg_gen_qemu_ld_tl(data, addr, SS_MMU_INDEX(ctx), + mxl_memop(ctx) | MO_ALIGN); + TCGv rs1 = get_gpr(ctx, a->rs1, EXT_NONE); + tcg_gen_brcond_tl(TCG_COND_EQ, data, rs1, skip); + tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_BCFI_TVAL), + tcg_env, offsetof(CPURISCVState, sw_check_code)); + gen_helper_raise_exception(tcg_env, + tcg_constant_i32(RISCV_EXCP_SW_CHECK)); + gen_set_label(skip); + tcg_gen_addi_tl(addr, addr, tmp); + tcg_gen_st_tl(addr, tcg_env, offsetof(CPURISCVState, ssp)); + + return true; +} + +static bool trans_sspush(DisasContext *ctx, arg_sspush *a) +{ + if (!ctx->bcfi_enabled) { + return false; + } + + TCGv addr = tcg_temp_new(); + int tmp = (get_xl(ctx) == MXL_RV64) ? 
-8 : -4; + TCGv data = get_gpr(ctx, a->rs2, EXT_NONE); + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); + tcg_gen_ld_tl(addr, tcg_env, offsetof(CPURISCVState, ssp)); + tcg_gen_addi_tl(addr, addr, tmp); + tcg_gen_qemu_st_tl(data, addr, SS_MMU_INDEX(ctx), + mxl_memop(ctx) | MO_ALIGN); + tcg_gen_st_tl(addr, tcg_env, offsetof(CPURISCVState, ssp)); + + return true; +} + +static bool trans_ssrdp(DisasContext *ctx, arg_ssrdp *a) +{ + if (!ctx->bcfi_enabled || a->rd == 0) { + return false; + } + + TCGv dest = dest_gpr(ctx, a->rd); + tcg_gen_ld_tl(dest, tcg_env, offsetof(CPURISCVState, ssp)); + gen_set_gpr(ctx, a->rd, dest); + + return true; +} + +static bool trans_ssamoswap_w(DisasContext *ctx, arg_amoswap_w *a) +{ + REQUIRE_A_OR_ZAAMO(ctx); + if (!ctx->bcfi_enabled) { + return false; + } + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); + src1 = get_address(ctx, a->rs1, 0); + + tcg_gen_atomic_xchg_tl(dest, src1, src2, SS_MMU_INDEX(ctx), + (MO_ALIGN | MO_TESL)); + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_ssamoswap_d(DisasContext *ctx, arg_amoswap_w *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); + if (!ctx->bcfi_enabled) { + return false; + } + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); + src1 = get_address(ctx, a->rs1, 0); + + tcg_gen_atomic_xchg_tl(dest, src1, src2, SS_MMU_INDEX(ctx), + (MO_ALIGN | MO_TESQ)); + gen_set_gpr(ctx, a->rd, dest); + return true; +} diff --git a/target/riscv/insn_trans/trans_svinval.c.inc b/target/riscv/insn_trans/trans_svinval.c.inc index 0f692a1088..a06c3b214f 100644 --- a/target/riscv/insn_trans/trans_svinval.c.inc +++ b/target/riscv/insn_trans/trans_svinval.c.inc @@ -28,7 +28,7 @@ static bool trans_sinval_vma(DisasContext *ctx, arg_sinval_vma *a) /* Do the same as sfence.vma currently */ REQUIRE_EXT(ctx, RVS); #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_tlb_flush(tcg_env); return true; #endif @@ -57,7 +57,7 @@ static bool trans_hinval_vvma(DisasContext *ctx, arg_hinval_vvma *a) /* Do the same as hfence.vvma currently */ REQUIRE_EXT(ctx, RVH); #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_hyp_tlb_flush(tcg_env); return true; #endif @@ -70,7 +70,7 @@ static bool trans_hinval_gvma(DisasContext *ctx, arg_hinval_gvma *a) /* Do the same as hfence.gvma currently */ REQUIRE_EXT(ctx, RVH); #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_hyp_gvma_tlb_flush(tcg_env); return true; #endif diff --git a/target/riscv/internals.h b/target/riscv/internals.h index 0ac17bc5ad..ddbdee885b 100644 --- a/target/riscv/internals.h +++ b/target/riscv/internals.h @@ -30,12 +30,15 @@ * - U+2STAGE 0b100 * - S+2STAGE 0b101 * - S+SUM+2STAGE 0b110 + * - Shadow stack+U 0b1000 + * - Shadow stack+S 0b1001 */ #define MMUIdx_U 0 #define MMUIdx_S 1 #define MMUIdx_S_SUM 2 #define MMUIdx_M 3 #define MMU_2STAGE_BIT (1 << 2) +#define MMU_IDX_SS_WRITE (1 << 3) static inline int mmuidx_priv(int mmu_idx) { diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c index 8233a32102..cbda4596da 100644 --- a/target/riscv/kvm/kvm-cpu.c +++ b/target/riscv/kvm/kvm-cpu.c @@ -1676,9 +1676,9 @@ void kvm_arch_accel_class_init(ObjectClass *oc) object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia, riscv_set_kvm_aia); 
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index 8233a32102..cbda4596da 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -1676,9 +1676,9 @@ void kvm_arch_accel_class_init(ObjectClass *oc)
     object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia,
                                   riscv_set_kvm_aia);
     object_class_property_set_description(oc, "riscv-aia",
-                                          "Set KVM AIA mode. Valid values are "
-                                          "emul, hwaccel, and auto. Default "
-                                          "is auto.");
+        "Set KVM AIA mode. Valid values are 'emul', 'hwaccel' and 'auto'. "
+        "Changing KVM AIA modes relies on host support. Defaults to 'auto' "
+        "if the host supports it");
     object_property_set_default_str(object_class_property_find(oc, "riscv-aia"),
                                     "auto");
 }
@@ -1711,18 +1711,20 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
         error_report("KVM AIA: failed to get current KVM AIA mode");
         exit(1);
     }
-    qemu_log("KVM AIA: default mode is %s\n",
-             kvm_aia_mode_str(default_aia_mode));
 
     if (default_aia_mode != aia_mode) {
         ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                                 KVM_DEV_RISCV_AIA_CONFIG_MODE,
                                 &aia_mode, true, NULL);
-        if (ret < 0)
-            warn_report("KVM AIA: failed to set KVM AIA mode");
-        else
-            qemu_log("KVM AIA: set current mode to %s\n",
-                     kvm_aia_mode_str(aia_mode));
+        if (ret < 0) {
+            warn_report("KVM AIA: failed to set KVM AIA mode '%s', using "
+                        "default host mode '%s'",
+                        kvm_aia_mode_str(aia_mode),
+                        kvm_aia_mode_str(default_aia_mode));
+
+            /* failed to change AIA mode, use default */
+            aia_mode = default_aia_mode;
+        }
     }
 
     ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index 492c2c6d9d..99f0af5077 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -350,6 +350,42 @@ static const VMStateDescription vmstate_jvt = {
     }
 };
 
+static bool elp_needed(void *opaque)
+{
+    RISCVCPU *cpu = opaque;
+
+    return cpu->cfg.ext_zicfilp;
+}
+
+static const VMStateDescription vmstate_elp = {
+    .name = "cpu/elp",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = elp_needed,
+    .fields = (const VMStateField[]) {
+        VMSTATE_BOOL(env.elp, RISCVCPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool ssp_needed(void *opaque)
+{
+    RISCVCPU *cpu = opaque;
+
+    return cpu->cfg.ext_zicfiss;
+}
+
+static const VMStateDescription vmstate_ssp = {
+    .name = "cpu/ssp",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = ssp_needed,
+    .fields = (const VMStateField[]) {
+        VMSTATE_UINTTL(env.ssp, RISCVCPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 const VMStateDescription vmstate_riscv_cpu = {
     .name = "cpu",
     .version_id = 10,
@@ -422,6 +458,8 @@ const VMStateDescription vmstate_riscv_cpu = {
         &vmstate_debug,
         &vmstate_smstateen,
         &vmstate_jvt,
+        &vmstate_elp,
+        &vmstate_ssp,
         NULL
     }
 };
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 25a5263573..eddedacf4b 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -309,6 +309,15 @@ target_ulong helper_sret(CPURISCVState *env)
 
     riscv_cpu_set_mode(env, prev_priv, prev_virt);
 
+    /*
+     * If forward CFI is enabled for the new privilege level, restore the
+     * ELP state from SPELP; unconditionally clear SPELP in mstatus.
+     */
+    if (cpu_get_fcfien(env)) {
+        env->elp = get_field(env->mstatus, MSTATUS_SPELP);
+    }
+    env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, 0);
+
     return retpc;
 }
 
@@ -349,6 +358,14 @@ target_ulong helper_mret(CPURISCVState *env)
     }
 
     riscv_cpu_set_mode(env, prev_priv, prev_virt);
 
+    /*
+     * If forward CFI is enabled for the new privilege level, restore the
+     * ELP state from MPELP; unconditionally clear MPELP in mstatus.
+     */
+    if (cpu_get_fcfien(env)) {
+        env->elp = get_field(env->mstatus, MSTATUS_MPELP);
+    }
+    env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, 0);
 
     return retpc;
 }
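The sret/mret hunks above share one pattern: the saved "expected landing
pad" bit is restored into env->elp only when forward CFI is active for the
mode being returned to, but the saved copy is always cleared. A standalone
sketch of that pattern (illustrative only; the bit position and the
fcfi_enabled_for_new_priv() helper are assumptions of this sketch, not
QEMU definitions):

/* Privilege-return ELP handling, modelled in isolation. */
#include <stdbool.h>
#include <stdint.h>

#define MSTATUS_SPELP (1ull << 23)   /* bit position assumed for the sketch */

typedef struct {
    uint64_t mstatus;
    bool elp;   /* "expected landing pad" state of the hart */
} Env;

static bool fcfi_enabled_for_new_priv(Env *env) { (void)env; return true; }

static void sret_elp_update(Env *env)
{
    /* Restore ELP only when forward CFI applies to the target mode... */
    if (fcfi_enabled_for_new_priv(env)) {
        env->elp = (env->mstatus & MSTATUS_SPELP) != 0;
    }
    /* ...but always clear the saved copy, mirroring the patch. */
    env->mstatus &= ~MSTATUS_SPELP;
}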
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 9eea397e72..a1b36664fc 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -326,7 +326,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
              */
             pmp_size = -(addr | TARGET_PAGE_MASK);
         } else {
-            pmp_size = sizeof(target_ulong);
+            pmp_size = 2 << riscv_cpu_mxl(env);
         }
     } else {
         pmp_size = size;
@@ -598,6 +598,11 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
         val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
     }
 
+    /* mseccfg.MLPE (M-mode forward CFI) exists only with zicfilp */
+    if (!env_archcpu(env)->cfg.ext_zicfilp) {
+        val &= ~MSECCFG_MLPE;
+    }
+
     env->mseccfg = val;
 }
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
index f5c10ce85c..e0530a17a3 100644
--- a/target/riscv/pmp.h
+++ b/target/riscv/pmp.h
@@ -44,7 +44,8 @@ typedef enum {
     MSECCFG_MMWP  = 1 << 1,
     MSECCFG_RLB   = 1 << 2,
     MSECCFG_USEED = 1 << 8,
-    MSECCFG_SSEED = 1 << 9
+    MSECCFG_SSEED = 1 << 9,
+    MSECCFG_MLPE  = 1 << 10,
 } mseccfg_field_t;
 
 typedef struct {
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index dea8ab7a43..c62c221696 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -129,6 +129,7 @@ static void riscv_restore_state_to_opc(CPUState *cs,
         env->pc = pc;
     }
     env->bins = data[1];
+    env->excp_uw2 = data[2];
 }
 
 static const TCGCPUOps riscv_tcg_ops = {
@@ -618,11 +619,39 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         cpu->cfg.ext_zihpm = false;
     }
 
+    if (cpu->cfg.ext_zicfiss) {
+        if (!cpu->cfg.ext_zicsr) {
+            error_setg(errp, "zicfiss extension requires zicsr extension");
+            return;
+        }
+        if (!riscv_has_ext(env, RVA)) {
+            error_setg(errp, "zicfiss extension requires A extension");
+            return;
+        }
+        if (!riscv_has_ext(env, RVS)) {
+            error_setg(errp, "zicfiss extension requires S extension");
+            return;
+        }
+        if (!cpu->cfg.ext_zimop) {
+            error_setg(errp, "zicfiss extension requires zimop extension");
+            return;
+        }
+        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
+            error_setg(errp, "zicfiss with zca requires zcmop extension");
+            return;
+        }
+    }
+
     if (!cpu->cfg.ext_zihpm) {
         cpu->cfg.pmu_mask = 0;
         cpu->pmu_avail_ctrs = 0;
     }
 
+    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
+        error_setg(errp, "zicfilp extension requires zicsr extension");
+        return;
+    }
+
     /*
      * Disable isa extensions based on priv spec after we
      * validated and set everything we need.
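The pmp.c hunk above replaces sizeof(target_ulong) because the PMP check for
a size-less access should use the effective access width of the current
MXLEN, not the build-time word size: an RV32 guest running in a 64-bit QEMU
build would otherwise be checked as an 8-byte access. A minimal worked
sketch, assuming the standard MXL encoding (RV32 = 1, RV64 = 2):

/* Effective PMP access width from the MXL field: 2 << mxl bytes. */
#include <assert.h>

enum { MXL_RV32 = 1, MXL_RV64 = 2 };

static int pmp_access_width(int mxl)
{
    return 2 << mxl;   /* 2 << 1 = 4 bytes, 2 << 2 = 8 bytes */
}

int main(void)
{
    assert(pmp_access_width(MXL_RV32) == 4);
    assert(pmp_access_width(MXL_RV64) == 8);
    return 0;
}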
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index acba90f170..bccaf8e89a 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -116,6 +116,11 @@ typedef struct DisasContext {
     bool frm_valid;
     bool insn_start_updated;
     const GPtrArray *decoders;
+    /* zicfilp extension: forward CFI enabled, landing pad expected or not */
+    bool fcfi_enabled;
+    bool fcfi_lp_expected;
+    /* zicfiss extension: whether the shadow stack was enabled during TB gen */
+    bool bcfi_enabled;
 } DisasContext;
 
 static inline bool has_ext(DisasContext *ctx, uint32_t ext)
@@ -139,6 +144,8 @@ static inline bool has_ext(DisasContext *ctx, uint32_t ext)
 #define get_address_xl(ctx)    ((ctx)->address_xl)
 #endif
 
+#define mxl_memop(ctx)  ((get_xl(ctx) + 1) | MO_TE)
+
 /* The word size for this machine mode. */
 static inline int __attribute__((unused)) get_xlen(DisasContext *ctx)
 {
@@ -204,11 +211,12 @@ static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
     tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
 }
 
-static void decode_save_opc(DisasContext *ctx)
+static void decode_save_opc(DisasContext *ctx, target_ulong excp_uw2)
 {
     assert(!ctx->insn_start_updated);
     ctx->insn_start_updated = true;
     tcg_set_insn_start_param(ctx->base.insn_start, 1, ctx->opcode);
+    tcg_set_insn_start_param(ctx->base.insn_start, 2, excp_uw2);
 }
 
 static void gen_pc_plus_diff(TCGv target, DisasContext *ctx,
@@ -694,7 +702,7 @@ static void gen_set_rm(DisasContext *ctx, int rm)
     }
 
     /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
-    decode_save_opc(ctx);
+    decode_save_opc(ctx, 0);
     gen_helper_set_rounding_mode(tcg_env, tcg_constant_i32(rm));
 }
 
@@ -707,7 +715,7 @@ static void gen_set_rm_chkfrm(DisasContext *ctx, int rm)
     ctx->frm_valid = true;
 
     /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
-    decode_save_opc(ctx);
+    decode_save_opc(ctx, 0);
     gen_helper_set_rounding_mode_chkfrm(tcg_env, tcg_constant_i32(rm));
 }
 
@@ -1091,7 +1099,7 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
         mop |= MO_ALIGN;
     }
 
-    decode_save_opc(ctx);
+    decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
     src1 = get_address(ctx, a->rs1, 0);
     func(dest, src1, src2, ctx->mem_idx, mop);
 
@@ -1105,7 +1113,7 @@ static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop)
     TCGv src1 = get_address(ctx, a->rs1, 0);
     TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
 
-    decode_save_opc(ctx);
+    decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
     tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop);
 
     gen_set_gpr(ctx, a->rd, dest);
@@ -1121,6 +1129,8 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
     return translator_ldl(env, &ctx->base, pc);
 }
 
+#define SS_MMU_INDEX(ctx)  (ctx->mem_idx | MMU_IDX_SS_WRITE)
+
 /* Include insn module translation function */
 #include "insn_trans/trans_rvi.c.inc"
 #include "insn_trans/trans_rvm.c.inc"
@@ -1151,6 +1161,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
 #include "decode-insn16.c.inc"
 #include "insn_trans/trans_rvzce.c.inc"
 #include "insn_trans/trans_rvzcmop.c.inc"
+#include "insn_trans/trans_rvzicfiss.c.inc"
 
 /* Include decoders for factored-out extensions */
 #include "decode-XVentanaCondOps.c.inc"
@@ -1238,6 +1249,9 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
     ctx->ztso = cpu->cfg.ext_ztso;
     ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
+    ctx->bcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, BCFI_ENABLED);
+    ctx->fcfi_lp_expected = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_LP_EXPECTED);
+    ctx->fcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_ENABLED);
     ctx->zero = tcg_constant_tl(0);
     ctx->virt_inst_excp = false;
     ctx->decoders = cpu->decoders;
@@ -1256,7 +1270,7 @@ static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
         pc_next &= ~TARGET_PAGE_MASK;
     }
 
-    tcg_gen_insn_start(pc_next, 0);
+    tcg_gen_insn_start(pc_next, 0, 0);
     ctx->insn_start_updated = false;
 }
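The fcfi_lp_expected flag introduced above drives a small state machine that
the following hunk enforces: an indirect jump arms the expectation, the very
next instruction must be lpad, and anything else is a software-check fault.
An illustrative standalone model (not QEMU code; the enum and function names
are invented for the sketch):

/* Landing-pad expectation state machine for forward CFI. */
#include <stdbool.h>

enum insn_kind { INSN_LPAD, INSN_INDIRECT_JUMP, INSN_OTHER };

struct hart_cfi {
    bool lp_expected;
    bool faulted;
};

static void step(struct hart_cfi *s, enum insn_kind insn)
{
    if (s->lp_expected && insn != INSN_LPAD) {
        s->faulted = true;   /* models RISCV_EXCP_SW_CHECK */
        return;
    }
    /* Only an indirect jump re-arms the landing-pad expectation. */
    s->lp_expected = (insn == INSN_INDIRECT_JUMP);
}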
@@ -1270,6 +1284,24 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     decode_opc(env, ctx, opcode16);
     ctx->base.pc_next += ctx->cur_insn_len;
 
+    /*
+     * If 'fcfi_lp_expected' is still true after processing the instruction,
+     * then we did not see an 'lpad' instruction, and must raise an exception.
+     * Insert code to raise the exception at the start of the insn; any other
+     * code the insn may have emitted will be deleted as dead code following
+     * the noreturn exception.
+     */
+    if (ctx->fcfi_lp_expected) {
+        /* Emit after insn_start, i.e. before the op following insn_start. */
+        tcg_ctx->emit_before_op = QTAILQ_NEXT(ctx->base.insn_start, link);
+        tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
+                      tcg_env, offsetof(CPURISCVState, sw_check_code));
+        gen_helper_raise_exception(tcg_env,
+                                   tcg_constant_i32(RISCV_EXCP_SW_CHECK));
+        tcg_ctx->emit_before_op = NULL;
+        ctx->base.is_jmp = DISAS_NORETURN;
+    }
+
     /* Only the first insn within a TB is allowed to cross a page boundary. */
     if (ctx->base.is_jmp == DISAS_NEXT) {
         if (ctx->itrigger || !is_same_page(&ctx->base, ctx->base.pc_next)) {
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 072bd444b1..ccb32e6122 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -5132,7 +5132,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
     }                                                         \
     env->vstart = 0;                                          \
     /* set tail elements to 1s */                             \
-    vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);  \
+    vext_set_elems_1s(vd, vta, num * esz, total_elems * esz); \
 }
 
 /* Compress into vd elements of vs2 where vs1 is enabled */
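The vector_helper.c one-liner fixes where agnostic tail handling begins for
vcompress: the instruction writes only as many destination elements as there
are set mask bits ('num'), so the tail starts at num, not at vl. A minimal
C model of the instruction (illustrative only, byte elements, no QEMU APIs):

/* vcompress model: why the tail begins at 'num' rather than 'vl'. */
#include <assert.h>
#include <stdint.h>

/* Compress src[i] (i < vl, mask bit set) into dst; returns elements written. */
static int vcompress(uint8_t *dst, const uint8_t *src, const uint8_t *mask,
                     int vl)
{
    int num = 0;
    for (int i = 0; i < vl; i++) {
        if (mask[i]) {
            dst[num++] = src[i];
        }
    }
    return num;   /* tail-agnostic 1s start at dst[num], not dst[vl] */
}

int main(void)
{
    uint8_t dst[8] = {0};
    const uint8_t src[8]  = {1, 2, 3, 4, 5, 6, 7, 8};
    const uint8_t mask[8] = {1, 0, 1, 0, 0, 1, 0, 0};

    int num = vcompress(dst, src, mask, 8);
    assert(num == 3 && dst[0] == 1 && dst[1] == 3 && dst[2] == 6);
    return 0;
}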