Diffstat (limited to 'target/arm/helper.c')
| -rw-r--r-- | target/arm/helper.c | 170 |
1 file changed, 161 insertions, 9 deletions
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 0bf8f53d4b..5074b5f69c 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1910,6 +1910,17 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
     raw_write(env, ri, value);
 }
 
+static CPAccessResult access_aa64_tid2(CPUARMState *env,
+                                       const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+
+    return CP_ACCESS_OK;
+}
+
 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     ARMCPU *cpu = env_archcpu(env);
@@ -1962,6 +1973,26 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
     return ret;
 }
 
+static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+
+    return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        return access_aa64_tid1(env, ri, isread);
+    }
+
+    return CP_ACCESS_OK;
+}
+
 static const ARMCPRegInfo v7_cp_reginfo[] = {
     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
@@ -2110,10 +2141,14 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
       .writefn = pmintenclr_write },
     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
-      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
+      .access = PL1_R,
+      .accessfn = access_aa64_tid2,
+      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
-      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
+      .access = PL1_RW,
+      .accessfn = access_aa64_tid2,
+      .writefn = csselr_write, .resetvalue = 0,
       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                              offsetof(CPUARMState, cp15.csselr_ns) } },
     /* Auxiliary ID register: this actually has an IMPDEF value but for now
@@ -2121,7 +2156,9 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
      */
     { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
-      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+      .access = PL1_R, .type = ARM_CP_CONST,
+      .accessfn = access_aa64_tid1,
+      .resetvalue = 0 },
     /* Auxiliary fault status registers: these also are IMPDEF, and we
      * choose to RAZ/WI for all cores.
      */
@@ -5096,7 +5133,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
       .resetvalue = 0, .writefn = scr_write },
-    { .name = "SCR", .type = ARM_CP_ALIAS,
+    { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
       .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
@@ -5204,6 +5241,11 @@ static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
         return CP_ACCESS_TRAP;
     }
+
+    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+
     return CP_ACCESS_OK;
 }
 
@@ -5932,6 +5974,52 @@ static const ARMCPRegInfo rndr_reginfo[] = {
       .access = PL0_R, .readfn = rndr_readfn },
     REGINFO_SENTINEL
 };
+
+#ifndef CONFIG_USER_ONLY
+static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
+                          uint64_t value)
+{
+    ARMCPU *cpu = env_archcpu(env);
+    /* CTR_EL0 System register -> DminLine, bits [19:16] */
+    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
+    uint64_t vaddr_in = (uint64_t) value;
+    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
+    void *haddr;
+    int mem_idx = cpu_mmu_index(env, false);
+
+    /* This won't be crossing page boundaries */
+    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
+    if (haddr) {
+
+        ram_addr_t offset;
+        MemoryRegion *mr;
+
+        /* RCU lock is already being held */
+        mr = memory_region_from_host(haddr, &offset);
+
+        if (mr) {
+            memory_region_do_writeback(mr, offset, dline_size);
+        }
+    }
+}
+
+static const ARMCPRegInfo dcpop_reg[] = {
+    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
+      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
+      .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn },
+    REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo dcpodp_reg[] = {
+    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
+      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
+      .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn },
+    REGINFO_SENTINEL
+};
+#endif /*CONFIG_USER_ONLY*/
+
 #endif
 
 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -5998,6 +6086,30 @@ static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
     return CP_ACCESS_OK;
 }
 
+static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
+                                     bool isread)
+{
+    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+
+    return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo jazelle_regs[] = {
+    { .name = "JIDR",
+      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
+      .access = PL1_R, .accessfn = access_jazelle,
+      .type = ARM_CP_CONST, .resetvalue = 0 },
+    { .name = "JOSCR",
+      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
+      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+    { .name = "JMCR",
+      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
+      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+    REGINFO_SENTINEL
+};
+
 void register_cp_regs_for_features(ARMCPU *cpu)
 {
     /* Register all the coprocessor registers based on feature bits */
@@ -6184,7 +6296,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         ARMCPRegInfo clidr = {
             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
-            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
+            .access = PL1_R, .type = ARM_CP_CONST,
+            .accessfn = access_aa64_tid2,
+            .resetvalue = cpu->clidr
         };
         define_one_arm_cp_reg(cpu, &clidr);
         define_arm_cp_regs(cpu, v7_cp_reginfo);
@@ -6655,6 +6769,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     if (arm_feature(env, ARM_FEATURE_LPAE)) {
         define_arm_cp_regs(cpu, lpae_cp_reginfo);
     }
+    if (cpu_isar_feature(jazelle, cpu)) {
+        define_arm_cp_regs(cpu, jazelle_regs);
+    }
     /* Slightly awkwardly, the OMAP and StrongARM cores need all of
      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
      * be read-only (ie write causes UNDEF exception).
@@ -6710,14 +6827,17 @@ void register_cp_regs_for_features(ARMCPU *cpu)
               .access = PL1_R, .resetvalue = cpu->midr },
             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
-              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
+              .access = PL1_R,
+              .accessfn = access_aa64_tid1,
+              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
             REGINFO_SENTINEL
         };
         ARMCPRegInfo id_cp_reginfo[] = {
             /* These are common to v8 and pre-v8 */
             { .name = "CTR",
               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
-              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
+              .access = PL1_R, .accessfn = ctr_el0_access,
+              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
             { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
               .access = PL0_R, .accessfn = ctr_el0_access,
@@ -6725,14 +6845,18 @@ void register_cp_regs_for_features(ARMCPU *cpu)
             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
             { .name = "TCMTR",
               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
-              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+              .access = PL1_R,
+              .accessfn = access_aa32_tid1,
+              .type = ARM_CP_CONST, .resetvalue = 0 },
             REGINFO_SENTINEL
         };
         /* TLBTR is specific to VMSA */
         ARMCPRegInfo id_tlbtr_reginfo = {
             .name = "TLBTR",
             .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
-            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
+            .access = PL1_R,
+            .accessfn = access_aa32_tid1,
+            .type = ARM_CP_CONST, .resetvalue = 0,
         };
         /* MPUIR is specific to PMSA V6+ */
         ARMCPRegInfo id_mpuir_reginfo = {
@@ -6968,6 +7092,16 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     if (cpu_isar_feature(aa64_rndr, cpu)) {
         define_arm_cp_regs(cpu, rndr_reginfo);
     }
+#ifndef CONFIG_USER_ONLY
+    /* Data Cache clean instructions up to PoP */
+    if (cpu_isar_feature(aa64_dcpop, cpu)) {
+        define_one_arm_cp_reg(cpu, dcpop_reg);
+
+        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
+            define_one_arm_cp_reg(cpu, dcpodp_reg);
+        }
+    }
+#endif /*CONFIG_USER_ONLY*/
 #endif
 
     /*
@@ -11232,6 +11366,12 @@ static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
     if (arm_el_is_aa64(env, 1)) {
         flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
     }
+
+    if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
+        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
+        flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
+    }
+
     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
 }
 
@@ -11332,6 +11472,18 @@ void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
     env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
 }
 
+/*
+ * If we have triggered a EL state change we can't rely on the
+ * translator having passed it too us, we need to recompute.
+ */
+void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
+{
+    int el = arm_current_el(env);
+    int fp_el = fp_exception_el(env, el);
+    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
+}
+
 void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
 {
     int fp_el = fp_exception_el(env, el);
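A note on the DC CVAP emulation added above: dccvap_writefn derives the data cache line size from CTR_EL0.DminLine (bits [19:16], the log2 of the number of 4-byte words in the smallest data cache line) and aligns the written address down to the start of that line before resolving it to host memory. The standalone sketch below only illustrates that arithmetic; the CTR_EL0 value and the address are made-up example inputs, not values taken from the patch.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical CTR_EL0 value with DminLine = 4 (example input only). */
    uint64_t ctr = 4ULL << 16;

    /* Same arithmetic as dccvap_writefn: 4-byte words << DminLine. */
    uint64_t dline_size = 4 << ((ctr >> 16) & 0xF);    /* 4 << 4 = 64 bytes */

    /* Example virtual address passed in the Xt register of DC CVAP. */
    uint64_t vaddr_in = 0x1234;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1);     /* aligned down to 0x1200 */

    printf("line size %" PRIu64 " bytes, 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
           dline_size, vaddr_in, vaddr);
    return 0;
}
```

With DminLine = 4 the smallest data cache line is 64 bytes, so a DC CVAP to 0x1234 operates on the line starting at 0x1200; in the patch, probe_read() then resolves that guest address to a host pointer and memory_region_do_writeback() writes the line back for the backing memory region.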