Diffstat (limited to 'target')
-rw-r--r-- | target/arm/ptw.c           | 39 |
-rw-r--r-- | target/arm/tcg/hflags.c    | 34 |
-rw-r--r-- | target/arm/tcg/translate.c |  8 |
-rw-r--r-- | target/sparc/mmu_helper.c  |  2 |
4 files changed, 73 insertions, 10 deletions
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index ba1a27ca2b..31ae43f60e 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -471,6 +471,16 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
     return false;
 }
 
+static bool S1_attrs_are_device(uint8_t attrs)
+{
+    /*
+     * This slightly under-decodes the MAIR_ELx field:
+     * 0b0000dd01 is Device with FEAT_XS, otherwise UNPREDICTABLE;
+     * 0b0000dd1x is UNPREDICTABLE.
+     */
+    return (attrs & 0xf0) == 0;
+}
+
 static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
 {
     /*
@@ -1684,6 +1694,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     bool aarch64 = arm_el_is_aa64(env, el);
     uint64_t descriptor, new_descriptor;
     ARMSecuritySpace out_space;
+    bool device;
 
     /* TODO: This code does not support shareability levels. */
     if (aarch64) {
@@ -2106,6 +2117,12 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     if (regime_is_stage2(mmu_idx)) {
         result->cacheattrs.is_s2_format = true;
         result->cacheattrs.attrs = extract32(attrs, 2, 4);
+        /*
+         * Security state does not really affect HCR_EL2.FWB;
+         * we only need to filter FWB for aa32 or other FEAT.
+         */
+        device = S2_attrs_are_device(arm_hcr_el2_eff(env),
+                                     result->cacheattrs.attrs);
     } else {
         /* Index into MAIR registers for cache attributes */
         uint8_t attrindx = extract32(attrs, 2, 3);
@@ -2118,6 +2135,28 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
             result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
         }
+        device = S1_attrs_are_device(result->cacheattrs.attrs);
+    }
+
+    /*
+     * Enable alignment checks on Device memory.
+     *
+     * Per R_XCHFJ, this check is mis-ordered. The correct ordering
+     * for alignment, permission, and stage 2 faults should be:
+     *   - Alignment fault caused by the memory type
+     *   - Permission fault
+     *   - A stage 2 fault on the memory access
+     * but due to the way the TCG softmmu TLB operates, we will have
+     * implicitly done the permission check and the stage2 lookup in
+     * finding the TLB entry, so the alignment check cannot be done sooner.
+     *
+     * In v7, for a CPU without the Virtualization Extensions this
+     * access is UNPREDICTABLE; we choose to make it take the alignment
+     * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
+     * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
+     */
+    if (device) {
+        result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED;
     }
 
     /*
diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c
index 8e5d35d922..5da1b0fc1d 100644
--- a/target/arm/tcg/hflags.c
+++ b/target/arm/tcg/hflags.c
@@ -26,6 +26,35 @@ static inline bool fgt_svc(CPUARMState *env, int el)
         FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
 }
 
+/* Return true if memory alignment should be enforced. */
+static bool aprofile_require_alignment(CPUARMState *env, int el, uint64_t sctlr)
+{
+#ifdef CONFIG_USER_ONLY
+    return false;
+#else
+    /* Check the alignment enable bit. */
+    if (sctlr & SCTLR_A) {
+        return true;
+    }
+
+    /*
+     * If translation is disabled, then the default memory type is
+     * Device(-nGnRnE) instead of Normal, which requires that alignment
+     * be enforced.  Since this affects all ram, it is most efficient
+     * to handle this during translation.
+     */
+    if (sctlr & SCTLR_M) {
+        /* Translation enabled: memory type in PTE via MAIR_ELx. */
+        return false;
+    }
+    if (el < 2 && (arm_hcr_el2_eff(env) & (HCR_DC | HCR_VM))) {
+        /* Stage 2 translation enabled: memory type in PTE. */
+        return false;
+    }
+    return true;
+#endif
+}
+
 static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                            ARMMMUIdx mmu_idx,
                                            CPUARMTBFlags flags)
@@ -121,8 +150,9 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
 {
     CPUARMTBFlags flags = {};
     int el = arm_current_el(env);
+    uint64_t sctlr = arm_sctlr(env, el);
 
-    if (arm_sctlr(env, el) & SCTLR_A) {
+    if (aprofile_require_alignment(env, el, sctlr)) {
         DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
     }
 
@@ -223,7 +253,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
 
     sctlr = regime_sctlr(env, stage1);
 
-    if (sctlr & SCTLR_A) {
+    if (aprofile_require_alignment(env, el, sctlr)) {
         DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
     }
 
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index f947c62c6b..c8a2470675 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -900,13 +900,7 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
 MemOp pow2_align(unsigned i)
 {
     static const MemOp mop_align[] = {
-        0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16,
-        /*
-         * FIXME: TARGET_PAGE_BITS_MIN affects TLB_FLAGS_MASK such
-         * that 256-bit alignment (MO_ALIGN_32) cannot be supported:
-         * see get_alignment_bits(). Enforce only 128-bit alignment for now.
-         */
-        MO_ALIGN_16
+        0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16, MO_ALIGN_32
     };
     g_assert(i < ARRAY_SIZE(mop_align));
     return mop_align[i];
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
index 5170a668bb..e7b1997d54 100644
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -580,7 +580,7 @@ static int get_physical_address_data(CPUSPARCState *env, CPUTLBEntryFull *full,
     int do_fault = 0;
 
     if (TTE_IS_IE(env->dtlb[i].tte)) {
-        full->attrs.byte_swap = true;
+        full->tlb_fill_flags |= TLB_BSWAP;
    }
 
     /* access ok? */
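
For readers decoding the ptw.c hunk: stage 1 attaches an 8-bit attribute byte to each page via MAIR_ELx, and every encoding whose high nibble is zero is some flavour of Device memory, which is exactly the (attrs & 0xf0) == 0 test in the new S1_attrs_are_device(). The standalone sketch below is illustrative C, not part of the patch; the lowercase helper name and the case table are invented for the example. It runs the same test over a few well-known MAIR attribute encodings:

/*
 * Sketch only -- not QEMU code.  Mirrors the S1_attrs_are_device()
 * test from target/arm/ptw.c above: a stage-1 MAIR_ELx attribute
 * byte whose high nibble is zero encodes a Device memory type.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool s1_attrs_are_device(uint8_t attrs)
{
    return (attrs & 0xf0) == 0;
}

int main(void)
{
    /* A few well-known MAIR attribute encodings. */
    static const struct { uint8_t attrs; const char *name; } cases[] = {
        { 0x00, "Device-nGnRnE" },
        { 0x04, "Device-nGnRE" },
        { 0x0c, "Device-GRE" },
        { 0x44, "Normal Non-cacheable" },
        { 0xff, "Normal Write-Back" },
    };

    for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
        printf("attrs 0x%02x  %-22s -> %s\n",
               cases[i].attrs, cases[i].name,
               s1_attrs_are_device(cases[i].attrs)
               ? "Device: set TLB_CHECK_ALIGNED" : "Normal: no extra check");
    }
    return 0;
}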
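
The hflags.c hunk decides, once per rebuild of the translation-block flags, whether every access must be alignment-checked. Distilled to its inputs it is a small truth table; the sketch below is again illustrative C rather than QEMU code (require_alignment() and its parameter names are invented for the example) and mirrors the #else branch of aprofile_require_alignment():

/*
 * Sketch only -- not QEMU code.  Distills the decision made by the
 * new aprofile_require_alignment() in target/arm/tcg/hflags.c above.
 */
#include <stdbool.h>
#include <stdio.h>

/*
 * sctlr_a:      SCTLR_ELx.A, the explicit alignment-check enable.
 * sctlr_m:      SCTLR_ELx.M, stage-1 translation enable.
 * stage2_attrs: EL0/EL1 with HCR_EL2.DC or HCR_EL2.VM set, so stage 2
 *               (or the DC default) supplies the memory type instead.
 */
static bool require_alignment(bool sctlr_a, bool sctlr_m, bool stage2_attrs)
{
    if (sctlr_a) {
        return true;   /* alignment checking explicitly enabled */
    }
    if (sctlr_m || stage2_attrs) {
        return false;  /* memory type comes from the page tables; Device
                          pages get the per-page TLB_CHECK_ALIGNED flag
                          from get_phys_addr_lpae() instead */
    }
    return true;       /* MMU off: default memory type is Device-nGnRnE,
                          so alignment must be enforced everywhere */
}

int main(void)
{
    printf("A=0 M=0 S2=0 -> %d  (MMU off, default Device)\n",
           require_alignment(false, false, false));
    printf("A=0 M=1 S2=0 -> %d  (stage-1 translation on, PTEs decide)\n",
           require_alignment(false, true, false));
    printf("A=0 M=0 S2=1 -> %d  (stage 2 supplies the memory type)\n",
           require_alignment(false, false, true));
    printf("A=1 M=0 S2=0 -> %d  (SCTLR.A always wins)\n",
           require_alignment(true, false, false));
    return 0;
}

The split between the two files looks deliberate: the TB-flag path is cheap but coarse, covering the MMU-off case where every address is Device-nGnRnE, while the TLB_CHECK_ALIGNED fill flag from ptw.c handles the translation-enabled case one page at a time, so accesses to Normal memory pay nothing extra.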