Diffstat (limited to 'target/arm/helper.c')
 -rw-r--r--  target/arm/helper.c | 1216 
 1 file changed, 951 insertions(+), 265 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index f00c141ef9..92666e5208 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -15,6 +15,7 @@
 #include "arm_ldst.h"
 #include <zlib.h> /* For crc32 */
 #include "exec/semihost.h"
+#include "sysemu/cpus.h"
 #include "sysemu/kvm.h"
 #include "fpu/softfloat.h"
 #include "qemu/range.h"
@@ -976,10 +977,29 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
 /* Definitions for the PMU registers */
 #define PMCRN_MASK  0xf800
 #define PMCRN_SHIFT 11
+#define PMCRDP  0x10
 #define PMCRD   0x8
 #define PMCRC   0x4
+#define PMCRP   0x2
 #define PMCRE   0x1
 
+#define PMXEVTYPER_P          0x80000000
+#define PMXEVTYPER_U          0x40000000
+#define PMXEVTYPER_NSK        0x20000000
+#define PMXEVTYPER_NSU        0x10000000
+#define PMXEVTYPER_NSH        0x08000000
+#define PMXEVTYPER_M          0x04000000
+#define PMXEVTYPER_MT         0x02000000
+#define PMXEVTYPER_EVTCOUNT   0x0000ffff
+#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
+                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
+                               PMXEVTYPER_M | PMXEVTYPER_MT | \
+                               PMXEVTYPER_EVTCOUNT)
+
+#define PMCCFILTR             0xf8000000
+#define PMCCFILTR_M           PMXEVTYPER_M
+#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
+
 static inline uint32_t pmu_num_counters(CPUARMState *env)
 {
     return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
@@ -991,6 +1011,128 @@ static inline uint64_t pmu_counter_mask(CPUARMState *env)
     return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
 }
 
+typedef struct pm_event {
+    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
+    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
+    bool (*supported)(CPUARMState *);
+    /*
+     * Retrieve the current count of the underlying event. The programmed
+     * counters hold a difference from the return value from this function
+     */
+    uint64_t (*get_count)(CPUARMState *);
+} pm_event;
+
+static bool event_always_supported(CPUARMState *env)
+{
+    return true;
+}
+
+static uint64_t swinc_get_count(CPUARMState *env)
+{
+    /*
+     * SW_INCR events are written directly to the pmevcntr's by writes to
+     * PMSWINC, so there is no underlying count maintained by the PMU itself
+     */
+    return 0;
+}
+
+/*
+ * Return the underlying cycle count for the PMU cycle counters. In system
+ * emulation this is derived from the virtual clock; in user-mode emulation
+ * there is no virtual clock, so fall back to the host CPU tick count.
+ */
+static uint64_t cycles_get_count(CPUARMState *env)
+{
+#ifndef CONFIG_USER_ONLY
+    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
+#else
+    return cpu_get_host_ticks();
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+static bool instructions_supported(CPUARMState *env)
+{
+    return use_icount == 1 /* Precise instruction counting */;
+}
+
+static uint64_t instructions_get_count(CPUARMState *env)
+{
+    return (uint64_t)cpu_get_icount_raw();
+}
+#endif
+
+static const pm_event pm_events[] = {
+    { .number = 0x000, /* SW_INCR */
+      .supported = event_always_supported,
+      .get_count = swinc_get_count,
+    },
+#ifndef CONFIG_USER_ONLY
+    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
+      .supported = instructions_supported,
+      .get_count = instructions_get_count,
+    },
+    { .number = 0x011, /* CPU_CYCLES, Cycle */
+      .supported = event_always_supported,
+      .get_count = cycles_get_count,
+    }
+#endif
+};
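
Each entry in pm_events is self-describing, so supporting another architectural event only takes a number, a support predicate, and a count callback. A minimal sketch of what a new entry would look like (the 0x013 MEM_ACCESS event number is architectural, but this entry and its helper are hypothetical, not part of the patch):

    /* Hypothetical pm_event entry -- illustrative only. */
    static uint64_t mem_access_get_count(CPUARMState *env)
    {
        /* Would return the CPU's running count of data memory accesses. */
        return 0;
    }

    static const pm_event mem_access_event = {
        .number = 0x013, /* MEM_ACCESS, Data memory access */
        .supported = event_always_supported,
        .get_count = mem_access_get_count,
    };
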
+
+/*
+ * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
+ * events (i.e. the statistical profiling extension), this implementation
+ * should first be updated to something sparse instead of the current
+ * supported_event_map[] array.
+ */
+#define MAX_EVENT_ID 0x11
+#define UNSUPPORTED_EVENT UINT16_MAX
+static uint16_t supported_event_map[MAX_EVENT_ID + 1];
+
+/*
+ * Called upon initialization to build PMCEID0_EL0 or PMCEID1_EL0 (indicated by
+ * 'which'). We also use it to build a map of ARM event numbers to indices in
+ * our pm_events array.
+ *
+ * Note: Events in the 0x40XX range are not currently supported.
+ */
+uint64_t get_pmceid(CPUARMState *env, unsigned which)
+{
+    uint64_t pmceid = 0;
+    unsigned int i;
+
+    assert(which <= 1);
+
+    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
+        supported_event_map[i] = UNSUPPORTED_EVENT;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
+        const pm_event *cnt = &pm_events[i];
+        assert(cnt->number <= MAX_EVENT_ID);
+        /* We do not currently support events in the 0x40xx range */
+        assert(cnt->number <= 0x3f);
+
+        if ((cnt->number & 0x20) == (which << 5) &&
+            cnt->supported(env)) {
+            pmceid |= (1 << (cnt->number & 0x1f));
+            supported_event_map[cnt->number] = i;
+        }
+    }
+    return pmceid;
+}
+
+/*
+ * Check at runtime whether a PMU event is supported for the current machine
+ */
+static bool event_supported(uint16_t number)
+{
+    if (number > MAX_EVENT_ID) {
+        return false;
+    }
+    return supported_event_map[number] != UNSUPPORTED_EVENT;
+}
+
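
PMCEID0 advertises events 0x00-0x1f and PMCEID1 events 0x20-0x3f: bit 5 of the event number selects the register, matching the (which << 5) comparison above, and the low five bits give the bit position. Worked examples (the event numbers are architectural; the assertions are purely illustrative):

    /* CPU_CYCLES (0x011): bit 5 clear -> PMCEID0, bit position 0x11 */
    assert(((0x011 & 0x20) == (0u << 5)) && ((0x011 & 0x1f) == 0x11));
    /* STALL_FRONTEND (0x023): bit 5 set -> PMCEID1, bit position 0x03 */
    assert(((0x023 & 0x20) == (1u << 5)) && ((0x023 & 0x1f) == 0x03));
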
 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
 {
@@ -1044,8 +1186,6 @@ static CPAccessResult pmreg_access_swinc(CPUARMState *env,
     return pmreg_access(env, ri, isread);
 }
 
-#ifndef CONFIG_USER_ONLY
-
 static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
@@ -1075,68 +1215,222 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
     return pmreg_access(env, ri, isread);
 }
 
-static inline bool arm_ccnt_enabled(CPUARMState *env)
+/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
+ * the current EL, security state, and register configuration.
+ */
+static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
 {
-    /* This does not support checking PMCCFILTR_EL0 register */
+    uint64_t filter;
+    bool e, p, u, nsk, nsu, nsh, m;
+    bool enabled, prohibited, filtered;
+    bool secure = arm_is_secure(env);
+    int el = arm_current_el(env);
+    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
 
-    if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) {
-        return false;
+    if (!arm_feature(env, ARM_FEATURE_EL2) ||
+            (counter < hpmn || counter == 31)) {
+        e = env->cp15.c9_pmcr & PMCRE;
+    } else {
+        e = env->cp15.mdcr_el2 & MDCR_HPME;
     }
+    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
 
-    return true;
+    if (!secure) {
+        if (el == 2 && (counter < hpmn || counter == 31)) {
+            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
+        } else {
+            prohibited = false;
+        }
+    } else {
+        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
+            (env->cp15.mdcr_el3 & MDCR_SPME);
+    }
+
+    if (prohibited && counter == 31) {
+        prohibited = env->cp15.c9_pmcr & PMCRDP;
+    }
+
+    if (counter == 31) {
+        filter = env->cp15.pmccfiltr_el0;
+    } else {
+        filter = env->cp15.c14_pmevtyper[counter];
+    }
+
+    p   = filter & PMXEVTYPER_P;
+    u   = filter & PMXEVTYPER_U;
+    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
+    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
+    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
+    m   = arm_el_is_aa64(env, 1) &&
+          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
+
+    if (el == 0) {
+        filtered = secure ? u : u != nsu;
+    } else if (el == 1) {
+        filtered = secure ? p : p != nsk;
+    } else if (el == 2) {
+        filtered = !nsh;
+    } else { /* EL3 */
+        filtered = m != p;
+    }
+
+    if (counter != 31) {
+        /*
+         * If not checking PMCCNTR, ensure the counter is set up to count an
+         * event we support
+         */
+        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
+        if (!event_supported(event)) {
+            return false;
+        }
+    }
+
+    return enabled && !prohibited && !filtered;
+}
+
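
Each arm of the filtered computation mirrors one row of the PMEVTYPER filter table. For EL0, counting is suppressed in Secure state when U is set, and in Non-secure state when U and NSU disagree; a standalone sketch of just that arm, using the macros defined earlier in this patch:

    /* Illustrative only: the EL0 arm of the filter decision. */
    static bool el0_filtered(uint64_t filter, bool secure, bool have_el3)
    {
        bool u   = filter & PMXEVTYPER_U;
        bool nsu = have_el3 && (filter & PMXEVTYPER_NSU);

        return secure ? u : u != nsu;
    }
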
+/*
+ * Ensure c15_ccnt is the guest-visible count so that operations such as
+ * enabling/disabling the counter or filtering, modifying the count itself,
+ * etc. can be done logically. This is essentially a no-op if the counter is
+ * not enabled at the time of the call.
+ */
+void pmccntr_op_start(CPUARMState *env)
+{
+    uint64_t cycles = cycles_get_count(env);
+
+    if (pmu_counter_enabled(env, 31)) {
+        uint64_t eff_cycles = cycles;
+        if (env->cp15.c9_pmcr & PMCRD) {
+            /* Increment once every 64 processor clock cycles */
+            eff_cycles /= 64;
+        }
+
+        env->cp15.c15_ccnt = eff_cycles - env->cp15.c15_ccnt_delta;
+    }
+    env->cp15.c15_ccnt_delta = cycles;
 }
 
-void pmccntr_sync(CPUARMState *env)
+/*
+ * If PMCCNTR is enabled, recalculate the delta between the clock and the
+ * guest-visible count. A call to pmccntr_op_finish should follow every call to
+ * pmccntr_op_start.
+ */
+void pmccntr_op_finish(CPUARMState *env)
 {
-    uint64_t temp_ticks;
+    if (pmu_counter_enabled(env, 31)) {
+        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
+
+        if (env->cp15.c9_pmcr & PMCRD) {
+            /* Increment once every 64 processor clock cycles */
+            prev_cycles /= 64;
+        }
+
+        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
+    }
+}
+
+static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
+{
+    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
+    uint64_t count = 0;
+    if (event_supported(event)) {
+        uint16_t event_idx = supported_event_map[event];
+        count = pm_events[event_idx].get_count(env);
+    }
+
+    if (pmu_counter_enabled(env, counter)) {
+        env->cp15.c14_pmevcntr[counter] =
+            count - env->cp15.c14_pmevcntr_delta[counter];
+    }
+    env->cp15.c14_pmevcntr_delta[counter] = count;
+}
 
-    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
-                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
+static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
+{
+    if (pmu_counter_enabled(env, counter)) {
+        env->cp15.c14_pmevcntr_delta[counter] -=
+            env->cp15.c14_pmevcntr[counter];
+    }
+}
 
-    if (env->cp15.c9_pmcr & PMCRD) {
-        /* Increment once every 64 processor clock cycles */
-        temp_ticks /= 64;
+void pmu_op_start(CPUARMState *env)
+{
+    unsigned int i;
+    pmccntr_op_start(env);
+    for (i = 0; i < pmu_num_counters(env); i++) {
+        pmevcntr_op_start(env, i);
     }
+}
 
-    if (arm_ccnt_enabled(env)) {
-        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
+void pmu_op_finish(CPUARMState *env)
+{
+    unsigned int i;
+    pmccntr_op_finish(env);
+    for (i = 0; i < pmu_num_counters(env); i++) {
+        pmevcntr_op_finish(env, i);
     }
 }
 
+void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
+{
+    pmu_op_start(&cpu->env);
+}
+
+void pmu_post_el_change(ARMCPU *cpu, void *ignored)
+{
+    pmu_op_finish(&cpu->env);
+}
+
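
Every mutation of guest-visible counter state is bracketed by an op_start/op_finish pair, which keeps the stored delta and the guest-visible count interchangeable. A distilled model of the scheme (a sketch, not the patch's code; the real functions also honor enable/filter state and PMCRD scaling):

    struct counter { uint64_t guest; uint64_t delta; };

    static void op_start(struct counter *c, uint64_t raw)
    {
        c->guest = raw - c->delta; /* guest += raw ticks since last finish */
        c->delta = raw;            /* remember the raw baseline */
    }

    static void op_finish(struct counter *c)
    {
        c->delta -= c->guest;      /* re-encode the (possibly edited) value */
    }

Between the two calls, c->guest can be read, written, or reset directly; op_finish folds any edit back into the delta so counting resumes from the new value.
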
 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
 {
-    pmccntr_sync(env);
+    pmu_op_start(env);
 
     if (value & PMCRC) {
         /* The counter has been reset */
         env->cp15.c15_ccnt = 0;
     }
 
+    if (value & PMCRP) {
+        unsigned int i;
+        for (i = 0; i < pmu_num_counters(env); i++) {
+            env->cp15.c14_pmevcntr[i] = 0;
+        }
+    }
+
     /* only the DP, X, D and E bits are writable */
     env->cp15.c9_pmcr &= ~0x39;
     env->cp15.c9_pmcr |= (value & 0x39);
 
-    pmccntr_sync(env);
+    pmu_op_finish(env);
 }
 
-static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                          uint64_t value)
 {
-    uint64_t total_ticks;
-
-    if (!arm_ccnt_enabled(env)) {
-        /* Counter is disabled, do not change value */
-        return env->cp15.c15_ccnt;
+    unsigned int i;
+    for (i = 0; i < pmu_num_counters(env); i++) {
+        /* Increment a counter's count iff: */
+        if ((value & (1 << i)) && /* counter's bit is set */
+                /* counter is enabled and not filtered */
+                pmu_counter_enabled(env, i) &&
+                /* counter is SW_INCR */
+                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
+            pmevcntr_op_start(env, i);
+            env->cp15.c14_pmevcntr[i]++;
+            pmevcntr_op_finish(env, i);
+        }
     }
+}
 
-    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
-                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
-
-    if (env->cp15.c9_pmcr & PMCRD) {
-        /* Increment once every 64 processor clock cycles */
-        total_ticks /= 64;
-    }
-    return total_ticks - env->cp15.c15_ccnt;
+static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    uint64_t ret;
+    pmccntr_op_start(env);
+    ret = env->cp15.c15_ccnt;
+    pmccntr_op_finish(env);
+    return ret;
 }
 
 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1153,22 +1447,9 @@ static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
 {
-    uint64_t total_ticks;
-
-    if (!arm_ccnt_enabled(env)) {
-        /* Counter is disabled, set the absolute value */
-        env->cp15.c15_ccnt = value;
-        return;
-    }
-
-    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
-                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
-
-    if (env->cp15.c9_pmcr & PMCRD) {
-        /* Increment once every 64 processor clock cycles */
-        total_ticks /= 64;
-    }
-    env->cp15.c15_ccnt = total_ticks - value;
+    pmccntr_op_start(env);
+    env->cp15.c15_ccnt = value;
+    pmccntr_op_finish(env);
 }
 
 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1179,20 +1460,28 @@ static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
     pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
 }
 
-#else /* CONFIG_USER_ONLY */
-
-void pmccntr_sync(CPUARMState *env)
+static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                            uint64_t value)
 {
+    pmccntr_op_start(env);
+    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
+    pmccntr_op_finish(env);
 }
 
-#endif
-
-static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
 {
-    pmccntr_sync(env);
-    env->cp15.pmccfiltr_el0 = value & 0xfc000000;
-    pmccntr_sync(env);
+    pmccntr_op_start(env);
+    /* M is not accessible from AArch32 */
+    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
+        (value & PMCCFILTR);
+    pmccntr_op_finish(env);
+}
+
+static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    /* M is not visible in AArch32 */
+    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
 }
 
 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1216,30 +1505,181 @@ static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     env->cp15.c9_pmovsr &= ~value;
 }
 
-static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                             uint64_t value)
+static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                           uint64_t value)
 {
+    value &= pmu_counter_mask(env);
+    env->cp15.c9_pmovsr |= value;
+}
+
+static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                            uint64_t value, const uint8_t counter)
+{
+    if (counter == 31) {
+        pmccfiltr_write(env, ri, value);
+    } else if (counter < pmu_num_counters(env)) {
+        pmevcntr_op_start(env, counter);
+
+        /*
+         * If this counter's event type is changing, store the current
+         * underlying count for the new type in c14_pmevcntr_delta[counter] so
+         * pmevcntr_op_finish has the correct baseline when it converts back
+         * to a delta.
+         */
+        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
+            PMXEVTYPER_EVTCOUNT;
+        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
+        if (old_event != new_event) {
+            uint64_t count = 0;
+            if (event_supported(new_event)) {
+                uint16_t event_idx = supported_event_map[new_event];
+                count = pm_events[event_idx].get_count(env);
+            }
+            env->cp15.c14_pmevcntr_delta[counter] = count;
+        }
+
+        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
+        pmevcntr_op_finish(env, counter);
+    }
     /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
      * PMSELR value is equal to or greater than the number of implemented
      * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
      */
-    if (env->cp15.c9_pmselr == 0x1f) {
-        pmccfiltr_write(env, ri, value);
+}
+
+static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
+                               const uint8_t counter)
+{
+    if (counter == 31) {
+        return env->cp15.pmccfiltr_el0;
+    } else if (counter < pmu_num_counters(env)) {
+        return env->cp15.c14_pmevtyper[counter];
+    } else {
+        /*
+         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
+         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
+         */
+        return 0;
     }
 }
 
+static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
+                              uint64_t value)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    pmevtyper_write(env, ri, value, counter);
+}
+
+static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
+                               uint64_t value)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    env->cp15.c14_pmevtyper[counter] = value;
+
+    /*
+     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
+     * pmu_op_finish calls when loading saved state for a migration. Because
+     * we're potentially updating the type of event here, the value written to
+     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
+     * different counter type. Therefore, we need to set this value to the
+     * current count for the counter type we're writing so that pmu_op_finish
+     * has the correct count for its calculation.
+     */
+    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
+    if (event_supported(event)) {
+        uint16_t event_idx = supported_event_map[event];
+        env->cp15.c14_pmevcntr_delta[counter] =
+            pm_events[event_idx].get_count(env);
+    }
+}
+
+static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    return pmevtyper_read(env, ri, counter);
+}
+
+static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value)
+{
+    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
+}
+
 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
-    /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
-     * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
+    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
+}
+
+static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                           uint64_t value, uint8_t counter)
+{
+    if (counter < pmu_num_counters(env)) {
+        pmevcntr_op_start(env, counter);
+        env->cp15.c14_pmevcntr[counter] = value;
+        pmevcntr_op_finish(env, counter);
+    }
+    /*
+     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
+     * are CONSTRAINED UNPREDICTABLE.
      */
-    if (env->cp15.c9_pmselr == 0x1f) {
-        return env->cp15.pmccfiltr_el0;
+}
+
+static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
+                              uint8_t counter)
+{
+    if (counter < pmu_num_counters(env)) {
+        uint64_t ret;
+        pmevcntr_op_start(env, counter);
+        ret = env->cp15.c14_pmevcntr[counter];
+        pmevcntr_op_finish(env, counter);
+        return ret;
     } else {
+        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
+         * are CONSTRAINED UNPREDICTABLE. */
         return 0;
     }
 }
 
+static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    pmevcntr_write(env, ri, value, counter);
+}
+
+static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    return pmevcntr_read(env, ri, counter);
+}
+
+static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
+                              uint64_t value)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    assert(counter < pmu_num_counters(env));
+    env->cp15.c14_pmevcntr[counter] = value;
+}
+
+static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    assert(counter < pmu_num_counters(env));
+    return env->cp15.c14_pmevcntr[counter];
+}
+
+static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                            uint64_t value)
+{
+    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
+}
+
+static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
+}
+
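
The per-counter PMEVCNTR<n>/PMEVTYPER<n> registers are spread across the crm/opc2 encoding space, and the *_fn accessors recover the counter index as ((crm & 3) << 3) | (opc2 & 7). A worked example for counter 12, using the encoding produced by the registration loop later in this patch:

    unsigned n = 12;
    unsigned crm  = 8 | (3 & (n >> 3));               /* == 9  */
    unsigned opc2 = n & 7;                            /* == 4  */
    unsigned counter = ((crm & 3) << 3) | (opc2 & 7); /* == 12 */
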
 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
 {
@@ -1368,7 +1808,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
       .access = PL1_W, .type = ARM_CP_NOP },
     /* Performance monitors are implementation defined in v7,
      * but with an ARM recommended set of registers, which we
-     * follow (although we don't actually implement any counters)
+     * follow.
      *
      * Performance registers fall into three categories:
      *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
@@ -1413,10 +1853,13 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
       .writefn = pmovsr_write,
       .raw_writefn = raw_write },
-    /* Unimplemented so WI. */
     { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
-      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
+      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NO_RAW,
+      .writefn = pmswinc_write },
+    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
+      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NO_RAW,
+      .writefn = pmswinc_write },
     { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
       .access = PL0_RW, .type = ARM_CP_ALIAS,
       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
@@ -1435,26 +1878,39 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
       .access = PL0_RW, .accessfn = pmreg_access_ccntr,
       .type = ARM_CP_IO,
-      .readfn = pmccntr_read, .writefn = pmccntr_write, },
-#endif
+      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
+      .readfn = pmccntr_read, .writefn = pmccntr_write,
+      .raw_readfn = raw_read, .raw_writefn = raw_write, },
+    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15,
+      .opc2 = 7,
+      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
+      .access = PL0_RW, .accessfn = pmreg_access,
+      .type = ARM_CP_ALIAS | ARM_CP_IO,
+      .resetvalue = 0, },
     { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
-      .writefn = pmccfiltr_write,
+      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
       .access = PL0_RW, .accessfn = pmreg_access,
       .type = ARM_CP_IO,
       .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
       .resetvalue = 0, },
     { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
-      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
+      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+      .accessfn = pmreg_access,
       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
     { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
-      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
+      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+      .accessfn = pmreg_access,
       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
-    /* Unimplemented, RAZ/WI. */
     { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
-      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
-      .accessfn = pmreg_access_xevcntr },
+      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+      .accessfn = pmreg_access_xevcntr,
+      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
+    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
+      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+      .accessfn = pmreg_access_xevcntr,
+      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
     { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
       .access = PL0_R | PL1_RW, .accessfn = access_tpm,
       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
@@ -1585,6 +2041,24 @@ static const ARMCPRegInfo v7mp_cp_reginfo[] = {
     REGINFO_SENTINEL
 };
 
+static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
+    /* PMOVSSET is not implemented in v7 before v7ve */
+    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
+      .access = PL0_RW, .accessfn = pmreg_access,
+      .type = ARM_CP_ALIAS,
+      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
+      .writefn = pmovsset_write,
+      .raw_writefn = raw_write },
+    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
+      .access = PL0_RW, .accessfn = pmreg_access,
+      .type = ARM_CP_ALIAS,
+      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+      .writefn = pmovsset_write,
+      .raw_writefn = raw_write },
+    REGINFO_SENTINEL
+};
+
 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
 {
@@ -4284,7 +4758,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
 #endif
     /* The only field of MDCR_EL2 that has a defined architectural reset value
      * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
-     * don't impelment any PMU event counters, so using zero as a reset
+     * don't implement any PMU event counters, so using zero as a reset
      * value for MDCR_EL2 is okay
      */
     { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
@@ -5061,6 +5535,70 @@ static CPAccessResult access_lor_other(CPUARMState *env,
     return access_lor_ns(env);
 }
 
+#ifdef TARGET_AARCH64
+static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
+                                   bool isread)
+{
+    int el = arm_current_el(env);
+
+    if (el < 2 &&
+        arm_feature(env, ARM_FEATURE_EL2) &&
+        !(arm_hcr_el2_eff(env) & HCR_APK)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+    if (el < 3 &&
+        arm_feature(env, ARM_FEATURE_EL3) &&
+        !(env->cp15.scr_el3 & SCR_APK)) {
+        return CP_ACCESS_TRAP_EL3;
+    }
+    return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo pauth_reginfo[] = {
+    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
+      .access = PL1_RW, .accessfn = access_pauth,
+      .fieldoffset = offsetof(CPUARMState, apda_key.lo) },
+    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
+      .access = PL1_RW, .accessfn = access_pauth,
+      .fieldoffset = offsetof(CPUARMState, apda_key.hi) },
+    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
+      .access = PL1_RW, .accessfn = access_pauth,
+      .fieldoffset = offsetof(CPUARMState, apdb_key.lo) },
+    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
+      .access = PL1_RW, .accessfn = access_pauth,
+      .fieldoffset = offsetof(CPUARMState, apdb_key.hi) },
"APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, apga_key.lo) }, + { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, apga_key.hi) }, + { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, apia_key.lo) }, + { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, apia_key.hi) }, + { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, apib_key.lo) }, + { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, apib_key.hi) }, + REGINFO_SENTINEL +}; +#endif + void register_cp_regs_for_features(ARMCPU *cpu) { /* Register all the coprocessor registers based on feature bits */ @@ -5163,12 +5701,15 @@ void register_cp_regs_for_features(ARMCPU *cpu) !arm_feature(env, ARM_FEATURE_PMSA)) { define_arm_cp_regs(cpu, v7mp_cp_reginfo); } + if (arm_feature(env, ARM_FEATURE_V7VE)) { + define_arm_cp_regs(cpu, pmovsset_cp_reginfo); + } if (arm_feature(env, ARM_FEATURE_V7)) { /* v7 performance monitor control register: same implementor - * field as main ID register, and we implement only the cycle - * count register. + * field as main ID register, and we implement four counters in + * addition to the cycle count register. 
          */
-#ifndef CONFIG_USER_ONLY
+        unsigned int i, pmcrn = 4;
         ARMCPRegInfo pmcr = {
             .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0,
            .opc2 = 0, .access = PL0_RW,
@@ -5183,12 +5724,48 @@ void register_cp_regs_for_features(ARMCPU *cpu)
             .access = PL0_RW, .accessfn = pmreg_access,
             .type = ARM_CP_IO,
             .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
-            .resetvalue = cpu->midr & 0xff000000,
+            .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
             .writefn = pmcr_write, .raw_writefn = raw_write,
         };
         define_one_arm_cp_reg(cpu, &pmcr);
         define_one_arm_cp_reg(cpu, &pmcr64);
-#endif
+        for (i = 0; i < pmcrn; i++) {
+            char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
+            char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
+            char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
+            char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
+            ARMCPRegInfo pmev_regs[] = {
+                { .name = pmevcntr_name, .cp = 15, .crn = 15,
+                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
+                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
+                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
+                  .accessfn = pmreg_access },
+                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
+                  .opc0 = 3, .opc1 = 3, .crn = 15, .crm = 8 | (3 & (i >> 3)),
+                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
+                  .type = ARM_CP_IO,
+                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
+                  .raw_readfn = pmevcntr_rawread,
+                  .raw_writefn = pmevcntr_rawwrite },
+                { .name = pmevtyper_name, .cp = 15, .crn = 15,
+                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
+                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
+                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
+                  .accessfn = pmreg_access },
+                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
+                  .opc0 = 3, .opc1 = 3, .crn = 15, .crm = 12 | (3 & (i >> 3)),
+                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
+                  .type = ARM_CP_IO,
+                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
+                  .raw_writefn = pmevtyper_rawwrite },
+                REGINFO_SENTINEL
+            };
+            define_arm_cp_regs(cpu, pmev_regs);
+            g_free(pmevcntr_name);
+            g_free(pmevcntr_el0_name);
+            g_free(pmevtyper_name);
+            g_free(pmevtyper_el0_name);
+        }
         ARMCPRegInfo clidr = {
             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
@@ -5200,6 +5777,21 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     } else {
         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
     }
+    if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
+        FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
+        ARMCPRegInfo v81_pmu_regs[] = {
+            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
+              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
+              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
+            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
+              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
+              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
+            REGINFO_SENTINEL
+        };
+        define_arm_cp_regs(cpu, v81_pmu_regs);
+    }
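
PMCEID2/PMCEID3, added for ARMv8.1-PMU, are simply the AArch32 windows onto bits [63:32] of the same 64-bit PMCEID0/1 values whose low halves back the existing PMCEID0/1 views (compare the extract64(..., 0, 32) reset values below). In other words, roughly:

    /* Illustrative split of one 64-bit PMCEID value into its AArch32 views. */
    uint64_t pmceid0 = 0; /* as computed by get_pmceid(env, 0) */
    uint32_t aa32_pmceid0 = extract64(pmceid0, 0, 32);  /* PMCEID0 */
    uint32_t aa32_pmceid2 = extract64(pmceid0, 32, 32); /* PMCEID2 */
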
     if (arm_feature(env, ARM_FEATURE_V8)) {
         /* AArch64 ID registers, which all have impdef reset values.
          * Note that within the ID register ranges the unused slots
@@ -5376,7 +5968,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
-              .resetvalue = cpu->pmceid0 },
+              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
             { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
@@ -5384,7 +5976,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
-              .resetvalue = cpu->pmceid1 },
+              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
             { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
@@ -5845,6 +6437,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
             define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
         }
     }
+
+#ifdef TARGET_AARCH64
+    if (cpu_isar_feature(aa64_pauth, cpu)) {
+        define_arm_cp_regs(cpu, pauth_reginfo);
+    }
+#endif
 }
 
 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
@@ -6297,7 +6895,7 @@ static int bad_mode_switch(CPUARMState *env, int mode,
         return 0;
     case ARM_CPU_MODE_HYP:
         return !arm_feature(env, ARM_FEATURE_EL2)
-            || arm_current_el(env) < 2 || arm_is_secure(env);
+            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
     case ARM_CPU_MODE_MON:
         return arm_current_el(env) < 3;
     default:
@@ -7117,7 +7715,7 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
             limit = env->v7m.msplim[M_REG_S];
         }
     } else {
-        mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
+        mmu_idx = arm_mmu_idx(env);
         frame_sp_p = &env->regs[13];
         limit = v7m_sp_limit(env);
     }
@@ -7298,7 +7896,7 @@ static bool v7m_push_stack(ARMCPU *cpu)
     CPUARMState *env = &cpu->env;
     uint32_t xpsr = xpsr_read(env);
     uint32_t frameptr = env->regs[13];
-    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
 
     /* Align stack pointer if the guest wants that */
     if ((frameptr & 4) &&
@@ -8957,48 +9555,6 @@ static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
     return mmu_idx;
 }
 
-/* Returns TBI0 value for current regime el */
-uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
-    TCR *tcr;
-    uint32_t el;
-
-    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
-     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
-     */
-    mmu_idx = stage_1_mmu_idx(mmu_idx);
-
-    tcr = regime_tcr(env, mmu_idx);
-    el = regime_el(env, mmu_idx);
-
-    if (el > 1) {
-        return extract64(tcr->raw_tcr, 20, 1);
-    } else {
-        return extract64(tcr->raw_tcr, 37, 1);
-    }
-}
-
-/* Returns TBI1 value for current regime el */
-uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
-    TCR *tcr;
-    uint32_t el;
-
-    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
-     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
-     */
-    mmu_idx = stage_1_mmu_idx(mmu_idx);
-
-    tcr = regime_tcr(env, mmu_idx);
-    el = regime_el(env, mmu_idx);
-
-    if (el > 1) {
-        return 0;
-    } else {
-        return extract64(tcr->raw_tcr, 38, 1);
-    }
-}
-
 /* Return the TTBR associated with this translation regime */
 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                    int ttbrn)
@@ -9744,6 +10300,138 @@ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
     return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
 }
 
+ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
+                                        ARMMMUIdx mmu_idx)
+{
+    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+    uint32_t el = regime_el(env, mmu_idx);
+    bool tbi, tbid, epd, hpd, using16k, using64k;
+    int select, tsz;
+
+    /*
+     * Bit 55 is always between the two regions, and is canonical for
+     * determining if address tagging is enabled.
+     */
+    select = extract64(va, 55, 1);
+
+    if (el > 1) {
+        tsz = extract32(tcr, 0, 6);
+        using64k = extract32(tcr, 14, 1);
+        using16k = extract32(tcr, 15, 1);
+        if (mmu_idx == ARMMMUIdx_S2NS) {
+            /* VTCR_EL2 */
+            tbi = tbid = hpd = false;
+        } else {
+            tbi = extract32(tcr, 20, 1);
+            hpd = extract32(tcr, 24, 1);
+            tbid = extract32(tcr, 29, 1);
+        }
+        epd = false;
+    } else if (!select) {
+        tsz = extract32(tcr, 0, 6);
+        epd = extract32(tcr, 7, 1);
+        using64k = extract32(tcr, 14, 1);
+        using16k = extract32(tcr, 15, 1);
+        tbi = extract64(tcr, 37, 1);
+        hpd = extract64(tcr, 41, 1);
+        tbid = extract64(tcr, 51, 1);
+    } else {
+        int tg = extract32(tcr, 30, 2);
+        using16k = tg == 1;
+        using64k = tg == 3;
+        tsz = extract32(tcr, 16, 6);
+        epd = extract32(tcr, 23, 1);
+        tbi = extract64(tcr, 38, 1);
+        hpd = extract64(tcr, 42, 1);
+        tbid = extract64(tcr, 52, 1);
+    }
+    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
+    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */
+
+    return (ARMVAParameters) {
+        .tsz = tsz,
+        .select = select,
+        .tbi = tbi,
+        .tbid = tbid,
+        .epd = epd,
+        .hpd = hpd,
+        .using16k = using16k,
+        .using64k = using64k,
+    };
+}
+
+ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
+                                   ARMMMUIdx mmu_idx, bool data)
+{
+    ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);
+
+    /* Present TBI as a composite with TBID. */
+    ret.tbi &= (data || !ret.tbid);
+    return ret;
+}
+
+static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
+                                          ARMMMUIdx mmu_idx)
+{
+    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+    uint32_t el = regime_el(env, mmu_idx);
+    int select, tsz;
+    bool epd, hpd;
+
+    if (mmu_idx == ARMMMUIdx_S2NS) {
+        /* VTCR */
+        bool sext = extract32(tcr, 4, 1);
+        bool sign = extract32(tcr, 3, 1);
+
+        /*
+         * If the sign-extend bit is not the same as t0sz[3], the result
+         * is unpredictable. Flag this as a guest error.
+         */
+        if (sign != sext) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
+        }
+        tsz = sextract32(tcr, 0, 4) + 8;
+        select = 0;
+        hpd = false;
+        epd = false;
+    } else if (el == 2) {
+        /* HTCR */
+        tsz = extract32(tcr, 0, 3);
+        select = 0;
+        hpd = extract64(tcr, 24, 1);
+        epd = false;
+    } else {
+        int t0sz = extract32(tcr, 0, 3);
+        int t1sz = extract32(tcr, 16, 3);
+
+        if (t1sz == 0) {
+            select = va > (0xffffffffu >> t0sz);
+        } else {
+            /* Note that we will detect errors later.  */
+            select = va >= ~(0xffffffffu >> t1sz);
+        }
+        if (!select) {
+            tsz = t0sz;
+            epd = extract32(tcr, 7, 1);
+            hpd = extract64(tcr, 41, 1);
+        } else {
+            tsz = t1sz;
+            epd = extract32(tcr, 23, 1);
+            hpd = extract64(tcr, 42, 1);
+        }
+        /* For aarch32, hpd0 is not enabled without t2e as well. */
+        hpd &= extract32(tcr, 6, 1);
+    }
+
+    return (ARMVAParameters) {
+        .tsz = tsz,
+        .select = select,
+        .epd = epd,
+        .hpd = hpd,
+    };
+}
+
 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
@@ -9755,26 +10443,20 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
     /* Read an LPAE long-descriptor translation table. */
     ARMFaultType fault_type = ARMFault_Translation;
     uint32_t level;
-    uint32_t epd = 0;
-    int32_t t0sz, t1sz;
-    uint32_t tg;
+    ARMVAParameters param;
     uint64_t ttbr;
-    int ttbr_select;
     hwaddr descaddr, indexmask, indexmask_grainsize;
     uint32_t tableattrs;
-    target_ulong page_size;
+    target_ulong page_size, top_bits;
     uint32_t attrs;
-    int32_t stride = 9;
-    int32_t addrsize;
-    int inputsize;
-    int32_t tbi = 0;
+    int32_t stride;
+    int addrsize, inputsize;
     TCR *tcr = regime_tcr(env, mmu_idx);
     int ap, ns, xn, pxn;
     uint32_t el = regime_el(env, mmu_idx);
-    bool ttbr1_valid = true;
+    bool ttbr1_valid;
     uint64_t descaddrmask;
     bool aarch64 = arm_el_is_aa64(env, el);
-    bool hpd = false;
 
     /* TODO:
      * This code does not handle the different format TCR for VTCR_EL2.
@@ -9783,91 +10465,44 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
      * support for those page table walks.
     */
     if (aarch64) {
+        param = aa64_va_parameters(env, address, mmu_idx,
+                                   access_type != MMU_INST_FETCH);
         level = 0;
-        addrsize = 64;
-        if (el > 1) {
-            if (mmu_idx != ARMMMUIdx_S2NS) {
-                tbi = extract64(tcr->raw_tcr, 20, 1);
-            }
-        } else {
-            if (extract64(address, 55, 1)) {
-                tbi = extract64(tcr->raw_tcr, 38, 1);
-            } else {
-                tbi = extract64(tcr->raw_tcr, 37, 1);
-            }
-        }
-        tbi *= 8;
-
         /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
          * invalid.
          */
-        if (el > 1) {
-            ttbr1_valid = false;
-        }
+        ttbr1_valid = (el < 2);
+        addrsize = 64 - 8 * param.tbi;
+        inputsize = 64 - param.tsz;
     } else {
+        param = aa32_va_parameters(env, address, mmu_idx);
         level = 1;
-        addrsize = 32;
         /* There is no TTBR1 for EL2 */
-        if (el == 2) {
-            ttbr1_valid = false;
-        }
+        ttbr1_valid = (el != 2);
+        addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
+        inputsize = addrsize - param.tsz;
     }
 
-    /* Determine whether this address is in the region controlled by
-     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
-     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
-     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
+    /*
+     * We determined the region when collecting the parameters, but we
+     * have not yet validated that the address is valid for the region.
+     * Extract the top bits and verify that they all match select.
      */
-    if (aarch64) {
-        /* AArch64 translation. */
-        t0sz = extract32(tcr->raw_tcr, 0, 6);
-        t0sz = MIN(t0sz, 39);
-        t0sz = MAX(t0sz, 16);
-    } else if (mmu_idx != ARMMMUIdx_S2NS) {
-        /* AArch32 stage 1 translation. */
-        t0sz = extract32(tcr->raw_tcr, 0, 3);
-    } else {
-        /* AArch32 stage 2 translation.
-         */
-        bool sext = extract32(tcr->raw_tcr, 4, 1);
-        bool sign = extract32(tcr->raw_tcr, 3, 1);
-
-        /* Address size is 40-bit for a stage 2 translation,
-         * and t0sz can be negative (from -8 to 7),
-         * so we need to adjust it to use the TTBR selecting logic below.
-         */
-        addrsize = 40;
-        t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;
-
-        /* If the sign-extend bit is not the same as t0sz[3], the result
-         * is unpredictable. Flag this as a guest error.  */
-        if (sign != sext) {
-            qemu_log_mask(LOG_GUEST_ERROR,
-                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
-        }
-    }
-    t1sz = extract32(tcr->raw_tcr, 16, 6);
-    if (aarch64) {
-        t1sz = MIN(t1sz, 39);
-        t1sz = MAX(t1sz, 16);
-    }
-    if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
-        /* there is a ttbr0 region and we are in it (high bits all zero) */
-        ttbr_select = 0;
-    } else if (ttbr1_valid && t1sz &&
-               !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
-        /* there is a ttbr1 region and we are in it (high bits all one) */
-        ttbr_select = 1;
-    } else if (!t0sz) {
-        /* ttbr0 region is "everything not in the ttbr1 region" */
-        ttbr_select = 0;
-    } else if (!t1sz && ttbr1_valid) {
-        /* ttbr1 region is "everything not in the ttbr0 region" */
-        ttbr_select = 1;
-    } else {
-        /* in the gap between the two regions, this is a Translation fault */
+    top_bits = sextract64(address, inputsize, addrsize - inputsize);
+    if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
+        /* In the gap between the two regions, this is a Translation fault */
         fault_type = ARMFault_Translation;
         goto do_fault;
     }
 
+    if (param.using64k) {
+        stride = 13;
+    } else if (param.using16k) {
+        stride = 11;
+    } else {
+        stride = 9;
+    }
+
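
The single sextract64 replaces the old four-way ttbr_select ladder: sign-extending from the input-size bit yields 0 when the top bits are all zero (a TTBR0 address) and -1 when they are all one (a TTBR1 address), so -top_bits must equal the select value computed earlier. A worked example under an assumed 48-bit input size (sextract64 is QEMU's bitops helper):

    uint64_t va = 0xffff800000000000ull;        /* canonical upper-half VA */
    int64_t top = sextract64(va, 48, 64 - 48);  /* bits [63:48] -> -1 */
    /* -top == 1, matching select == 1 (TTBR1); 0 would match TTBR0. */
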
     /* Note that QEMU ignores shareability and cacheability attributes,
      * so we don't need to do anything with the SH, ORGN, IRGN fields
      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
@@ -9875,56 +10510,13 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
      * implement any ASID-like capability so we can ignore it (instead
      * we will always flush the TLB any time the ASID is changed).
      */
-    if (ttbr_select == 0) {
-        ttbr = regime_ttbr(env, mmu_idx, 0);
-        if (el < 2) {
-            epd = extract32(tcr->raw_tcr, 7, 1);
-        }
-        inputsize = addrsize - t0sz;
-
-        tg = extract32(tcr->raw_tcr, 14, 2);
-        if (tg == 1) { /* 64KB pages */
-            stride = 13;
-        }
-        if (tg == 2) { /* 16KB pages */
-            stride = 11;
-        }
-        if (aarch64 && el > 1) {
-            hpd = extract64(tcr->raw_tcr, 24, 1);
-        } else {
-            hpd = extract64(tcr->raw_tcr, 41, 1);
-        }
-        if (!aarch64) {
-            /* For aarch32, hpd0 is not enabled without t2e as well.  */
-            hpd &= extract64(tcr->raw_tcr, 6, 1);
-        }
-    } else {
-        /* We should only be here if TTBR1 is valid */
-        assert(ttbr1_valid);
-
-        ttbr = regime_ttbr(env, mmu_idx, 1);
-        epd = extract32(tcr->raw_tcr, 23, 1);
-        inputsize = addrsize - t1sz;
-
-        tg = extract32(tcr->raw_tcr, 30, 2);
-        if (tg == 3)  { /* 64KB pages */
-            stride = 13;
-        }
-        if (tg == 1) { /* 16KB pages */
-            stride = 11;
-        }
-        hpd = extract64(tcr->raw_tcr, 42, 1);
-        if (!aarch64) {
-            /* For aarch32, hpd1 is not enabled without t2e as well.  */
-            hpd &= extract64(tcr->raw_tcr, 6, 1);
-        }
-    }
+    ttbr = regime_ttbr(env, mmu_idx, param.select);
 
     /* Here we should have set up all the parameters for the translation:
      * inputsize, ttbr, epd, stride, tbi
      */
 
-    if (epd) {
+    if (param.epd) {
         /* Translation table walk disabled => Translation fault on TLB miss
          * Note: This is always 0 on 64-bit EL2 and EL3.
          */
@@ -10037,7 +10629,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
     }
     /* Merge in attributes from table descriptors */
     attrs |= nstable << 3; /* NS */
-    if (hpd) {
+    if (param.hpd) {
         /* HPD disables all the table attributes except NSTable.  */
         break;
     }
@@ -11073,7 +11665,7 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     int prot;
     bool ret;
     ARMMMUFaultInfo fi = {};
-    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
 
     *attrs = (MemTxAttrs) {};
 
@@ -12949,10 +13541,66 @@ int fp_exception_el(CPUARMState *env, int cur_el)
     return 0;
 }
 
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
+                                                bool secstate, bool priv)
+{
+    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
+
+    if (priv) {
+        mmu_idx |= ARM_MMU_IDX_M_PRIV;
+    }
+
+    if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) {
+        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
+    }
+
+    if (secstate) {
+        mmu_idx |= ARM_MMU_IDX_M_S;
+    }
+
+    return mmu_idx;
+}
+
+/* Return the MMU index for a v7M CPU in the specified security state */
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
+{
+    bool priv = arm_current_el(env) != 0;
+
+    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
+}
+
+ARMMMUIdx arm_mmu_idx(CPUARMState *env)
+{
+    int el;
+
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
+    }
+
+    el = arm_current_el(env);
+    if (el < 2 && arm_is_secure_below_el3(env)) {
+        return ARMMMUIdx_S1SE0 + el;
+    } else {
+        return ARMMMUIdx_S12NSE0 + el;
+    }
+}
+
+int cpu_mmu_index(CPUARMState *env, bool ifetch)
+{
+    return arm_to_core_mmu_idx(arm_mmu_idx(env));
+}
+
+#ifndef CONFIG_USER_ONLY
+ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
+{
+    return stage_1_mmu_idx(arm_mmu_idx(env));
+}
+#endif
+
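
The M-profile index is composed from independent flag bits OR-ed onto a base value, so each privilege/security/priority combination maps to a distinct TLB. For instance, a privileged Secure context with no negative-priority exception active would combine as follows (illustrative; the ARM_MMU_IDX_M_* bit values are defined in cpu.h):

    ARMMMUIdx idx = ARM_MMU_IDX_M; /* base M-profile index */
    idx |= ARM_MMU_IDX_M_PRIV;     /* privileged execution */
    idx |= ARM_MMU_IDX_M_S;        /* Secure state */
    /* ARM_MMU_IDX_M_NEGPRI would additionally be OR-ed in while a
     * HardFault/NMI-style negative-priority handler is active. */
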
 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                           target_ulong *cs_base, uint32_t *pflags)
 {
-    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
     int current_el = arm_current_el(env);
     int fp_el = fp_exception_el(env, current_el);
     uint32_t flags = 0;
@@ -12962,11 +13610,30 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
         *pc = env->pc;
         flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
-        /* Get control bits for tagged addresses */
-        flags = FIELD_DP32(flags, TBFLAG_A64, TBI0,
-                           arm_regime_tbi0(env, mmu_idx));
-        flags = FIELD_DP32(flags, TBFLAG_A64, TBI1,
-                           arm_regime_tbi1(env, mmu_idx));
+
+#ifndef CONFIG_USER_ONLY
+        /*
+         * Get control bits for tagged addresses.  Note that the
+         * translator only uses this for instruction addresses.
+         */
+        {
+            ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
+            ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
+            int tbii, tbid;
+
+            /* FIXME: ARMv8.1-VHE S2 translation regime.  */
+            if (regime_el(env, stage1) < 2) {
+                ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
+                tbid = (p1.tbi << 1) | p0.tbi;
+                tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
+            } else {
+                tbid = p0.tbi;
+                tbii = tbid & !p0.tbid;
+            }
+
+            flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
+        }
+#endif
 
         if (cpu_isar_feature(aa64_sve, cpu)) {
             int sve_el = sve_exception_el(env, current_el);
@@ -12983,6 +13650,25 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
             flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
             flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
         }
+
+        if (cpu_isar_feature(aa64_pauth, cpu)) {
+            /*
+             * In order to save space in flags, we record only whether
+             * pauth is "inactive", meaning all insns are implemented as
+             * a nop, or "active" when some action must be performed.
+             * The decision of which action to take is left to a helper.
+             */
+            uint64_t sctlr;
+            if (current_el == 0) {
+                /* FIXME: ARMv8.1-VHE S2 translation regime.  */
+                sctlr = env->cp15.sctlr_el[1];
+            } else {
+                sctlr = env->cp15.sctlr_el[current_el];
+            }
+            if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
+                flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
+            }
+        }
     } else {
         *pc = env->regs[15];
         flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);