Diffstat (limited to 'target')
-rw-r--r--  target/arm/cpu-features.h          5
-rw-r--r--  target/arm/cpu.h                   6
-rw-r--r--  target/arm/helper.c                8
-rw-r--r--  target/arm/ptw.c                  87
-rw-r--r--  target/arm/tcg/cpu64.c            81
-rw-r--r--  target/arm/tcg/mte_helper.c        5
-rw-r--r--  target/i386/arch_memory_mapping.c 10
-rw-r--r--  target/i386/kvm/xen-emu.c          4
-rw-r--r--  target/i386/nvmm/nvmm-all.c        5
-rw-r--r--  target/i386/whpx/whpx-all.c        7
-rw-r--r--  target/s390x/mmu_helper.c          8
11 files changed, 184 insertions, 42 deletions
diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h
index 602f6a88e5..f59c18b6ef 100644
--- a/target/arm/cpu-features.h
+++ b/target/arm/cpu-features.h
@@ -1091,6 +1091,11 @@ static inline bool isar_feature_aa64_rme(const ARMISARegisters *id)
     return FIELD_EX64_IDREG(id, ID_AA64PFR0, RME) != 0;
 }
 
+static inline bool isar_feature_aa64_rme_gpc2(const ARMISARegisters *id)
+{
+    return FIELD_EX64_IDREG(id, ID_AA64PFR0, RME) >= 2;
+}
+
 static inline bool isar_feature_aa64_dit(const ARMISARegisters *id)
 {
     return FIELD_EX64_IDREG(id, ID_AA64PFR0, DIT) != 0;
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 2b9585dc80..41414ac22b 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1995,13 +1995,19 @@ FIELD(V7M_VPR, MASK01, 16, 4)
 FIELD(V7M_VPR, MASK23, 20, 4)
 
 FIELD(GPCCR, PPS, 0, 3)
+FIELD(GPCCR, RLPAD, 5, 1)
+FIELD(GPCCR, NSPAD, 6, 1)
+FIELD(GPCCR, SPAD, 7, 1)
 FIELD(GPCCR, IRGN, 8, 2)
 FIELD(GPCCR, ORGN, 10, 2)
 FIELD(GPCCR, SH, 12, 2)
 FIELD(GPCCR, PGS, 14, 2)
 FIELD(GPCCR, GPC, 16, 1)
 FIELD(GPCCR, GPCP, 17, 1)
+FIELD(GPCCR, TBGPCD, 18, 1)
+FIELD(GPCCR, NSO, 19, 1)
 FIELD(GPCCR, L0GPTSZ, 20, 4)
+FIELD(GPCCR, APPSAA, 24, 1)
 
 FIELD(MFAR, FPA, 12, 40)
 FIELD(MFAR, NSE, 62, 1)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index aa730addf2..b7bf45afc1 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -3742,7 +3742,8 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
     value &= valid_mask;
 
     /* RW is RAO/WI if EL1 is AArch64 only */
-    if (!cpu_isar_feature(aa64_aa32_el1, cpu)) {
+    if (arm_feature(env, ARM_FEATURE_AARCH64) &&
+        !cpu_isar_feature(aa64_aa32_el1, cpu)) {
         value |= HCR_RW;
     }
 
@@ -4932,6 +4933,11 @@ static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
         R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
         R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;
 
+    if (cpu_isar_feature(aa64_rme_gpc2, env_archcpu(env))) {
+        rw_mask |= R_GPCCR_APPSAA_MASK | R_GPCCR_NSO_MASK |
+                   R_GPCCR_SPAD_MASK | R_GPCCR_NSPAD_MASK | R_GPCCR_RLPAD_MASK;
+    }
+
     env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
 }
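The FIELD() declarations above come from QEMU's hw/registerfields.h: each one expands to R_<REG>_<FIELD>_SHIFT, _LENGTH and _MASK constants, which is what the rw_mask arithmetic in gpccr_write() relies on. A minimal self-contained sketch of that pattern and of the read-modify-write idiom, using simplified stand-in macros rather than the real QEMU ones:

#include <stdint.h>

/* Simplified stand-in for hw/registerfields.h FIELD(); illustration only. */
#define REG_FIELD32(reg, fld, shift, len)                             \
    enum {                                                            \
        R_##reg##_##fld##_SHIFT  = (shift),                           \
        R_##reg##_##fld##_LENGTH = (len),                             \
        R_##reg##_##fld##_MASK   = (((1u << (len)) - 1) << (shift)),  \
    }

REG_FIELD32(GPCCR, SPAD, 7, 1);
REG_FIELD32(GPCCR, APPSAA, 24, 1);

/*
 * The update pattern used by gpccr_write(): bits covered by rw_mask come
 * from the value being written, all other bits keep their previous state.
 */
static uint64_t masked_write(uint64_t old, uint64_t value, uint64_t rw_mask)
{
    return (value & rw_mask) | (old & ~rw_mask);
}

Gating the new FEAT_RME_GPC2 bits through rw_mask this way means that on CPUs without the feature they remain RES0, with no separate trap logic needed.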
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 6344971fa6..e03657f309 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -36,8 +36,6 @@ typedef struct S1Translate {
     /*
      * in_space: the security space for this walk. This plus
      * the in_mmu_idx specify the architectural translation regime.
-     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
-     * this field is updated accordingly.
      *
      * Note that the security space for the in_ptw_idx may be different
      * from that for the in_mmu_idx. We do not need to explicitly track
@@ -53,6 +51,11 @@ typedef struct S1Translate {
      */
     ARMSecuritySpace in_space;
     /*
+     * Like in_space, except this may be "downgraded" to NonSecure
+     * by an NSTable bit.
+     */
+    ARMSecuritySpace cur_space;
+    /*
      * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
      * accesses will not update the guest page table access flags
      * and will not change the state of the softmmu TLBs.
@@ -315,6 +318,7 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
 
 static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
                                      ARMSecuritySpace pspace,
+                                     ARMSecuritySpace ss,
                                      ARMMMUFaultInfo *fi)
 {
     MemTxAttrs attrs = {
@@ -383,18 +387,37 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
     l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ);
 
     /*
-     * GPC Priority 2: Secure, Realm or Root address exceeds PPS.
+     * GPC Priority 2: Access to Secure, NonSecure or Realm is prevented
+     * by one of the GPCCR_EL3 address space disable bits (R_TCWMD).
+     * All of these bits are checked vs aa64_rme_gpc2 in gpccr_write.
+     */
+    {
+        static const uint8_t disable_masks[4] = {
+            [ARMSS_Secure] = R_GPCCR_SPAD_MASK,
+            [ARMSS_NonSecure] = R_GPCCR_NSPAD_MASK,
+            [ARMSS_Root] = 0,
+            [ARMSS_Realm] = R_GPCCR_RLPAD_MASK,
+        };
+
+        if (gpccr & disable_masks[pspace]) {
+            goto fault_fail;
+        }
+    }
+
+    /*
+     * GPC Priority 3: Secure, Realm or Root address exceeds PPS.
      * R_CPDSB: A NonSecure physical address input exceeding PPS
      * does not experience any fault.
+     * R_PBPSH: Other address spaces have fault suppressed by APPSAA.
      */
     if (paddress & ~pps_mask) {
-        if (pspace == ARMSS_NonSecure) {
+        if (pspace == ARMSS_NonSecure || FIELD_EX64(gpccr, GPCCR, APPSAA)) {
             return true;
         }
-        goto fault_size;
+        goto fault_fail;
     }
 
-    /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */
+    /* GPC Priority 4: the base address of GPTBR_EL3 exceeds PPS. */
     tableaddr = env->cp15.gptbr_el3 << 12;
     if (tableaddr & ~pps_mask) {
         goto fault_size;
@@ -475,18 +498,30 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
         break;
     case 0b1111: /* all access */
         return true;
-    case 0b1000:
-    case 0b1001:
-    case 0b1010:
-    case 0b1011:
+    case 0b1000: /* secure */
+        if (!cpu_isar_feature(aa64_sel2, cpu)) {
+            goto fault_walk;
+        }
+        /* fall through */
+    case 0b1001: /* non-secure */
+    case 0b1010: /* root */
+    case 0b1011: /* realm */
         if (pspace == (gpi & 3)) {
             return true;
         }
         break;
+    case 0b1101: /* non-secure only */
+        /* aa64_rme_gpc2 was checked in gpccr_write */
+        if (FIELD_EX64(gpccr, GPCCR, NSO)) {
+            return (pspace == ARMSS_NonSecure &&
+                    (ss == ARMSS_NonSecure || ss == ARMSS_Root));
+        }
+        goto fault_walk;
     default:
         goto fault_walk; /* reserved */
     }
 
+ fault_fail:
+    fi->gpcf = GPCF_Fail;
+    goto fault_common;
  fault_eabt:
@@ -587,7 +622,8 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
      * From gdbstub, do not use softmmu so that we don't modify the
      * state of the cpu at all, including softmmu tlb contents.
      */
-    ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
+    ARMSecuritySpace s2_space
+        = S2_security_space(ptw->cur_space, s2_mmu_idx);
     S1Translate s2ptw = {
         .in_mmu_idx = s2_mmu_idx,
         .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
@@ -630,7 +666,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
     }
 
     if (regime_is_stage2(s2_mmu_idx)) {
-        uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
+        uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->cur_space);
 
         if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
             /*
@@ -641,7 +677,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
             fi->s2addr = addr;
             fi->stage2 = true;
             fi->s1ptw = true;
-            fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
+            fi->s1ns = fault_s1ns(ptw->cur_space, s2_mmu_idx);
             return false;
         }
     }
@@ -657,7 +693,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
         fi->s2addr = addr;
         fi->stage2 = regime_is_stage2(s2_mmu_idx);
         fi->s1ptw = fi->stage2;
-        fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
+        fi->s1ns = fault_s1ns(ptw->cur_space, s2_mmu_idx);
         return false;
     }
 
@@ -844,7 +880,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
             fi->s2addr = ptw->out_virt;
             fi->stage2 = true;
             fi->s1ptw = true;
-            fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx);
+            fi->s1ns = fault_s1ns(ptw->cur_space, ptw->in_ptw_idx);
             return 0;
         }
 
@@ -1224,7 +1260,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
             g_assert_not_reached();
         }
     }
-    out_space = ptw->in_space;
+    out_space = ptw->cur_space;
     if (ns) {
         /*
          * The NS bit will (as required by the architecture) have no effect if
@@ -1254,7 +1290,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
     }
 
     result->f.prot = get_S1prot(env, mmu_idx, false, user_rw, prot_rw,
-                                xn, pxn, result->f.attrs.space, out_space);
+                                xn, pxn, ptw->in_space, out_space);
     if (ptw->in_prot_check & ~result->f.prot) {
         /* Access permission fault. */
         fi->type = ARMFault_Permission;
@@ -1857,7 +1893,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
          * NonSecure. With RME, the EL3 translation regime does not change
          * from Root to NonSecure.
          */
-        if (ptw->in_space == ARMSS_Secure
+        if (ptw->cur_space == ARMSS_Secure
             && !regime_is_stage2(mmu_idx)
             && extract32(tableattrs, 4, 1)) {
             /*
@@ -1867,7 +1903,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
             QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
             QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
             ptw->in_ptw_idx += 1;
-            ptw->in_space = ARMSS_NonSecure;
+            ptw->cur_space = ARMSS_NonSecure;
         }
 
         if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
@@ -1991,7 +2027,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     }
 
     ap = extract32(attrs, 6, 2);
-    out_space = ptw->in_space;
+    out_space = ptw->cur_space;
     if (regime_is_stage2(mmu_idx)) {
         /*
          * R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
@@ -2089,12 +2125,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         user_rw = simple_ap_to_rw_prot_is_user(ap, true);
         prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
 
-    /*
-     * Note that we modified ptw->in_space earlier for NSTable, but
-     * result->f.attrs retains a copy of the original security space.
-     */
     result->f.prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw,
-                                xn, pxn, result->f.attrs.space, out_space);
+                                xn, pxn, ptw->in_space, out_space);
 
     /* Index into MAIR registers for cache attributes */
     attrindx = extract32(attrs, 2, 3);
@@ -2192,7 +2224,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         fi->level = level;
         fi->stage2 = regime_is_stage2(mmu_idx);
     }
-    fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx);
+    fi->s1ns = fault_s1ns(ptw->cur_space, mmu_idx);
     return true;
 }
 
@@ -3413,6 +3445,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
      * cannot upgrade a NonSecure translation regime's attributes
      * to Secure or Realm.
      */
+    ptw->cur_space = ptw->in_space;
     result->f.attrs.space = ptw->in_space;
     result->f.attrs.secure = arm_space_is_secure(ptw->in_space);
 
@@ -3548,7 +3581,7 @@ static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
         return true;
     }
     if (!granule_protection_check(env, result->f.phys_addr,
-                                  result->f.attrs.space, fi)) {
+                                  result->f.attrs.space, ptw->in_space, fi)) {
         fi->type = ARMFault_GPCFOnOutput;
         return true;
     }
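Condensed, the two new early GPC priority levels that the ptw.c hunk introduces behave as below. This is a rough standalone sketch with hypothetical names and bit positions copied from the GPCCR layout above, not actual QEMU code:

#include <stdint.h>

typedef enum { SS_Secure, SS_NonSecure, SS_Root, SS_Realm } SecSpace;
typedef enum { GPC_PASS, GPC_FAIL, GPC_WALK } GpcResult;

enum {
    BIT_RLPAD  = 1u << 5,
    BIT_NSPAD  = 1u << 6,
    BIT_SPAD   = 1u << 7,
    BIT_APPSAA = 1u << 24,
};

/*
 * Priority 2: a *PAD disable bit set for the access's physical space
 * fails the access outright (Root has no disable bit).
 * Priority 3: a physical address above PPS faults unless the space is
 * NonSecure or GPCCR_EL3.APPSAA suppresses the fault.
 */
static GpcResult gpc_early_checks(uint32_t gpccr, uint64_t pa,
                                  uint64_t pps_mask, SecSpace pspace)
{
    static const uint32_t disable_bit[4] = {
        [SS_Secure]    = BIT_SPAD,
        [SS_NonSecure] = BIT_NSPAD,
        [SS_Root]      = 0,
        [SS_Realm]     = BIT_RLPAD,
    };

    if (gpccr & disable_bit[pspace]) {
        return GPC_FAIL;
    }
    if (pa & ~pps_mask) {
        return (pspace == SS_NonSecure || (gpccr & BIT_APPSAA))
               ? GPC_PASS : GPC_FAIL;
    }
    return GPC_WALK;    /* continue with the granule protection table walk */
}

The new cur_space field serves the other half of the patch: the walk's security space can be downgraded by NSTable mid-walk, so the original in_space must survive untouched for the final GPC and get_S1prot() checks.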
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index abef6a246e..8c617fe37b 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -159,7 +159,8 @@ static void cpu_arm_set_rme(Object *obj, bool value, Error **errp)
 {
     ARMCPU *cpu = ARM_CPU(obj);
 
-    FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, RME, value);
+    /* Enable FEAT_RME_GPC2 */
+    FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, RME, value ? 2 : 0);
 }
 
 static void cpu_max_set_l0gptsz(Object *obj, Visitor *v, const char *name,
@@ -406,6 +407,79 @@ static void aarch64_a76_initfn(Object *obj)
     cpu->isar.reset_pmcr_el0 = 0x410b3000;
 }
 
+static void aarch64_a78ae_initfn(Object *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+    ARMISARegisters *isar = &cpu->isar;
+
+    cpu->dtb_compatible = "arm,cortex-a78ae";
+    set_feature(&cpu->env, ARM_FEATURE_V8);
+    set_feature(&cpu->env, ARM_FEATURE_NEON);
+    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+    set_feature(&cpu->env, ARM_FEATURE_EL2);
+    set_feature(&cpu->env, ARM_FEATURE_EL3);
+    set_feature(&cpu->env, ARM_FEATURE_PMU);
+
+    /* Ordered by 3.2.4 AArch64 registers by functional group */
+    SET_IDREG(isar, CLIDR, 0x82000023);
+    cpu->ctr = 0x9444c004;
+    cpu->dcz_blocksize = 4;
+    SET_IDREG(isar, ID_AA64DFR0, 0x0000000110305408ull);
+    SET_IDREG(isar, ID_AA64ISAR0, 0x0010100010211120ull);
+    SET_IDREG(isar, ID_AA64ISAR1, 0x0000000001200031ull);
+    SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101125ull);
+    SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+    SET_IDREG(isar, ID_AA64MMFR2, 0x0000000100001011ull);
+    SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */
+    SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull);
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
+    SET_IDREG(isar, ID_DFR0, 0x04010088);
+    SET_IDREG(isar, ID_ISAR0, 0x02101110);
+    SET_IDREG(isar, ID_ISAR1, 0x13112111);
+    SET_IDREG(isar, ID_ISAR2, 0x21232042);
+    SET_IDREG(isar, ID_ISAR3, 0x01112131);
+    SET_IDREG(isar, ID_ISAR4, 0x00010142);
+    SET_IDREG(isar, ID_ISAR5, 0x01011121);
+    SET_IDREG(isar, ID_ISAR6, 0x00000010);
+    SET_IDREG(isar, ID_MMFR0, 0x10201105);
+    SET_IDREG(isar, ID_MMFR1, 0x40000000);
+    SET_IDREG(isar, ID_MMFR2, 0x01260000);
+    SET_IDREG(isar, ID_MMFR3, 0x02122211);
+    SET_IDREG(isar, ID_MMFR4, 0x00021110);
+    SET_IDREG(isar, ID_PFR0, 0x10010131);
+    SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
+    SET_IDREG(isar, ID_PFR2, 0x00000011);
+    cpu->midr = 0x410fd423; /* r0p3 */
+    cpu->revidr = 0;
+
+    /* From 3.2.33 CCSIDR_EL1 */
+    /* 64KB L1 dcache */
+    cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 7);
+    /* 64KB L1 icache */
+    cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 2);
+    /* 512KB L2 cache */
+    cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 8, 64, 512 * KiB, 7);
+
+    /* From 3.2.118 SCTLR_EL3 */
+    cpu->reset_sctlr = 0x30c50838;
+
+    /* From 3.4.23 ICH_VTR_EL2 */
+    cpu->gic_num_lrs = 4;
+    cpu->gic_vpribits = 5;
+    cpu->gic_vprebits = 5;
+    /* From 3.4.8 ICC_CTLR_EL3 */
+    cpu->gic_pribits = 5;
+
+    /* From 3.5.1 AdvSIMD AArch64 register summary */
+    cpu->isar.mvfr0 = 0x10110222;
+    cpu->isar.mvfr1 = 0x13211111;
+    cpu->isar.mvfr2 = 0x00000043;
+
+    /* From 5.5.1 AArch64 PMU register summary */
+    cpu->isar.reset_pmcr_el0 = 0x41223000;
+}
+
 static void aarch64_a64fx_initfn(Object *obj)
 {
     ARMCPU *cpu = ARM_CPU(obj);
@@ -1321,6 +1395,11 @@ static const ARMCPUInfo aarch64_cpus[] = {
     { .name = "cortex-a55",         .initfn = aarch64_a55_initfn },
     { .name = "cortex-a72",         .initfn = aarch64_a72_initfn },
     { .name = "cortex-a76",         .initfn = aarch64_a76_initfn },
+    /*
+     * The Cortex-A78AE differs slightly from the plain Cortex-A78. We don't
+     * currently model the latter.
+     */
+    { .name = "cortex-a78ae",       .initfn = aarch64_a78ae_initfn },
     { .name = "cortex-a710",        .initfn = aarch64_a710_initfn },
     { .name = "a64fx",              .initfn = aarch64_a64fx_initfn },
     { .name = "neoverse-n1",        .initfn = aarch64_neoverse_n1_initfn },
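A worked example of what the make_ccsidr() calls above encode, assuming the standard Armv8 legacy 32-bit CCSIDR layout and the argument order inferred from the call sites (associativity, line size, cache size, WT/WB/RA/WA flags); this is a sketch, not QEMU's actual helper:

#include <stdint.h>

/*
 * Legacy CCSIDR layout: [31:28] flags, [27:13] NumSets-1,
 * [12:3] Associativity-1, [2:0] log2(words per line)-2.
 */
static uint32_t ccsidr_legacy(uint32_t assoc, uint32_t linesize,
                              uint32_t cachesize, uint32_t flags)
{
    uint32_t sets = cachesize / (assoc * linesize);
    uint32_t lg_words = 0;

    for (uint32_t w = linesize / 4; w > 1; w >>= 1) {
        lg_words++;
    }
    return (flags << 28) | ((sets - 1) << 13) | ((assoc - 1) << 3)
           | (lg_words - 2);
}

/*
 * ccsidr_legacy(4, 64, 64 * 1024, 7) == 0x701fe01a: a 4-way cache with
 * 64-byte lines and 64KiB capacity has 256 sets. Under this layout the
 * flags read as 7 = WB|RA|WA for the dcache and 2 = RA for the icache.
 */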
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 302e899287..b96c953f80 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -21,12 +21,13 @@
 #include "qemu/log.h"
 #include "cpu.h"
 #include "internals.h"
+#include "exec/target_page.h"
 #include "exec/page-protection.h"
 #ifdef CONFIG_USER_ONLY
 #include "user/cpu_loop.h"
 #include "user/page-protection.h"
 #else
-#include "system/ram_addr.h"
+#include "system/physmem.h"
 #endif
 #include "accel/tcg/cpu-ldst.h"
 #include "accel/tcg/probe.h"
@@ -188,7 +189,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
          */
         if (tag_access == MMU_DATA_STORE) {
             ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
-            cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
+            physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
         }
 
         return memory_region_get_ram_ptr(mr) + xlat;
diff --git a/target/i386/arch_memory_mapping.c b/target/i386/arch_memory_mapping.c
index a2398c2173..560f4689ab 100644
--- a/target/i386/arch_memory_mapping.c
+++ b/target/i386/arch_memory_mapping.c
@@ -35,7 +35,7 @@ static void walk_pte(MemoryMappingList *list, AddressSpace *as,
         }
 
         start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
-        if (cpu_physical_memory_is_io(start_paddr)) {
+        if (address_space_is_io(as, start_paddr)) {
             /* I/O region */
             continue;
         }
@@ -65,7 +65,7 @@ static void walk_pte2(MemoryMappingList *list, AddressSpace *as,
         }
 
         start_paddr = pte & ~0xfff;
-        if (cpu_physical_memory_is_io(start_paddr)) {
+        if (address_space_is_io(as, start_paddr)) {
             /* I/O region */
             continue;
         }
@@ -100,7 +100,7 @@ static void walk_pde(MemoryMappingList *list, AddressSpace *as,
         if (pde & PG_PSE_MASK) {
             /* 2 MB page */
             start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
-            if (cpu_physical_memory_is_io(start_paddr)) {
+            if (address_space_is_io(as, start_paddr)) {
                 /* I/O region */
                 continue;
             }
@@ -142,7 +142,7 @@ static void walk_pde2(MemoryMappingList *list, AddressSpace *as,
              */
            high_paddr = ((hwaddr)(pde & 0x1fe000) << 19);
             start_paddr = (pde & ~0x3fffff) | high_paddr;
-            if (cpu_physical_memory_is_io(start_paddr)) {
+            if (address_space_is_io(as, start_paddr)) {
                 /* I/O region */
                 continue;
             }
@@ -203,7 +203,7 @@ static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,
         if (pdpe & PG_PSE_MASK) {
             /* 1 GB page */
             start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
-            if (cpu_physical_memory_is_io(start_paddr)) {
+            if (address_space_is_io(as, start_paddr)) {
                 /* I/O region */
                 continue;
             }
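A worked example of the PSE address reassembly performed by walk_pde2() above: for a 4MiB page, PDE bits [20:13] carry physical address bits [39:32], so shifting the masked field left by 19 (= 32 - 13) puts them in place. The PDE value here is made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical PDE: base PA bits [31:22] = 0x001, PA bits [39:32] = 0x05,
     * low flags 0x83 = Present | RW | PSE. */
    uint64_t pde   = 0x400083ull | (0x5ull << 13);
    uint64_t high  = (pde & 0x1fe000) << 19;          /* -> PA bits [39:32] */
    uint64_t start = (pde & ~0x3fffffull) | high;

    printf("start_paddr = %#llx\n", (unsigned long long)start); /* 0x500400000 */
    return 0;
}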
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index 284c5ef6f6..52de019834 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -21,6 +21,7 @@
 #include "system/address-spaces.h"
 #include "xen-emu.h"
 #include "trace.h"
+#include "system/memory.h"
 #include "system/runstate.h"
 #include "hw/pci/msi.h"
 
@@ -75,6 +76,7 @@ static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa,
 static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz,
                       bool is_write)
 {
+    AddressSpace *as = cpu_addressspace(cs, MEMTXATTRS_UNSPECIFIED);
     uint8_t *buf = (uint8_t *)_buf;
     uint64_t gpa;
     size_t len;
@@ -87,7 +89,7 @@ static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz,
             len = sz;
         }
 
-        cpu_physical_memory_rw(gpa, buf, len, is_write);
+        address_space_rw(as, gpa, MEMTXATTRS_UNSPECIFIED, buf, len, is_write);
 
         buf += len;
         sz -= len;
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index ed42425167..2e442baf4b 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -15,6 +15,7 @@
 #include "accel/accel-ops.h"
 #include "system/nvmm.h"
 #include "system/cpus.h"
+#include "system/memory.h"
 #include "system/runstate.h"
 #include "qemu/main-loop.h"
 #include "qemu/error-report.h"
@@ -516,7 +517,9 @@ nvmm_io_callback(struct nvmm_io *io)
 static void
 nvmm_mem_callback(struct nvmm_mem *mem)
 {
-    cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write);
+    /* TODO: Get CPUState via mem->vcpu? */
+    address_space_rw(&address_space_memory, mem->gpa, MEMTXATTRS_UNSPECIFIED,
+                     mem->data, mem->size, mem->write);
 
     /* Needed, otherwise infinite loop. */
     current_cpu->vcpu_dirty = false;
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index 2a85168ed5..256761834c 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -788,8 +788,11 @@ static HRESULT CALLBACK whpx_emu_mmio_callback(
     void *ctx,
     WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
 {
-    cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
-                           ma->Direction);
+    CPUState *cs = (CPUState *)ctx;
+    AddressSpace *as = cpu_addressspace(cs, MEMTXATTRS_UNSPECIFIED);
+
+    address_space_rw(as, ma->GpaAddress, MEMTXATTRS_UNSPECIFIED,
+                     ma->Data, ma->AccessSize, ma->Direction);
     return S_OK;
 }
 
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
index 00946e9c0f..487c41bf93 100644
--- a/target/s390x/mmu_helper.c
+++ b/target/s390x/mmu_helper.c
@@ -23,6 +23,7 @@
 #include "kvm/kvm_s390x.h"
 #include "system/kvm.h"
 #include "system/tcg.h"
+#include "system/memory.h"
 #include "exec/page-protection.h"
 #include "exec/target_page.h"
 #include "hw/hw.h"
@@ -522,6 +523,7 @@ int s390_cpu_pv_mem_rw(S390CPU *cpu, unsigned int offset, void *hostbuf,
 int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
                          int len, bool is_write)
 {
+    const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
     int currlen, nr_pages, i;
     target_ulong *pages;
     uint64_t tec;
@@ -542,11 +544,13 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
     if (ret) {
         trigger_access_exception(&cpu->env, ret, tec);
     } else if (hostbuf != NULL) {
+        AddressSpace *as = CPU(cpu)->as;
+
         /* Copy data by stepping through the area page by page */
        for (i = 0; i < nr_pages; i++) {
             currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE));
-            cpu_physical_memory_rw(pages[i] | (laddr & ~TARGET_PAGE_MASK),
-                                   hostbuf, currlen, is_write);
+            address_space_rw(as, pages[i] | (laddr & ~TARGET_PAGE_MASK),
+                             attrs, hostbuf, currlen, is_write);
             laddr += currlen;
             hostbuf += currlen;
             len -= currlen;
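All of the i386 and s390x hunks apply the same mechanical conversion: in QEMU, cpu_physical_memory_rw() is shorthand for address_space_rw() on the global system memory with unspecified transaction attributes. Spelling the call out lets each site substitute a per-CPU address space (cpu_addressspace(cs, attrs), or CPU(cpu)->as) where one is available, as xen-emu.c, whpx-all.c and mmu_helper.c now do. A sketch of the equivalence, assuming the QEMU tree's headers; phys_rw_compat is a hypothetical name used for illustration:

#include "qemu/osdep.h"
#include "system/address-spaces.h"  /* address_space_memory */
#include "system/memory.h"          /* address_space_rw() */

/* What the legacy helper boils down to: a system-memory access with
 * MEMTXATTRS_UNSPECIFIED, ignoring the returned MemTxResult. */
static void phys_rw_compat(hwaddr addr, void *buf, hwaddr len, bool is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

nvmm-all.c keeps the global address_space_memory (with a TODO) because its callback does not receive a CPUState directly.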