From fe678c45d2c970ab35826c6ff3ae08f20bf02f73 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Thu, 10 Oct 2024 10:36:41 +0200
Subject: tcg: remove singlestep_enabled from DisasContextBase

It is used in a couple of places only, both within the same target.
Those can use the cflags just as well, so remove the separate field.

Signed-off-by: Paolo Bonzini
Message-ID: <20241010083641.1785069-1-pbonzini@redhat.com>
Reviewed-by: Richard Henderson
Signed-off-by: Richard Henderson
---
 include/exec/translator.h | 2 --
 1 file changed, 2 deletions(-)

(limited to 'include')

diff --git a/include/exec/translator.h b/include/exec/translator.h
index 25004dfb76..d8dcb77b5f 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -71,7 +71,6 @@ typedef enum DisasJumpType {
  * @is_jmp: What instruction to disassemble next.
  * @num_insns: Number of translated instructions (including current).
  * @max_insns: Maximum number of instructions to be translated in this TB.
- * @singlestep_enabled: "Hardware" single stepping enabled.
  * @plugin_enabled: TCG plugin enabled in this TB.
  * @fake_insn: True if translator_fake_ldb used.
  * @insn_start: The last op emitted by the insn_start hook,
@@ -86,7 +85,6 @@ struct DisasContextBase {
     DisasJumpType is_jmp;
     int num_insns;
     int max_insns;
-    bool singlestep_enabled;
     bool plugin_enabled;
     bool fake_insn;
     struct TCGOp *insn_start;
--
cgit 1.4.1
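
With the field gone, a translator reads the same information from the
translation block's compile flags. A minimal sketch, not taken from the
patch: gen_raise_debug_exception is an invented placeholder for whatever
the target does on a debug exception, and CF_SINGLE_STEP and tb_cflags()
are assumed from include/exec/translation-block.h.

    /* Before: consult the copy of the flag in DisasContextBase. */
    if (dc->base.singlestep_enabled) {
        gen_raise_debug_exception(dc);
    }

    /* After: read the equivalent bit from the TB's cflags. */
    if (tb_cflags(dc->base.tb) & CF_SINGLE_STEP) {
        gen_raise_debug_exception(dc);
    }
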
From f781af3b14fc87c5177d8d8e209d89743e4857df Mon Sep 17 00:00:00 2001
From: Ilya Leoshkevich
Date: Thu, 12 Sep 2024 11:28:20 +0200
Subject: include/exec: Introduce env_cpu_const()

It's the same as env_cpu(), but for const objects.

Reviewed-by: Richard Henderson
Signed-off-by: Ilya Leoshkevich
Message-ID: <20240912093012.402366-2-iii@linux.ibm.com>
Signed-off-by: Richard Henderson
---
 include/exec/cpu-common.h | 13 ++++++++++++-
 linux-user/elfload.c      |  2 +-
 2 files changed, 13 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 2e1b499cb7..638dc806a5 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -238,6 +238,17 @@ static inline ArchCPU *env_archcpu(CPUArchState *env)
     return (void *)env - sizeof(CPUState);
 }
 
+/**
+ * env_cpu_const(env)
+ * @env: The architecture environment
+ *
+ * Return the CPUState associated with the environment.
+ */
+static inline const CPUState *env_cpu_const(const CPUArchState *env)
+{
+    return (void *)env - sizeof(CPUState);
+}
+
 /**
  * env_cpu(env)
  * @env: The architecture environment
@@ -246,7 +257,7 @@ static inline ArchCPU *env_archcpu(CPUArchState *env)
  */
 static inline CPUState *env_cpu(CPUArchState *env)
 {
-    return (void *)env - sizeof(CPUState);
+    return (CPUState *)env_cpu_const(env);
 }
 
 #ifndef CONFIG_USER_ONLY
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 52c88a68a9..352960b771 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -4314,7 +4314,7 @@ static int wmr_write_region(void *opaque, target_ulong start,
  */
 static int elf_core_dump(int signr, const CPUArchState *env)
 {
-    const CPUState *cpu = env_cpu((CPUArchState *)env);
+    const CPUState *cpu = env_cpu_const(env);
     const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu);
     struct rlimit dumpsize;
     CountAndSizeRegions css;
--
cgit 1.4.1
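
The pointer arithmetic works because ArchCPU places the generic CPUState
immediately before the per-target CPUArchState, so both helpers subtract
sizeof(CPUState); the const variant lets const callers avoid casting that
qualifier away, as the elfload.c hunk shows. A hypothetical further use,
illustration only — log_cpu_index is invented and qemu_log() is assumed
from qemu/log.h:

    static void log_cpu_index(const CPUArchState *env)
    {
        const CPUState *cpu = env_cpu_const(env);   /* no cast needed */
        qemu_log("running on cpu %d\n", cpu->cpu_index);
    }
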
From da335fe12a5da71a33d7afc2075a341f26213f53 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Fri, 4 Oct 2024 13:00:47 -0700
Subject: include/exec/memop: Move get_alignment_bits from tcg.h
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This function is specific to MemOp, not TCG in general.

Reviewed-by: Helge Deller
Reviewed-by: Philippe Mathieu-Daudé
Reviewed-by: Peter Maydell
Signed-off-by: Richard Henderson
---
 include/exec/memop.h | 23 +++++++++++++++++++++++
 include/tcg/tcg.h    | 23 -----------------------
 2 files changed, 23 insertions(+), 23 deletions(-)

(limited to 'include')

diff --git a/include/exec/memop.h b/include/exec/memop.h
index f881fe7af4..97720a8ee7 100644
--- a/include/exec/memop.h
+++ b/include/exec/memop.h
@@ -170,4 +170,27 @@ static inline bool memop_big_endian(MemOp op)
     return (op & MO_BSWAP) == MO_BE;
 }
 
+/**
+ * get_alignment_bits
+ * @memop: MemOp value
+ *
+ * Extract the alignment size from the memop.
+ */
+static inline unsigned get_alignment_bits(MemOp memop)
+{
+    unsigned a = memop & MO_AMASK;
+
+    if (a == MO_UNALN) {
+        /* No alignment required. */
+        a = 0;
+    } else if (a == MO_ALIGN) {
+        /* A natural alignment requirement. */
+        a = memop & MO_SIZE;
+    } else {
+        /* A specific alignment requirement. */
+        a = a >> MO_ASHIFT;
+    }
+    return a;
+}
+
 #endif
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index 21d5884741..824fb3560d 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -281,29 +281,6 @@ static inline int tcg_type_size(TCGType t)
     return 4 << i;
 }
 
-/**
- * get_alignment_bits
- * @memop: MemOp value
- *
- * Extract the alignment size from the memop.
- */
-static inline unsigned get_alignment_bits(MemOp memop)
-{
-    unsigned a = memop & MO_AMASK;
-
-    if (a == MO_UNALN) {
-        /* No alignment required. */
-        a = 0;
-    } else if (a == MO_ALIGN) {
-        /* A natural alignment requirement. */
-        a = memop & MO_SIZE;
-    } else {
-        /* A specific alignment requirement. */
-        a = a >> MO_ASHIFT;
-    }
-    return a;
-}
-
 typedef tcg_target_ulong TCGArg;
 
 /* Define type and accessor macros for TCG variables.
--
cgit 1.4.1
From c5809eee452b0393ca57763137344099912b354a Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Fri, 4 Oct 2024 13:34:42 -0700
Subject: include/exec/memop: Rename get_alignment_bits
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Rename to use "memop_" prefix, like other functions
that operate on MemOp.

Reviewed-by: Helge Deller
Reviewed-by: Philippe Mathieu-Daudé
Reviewed-by: Peter Maydell
Signed-off-by: Richard Henderson
---
 accel/tcg/cputlb.c             | 4 ++--
 accel/tcg/user-exec.c          | 4 ++--
 include/exec/memop.h           | 4 ++--
 target/arm/tcg/translate-a64.c | 4 ++--
 target/xtensa/translate.c      | 2 +-
 tcg/arm/tcg-target.c.inc       | 4 ++--
 tcg/sparc64/tcg-target.c.inc   | 2 +-
 tcg/tcg-op-ldst.c              | 6 +++---
 tcg/tcg.c                      | 2 +-
 9 files changed, 16 insertions(+), 16 deletions(-)

(limited to 'include')

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index fd6459b695..a975fe5f89 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1709,7 +1709,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
 
     /* Handle CPU specific unaligned behaviour */
-    a_bits = get_alignment_bits(l->memop);
+    a_bits = memop_alignment_bits(l->memop);
     if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
     }
@@ -1797,7 +1797,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
 {
     uintptr_t mmu_idx = get_mmuidx(oi);
     MemOp mop = get_memop(oi);
-    int a_bits = get_alignment_bits(mop);
+    int a_bits = memop_alignment_bits(mop);
     uintptr_t index;
     CPUTLBEntry *tlbe;
     vaddr tlb_addr;
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 11b6d45e90..51b2c16dbe 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -954,7 +954,7 @@ void page_reset_target_data(target_ulong start, target_ulong last) { }
 static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
                             MemOp mop, uintptr_t ra, MMUAccessType type)
 {
-    int a_bits = get_alignment_bits(mop);
+    int a_bits = memop_alignment_bits(mop);
     void *ret;
 
     /* Enforce guest required alignment. */
@@ -1236,7 +1236,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
                               int size, uintptr_t retaddr)
 {
     MemOp mop = get_memop(oi);
-    int a_bits = get_alignment_bits(mop);
+    int a_bits = memop_alignment_bits(mop);
     void *ret;
 
     /* Enforce guest required alignment. */
diff --git a/include/exec/memop.h b/include/exec/memop.h
index 97720a8ee7..f53bf618c6 100644
--- a/include/exec/memop.h
+++ b/include/exec/memop.h
@@ -171,12 +171,12 @@ static inline bool memop_big_endian(MemOp op)
 }
 
 /**
- * get_alignment_bits
+ * memop_alignment_bits:
  * @memop: MemOp value
 *
  * Extract the alignment size from the memop.
  */
-static inline unsigned get_alignment_bits(MemOp memop)
+static inline unsigned memop_alignment_bits(MemOp memop)
 {
     unsigned a = memop & MO_AMASK;
 
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 071b6349fc..ec0b1ee252 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -294,7 +294,7 @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
         desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
         desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
         desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
-        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
+        desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(memop));
         desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);
 
         ret = tcg_temp_new_i64();
@@ -326,7 +326,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
         desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
         desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
         desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
-        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
+        desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(single_mop));
         desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
 
         ret = tcg_temp_new_i64();
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 75b7bfda4c..f4da4a40f9 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -521,7 +521,7 @@ static MemOp gen_load_store_alignment(DisasContext *dc, MemOp mop,
         mop |= MO_ALIGN;
     }
     if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
-        tcg_gen_andi_i32(addr, addr, ~0 << get_alignment_bits(mop));
+        tcg_gen_andi_i32(addr, addr, ~0 << memop_alignment_bits(mop));
     }
     return mop;
 }
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 3de5f50b62..56072d89a2 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -1587,7 +1587,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
         tcg_debug_assert((datalo & 1) == 0);
         tcg_debug_assert(datahi == datalo + 1);
         /* LDRD requires alignment; double-check that. */
-        if (get_alignment_bits(opc) >= MO_64) {
+        if (memop_alignment_bits(opc) >= MO_64) {
             if (h.index < 0) {
                 tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
                 break;
@@ -1691,7 +1691,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
         tcg_debug_assert((datalo & 1) == 0);
         tcg_debug_assert(datahi == datalo + 1);
         /* STRD requires alignment; double-check that. */
-        if (get_alignment_bits(opc) >= MO_64) {
+        if (memop_alignment_bits(opc) >= MO_64) {
             if (h.index < 0) {
                 tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
             } else {
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index 176c98740b..32f9ec24b5 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -1133,7 +1133,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
      * Otherwise, test for at least natural alignment and defer
      * everything else to the helper functions.
      */
-    if (s_bits != get_alignment_bits(opc)) {
+    if (s_bits != memop_alignment_bits(opc)) {
         tcg_debug_assert(check_fit_tl(a_mask, 13));
         tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);
 
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index 23dc807f11..a318011229 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -45,7 +45,7 @@ static void check_max_alignment(unsigned a_bits)
 
 static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
 {
-    unsigned a_bits = get_alignment_bits(op);
+    unsigned a_bits = memop_alignment_bits(op);
 
     check_max_alignment(a_bits);
 
@@ -559,7 +559,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
     TCGv_i64 ext_addr = NULL;
     TCGOpcode opc;
 
-    check_max_alignment(get_alignment_bits(memop));
+    check_max_alignment(memop_alignment_bits(memop));
     tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
 
     /* In serial mode, reduce atomicity. */
@@ -676,7 +676,7 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
     TCGv_i64 ext_addr = NULL;
     TCGOpcode opc;
 
-    check_max_alignment(get_alignment_bits(memop));
+    check_max_alignment(memop_alignment_bits(memop));
     tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
 
     /* In serial mode, reduce atomicity. */
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 34e3056380..5decd83cf4 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -5506,7 +5506,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
 static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                            MemOp host_atom, bool allow_two_ops)
 {
-    MemOp align = get_alignment_bits(opc);
+    MemOp align = memop_alignment_bits(opc);
     MemOp size = opc & MO_SIZE;
     MemOp half = size ? size - 1 : 0;
     MemOp atom = opc & MO_ATOM_MASK;
--
cgit 1.4.1
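
With the move and rename done, the three branches of memop_alignment_bits()
can be read off from concrete MemOp values. A worked illustration, not code
from the series; the constants follow from MO_SIZE, MO_AMASK and MO_ASHIFT
in include/exec/memop.h:

    memop_alignment_bits(MO_32);               /* MO_UNALN: 0 -> no alignment check */
    memop_alignment_bits(MO_32 | MO_ALIGN);    /* natural: MO_SIZE = 2 -> 4-byte */
    memop_alignment_bits(MO_8 | MO_ALIGN_16);  /* explicit: 4 -> 16-byte */
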
From e5b063e81fd2b30aad1e9128238871c71b62a666 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Fri, 4 Oct 2024 14:57:20 -0700
Subject: include/exec/memop: Introduce memop_atomicity_bits
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Split out of mmu_lookup.

Reviewed-by: Helge Deller
Reviewed-by: Peter Maydell
Reviewed-by: Philippe Mathieu-Daudé
Signed-off-by: Richard Henderson
---
 accel/tcg/cputlb.c   | 16 ++--------------
 include/exec/memop.h | 24 ++++++++++++++++++++++++
 2 files changed, 26 insertions(+), 14 deletions(-)

(limited to 'include')

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a975fe5f89..35cda1e2b0 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1767,20 +1767,8 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
      * Device memory type require alignment.
      */
     if (unlikely(flags & TLB_CHECK_ALIGNED)) {
-        MemOp size = l->memop & MO_SIZE;
-
-        switch (l->memop & MO_ATOM_MASK) {
-        case MO_ATOM_NONE:
-            size = MO_8;
-            break;
-        case MO_ATOM_IFALIGN_PAIR:
-        case MO_ATOM_WITHIN16_PAIR:
-            size = size ? size - 1 : 0;
-            break;
-        default:
-            break;
-        }
-        if (addr & ((1 << size) - 1)) {
+        a_bits = memop_atomicity_bits(l->memop);
+        if (addr & ((1 << a_bits) - 1)) {
             cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
         }
     }
diff --git a/include/exec/memop.h b/include/exec/memop.h
index f53bf618c6..b699bf7688 100644
--- a/include/exec/memop.h
+++ b/include/exec/memop.h
@@ -193,4 +193,28 @@ static inline unsigned memop_alignment_bits(MemOp memop)
     return a;
 }
 
+/*
+ * memop_atomicity_bits:
+ * @memop: MemOp value
+ *
+ * Extract the atomicity size from the memop.
+ */
+static inline unsigned memop_atomicity_bits(MemOp memop)
+{
+    unsigned size = memop & MO_SIZE;
+
+    switch (memop & MO_ATOM_MASK) {
+    case MO_ATOM_NONE:
+        size = MO_8;
+        break;
+    case MO_ATOM_IFALIGN_PAIR:
+    case MO_ATOM_WITHIN16_PAIR:
+        size = size ? size - 1 : 0;
+        break;
+    default:
+        break;
+    }
+    return size;
+}
+
 #endif
--
cgit 1.4.1
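
The helper answers "what is the largest unit this access must update
atomically", which is what the TLB_CHECK_ALIGNED path actually tests.
A worked illustration for an 8-byte access, not from the patch; the
constants are from include/exec/memop.h:

    memop_atomicity_bits(MO_64 | MO_ATOM_NONE);          /* MO_8: bytewise, no atomicity */
    memop_atomicity_bits(MO_64 | MO_ATOM_IFALIGN);       /* MO_64: one 8-byte unit */
    memop_atomicity_bits(MO_64 | MO_ATOM_IFALIGN_PAIR);  /* MO_32: two 4-byte halves */
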
From f168808d7d100ed96c52c4438c4ddb557bd086bf Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Mon, 7 Oct 2024 16:34:06 -0700
Subject: accel/tcg: Add TCGCPUOps.tlb_fill_align

Add a new callback to handle softmmu paging. Return the page details
directly, instead of passing them indirectly to tlb_set_page. Handle
alignment simultaneously with paging so that faults are handled with
target-specific priority.

Route all calls of the two hooks through a tlb_fill_align function
local to cputlb.c.

As yet no targets implement the new hook.
As yet cputlb.c does not use the new alignment check.

Reviewed-by: Helge Deller
Reviewed-by: Peter Maydell
Signed-off-by: Richard Henderson
---
 accel/tcg/cputlb.c            | 61 +++++++++++++++++++++++++++----------------
 include/hw/core/cpu.h         |  4 +--
 include/hw/core/tcg-cpu-ops.h | 26 ++++++++++++++++++
 include/qemu/typedefs.h       |  1 +
 4 files changed, 67 insertions(+), 25 deletions(-)

(limited to 'include')

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 35cda1e2b0..d72f454e9e 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1221,22 +1221,35 @@ void tlb_set_page(CPUState *cpu, vaddr addr,
 }
 
 /*
- * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
- * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
- * be discarded and looked up again (e.g. via tlb_entry()).
+ * Note: tlb_fill_align() can trigger a resize of the TLB.
+ * This means that all of the caller's prior references to the TLB table
+ * (e.g. CPUTLBEntry pointers) must be discarded and looked up again
+ * (e.g. via tlb_entry()).
  */
-static void tlb_fill(CPUState *cpu, vaddr addr, int size,
-                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type,
+                           int mmu_idx, MemOp memop, int size,
+                           bool probe, uintptr_t ra)
 {
-    bool ok;
+    const TCGCPUOps *ops = cpu->cc->tcg_ops;
+    CPUTLBEntryFull full;
 
-    /*
-     * This is not a probe, so only valid return is success; failure
-     * should result in exception + longjmp to the cpu loop.
-     */
-    ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
-                                    access_type, mmu_idx, false, retaddr);
-    assert(ok);
+    if (ops->tlb_fill_align) {
+        if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx,
+                                memop, size, probe, ra)) {
+            tlb_set_page_full(cpu, mmu_idx, addr, &full);
+            return true;
+        }
+    } else {
+        /* Legacy behaviour is alignment before paging. */
+        if (addr & ((1u << memop_alignment_bits(memop)) - 1)) {
+            ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra);
+        }
+        if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) {
+            return true;
+        }
+    }
+    assert(probe);
+    return false;
 }
 
 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
@@ -1351,22 +1364,22 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
 
     if (!tlb_hit_page(tlb_addr, page_addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
-            if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
-                                            mmu_idx, nonfault, retaddr)) {
+            if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
+                                0, fault_size, nonfault, retaddr)) {
                 /* Non-faulting page table read failed. */
                 *phost = NULL;
                 *pfull = NULL;
                 return TLB_INVALID_MASK;
             }
 
-            /* TLB resize via tlb_fill may have moved the entry. */
+            /* TLB resize via tlb_fill_align may have moved the entry. */
             index = tlb_index(cpu, mmu_idx, addr);
             entry = tlb_entry(cpu, mmu_idx, addr);
 
             /*
              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
-             * to force the next access through tlb_fill. We've just
-             * called tlb_fill, so we know that this entry *is* valid.
+             * to force the next access through tlb_fill_align. We've just
+             * called tlb_fill_align, so we know that this entry *is* valid.
              */
             flags &= ~TLB_INVALID_MASK;
         }
@@ -1613,7 +1626,7 @@ typedef struct MMULookupLocals {
  *
 * Resolve the translation for the one page at @data.addr, filling in
  * the rest of @data with the results. If the translation fails,
- * tlb_fill will longjmp out. Return true if the softmmu tlb for
+ * tlb_fill_align will longjmp out. Return true if the softmmu tlb for
  * @mmu_idx may have resized.
  */
 static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
@@ -1631,7 +1644,8 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
+            tlb_fill_align(cpu, addr, access_type, mmu_idx,
+                           0, data->size, false, ra);
             maybe_resized = true;
             index = tlb_index(cpu, mmu_idx, addr);
             entry = tlb_entry(cpu, mmu_idx, addr);
@@ -1821,8 +1835,8 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(cpu, addr, size,
-                     MMU_DATA_STORE, mmu_idx, retaddr);
+            tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx,
+                           0, size, false, retaddr);
             index = tlb_index(cpu, mmu_idx, addr);
             tlbe = tlb_entry(cpu, mmu_idx, addr);
         }
@@ -1836,7 +1850,8 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
      * but addr_read will only be -1 if PAGE_READ was unset.
      */
     if (unlikely(tlbe->addr_read == -1)) {
-        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+        tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
+                       0, size, false, retaddr);
         /*
          * Since we don't support reads and writes to different
          * addresses, and we do have the proper page loaded for
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 04e9ad4996..d21a24c82f 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -205,7 +205,7 @@ struct CPUClass {
  * so the layout is not as critical as that of CPUTLBEntry. This is
  * also why we don't want to combine the two structs.
  */
-typedef struct CPUTLBEntryFull {
+struct CPUTLBEntryFull {
     /*
      * @xlat_section contains:
      *   - in the lower TARGET_PAGE_BITS, a physical section number
@@ -261,7 +261,7 @@ typedef struct CPUTLBEntryFull {
             bool guarded;
         } arm;
     } extra;
-} CPUTLBEntryFull;
+};
 
 /*
  * Data elements that are per MMU mode, minus the bits accessed by
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index 34318cf0e6..663efb9133 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -13,6 +13,7 @@
 #include "exec/breakpoint.h"
 #include "exec/hwaddr.h"
 #include "exec/memattrs.h"
+#include "exec/memop.h"
 #include "exec/mmu-access-type.h"
 #include "exec/vaddr.h"
 
@@ -131,6 +132,31 @@ struct TCGCPUOps {
      * same function signature.
      */
     bool (*cpu_exec_halt)(CPUState *cpu);
+    /**
+     * @tlb_fill_align: Handle a softmmu tlb miss
+     * @cpu: cpu context
+     * @out: output page properties
+     * @addr: virtual address
+     * @access_type: read, write or execute
+     * @mmu_idx: mmu context
+     * @memop: memory operation for the access
+     * @size: memory access size, or 0 for whole page
+     * @probe: test only, no fault
+     * @ra: host return address for exception unwind
+     *
+     * If the access is valid, fill in @out and return true.
+     * Otherwise if probe is true, return false.
+     * Otherwise raise an exception and do not return.
+     *
+     * The alignment check for the access is deferred to this hook,
+     * so that the target can determine the priority of any alignment
+     * fault with respect to other potential faults from paging.
+     * Zero may be passed for @memop to skip any alignment check
+     * for non-memory-access operations such as probing.
+     */
+    bool (*tlb_fill_align)(CPUState *cpu, CPUTLBEntryFull *out, vaddr addr,
+                           MMUAccessType access_type, int mmu_idx,
+                           MemOp memop, int size, bool probe, uintptr_t ra);
     /**
      * @tlb_fill: Handle a softmmu tlb miss
      *
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 9d222dc376..3d84efcac4 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -40,6 +40,7 @@ typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
 typedef struct CPUArchState CPUArchState;
 typedef struct CPUPluginState CPUPluginState;
 typedef struct CPUState CPUState;
+typedef struct CPUTLBEntryFull CPUTLBEntryFull;
 typedef struct DeviceState DeviceState;
 typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot;
 typedef struct DisasContextBase DisasContextBase;
--
cgit 1.4.1
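
A target opting in to the new hook walks its page table, fills in the
CPUTLBEntryFull, and orders the alignment check against the walk however
its architecture demands. A rough sketch for a hypothetical target —
mytarget_walk_page_table and mytarget_raise_alignment_fault are invented
placeholders; only the hook contract comes from tcg-cpu-ops.h above:

    static bool mytarget_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out,
                                        vaddr addr, MMUAccessType access_type,
                                        int mmu_idx, MemOp memop, int size,
                                        bool probe, uintptr_t ra)
    {
        /* Walk first, so a translation fault beats an alignment fault. */
        if (!mytarget_walk_page_table(cs, out, addr, access_type,
                                      mmu_idx, probe, ra)) {
            /* With probe set, the walk reports failure instead of raising. */
            return false;
        }

        /* memop == 0 carries no alignment bits, so this check passes. */
        if (addr & ((1 << memop_alignment_bits(memop)) - 1)) {
            if (probe) {
                return false;
            }
            mytarget_raise_alignment_fault(cs, addr, access_type,
                                           mmu_idx, ra);   /* no return */
        }
        return true;
    }

A target that wants alignment faults to win instead simply swaps the two
checks — exactly the flexibility the legacy path, which always checked
alignment before calling tlb_fill, could not offer.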