From 58e8f1f616d117aed6283690419dc16f53b7a202 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Wed, 22 Feb 2023 19:17:52 -1000
Subject: accel/tcg: Store some tlb flags in CPUTLBEntryFull
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We have run out of bits we can use within the CPUTLBEntry comparators,
as TLB_FLAGS_MASK cannot overlap alignment.

Store slow_flags[] in CPUTLBEntryFull, and merge with the flags from
the comparator.  A new TLB_FORCE_SLOW bit is set within the comparator
as an indication that the slow path must be used.

Move TLB_BSWAP to TLB_SLOW_FLAGS_MASK.  Since we are out of bits,
we cannot create a new bit without moving an old one.

Reviewed-by: Philippe Mathieu-Daudé
Signed-off-by: Richard Henderson
---
 include/exec/cpu-all.h  | 21 +++++++++++++++++----
 include/exec/cpu-defs.h |  6 ++++++
 2 files changed, 23 insertions(+), 4 deletions(-)

(limited to 'include/exec')

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 09bf4c0cc6..4422f4bb07 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -327,17 +327,30 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
 /* Set if TLB entry contains a watchpoint.  */
 #define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS_MIN - 4))
-/* Set if TLB entry requires byte swap.  */
-#define TLB_BSWAP           (1 << (TARGET_PAGE_BITS_MIN - 5))
+/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
+#define TLB_FORCE_SLOW      (1 << (TARGET_PAGE_BITS_MIN - 5))
 /* Set if TLB entry writes ignored.  */
 #define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 6))
 
-/* Use this mask to check interception with an alignment mask
+/*
+ * Use this mask to check interception with an alignment mask
  * in a TCG backend.
  */
 #define TLB_FLAGS_MASK \
     (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
-    | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
+    | TLB_WATCHPOINT | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
+
+/*
+ * Flags stored in CPUTLBEntryFull.slow_flags[x].
+ * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
+ */
+/* Set if TLB entry requires byte swap.  */
+#define TLB_BSWAP            (1 << 0)
+
+#define TLB_SLOW_FLAGS_MASK  TLB_BSWAP
+
+/* The two sets of flags must not overlap. */
+QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
 
 /**
  * tlb_hit_page: return true if page aligned @addr is a hit against the
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index e6a079402e..fb4c8d480f 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -124,6 +124,12 @@ typedef struct CPUTLBEntryFull {
     /* @lg_page_size contains the log2 of the page size.  */
     uint8_t lg_page_size;
 
+    /*
+     * Additional tlb flags for use by the slow path. If non-zero,
+     * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
+     */
+    uint8_t slow_flags[MMU_ACCESS_COUNT];
+
     /*
      * Allow target-specific additions to this structure.
      * This may be used to cache items from the guest cpu
-- 
cgit 1.4.1
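
Editor's sketch (not part of the patch): the standalone C program below models how a
slow-path helper could fold the out-of-line slow_flags[] back into the flags taken from
the comparator once TLB_FORCE_SLOW is seen. The types mock_tlb_entry and mock_entry_full,
the helper merged_flags(), the MMU access enum values, and the choice of
TARGET_PAGE_BITS_MIN = 12 are all invented here for illustration; only the flag macros and
the slow_flags[MMU_ACCESS_COUNT] layout come from the patch itself.

/*
 * Standalone sketch (not QEMU code): recombining flags split between a
 * TLB comparator and an out-of-line slow_flags[] array.  Names are
 * illustrative only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS_MIN  12   /* assumption for this sketch */

/* Flags that live in the comparator, below the minimum page alignment. */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
#define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS_MIN - 4))
#define TLB_FORCE_SLOW      (1 << (TARGET_PAGE_BITS_MIN - 5))
#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 6))

#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
     | TLB_WATCHPOINT | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)

/* Flags that no longer fit in the comparator; stored out of line. */
#define TLB_BSWAP            (1 << 0)
#define TLB_SLOW_FLAGS_MASK  TLB_BSWAP

enum { MMU_DATA_LOAD, MMU_DATA_STORE, MMU_INST_FETCH, MMU_ACCESS_COUNT };

/* Cut-down stand-ins for CPUTLBEntry / CPUTLBEntryFull. */
typedef struct {
    uint64_t addr_read;              /* comparator: page | fast flags */
} mock_tlb_entry;

typedef struct {
    uint8_t slow_flags[MMU_ACCESS_COUNT];
} mock_entry_full;

/* Merge fast and slow flags, as a slow-path helper might. */
static unsigned merged_flags(const mock_tlb_entry *ent,
                             const mock_entry_full *full, int access_type)
{
    unsigned flags = ent->addr_read & TLB_FLAGS_MASK;

    if (flags & TLB_FORCE_SLOW) {
        /* Only consult slow_flags[] when the comparator says to. */
        flags |= full->slow_flags[access_type];
    }
    return flags;
}

int main(void)
{
    mock_tlb_entry ent = { .addr_read = 0x40000 | TLB_FORCE_SLOW };
    mock_entry_full full = { .slow_flags = { [MMU_DATA_LOAD] = TLB_BSWAP } };

    unsigned flags = merged_flags(&ent, &full, MMU_DATA_LOAD);

    /* The two namespaces must stay disjoint for the merge to be lossless. */
    assert((TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK) == 0);
    assert(flags & TLB_BSWAP);

    printf("merged flags: %#x\n", flags);
    return 0;
}

The point of the split shown above: because TLB_FORCE_SLOW stays inside TLB_FLAGS_MASK,
the generated fast path keeps testing a single comparator mask, and only accesses that
already take the slow path pay the extra load of slow_flags[].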