path: root/include/exec/cputlb.h
author     Richard Henderson <richard.henderson@linaro.org>    2025-09-24 12:02:13 -0700
committer  Richard Henderson <richard.henderson@linaro.org>    2025-09-24 12:02:13 -0700
commit     48d7b47cd76b986ad360b6ba1b0889186416f1c2 (patch)
tree       6cd0f2f0a38de0a8cb2e968243c5e3e3b1565d10 /include/exec/cputlb.h
parent     ab8008b231e758e03c87c1c483c03afdd9c02e19 (diff)
parent     f6f7fdd68e6fbfafae828e504de544b5659bc4bd (diff)
Merge tag 'pull-misc-20250924' of https://gitlab.com/rth7680/qemu into staging
hw/pci-host/{dino,astro}: Don't call pci_register_root_bus() in init
target/sparc: Loosen various decode for v7
linux-user: Add syscall dispatch support
tcg/optimize: Fix folding of vector bitsel
include/hw/core/cpu: Introduce MMUIdxMap
include/hw/core/cpu: Introduce cpu_tlb_fast
include/hw/core/cpu: Invert the indexing into CPUTLBDescFast
accel/tcg: Remove dead mmap_unlock() call in invalidate_phys_page_range
accel/tcg: Remove cpu_loop_exit_restore() stub
accel/tcg: Properly unlink a TB linked to itself
accel/tcg: Introduce and use tb_flush__exclusive_or_serial

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmjUP5MdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV/qEwgAt6uSXMVTXykr6uxW
# 321nMEMEB2Av5LHQwvgRW/BOAWCKDNtxHHn3tcfvOLKcFHR+agZqTHBvOKGgPGSo
# fPkoHRMlcb3pKxhttX66qZhDiaMNRALtajVNkelKUso4BtESkW1v4yQVNLr1Rk6+
# f/xg4noX2gSh56VDMGLgcTR5wvTNycTIq3909zPmO4YPVQjwUPSYkB227LyBRLYg
# R6EQOzn45oQuFfMYukjNQczibkZ7NV8mW7XmbfiMXwvK1yA/F75eN+B9sJKqS44d
# ww/rurQdIYZFwPPPqz3XZmztg0n9syE9VHkliYmAoJRtbgc0obQHt9M7UfLwW2TM
# NXYlNw==
# =HVcw
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 24 Sep 2025 11:59:31 AM PDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* tag 'pull-misc-20250924' of https://gitlab.com/rth7680/qemu: (32 commits)
  accel/tcg: Remove cpu_loop_exit_restore() stub
  accel/tcg: Remove dead mmap_unlock() call in invalidate_phys_page_range
  accel/tcg: Improve buffer overflow in tb_gen_code
  accel/tcg: Create queue_tb_flush from tb_flush
  linux-user: Split out begin_parallel_context
  plugins: Use tb_flush__exclusive_or_serial
  accel/tcg: Move post-load tb_flush to vm_change_state hook
  accel/tcg: Split out tb_flush__exclusive_or_serial
  hw/ppc/spapr: Use tb_invalidate_phys_range in h_page_init
  target/riscv: Record misa_ext in TCGTBCPUState.cs_base
  target/alpha: Simplify call_pal implementation
  gdbstub: Remove tb_flush uses
  tests/tcg/multiarch: Add tb-link test
  accel/tcg: Properly unlink a TB linked to itself
  target/hppa: Adjust mmu indexes to begin with 0
  include/hw/core/cpu: Invert the indexing into CPUTLBDescFast
  include/hw/core/cpu: Introduce cpu_tlb_fast
  include/hw/core/cpu: Introduce MMUIdxMap
  tcg/optimize: Fix folding of vector bitsel
  hw/pci-host/astro: Don't call pci_register_root_bus() in init
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'include/exec/cputlb.h')
-rw-r--r--  include/exec/cputlb.h | 32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
index 03ed7e2165..9bec0e7890 100644
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -150,7 +150,7 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu);
  * MMU indexes.
  */
 void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
-                              uint16_t idxmap);
+                              MMUIdxMap idxmap);
 
 /**
  * tlb_flush_page_by_mmuidx_all_cpus_synced:
@@ -165,7 +165,7 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
  * translations using the flushed TLBs.
  */
 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                              uint16_t idxmap);
+                                              MMUIdxMap idxmap);
 
 /**
  * tlb_flush_by_mmuidx:
@@ -176,7 +176,7 @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
  * Flush all entries from the TLB of the specified CPU, for the specified
  * MMU indexes.
  */
-void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
+void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap);
 
 /**
  * tlb_flush_by_mmuidx_all_cpus_synced:
@@ -189,7 +189,7 @@ void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
  * When this function returns, no CPUs will subsequently perform
  * translations using the flushed TLBs.
  */
-void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, MMUIdxMap idxmap);
 
 /**
  * tlb_flush_page_bits_by_mmuidx
@@ -201,11 +201,11 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
  * Similar to tlb_flush_page_mask, but with a bitmap of indexes.
  */
 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
-                                   uint16_t idxmap, unsigned bits);
+                                   MMUIdxMap idxmap, unsigned bits);
 
 /* Similarly, with broadcast and syncing. */
 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                                   uint16_t idxmap,
+                                                   MMUIdxMap idxmap,
                                                    unsigned bits);
 
 /**
@@ -220,14 +220,14 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
  * comparing only the low @bits worth of each virtual page.
  */
 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                               vaddr len, uint16_t idxmap,
+                               vaddr len, MMUIdxMap idxmap,
                                unsigned bits);
 
 /* Similarly, with broadcast and syncing. */
 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                vaddr addr,
                                                vaddr len,
-                                               uint16_t idxmap,
+                                               MMUIdxMap idxmap,
                                                unsigned bits);
 #else
 static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
@@ -243,42 +243,42 @@ static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
 {
 }
 static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
-                                            vaddr addr, uint16_t idxmap)
+                                            vaddr addr, MMUIdxMap idxmap)
 {
 }
 
-static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap)
 {
 }
 static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
-                                                            uint16_t idxmap)
+                                                            MMUIdxMap idxmap)
 {
 }
 static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                       uint16_t idxmap)
+                                                       MMUIdxMap idxmap)
 {
 }
 static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                  vaddr addr,
-                                                 uint16_t idxmap,
+                                                 MMUIdxMap idxmap,
                                                  unsigned bits)
 {
 }
 static inline void
 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                              uint16_t idxmap, unsigned bits)
+                                              MMUIdxMap idxmap, unsigned bits)
 {
 }
 static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                                             vaddr len, uint16_t idxmap,
+                                             vaddr len, MMUIdxMap idxmap,
                                              unsigned bits)
 {
 }
 static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                              vaddr addr,
                                                              vaddr len,
-                                                             uint16_t idxmap,
+                                                             MMUIdxMap idxmap,
                                                              unsigned bits)
 {
 }
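
For illustration of the new signatures: each flush function above takes an MMUIdxMap built as a bitmask with one bit per MMU index to flush (the "bitmap of indexes" referred to in the comments). A minimal caller sketch in C, using hypothetical MMU index names that are not defined by this header:

/*
 * Sketch only: MMU_KERNEL_IDX and MMU_USER_IDX are hypothetical index
 * numbers chosen for illustration; real targets define their own.
 */
#include "exec/cputlb.h"

enum { MMU_KERNEL_IDX = 0, MMU_USER_IDX = 1 };

static void flush_page_kernel_and_user(CPUState *cpu, vaddr addr)
{
    /* Set one bit per MMU index whose TLB entries should be dropped. */
    MMUIdxMap idxmap = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);

    /* Flush the entry for @addr from both selected TLBs on this CPU. */
    tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
}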