author    Richard Henderson <richard.henderson@linaro.org>  2023-08-25 16:13:17 -0700
committer Richard Henderson <richard.henderson@linaro.org>  2023-08-29 09:55:06 -0700
commit    0d58c660689f6da1e3feff8a997014003d928b3b (patch)
tree      081ceb163f15f146b86d9ecc64e56f85f3fd6454 /accel/tcg/cpu-exec-common.c
parent    86e4f93d827d3c1efd00cd8a906e38a2c0f2b5bc (diff)
softmmu: Use async_run_on_cpu in tcg_commit
After system startup, run the update to memory_dispatch
and the tlb_flush on the cpu.  This eliminates a race,
wherein a running cpu sees the memory_dispatch change
but has not yet seen the tlb_flush.

Since the update now happens on the cpu, we need not use
qatomic_rcu_read to protect the read of memory_dispatch.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1826
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1834
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1846
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
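
For orientation, the replacement in softmmu/physmem.c has roughly the shape sketched below. This is an illustration of the technique named in the subject line, not the verbatim patch; in particular, the halt_cond test used here as the "are vCPUs running yet?" guard is an assumption about the exact startup check.

/* Sketch: work function runs on the target vCPU itself, so the
 * memory_dispatch update and the tlb_flush cannot be observed out
 * of order by that cpu. */
static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data)
{
    CPUAddressSpace *cpuas = data.host_ptr;

    /* On this path a plain store is enough; no qatomic_rcu_read/
     * qatomic_rcu_set pairing is needed any more. */
    cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
    tlb_flush(cpuas->cpu);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas =
        container_of(listener, CPUAddressSpace, tcg_as_listener);
    CPUState *cpu = cpuas->cpu;

    if (cpu->halt_cond) {  /* assumed guard: vCPU machinery is up */
        /* Queue the update; this also kicks the cpu out of its
         * RCU read-side critical section. */
        async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas));
    } else {
        /* During startup nothing runs yet; update synchronously. */
        tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas));
    }
}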
Diffstat (limited to 'accel/tcg/cpu-exec-common.c')
-rw-r--r--  accel/tcg/cpu-exec-common.c | 30 ------------------------------
1 file changed, 30 deletions(-)
diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c
index 9a5fabf625..7e35d7f4b5 100644
--- a/accel/tcg/cpu-exec-common.c
+++ b/accel/tcg/cpu-exec-common.c
@@ -33,36 +33,6 @@ void cpu_loop_exit_noexc(CPUState *cpu)
     cpu_loop_exit(cpu);
 }
 
-#if defined(CONFIG_SOFTMMU)
-void cpu_reloading_memory_map(void)
-{
-    if (qemu_in_vcpu_thread() && current_cpu->running) {
-        /* The guest can in theory prolong the RCU critical section as long
-         * as it feels like. The major problem with this is that because it
-         * can do multiple reconfigurations of the memory map within the
-         * critical section, we could potentially accumulate an unbounded
-         * collection of memory data structures awaiting reclamation.
-         *
-         * Because the only thing we're currently protecting with RCU is the
-         * memory data structures, it's sufficient to break the critical section
-         * in this callback, which we know will get called every time the
-         * memory map is rearranged.
-         *
-         * (If we add anything else in the system that uses RCU to protect
-         * its data structures, we will need to implement some other mechanism
-         * to force TCG CPUs to exit the critical section, at which point this
-         * part of this callback might become unnecessary.)
-         *
-         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
-         * only protects cpu->as->dispatch. Since we know our caller is about
-         * to reload it, it's safe to split the critical section.
-         */
-        rcu_read_unlock();
-        rcu_read_lock();
-    }
-}
-#endif
-
 void cpu_loop_exit(CPUState *cpu)
 {
     /* Undo the setting in cpu_tb_exec.  */
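
For context, the function removed above had a single caller, tcg_commit() in softmmu/physmem.c, which before this change looked roughly as follows (a from-memory sketch of the pre-patch code, not the exact source):

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    assert(tcg_enabled());
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);

    /* Break the RCU critical section (the comment removed above
     * explains why), then publish the new dispatch pointer. */
    cpu_reloading_memory_map();
    d = address_space_to_dispatch(cpuas->as);
    qatomic_rcu_set(&cpuas->memory_dispatch, d);
    tlb_flush(cpuas->cpu);
}

Once the update is queued with async_run_on_cpu(), both the critical-section break and the qatomic_rcu_set() publication become unnecessary, which is why cpu_reloading_memory_map() can be deleted wholesale.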