Diffstat (limited to 'cpu-exec.c')
-rw-r--r--  cpu-exec.c  25
1 file changed, 24 insertions(+), 1 deletion(-)
diff --git a/cpu-exec.c b/cpu-exec.c
index 98f968df60..adb939a994 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -26,6 +26,7 @@
 #include "qemu/timer.h"
 #include "exec/address-spaces.h"
 #include "exec/memory-internal.h"
+#include "qemu/rcu.h"
 
 /* -icount align implementation. */
 
@@ -146,8 +147,27 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
 
 void cpu_reload_memory_map(CPUState *cpu)
 {
+    AddressSpaceDispatch *d;
+
+    if (qemu_in_vcpu_thread()) {
+        /* Do not let the guest prolong the critical section as much as it
+         * desires.
+         *
+         * Currently, this is prevented by the I/O thread's periodic kicking
+         * of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread)
+         * but this will go away once TCG's execution moves out of the global
+         * mutex.
+         *
+         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
+         * only protects cpu->as->dispatch.  Since we reload it below, we can
+         * split the critical section.
+         */
+        rcu_read_unlock();
+        rcu_read_lock();
+    }
+
     /* The CPU and TLB are protected by the iothread lock.  */
-    AddressSpaceDispatch *d = cpu->as->dispatch;
+    d = atomic_rcu_read(&cpu->as->dispatch);
     cpu->memory_dispatch = d;
     tlb_flush(cpu, 1);
 }
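
The comment in the hunk above describes the read-side pattern this change relies on: a long-running RCU reader must occasionally leave and re-enter its critical section so that a writer's grace period can complete, and it must re-fetch any RCU-protected pointer afterwards, because the value it held before the split may be freed in the gap. The following is a minimal standalone sketch of that pattern, not QEMU code: it uses the userspace RCU library (liburcu), whose rcu_read_lock()/rcu_read_unlock()/rcu_dereference() play the roles of qemu/rcu.h's read lock and atomic_rcu_read(); the dispatch struct and function names are invented for illustration. Build with: gcc demo.c -lurcu

/* Sketch only: liburcu standing in for qemu/rcu.h. */
#include <stdio.h>
#include <urcu.h>

struct dispatch { int generation; };

static struct dispatch *current_dispatch;   /* RCU-protected pointer */

static void reload_dispatch(void)
{
    struct dispatch *d;

    /* Split the critical section: anything read before this point
     * may be stale (and freed) once we re-enter.  This is the
     * unlock/lock pair added to cpu_reload_memory_map() above. */
    rcu_read_unlock();
    rcu_read_lock();

    /* Re-fetch the pointer; rcu_dereference() corresponds to
     * QEMU's atomic_rcu_read(). */
    d = rcu_dereference(current_dispatch);
    printf("now using dispatch generation %d\n", d->generation);
}

int main(void)
{
    static struct dispatch d0 = { .generation = 0 };

    current_dispatch = &d0;
    rcu_register_thread();

    rcu_read_lock();          /* matches cpu_exec's entry lock below */
    reload_dispatch();
    rcu_read_unlock();        /* matches cpu_exec's exit unlock below */

    rcu_unregister_thread();
    return 0;
}

The unlock/lock pair is the whole trick: it costs the reader almost nothing, but it bounds how long a guest can keep a writer blocked in a grace-period wait.
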
@@ -362,6 +382,8 @@ int cpu_exec(CPUArchState *env)
      * an instruction scheduling constraint on modern architectures.  */
     smp_mb();
 
+    rcu_read_lock();
+
     if (unlikely(exit_request)) {
         cpu->exit_request = 1;
     }
@@ -564,6 +586,7 @@ int cpu_exec(CPUArchState *env)
     } /* for(;;) */
 
     cc->cpu_exec_exit(cpu);
+    rcu_read_unlock();
 
     /* fail safe : never use current_cpu outside cpu_exec() */
     current_cpu = NULL;
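
For context, the updater side that this reader pattern cooperates with is not part of this diff. A hedged sketch, again using liburcu with invented names: the writer publishes a replacement table with rcu_assign_pointer(), the counterpart of QEMU's atomic_rcu_set(), then waits a full grace period before freeing the old one. The reader's periodic unlock/lock shown above is exactly what allows that wait to terminate.

/* Sketch only: the real QEMU updater lives elsewhere (exec.c). */
#include <stdlib.h>
#include <urcu.h>

struct dispatch { int generation; };

static struct dispatch *current_dispatch;

static void rebuild_dispatch(void)
{
    struct dispatch *old = current_dispatch;   /* writer side, serialized */
    struct dispatch *new_d = malloc(sizeof(*new_d));

    new_d->generation = old ? old->generation + 1 : 0;

    /* Publish: concurrent readers now see either the old or the new
     * table, never a torn mix. */
    rcu_assign_pointer(current_dispatch, new_d);

    /* Wait for every pre-existing read-side critical section to end;
     * only then is it safe to free the old table. */
    synchronize_rcu();
    free(old);
}

int main(void)
{
    rcu_register_thread();    /* only needed for read-side, but harmless */
    rebuild_dispatch();
    rebuild_dispatch();
    rcu_unregister_thread();
    return 0;
}
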