author     Paolo Bonzini <pbonzini@redhat.com>   2015-06-18 18:47:19 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>   2015-07-01 15:45:50 +0200
commit     afbe70535ff1a8a7a32910cc15ebecc0ba92e7da (patch)
tree       1e12a91857ba9ce038e7c1c05fd901add373b36b
parent     2e7f7a3c86f884a77296a137b7c730a4d580c5c9 (diff)
main-loop: introduce qemu_mutex_iothread_locked
This function will be used to avoid recursive locking of the iothread lock
whenever address_space_rw/ld*/st* are called with the BQL held, which is
almost always the case.
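
A minimal sketch of the conditional-locking pattern this enables (the
helper shown here is hypothetical and not part of this patch):

    /* Take the BQL only if this thread does not already hold it, and
     * release it only if we took it ourselves. */
    static void access_with_bql(void)       /* hypothetical helper */
    {
        bool release = false;

        if (!qemu_mutex_iothread_locked()) {
            qemu_mutex_lock_iothread();
            release = true;
        }

        /* ... perform the address_space access here ... */

        if (release) {
            qemu_mutex_unlock_iothread();
        }
    }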

Tracking whether the iothread lock is held is very cheap (a single TLS
variable suffices) but requires some care, because now the lock must
always be taken with qemu_mutex_lock_iothread(); previously this was
not guaranteed.  Outside TCG mode this is not a problem.  In TCG mode,
we need to be careful and avoid the "prod out of compiled code" step
if the lock is requested from a VCPU thread.  This is easily done with
a check on current_cpu, i.e. qemu_in_vcpu_thread().
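
A sketch of that check (the kick shown is illustrative; the actual
prodding mechanism lives in qemu_mutex_lock_iothread() in cpus.c):

    /* Only prod first_cpu out of compiled code when the requester is
     * not itself a VCPU thread; a VCPU thread asking for the lock has
     * already left the translated-code loop. */
    if (!qemu_in_vcpu_thread()) {
        qemu_cpu_kick(first_cpu);   /* illustrative; see cpus.c */
    }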

Hopefully, multithreaded TCG will get rid of the whole logic to kick
VCPUs whenever an I/O event occurs!

Cc: Frederic Konrad <fred.konrad@greensocs.com>
Message-Id: <1434646046-27150-3-git-send-email-pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--  cpus.c                     9
-rw-r--r--  include/qemu/main-loop.h  10
-rw-r--r--  stubs/iothread-lock.c      5
3 files changed, 24 insertions, 0 deletions
diff --git a/cpus.c b/cpus.c
index c09fbef5de..f547aebeaf 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1146,6 +1146,13 @@ bool qemu_in_vcpu_thread(void)
     return current_cpu && qemu_cpu_is_self(current_cpu);
 }
 
+static __thread bool iothread_locked = false;
+
+bool qemu_mutex_iothread_locked(void)
+{
+    return iothread_locked;
+}
+
 void qemu_mutex_lock_iothread(void)
 {
     atomic_inc(&iothread_requesting_mutex);
@@ -1164,10 +1171,12 @@ void qemu_mutex_lock_iothread(void)
         atomic_dec(&iothread_requesting_mutex);
         qemu_cond_broadcast(&qemu_io_proceeded_cond);
     }
+    iothread_locked = true;
 }
 
 void qemu_mutex_unlock_iothread(void)
 {
+    iothread_locked = false;
     qemu_mutex_unlock(&qemu_global_mutex);
 }
 
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index 0f4a0fd4b2..bc18ca30e4 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -223,6 +223,16 @@ int qemu_add_child_watch(pid_t pid);
 #endif
 
 /**
+ * qemu_mutex_iothread_locked: Return lock status of the main loop mutex.
+ *
+ * The main loop mutex is the coarsest lock in QEMU, and as such it
+ * must always be taken outside other locks.  This function helps
+ * functions take different paths depending on whether the current
+ * thread is running within the main loop mutex.
+ */
+bool qemu_mutex_iothread_locked(void);
+
+/**
  * qemu_mutex_lock_iothread: Lock the main loop mutex.
  *
  * This function locks the main loop mutex.  The mutex is taken by
diff --git a/stubs/iothread-lock.c b/stubs/iothread-lock.c
index 5d8aca1b37..dda6f6b58d 100644
--- a/stubs/iothread-lock.c
+++ b/stubs/iothread-lock.c
@@ -1,6 +1,11 @@
 #include "qemu-common.h"
 #include "qemu/main-loop.h"
 
+bool qemu_mutex_iothread_locked(void)
+{
+    return true;
+}
+
 void qemu_mutex_lock_iothread(void)
 {
 }
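
As a usage note, the new predicate also makes lock-ownership checks
possible in functions that require the BQL; a hedged sketch, not part
of this patch:

    #include <assert.h>

    /* Sketch: a function that must run under the BQL can now assert it. */
    static void must_hold_bql(void)
    {
        assert(qemu_mutex_iothread_locked());
        /* ... manipulate BQL-protected global state ... */
    }

Note that the stub version above always returns true, so tools that
never create the main loop mutex behave as if they permanently hold
it, and conditional-locking callers skip locking entirely.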