author     Peter Maydell <peter.maydell@linaro.org>  2017-07-06 10:15:09 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2017-07-06 10:15:09 +0100
commit     67b9c5d4f37ea373ebf9aad251883886e34bf2e1 (patch)
tree       2205c5385217b4ef499e774728ab2979e9bbb0eb /util
parent     2185c93ba80f81bfa27ce6f259c7f2ef4f08b668 (diff)
parent     44eff673411381062b826d048ba9d6630d2b2bdb (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* qemu-thread portability improvement (Fam)
* virtio-scsi IOMMU fix (Jason)
* poisoning and common-obj-y cleanups (Thomas)
* initial Hypervisor.framework refactoring (Sergio)
* x86 TCG interrupt injection fixes (Wu Xiang, me)
* --disable-tcg support for x86 (Yang Zhong, me)
* various other bugfixes and cleanups (Daniel, Peter, Thomas)

# gpg: Signature made Wed 05 Jul 2017 08:12:56 BST
# gpg:                using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (42 commits)
  target/i386: add the CONFIG_TCG into Makefiles
  target/i386: add the tcg_enabled() in target/i386/
  target/i386: move TLB refill function out of helper.c
  target/i386: split cpu_set_mxcsr() and make cpu_set_fpuc() inline
  target/i386: make cpu_get_fp80()/cpu_set_fp80() static
  target/i386: move cpu_sync_bndcs_hflags() function
  tcg: add the CONFIG_TCG into Makefiles
  tcg: add CONFIG_TCG guards in headers
  exec: elide calls to tb_lock and tb_unlock
  tcg: move tb_lock out of translate-all.h
  tcg: add the tcg-stub.c file into accel/stubs/
  vapic: use tcg_enabled
  monitor: disable "info jit" and "info opcount" if !TCG
  tcg: make tcg_allowed global
  cpu: move interrupt handling out of translate-common.c
  tcg: move page_size_init() function
  vl: add tcg_enabled() for tcg related code
  vl: convert -tb-size to qemu_strtoul
  configure: add --disable-tcg configure option
  configure: early test for supported targets
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'util')
-rw-r--r--  util/main-loop.c          |  8
-rw-r--r--  util/oslib-win32.c        |  6
-rw-r--r--  util/qemu-sockets.c       |  2
-rw-r--r--  util/qemu-thread-posix.c  | 27
-rw-r--r--  util/qemu-thread-win32.c  | 34
5 files changed, 64 insertions(+), 13 deletions(-)
diff --git a/util/main-loop.c b/util/main-loop.c
index 19cad6b8b6..2f48f41e62 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -487,7 +487,7 @@ static int os_host_main_loop_wait(int64_t timeout)
 }
 #endif
 
-int main_loop_wait(int nonblocking)
+void main_loop_wait(int nonblocking)
 {
     int ret;
     uint32_t timeout = UINT32_MAX;
@@ -500,9 +500,7 @@ int main_loop_wait(int nonblocking)
     /* poll any events */
     g_array_set_size(gpollfds, 0); /* reset for new iteration */
     /* XXX: separate device handlers from system ones */
-#ifdef CONFIG_SLIRP
     slirp_pollfds_fill(gpollfds, &timeout);
-#endif
 
     if (timeout == UINT32_MAX) {
         timeout_ns = -1;
@@ -515,16 +513,12 @@ int main_loop_wait(int nonblocking)
                                           &main_loop_tlg));
 
     ret = os_host_main_loop_wait(timeout_ns);
-#ifdef CONFIG_SLIRP
     slirp_pollfds_poll(gpollfds, (ret < 0));
-#endif
 
     /* CPU thread can infinitely wait for event after
        missing the warp */
     qemu_start_warp_timer();
     qemu_clock_run_all_timers();
-
-    return ret;
 }
 
 /* Functions to operate on the main QEMU AioContext.  */
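
The main-loop.c hunk above does two things: main_loop_wait() loses its unused return value, and the CONFIG_SLIRP guards around the slirp poll calls go away, so those calls are now made unconditionally. For that to link in builds without SLIRP, no-op fallbacks for the two functions have to exist somewhere in the tree; the sketch below is an assumption about what such fallbacks could look like (the signatures follow the call sites in the hunk), not QEMU's actual stub code.

    /* Hypothetical no-op fallbacks that would let main_loop_wait() call
     * slirp unconditionally when user-mode networking is compiled out.
     * A sketch of the idea only, not QEMU's actual change. */
    #include <stdint.h>
    #include <glib.h>

    void slirp_pollfds_fill(GArray *pollfds, uint32_t *timeout)
    {
        (void)pollfds;
        (void)timeout;      /* add no fds, leave *timeout untouched */
    }

    void slirp_pollfds_poll(GArray *pollfds, int select_error)
    {
        (void)pollfds;
        (void)select_error; /* nothing to service */
    }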
diff --git a/util/oslib-win32.c b/util/oslib-win32.c
index 80e4668935..3de9e7777e 100644
--- a/util/oslib-win32.c
+++ b/util/oslib-win32.c
@@ -438,10 +438,8 @@ static int poll_rest(gboolean poll_msgs, HANDLE *handles, gint nhandles,
         if (timeout == 0 && nhandles > 1) {
             /* Remove the handle that fired */
             int i;
-            if (ready < nhandles - 1) {
-                for (i = ready - WAIT_OBJECT_0 + 1; i < nhandles; i++) {
-                    handles[i-1] = handles[i];
-                }
+            for (i = ready - WAIT_OBJECT_0 + 1; i < nhandles; i++) {
+                handles[i-1] = handles[i];
             }
             nhandles--;
             recursed_result = poll_rest(FALSE, handles, nhandles, fds, nfds, 0);
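
The oslib-win32.c change drops the special case around the handle-compaction loop: when the handle that fired is the last one, the shift loop naturally has nothing to do, so guarding it was redundant and the count can be decremented unconditionally. The standalone snippet below (plain C, not QEMU or Win32 code) illustrates that shift-and-shrink behaves correctly for both middle and last positions.

    /* Illustration of removing element 'fired' from an array by shifting:
     * the loop body never executes when fired == n - 1, so no guard is needed. */
    #include <stdio.h>

    static int remove_at(int *a, int n, int fired)
    {
        for (int i = fired + 1; i < n; i++) {
            a[i - 1] = a[i];            /* shift the tail left by one */
        }
        return n - 1;                   /* shrink the count unconditionally */
    }

    int main(void)
    {
        int handles[] = { 10, 20, 30, 40 };
        int n = remove_at(handles, 4, 3);   /* last element: pure count decrement */
        n = remove_at(handles, n, 1);       /* middle element: tail shifted left */
        for (int i = 0; i < n; i++) {
            printf("%d ", handles[i]);      /* prints: 10 30 */
        }
        printf("\n");
        return 0;
    }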
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index 82290cb687..d3e51089de 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -897,7 +897,7 @@ static int unix_listen_saddr(UnixSocketAddress *saddr,
     strncpy(un.sun_path, path, sizeof(un.sun_path));
 
     if (bind(sock, (struct sockaddr*) &un, sizeof(un)) < 0) {
-        error_setg_errno(errp, errno, "Failed to bind socket to %s", un.sun_path);
+        error_setg_errno(errp, errno, "Failed to bind socket to %s", path);
         goto err;
     }
     if (listen(sock, 1) < 0) {
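
The qemu-sockets.c fix reports the caller-supplied path rather than the copy in un.sun_path: strncpy() does not NUL-terminate the destination when the source is at least as long as the buffer, so formatting the copy with %s could read past it (and would show a truncated name anyway). A standalone illustration of the strncpy pitfall, using a hypothetical 8-byte buffer:

    /* Demonstration (not QEMU code) of why printing the strncpy'd copy is
     * risky: the copy is not NUL-terminated when the source fills it. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char dst[8];
        const char *path = "/tmp/very-long-unix-socket-name.sock";

        strncpy(dst, path, sizeof(dst));  /* dst is now NOT NUL-terminated */
        printf("safe:   %s\n", path);     /* original string: always terminated */
        printf("unsafe: %.8s...\n", dst); /* the read must be bounded explicitly */
        return 0;
    }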
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index eacd99e497..4e95d272dc 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -43,12 +43,15 @@ void qemu_mutex_init(QemuMutex *mutex)
     err = pthread_mutex_init(&mutex->lock, NULL);
     if (err)
         error_exit(err, __func__);
+    mutex->initialized = true;
 }
 
 void qemu_mutex_destroy(QemuMutex *mutex)
 {
     int err;
 
+    assert(mutex->initialized);
+    mutex->initialized = false;
     err = pthread_mutex_destroy(&mutex->lock);
     if (err)
         error_exit(err, __func__);
@@ -58,6 +61,7 @@ void qemu_mutex_lock(QemuMutex *mutex)
 {
     int err;
 
+    assert(mutex->initialized);
     err = pthread_mutex_lock(&mutex->lock);
     if (err)
         error_exit(err, __func__);
@@ -69,6 +73,7 @@ int qemu_mutex_trylock(QemuMutex *mutex)
 {
     int err;
 
+    assert(mutex->initialized);
     err = pthread_mutex_trylock(&mutex->lock);
     if (err == 0) {
         trace_qemu_mutex_locked(mutex);
@@ -84,6 +89,7 @@ void qemu_mutex_unlock(QemuMutex *mutex)
 {
     int err;
 
+    assert(mutex->initialized);
     trace_qemu_mutex_unlocked(mutex);
     err = pthread_mutex_unlock(&mutex->lock);
     if (err)
@@ -102,6 +108,7 @@ void qemu_rec_mutex_init(QemuRecMutex *mutex)
     if (err) {
         error_exit(err, __func__);
     }
+    mutex->initialized = true;
 }
 
 void qemu_cond_init(QemuCond *cond)
@@ -111,12 +118,15 @@ void qemu_cond_init(QemuCond *cond)
     err = pthread_cond_init(&cond->cond, NULL);
     if (err)
         error_exit(err, __func__);
+    cond->initialized = true;
 }
 
 void qemu_cond_destroy(QemuCond *cond)
 {
     int err;
 
+    assert(cond->initialized);
+    cond->initialized = false;
     err = pthread_cond_destroy(&cond->cond);
     if (err)
         error_exit(err, __func__);
@@ -126,6 +136,7 @@ void qemu_cond_signal(QemuCond *cond)
 {
     int err;
 
+    assert(cond->initialized);
     err = pthread_cond_signal(&cond->cond);
     if (err)
         error_exit(err, __func__);
@@ -135,6 +146,7 @@ void qemu_cond_broadcast(QemuCond *cond)
 {
     int err;
 
+    assert(cond->initialized);
     err = pthread_cond_broadcast(&cond->cond);
     if (err)
         error_exit(err, __func__);
@@ -144,6 +156,7 @@ void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
 {
     int err;
 
+    assert(cond->initialized);
     trace_qemu_mutex_unlocked(mutex);
     err = pthread_cond_wait(&cond->cond, &mutex->lock);
     trace_qemu_mutex_locked(mutex);
@@ -174,12 +187,15 @@ void qemu_sem_init(QemuSemaphore *sem, int init)
         error_exit(errno, __func__);
     }
 #endif
+    sem->initialized = true;
 }
 
 void qemu_sem_destroy(QemuSemaphore *sem)
 {
     int rc;
 
+    assert(sem->initialized);
+    sem->initialized = false;
 #if defined(__APPLE__) || defined(__NetBSD__)
     rc = pthread_cond_destroy(&sem->cond);
     if (rc < 0) {
@@ -201,6 +217,7 @@ void qemu_sem_post(QemuSemaphore *sem)
 {
     int rc;
 
+    assert(sem->initialized);
 #if defined(__APPLE__) || defined(__NetBSD__)
     pthread_mutex_lock(&sem->lock);
     if (sem->count == UINT_MAX) {
@@ -238,6 +255,7 @@ int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
     int rc;
     struct timespec ts;
 
+    assert(sem->initialized);
 #if defined(__APPLE__) || defined(__NetBSD__)
     rc = 0;
     compute_abs_deadline(&ts, ms);
@@ -285,6 +303,7 @@ void qemu_sem_wait(QemuSemaphore *sem)
 {
     int rc;
 
+    assert(sem->initialized);
 #if defined(__APPLE__) || defined(__NetBSD__)
     pthread_mutex_lock(&sem->lock);
     while (sem->count == 0) {
@@ -310,6 +329,7 @@ void qemu_sem_wait(QemuSemaphore *sem)
 #else
 static inline void qemu_futex_wake(QemuEvent *ev, int n)
 {
+    assert(ev->initialized);
     pthread_mutex_lock(&ev->lock);
     if (n == 1) {
         pthread_cond_signal(&ev->cond);
@@ -321,6 +341,7 @@ static inline void qemu_futex_wake(QemuEvent *ev, int n)
 
 static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
 {
+    assert(ev->initialized);
     pthread_mutex_lock(&ev->lock);
     if (ev->value == val) {
         pthread_cond_wait(&ev->cond, &ev->lock);
@@ -355,10 +376,13 @@ void qemu_event_init(QemuEvent *ev, bool init)
 #endif
 
     ev->value = (init ? EV_SET : EV_FREE);
+    ev->initialized = true;
 }
 
 void qemu_event_destroy(QemuEvent *ev)
 {
+    assert(ev->initialized);
+    ev->initialized = false;
 #ifndef __linux__
     pthread_mutex_destroy(&ev->lock);
     pthread_cond_destroy(&ev->cond);
@@ -370,6 +394,7 @@ void qemu_event_set(QemuEvent *ev)
     /* qemu_event_set has release semantics, but because it *loads*
      * ev->value we need a full memory barrier here.
      */
+    assert(ev->initialized);
     smp_mb();
     if (atomic_read(&ev->value) != EV_SET) {
         if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
@@ -383,6 +408,7 @@ void qemu_event_reset(QemuEvent *ev)
 {
     unsigned value;
 
+    assert(ev->initialized);
     value = atomic_read(&ev->value);
     smp_mb_acquire();
     if (value == EV_SET) {
@@ -398,6 +424,7 @@ void qemu_event_wait(QemuEvent *ev)
 {
     unsigned value;
 
+    assert(ev->initialized);
     value = atomic_read(&ev->value);
     smp_mb_acquire();
     if (value != EV_SET) {
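
The bulk of the qemu-thread-posix.c changes (and the qemu-thread-win32.c changes below) follow one pattern: every primitive gains an initialized flag that init sets, destroy clears, and every other entry point asserts, so use-before-init and use-after-destroy abort immediately instead of remaining silent undefined behaviour. A self-contained sketch of the pattern, using a simplified stand-in type (ExampleMutex is illustrative, not QEMU's actual QemuMutex definition):

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t lock;
        bool initialized;               /* mirrors the flag added in this series */
    } ExampleMutex;

    static void example_mutex_init(ExampleMutex *m)
    {
        pthread_mutex_init(&m->lock, NULL);
        m->initialized = true;          /* primitive is now usable */
    }

    static void example_mutex_lock(ExampleMutex *m)
    {
        assert(m->initialized);         /* traps use-before-init / use-after-destroy */
        pthread_mutex_lock(&m->lock);
    }

    static void example_mutex_unlock(ExampleMutex *m)
    {
        assert(m->initialized);
        pthread_mutex_unlock(&m->lock);
    }

    static void example_mutex_destroy(ExampleMutex *m)
    {
        assert(m->initialized);
        m->initialized = false;         /* any later call will assert */
        pthread_mutex_destroy(&m->lock);
    }

    int main(void)
    {
        ExampleMutex m;

        example_mutex_init(&m);
        example_mutex_lock(&m);
        example_mutex_unlock(&m);
        example_mutex_destroy(&m);
        /* example_mutex_lock(&m); would now abort via the assertion */
        return 0;
    }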
diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c
index 653f29f442..94f3491a87 100644
--- a/util/qemu-thread-win32.c
+++ b/util/qemu-thread-win32.c
@@ -46,15 +46,19 @@ static void error_exit(int err, const char *msg)
 void qemu_mutex_init(QemuMutex *mutex)
 {
     InitializeSRWLock(&mutex->lock);
+    mutex->initialized = true;
 }
 
 void qemu_mutex_destroy(QemuMutex *mutex)
 {
+    assert(mutex->initialized);
+    mutex->initialized = false;
     InitializeSRWLock(&mutex->lock);
 }
 
 void qemu_mutex_lock(QemuMutex *mutex)
 {
+    assert(mutex->initialized);
     AcquireSRWLockExclusive(&mutex->lock);
     trace_qemu_mutex_locked(mutex);
 }
@@ -63,6 +67,7 @@ int qemu_mutex_trylock(QemuMutex *mutex)
 {
     int owned;
 
+    assert(mutex->initialized);
     owned = TryAcquireSRWLockExclusive(&mutex->lock);
     if (owned) {
         trace_qemu_mutex_locked(mutex);
@@ -73,6 +78,7 @@ int qemu_mutex_trylock(QemuMutex *mutex)
 
 void qemu_mutex_unlock(QemuMutex *mutex)
 {
+    assert(mutex->initialized);
     trace_qemu_mutex_unlocked(mutex);
     ReleaseSRWLockExclusive(&mutex->lock);
 }
@@ -80,25 +86,31 @@ void qemu_mutex_unlock(QemuMutex *mutex)
 void qemu_rec_mutex_init(QemuRecMutex *mutex)
 {
     InitializeCriticalSection(&mutex->lock);
+    mutex->initialized = true;
 }
 
 void qemu_rec_mutex_destroy(QemuRecMutex *mutex)
 {
+    assert(mutex->initialized);
+    mutex->initialized = false;
     DeleteCriticalSection(&mutex->lock);
 }
 
 void qemu_rec_mutex_lock(QemuRecMutex *mutex)
 {
+    assert(mutex->initialized);
     EnterCriticalSection(&mutex->lock);
 }
 
 int qemu_rec_mutex_trylock(QemuRecMutex *mutex)
 {
+    assert(mutex->initialized);
     return !TryEnterCriticalSection(&mutex->lock);
 }
 
 void qemu_rec_mutex_unlock(QemuRecMutex *mutex)
 {
+    assert(mutex->initialized);
     LeaveCriticalSection(&mutex->lock);
 }
 
@@ -106,25 +118,31 @@ void qemu_cond_init(QemuCond *cond)
 {
     memset(cond, 0, sizeof(*cond));
     InitializeConditionVariable(&cond->var);
+    cond->initialized = true;
 }
 
 void qemu_cond_destroy(QemuCond *cond)
 {
+    assert(cond->initialized);
+    cond->initialized = false;
     InitializeConditionVariable(&cond->var);
 }
 
 void qemu_cond_signal(QemuCond *cond)
 {
+    assert(cond->initialized);
     WakeConditionVariable(&cond->var);
 }
 
 void qemu_cond_broadcast(QemuCond *cond)
 {
+    assert(cond->initialized);
     WakeAllConditionVariable(&cond->var);
 }
 
 void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
 {
+    assert(cond->initialized);
     trace_qemu_mutex_unlocked(mutex);
     SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
     trace_qemu_mutex_locked(mutex);
@@ -134,21 +152,28 @@ void qemu_sem_init(QemuSemaphore *sem, int init)
 {
     /* Manual reset.  */
     sem->sema = CreateSemaphore(NULL, init, LONG_MAX, NULL);
+    sem->initialized = true;
 }
 
 void qemu_sem_destroy(QemuSemaphore *sem)
 {
+    assert(sem->initialized);
+    sem->initialized = false;
     CloseHandle(sem->sema);
 }
 
 void qemu_sem_post(QemuSemaphore *sem)
 {
+    assert(sem->initialized);
     ReleaseSemaphore(sem->sema, 1, NULL);
 }
 
 int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
 {
-    int rc = WaitForSingleObject(sem->sema, ms);
+    int rc;
+
+    assert(sem->initialized);
+    rc = WaitForSingleObject(sem->sema, ms);
     if (rc == WAIT_OBJECT_0) {
         return 0;
     }
@@ -160,6 +185,7 @@ int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
 
 void qemu_sem_wait(QemuSemaphore *sem)
 {
+    assert(sem->initialized);
     if (WaitForSingleObject(sem->sema, INFINITE) != WAIT_OBJECT_0) {
         error_exit(GetLastError(), __func__);
     }
@@ -193,15 +219,19 @@ void qemu_event_init(QemuEvent *ev, bool init)
     /* Manual reset.  */
     ev->event = CreateEvent(NULL, TRUE, TRUE, NULL);
     ev->value = (init ? EV_SET : EV_FREE);
+    ev->initialized = true;
 }
 
 void qemu_event_destroy(QemuEvent *ev)
 {
+    assert(ev->initialized);
+    ev->initialized = false;
     CloseHandle(ev->event);
 }
 
 void qemu_event_set(QemuEvent *ev)
 {
+    assert(ev->initialized);
     /* qemu_event_set has release semantics, but because it *loads*
      * ev->value we need a full memory barrier here.
      */
@@ -218,6 +248,7 @@ void qemu_event_reset(QemuEvent *ev)
 {
     unsigned value;
 
+    assert(ev->initialized);
     value = atomic_read(&ev->value);
     smp_mb_acquire();
     if (value == EV_SET) {
@@ -232,6 +263,7 @@ void qemu_event_wait(QemuEvent *ev)
 {
     unsigned value;
 
+    assert(ev->initialized);
     value = atomic_read(&ev->value);
     smp_mb_acquire();
     if (value != EV_SET) {
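
Taken together, the assertions turn misuse of the threading primitives into an immediate, diagnosable abort on both POSIX and Win32. A hypothetical usage sketch of what they catch, written against the qemu_mutex_* API shown in the diff; the include paths assume QEMU's tree layout, so this only builds inside the QEMU source tree.

    /* Hypothetical in-tree example; not part of this commit. */
    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    static void use_after_destroy(void)
    {
        QemuMutex m;

        qemu_mutex_init(&m);        /* sets m.initialized = true */
        qemu_mutex_lock(&m);
        qemu_mutex_unlock(&m);
        qemu_mutex_destroy(&m);     /* sets m.initialized = false */

        qemu_mutex_lock(&m);        /* previously silent misuse; now trips
                                     * assert(mutex->initialized) */
    }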