author     Peter Maydell <peter.maydell@linaro.org>   2020-09-24 18:48:45 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2020-09-24 18:48:45 +0100
commit     8c1c07929feae876202ba26f07a540c5115c18cd (patch)
tree       20f6c8e2ac556bfb3c88a98c0d0cb2689de0263e /util/qemu-coroutine-lock.c
parent     1bd5556f6686365e76f7ff67fe67260c449e8345 (diff)
parent     d73415a315471ac0b127ed3fad45c8ec5d711de1 (diff)
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Pull request

This includes the atomic_ -> qatomic_ rename that touches many files and is
prone to conflicts.

# gpg: Signature made Wed 23 Sep 2020 17:08:43 BST
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  qemu/atomic.h: rename atomic_ to qatomic_
  tests: add test-fdmon-epoll
  fdmon-poll: reset npfd when upgrading to fdmon-epoll
  gitmodules: add qemu.org vbootrom submodule
  gitmodules: switch to qemu.org meson mirror
  gitmodules: switch to qemu.org qboot mirror
  docs/system: clarify deprecation schedule
  virtio-crypto: don't modify elem->in/out_sg
  virtio-blk: undo destructive iov_discard_*() operations
  util/iov: add iov_discard_undo()
  virtio: add vhost-user-fs-ccw device
  libvhost-user: handle endianness as mandated by the spec
  MAINTAINERS: add Stefan Hajnoczi as block/nvme.c maintainer

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
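
The atomic_ -> qatomic_ rename called out in the description is a mechanical prefix change: the helpers from qemu/atomic.h keep their arguments and semantics, as the hunks below illustrate. A minimal before/after sketch (hypothetical caller, not taken from this series):

    #include "qemu/atomic.h"

    static int flag;

    void example(void)
    {
        /* Before this series the call was atomic_cmpxchg(&flag, 0, 1). */
        if (qatomic_cmpxchg(&flag, 0, 1) == 0) {
            /* we set the flag */
        }

        /* Before: atomic_mb_read()/atomic_mb_set(); after, the same calls
         * spelled with the qatomic_ prefix and unchanged barrier semantics. */
        int val = qatomic_mb_read(&flag);
        qatomic_mb_set(&flag, val);
    }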
Diffstat (limited to 'util/qemu-coroutine-lock.c')
-rw-r--r--  util/qemu-coroutine-lock.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index 5da5234155..36927b5f88 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -212,10 +212,10 @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
     /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
      * a concurrent unlock() the responsibility of waking somebody up.
      */
-    old_handoff = atomic_mb_read(&mutex->handoff);
+    old_handoff = qatomic_mb_read(&mutex->handoff);
     if (old_handoff &&
         has_waiters(mutex) &&
-        atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
+        qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
         /* There can be no concurrent pops, because there can be only
          * one active handoff at a time.
          */
@@ -250,18 +250,18 @@ void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
      */
     i = 0;
 retry_fast_path:
-    waiters = atomic_cmpxchg(&mutex->locked, 0, 1);
+    waiters = qatomic_cmpxchg(&mutex->locked, 0, 1);
     if (waiters != 0) {
         while (waiters == 1 && ++i < 1000) {
-            if (atomic_read(&mutex->ctx) == ctx) {
+            if (qatomic_read(&mutex->ctx) == ctx) {
                 break;
             }
-            if (atomic_read(&mutex->locked) == 0) {
+            if (qatomic_read(&mutex->locked) == 0) {
                 goto retry_fast_path;
             }
             cpu_relax();
         }
-        waiters = atomic_fetch_inc(&mutex->locked);
+        waiters = qatomic_fetch_inc(&mutex->locked);
     }
 
     if (waiters == 0) {
@@ -288,7 +288,7 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
     mutex->ctx = NULL;
     mutex->holder = NULL;
     self->locks_held--;
-    if (atomic_fetch_dec(&mutex->locked) == 1) {
+    if (qatomic_fetch_dec(&mutex->locked) == 1) {
         /* No waiting qemu_co_mutex_lock().  Pfew, that was easy!  */
         return;
     }
@@ -311,7 +311,7 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
         }
 
         our_handoff = mutex->sequence;
-        atomic_mb_set(&mutex->handoff, our_handoff);
+        qatomic_mb_set(&mutex->handoff, our_handoff);
         if (!has_waiters(mutex)) {
             /* The concurrent lock has not added itself yet, so it
              * will be able to pick our handoff.
@@ -322,7 +322,7 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
         /* Try to do the handoff protocol ourselves; if somebody else has
          * already taken it, however, we're done and they're responsible.
          */
-        if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
+        if (qatomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
             break;
         }
     }
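
For readers following the "Responsibility Hand-Off" comments in the hunks above, here is a heavily simplified, hypothetical sketch of that protocol. The rho_* names and the counter standing in for the waiter queue are illustrative only, not QEMU APIs (the real CoMutex keeps a queue of waiting coroutines and wakes them through the coroutine scheduler); it uses C11 <stdatomic.h> rather than qemu/atomic.h so it compiles standalone.

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
        atomic_uint locked;    /* 0 free, 1 taken, >1 taken with pending lockers */
        atomic_uint handoff;   /* nonzero token: an unclaimed wakeup duty */
        unsigned sequence;     /* source of unique handoff tokens */
        atomic_uint queued;    /* stand-in for the real queue of waiting coroutines */
    } rho_mutex;

    static bool rho_has_waiters(rho_mutex *m)
    {
        return atomic_load(&m->queued) != 0;
    }

    static void rho_wake_one(rho_mutex *m)
    {
        /* Real code: pop a coroutine from the wait queue and schedule it. */
        atomic_fetch_sub(&m->queued, 1);
    }

    static void rho_lock_slowpath(rho_mutex *m)
    {
        atomic_fetch_add(&m->queued, 1);          /* queue ourselves as a waiter */

        /* A lock() may pick from a concurrent unlock() the responsibility of
         * waking somebody up: claim the published token with a cmpxchg. */
        unsigned old = atomic_load(&m->handoff);
        if (old != 0 && rho_has_waiters(m) &&
            atomic_compare_exchange_strong(&m->handoff, &old, 0)) {
            rho_wake_one(m);                      /* possibly ourselves */
        }
        /* ...otherwise yield until woken by the unlocker... */
    }

    static void rho_unlock(rho_mutex *m)
    {
        if (atomic_fetch_sub(&m->locked, 1) == 1) {
            return;                               /* nobody was waiting */
        }

        for (;;) {
            if (rho_has_waiters(m)) {
                rho_wake_one(m);                  /* normal case: wake a waiter */
                return;
            }

            /* A locker bumped ->locked but has not queued itself yet: publish
             * a handoff token it can claim instead of waiting for it here. */
            unsigned token = ++m->sequence;
            atomic_store(&m->handoff, token);
            if (!rho_has_waiters(m)) {
                return;                           /* the late locker picks it up */
            }

            /* The waiter showed up after all; try to take the token back and
             * wake it ourselves.  If somebody already claimed it, they are
             * now responsible and we are done. */
            if (!atomic_compare_exchange_strong(&m->handoff, &token, 0)) {
                return;
            }
        }
    }

The token closes the window where an unlocker sees locked > 1 but the incoming locker has not queued itself yet: exactly one of the two ends up responsible for the wakeup, decided by whoever wins the cmpxchg on handoff, which is why the hunks above stress that only one handoff can be active at a time.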