Diffstat (limited to 'include/qemu'):

 -rw-r--r--  include/qemu/atomic.h     258
 -rw-r--r--  include/qemu/atomic128.h    6
 -rw-r--r--  include/qemu/bitops.h       2
 -rw-r--r--  include/qemu/coroutine.h    2
 -rw-r--r--  include/qemu/iov.h         23
 -rw-r--r--  include/qemu/log.h          6
 -rw-r--r--  include/qemu/queue.h        7
 -rw-r--r--  include/qemu/rcu.h         10
 -rw-r--r--  include/qemu/rcu_queue.h  100
 -rw-r--r--  include/qemu/seqlock.h      8
 -rw-r--r--  include/qemu/stats64.h     28
 -rw-r--r--  include/qemu/thread.h      24

 12 files changed, 252 insertions(+), 222 deletions(-)
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index ff72db5115..c1d211a351 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -125,49 +125,49 @@
  * no effect on the generated code, but not using the atomic primitives
  * will get flagged by sanitizers as a violation.
  */
-#define atomic_read__nocheck(ptr) \
+#define qatomic_read__nocheck(ptr) \
     __atomic_load_n(ptr, __ATOMIC_RELAXED)
 
-#define atomic_read(ptr)                              \
-    ({                                                \
+#define qatomic_read(ptr)                              \
+    ({                                                 \
     QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
-    atomic_read__nocheck(ptr);                        \
+    qatomic_read__nocheck(ptr);                        \
     })
 
-#define atomic_set__nocheck(ptr, i) \
+#define qatomic_set__nocheck(ptr, i) \
     __atomic_store_n(ptr, i, __ATOMIC_RELAXED)
 
-#define atomic_set(ptr, i)  do {                      \
+#define qatomic_set(ptr, i)  do {                      \
     QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
-    atomic_set__nocheck(ptr, i);                      \
+    qatomic_set__nocheck(ptr, i);                      \
 } while(0)
 
 /* See above: most compilers currently treat consume and acquire the
- * same, but this slows down atomic_rcu_read unnecessarily.
+ * same, but this slows down qatomic_rcu_read unnecessarily.
  */
 #ifdef __SANITIZE_THREAD__
-#define atomic_rcu_read__nocheck(ptr, valptr)           \
+#define qatomic_rcu_read__nocheck(ptr, valptr)           \
     __atomic_load(ptr, valptr, __ATOMIC_CONSUME);
 #else
-#define atomic_rcu_read__nocheck(ptr, valptr)           \
-    __atomic_load(ptr, valptr, __ATOMIC_RELAXED);       \
+#define qatomic_rcu_read__nocheck(ptr, valptr)           \
+    __atomic_load(ptr, valptr, __ATOMIC_RELAXED);        \
     smp_read_barrier_depends();
 #endif
 
-#define atomic_rcu_read(ptr)                          \
-    ({                                                \
+#define qatomic_rcu_read(ptr)                          \
+    ({                                                 \
     QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
-    typeof_strip_qual(*ptr) _val;                     \
-    atomic_rcu_read__nocheck(ptr, &_val);             \
-    _val;                                             \
+    typeof_strip_qual(*ptr) _val;                      \
+    qatomic_rcu_read__nocheck(ptr, &_val);             \
+    _val;                                              \
     })
 
-#define atomic_rcu_set(ptr, i) do {                   \
+#define qatomic_rcu_set(ptr, i) do {                   \
     QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
-    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);       \
+    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);        \
 } while(0)
 
-#define atomic_load_acquire(ptr)                        \
+#define qatomic_load_acquire(ptr)                       \
     ({                                                  \
     QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
     typeof_strip_qual(*ptr) _val;                       \
@@ -175,7 +175,7 @@
     _val;                                               \
     })
 
-#define atomic_store_release(ptr, i)  do {              \
+#define qatomic_store_release(ptr, i)  do {             \
     QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
     __atomic_store_n(ptr, i, __ATOMIC_RELEASE);         \
 } while(0)
@@ -183,56 +183,61 @@
 
 /* All the remaining operations are fully sequentially consistent */
 
-#define atomic_xchg__nocheck(ptr, i)    ({                  \
+#define qatomic_xchg__nocheck(ptr, i)    ({                 \
     __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST);        \
 })
 
-#define atomic_xchg(ptr, i)    ({                           \
+#define qatomic_xchg(ptr, i)    ({                          \
     QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);      \
-    atomic_xchg__nocheck(ptr, i);                           \
+    qatomic_xchg__nocheck(ptr, i);                          \
 })
 
 /* Returns the eventual value, failed or not */
-#define atomic_cmpxchg__nocheck(ptr, old, new)    ({                    \
+#define qatomic_cmpxchg__nocheck(ptr, old, new)    ({                   \
     typeof_strip_qual(*ptr) _old = (old);                               \
     (void)__atomic_compare_exchange_n(ptr, &_old, new, false,           \
                               __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);      \
     _old;                                                               \
 })
 
-#define atomic_cmpxchg(ptr, old, new)    ({                             \
+#define qatomic_cmpxchg(ptr, old, new)    ({                            \
     QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);                  \
-    atomic_cmpxchg__nocheck(ptr, old, new);                             \
+    qatomic_cmpxchg__nocheck(ptr, old, new);                            \
 })
 
 /* Provide shorter names for GCC atomic builtins, return old value */
-#define atomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_fetch_dec(ptr)  __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
-
-#ifndef atomic_fetch_add
-#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
-#endif
-
-#define atomic_inc_fetch(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_dec_fetch(ptr)    __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_dec(ptr)  __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
+
+#define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
+
+#define qatomic_inc_fetch(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
+#define qatomic_dec_fetch(ptr)    __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
+#define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
 
 /* And even shorter names that return void.  */
-#define atomic_inc(ptr)    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
-#define atomic_dec(ptr)    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
-#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_or(ptr, n)  ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_inc(ptr) \
+    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
+#define qatomic_dec(ptr) \
+    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
+#define qatomic_add(ptr, n) \
+    ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_sub(ptr, n) \
+    ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_and(ptr, n) \
+    ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_or(ptr, n) \
+    ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_xor(ptr, n) \
+    ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
 
 #else /* __ATOMIC_RELAXED */
 
@@ -272,7 +277,7 @@
  * but it is a full barrier at the hardware level.  Add a compiler barrier
  * to make it a full barrier also at the compiler level.
  */
-#define atomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))
+#define qatomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))
 
 #elif defined(_ARCH_PPC)
 
@@ -325,14 +330,14 @@
 /* These will only be atomic if the processor does the fetch or store
  * in a single issue memory operation
  */
-#define atomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile*) (p))
-#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
+#define qatomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile*) (p))
+#define qatomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
 
-#define atomic_read(ptr)       atomic_read__nocheck(ptr)
-#define atomic_set(ptr, i)     atomic_set__nocheck(ptr,i)
+#define qatomic_read(ptr)       qatomic_read__nocheck(ptr)
+#define qatomic_set(ptr, i)     qatomic_set__nocheck(ptr,i)
 
 /**
- * atomic_rcu_read - reads a RCU-protected pointer to a local variable
+ * qatomic_rcu_read - reads an RCU-protected pointer into a local variable
  * inside an RCU read-side critical section. The pointer can later be safely
  * dereferenced within the critical section.
  *
@@ -342,21 +347,21 @@
  * Inserts memory barriers on architectures that require them (currently only
  * Alpha) and documents which pointers are protected by RCU.
  *
- * atomic_rcu_read also includes a compiler barrier to ensure that
+ * qatomic_rcu_read also includes a compiler barrier to ensure that
  * value-speculative optimizations (e.g. VSS: Value Speculation
 * Scheduling) do not perform the data read before the pointer read
  * by speculating the value of the pointer.
  *
- * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
+ * Should match qatomic_rcu_set(), qatomic_xchg(), qatomic_cmpxchg().
  */
-#define atomic_rcu_read(ptr)    ({                \
-    typeof(*ptr) _val = atomic_read(ptr);         \
+#define qatomic_rcu_read(ptr)    ({               \
+    typeof(*ptr) _val = qatomic_read(ptr);        \
     smp_read_barrier_depends();                   \
     _val;                                         \
 })
 
 /**
- * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
+ * qatomic_rcu_set - assigns (publicizes) a pointer to a new data structure
  * meant to be read by RCU read-side critical sections.
  *
  * Documents which pointers will be dereferenced by RCU read-side critical
@@ -364,65 +369,64 @@
  * them. It also makes sure the compiler does not reorder code initializing the
  * data structure before its publication.
  *
- * Should match atomic_rcu_read().
+ * Should match qatomic_rcu_read().
  */
-#define atomic_rcu_set(ptr, i)  do {              \
+#define qatomic_rcu_set(ptr, i)  do {             \
     smp_wmb();                                    \
-    atomic_set(ptr, i);                           \
+    qatomic_set(ptr, i);                          \
 } while (0)
 
-#define atomic_load_acquire(ptr)    ({      \
-    typeof(*ptr) _val = atomic_read(ptr);   \
+#define qatomic_load_acquire(ptr)    ({     \
+    typeof(*ptr) _val = qatomic_read(ptr);  \
     smp_mb_acquire();                       \
     _val;                                   \
 })
 
-#define atomic_store_release(ptr, i)  do {  \
+#define qatomic_store_release(ptr, i)  do { \
     smp_mb_release();                       \
-    atomic_set(ptr, i);                     \
+    qatomic_set(ptr, i);                    \
 } while (0)
 
-#ifndef atomic_xchg
+#ifndef qatomic_xchg
 #if defined(__clang__)
-#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
+#define qatomic_xchg(ptr, i)    __sync_swap(ptr, i)
 #else
 /* __sync_lock_test_and_set() is documented to be an acquire barrier only.  */
-#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
+#define qatomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
 #endif
 #endif
-#define atomic_xchg__nocheck  atomic_xchg
+#define qatomic_xchg__nocheck  qatomic_xchg
 
 /* Provide shorter names for GCC atomic builtins.  */
-#define atomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
-#define atomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
-
-#ifndef atomic_fetch_add
-#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
-#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
-#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
-#define atomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n)
-#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)
-#endif
-
-#define atomic_inc_fetch(ptr)  __sync_add_and_fetch(ptr, 1)
-#define atomic_dec_fetch(ptr)  __sync_add_and_fetch(ptr, -1)
-#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
-#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
-#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
-#define atomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n)
-#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)
-
-#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
-#define atomic_cmpxchg__nocheck(ptr, old, new)  atomic_cmpxchg(ptr, old, new)
+#define qatomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
+#define qatomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
+
+#define qatomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
+#define qatomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
+#define qatomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
+#define qatomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n)
+#define qatomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)
+
+#define qatomic_inc_fetch(ptr)  __sync_add_and_fetch(ptr, 1)
+#define qatomic_dec_fetch(ptr)  __sync_add_and_fetch(ptr, -1)
+#define qatomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
+#define qatomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
+#define qatomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
+#define qatomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n)
+#define qatomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)
+
+#define qatomic_cmpxchg(ptr, old, new) \
+    __sync_val_compare_and_swap(ptr, old, new)
+#define qatomic_cmpxchg__nocheck(ptr, old, new)  qatomic_cmpxchg(ptr, old, new)
 
 /* And even shorter names that return void.  */
-#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
-#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
-#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
-#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
-#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
-#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
-#define atomic_xor(ptr, n)     ((void) __sync_fetch_and_xor(ptr, n))
+#define qatomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
+#define qatomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
+#define qatomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
+#define qatomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
+#define qatomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
+#define qatomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
+#define qatomic_xor(ptr, n)     ((void) __sync_fetch_and_xor(ptr, n))
 
 #endif /* __ATOMIC_RELAXED */
 
@@ -436,11 +440,11 @@
 /* This is more efficient than a store plus a fence.  */
 #if !defined(__SANITIZE_THREAD__)
 #if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
-#define atomic_mb_set(ptr, i)  ((void)atomic_xchg(ptr, i))
+#define qatomic_mb_set(ptr, i)  ((void)qatomic_xchg(ptr, i))
 #endif
 #endif
 
-/* atomic_mb_read/set semantics map Java volatile variables. They are
+/* qatomic_mb_read/set semantics map to Java volatile variables. They are
  * less expensive on some platforms (notably POWER) than fully
  * sequentially consistent operations.
  *
@@ -448,58 +452,58 @@
  * use. See docs/devel/atomics.txt for more discussion.
  */
 
-#ifndef atomic_mb_read
-#define atomic_mb_read(ptr)                             \
-    atomic_load_acquire(ptr)
+#ifndef qatomic_mb_read
+#define qatomic_mb_read(ptr)                             \
+    qatomic_load_acquire(ptr)
 #endif
 
-#ifndef atomic_mb_set
-#define atomic_mb_set(ptr, i)  do {                     \
-    atomic_store_release(ptr, i);                       \
+#ifndef qatomic_mb_set
+#define qatomic_mb_set(ptr, i)  do {                    \
+    qatomic_store_release(ptr, i);                      \
     smp_mb();                                           \
 } while(0)
 #endif
 
-#define atomic_fetch_inc_nonzero(ptr) ({                                \
-    typeof_strip_qual(*ptr) _oldn = atomic_read(ptr);                   \
-    while (_oldn && atomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) {   \
-        _oldn = atomic_read(ptr);                                       \
+#define qatomic_fetch_inc_nonzero(ptr) ({                               \
+    typeof_strip_qual(*ptr) _oldn = qatomic_read(ptr);                  \
+    while (_oldn && qatomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) {  \
+        _oldn = qatomic_read(ptr);                                      \
     }                                                                   \
     _oldn;                                                              \
 })
 
 /* Abstractions to access atomically (i.e. "once") i64/u64 variables */
 #ifdef CONFIG_ATOMIC64
-static inline int64_t atomic_read_i64(const int64_t *ptr)
+static inline int64_t qatomic_read_i64(const int64_t *ptr)
 {
     /* use __nocheck because sizeof(void *) might be < sizeof(u64) */
-    return atomic_read__nocheck(ptr);
+    return qatomic_read__nocheck(ptr);
 }
 
-static inline uint64_t atomic_read_u64(const uint64_t *ptr)
+static inline uint64_t qatomic_read_u64(const uint64_t *ptr)
 {
-    return atomic_read__nocheck(ptr);
+    return qatomic_read__nocheck(ptr);
 }
 
-static inline void atomic_set_i64(int64_t *ptr, int64_t val)
+static inline void qatomic_set_i64(int64_t *ptr, int64_t val)
 {
-    atomic_set__nocheck(ptr, val);
+    qatomic_set__nocheck(ptr, val);
 }
 
-static inline void atomic_set_u64(uint64_t *ptr, uint64_t val)
+static inline void qatomic_set_u64(uint64_t *ptr, uint64_t val)
 {
-    atomic_set__nocheck(ptr, val);
+    qatomic_set__nocheck(ptr, val);
 }
 
-static inline void atomic64_init(void)
+static inline void qatomic64_init(void)
 {
 }
 #else /* !CONFIG_ATOMIC64 */
-int64_t  atomic_read_i64(const int64_t *ptr);
-uint64_t atomic_read_u64(const uint64_t *ptr);
-void atomic_set_i64(int64_t *ptr, int64_t val);
-void atomic_set_u64(uint64_t *ptr, uint64_t val);
-void atomic64_init(void);
+int64_t  qatomic_read_i64(const int64_t *ptr);
+uint64_t qatomic_read_u64(const uint64_t *ptr);
+void qatomic_set_i64(int64_t *ptr, int64_t val);
+void qatomic_set_u64(uint64_t *ptr, uint64_t val);
+void qatomic64_init(void);
 #endif /* !CONFIG_ATOMIC64 */
 
 #endif /* QEMU_ATOMIC_H */
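
The rename is mechanical, so call sites keep their shape. A minimal sketch of
the renamed primitives in use - a release store publishing data that a paired
acquire load consumes - assuming a file inside the QEMU tree that includes
"qemu/atomic.h" (all names below are illustrative):

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"

    static int counter;
    static bool ready;

    /* Producer: update data, then publish with release semantics. */
    static void producer(void)
    {
        qatomic_inc(&counter);                /* seq-cst read-modify-write */
        qatomic_store_release(&ready, true);  /* orders the write above first */
    }

    /* Consumer: the acquire load pairs with the release store above. */
    static bool consumer(int *out)
    {
        if (!qatomic_load_acquire(&ready)) {
            return false;
        }
        *out = qatomic_read(&counter);        /* relaxed, size-checked load */
        return true;
    }
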
diff --git a/include/qemu/atomic128.h b/include/qemu/atomic128.h
index 6b34484e15..ad2bcf45b4 100644
--- a/include/qemu/atomic128.h
+++ b/include/qemu/atomic128.h
@@ -44,7 +44,7 @@
 #if defined(CONFIG_ATOMIC128)
 static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
 {
-    return atomic_cmpxchg__nocheck(ptr, cmp, new);
+    return qatomic_cmpxchg__nocheck(ptr, cmp, new);
 }
 # define HAVE_CMPXCHG128 1
 #elif defined(CONFIG_CMPXCHG128)
@@ -89,12 +89,12 @@ Int128 QEMU_ERROR("unsupported atomic")
 #if defined(CONFIG_ATOMIC128)
 static inline Int128 atomic16_read(Int128 *ptr)
 {
-    return atomic_read__nocheck(ptr);
+    return qatomic_read__nocheck(ptr);
 }
 
 static inline void atomic16_set(Int128 *ptr, Int128 val)
 {
-    atomic_set__nocheck(ptr, val);
+    qatomic_set__nocheck(ptr, val);
 }
 
 # define HAVE_ATOMIC128 1
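
atomic16_cmpxchg() is only a real operation when configure detects support;
otherwise it carries QEMU_ERROR("unsupported atomic") and the call must be
optimized away behind a HAVE_CMPXCHG128 check. A hedged sketch of that guard
pattern (int128_make64()/int128_eq() are assumed from "qemu/int128.h";
try_swap16() is illustrative):

    #include "qemu/osdep.h"
    #include "qemu/int128.h"
    #include "qemu/atomic128.h"

    static bool try_swap16(Int128 *ptr)
    {
        Int128 old = int128_make64(0);
        Int128 new = int128_make64(1);

        if (!HAVE_CMPXCHG128) {
            return false;   /* caller must fall back to a lock-based path */
        }
        /* Returns the previous contents, whether or not the swap happened;
         * the constant guard above lets the error attribute fold away. */
        return int128_eq(atomic16_cmpxchg(ptr, old, new), old);
    }
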
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
index f55ce8b320..3acbf3384c 100644
--- a/include/qemu/bitops.h
+++ b/include/qemu/bitops.h
@@ -51,7 +51,7 @@ static inline void set_bit_atomic(long nr, unsigned long *addr)
     unsigned long mask = BIT_MASK(nr);
     unsigned long *p = addr + BIT_WORD(nr);
 
-    atomic_or(p, mask);
+    qatomic_or(p, mask);
 }
 
 /**
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index dfd261c5b1..84eab6e3bf 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -179,7 +179,7 @@ static inline coroutine_fn void qemu_co_mutex_assert_locked(CoMutex *mutex)
      * because the condition will be false no matter whether we read NULL or
      * the pointer for any other coroutine.
      */
-    assert(atomic_read(&mutex->locked) &&
+    assert(qatomic_read(&mutex->locked) &&
            mutex->holder == qemu_coroutine_self());
 }
 
diff --git a/include/qemu/iov.h b/include/qemu/iov.h
index bffc151282..b6b283a5e5 100644
--- a/include/qemu/iov.h
+++ b/include/qemu/iov.h
@@ -130,6 +130,29 @@ size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt,
 size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
                         size_t bytes);
 
+/* Information needed to undo an iov_discard_*() operation */
+typedef struct {
+    struct iovec *modified_iov;
+    struct iovec orig;
+} IOVDiscardUndo;
+
+/*
+ * Undo an iov_discard_front_undoable() or iov_discard_back_undoable()
+ * operation. If multiple operations are made, each one needs a separate
+ * IOVDiscardUndo, and iov_discard_undo() must be called in the reverse
+ * order in which the operations were made.
+ */
+void iov_discard_undo(IOVDiscardUndo *undo);
+
+/*
+ * Undoable versions of iov_discard_front() and iov_discard_back(). Use
+ * iov_discard_undo() to reset to the state before the discard operations.
+ */
+size_t iov_discard_front_undoable(struct iovec **iov, unsigned int *iov_cnt,
+                                  size_t bytes, IOVDiscardUndo *undo);
+size_t iov_discard_back_undoable(struct iovec *iov, unsigned int *iov_cnt,
+                                 size_t bytes, IOVDiscardUndo *undo);
+
 typedef struct QEMUIOVector {
     struct iovec *iov;
     int niov;
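
The comment block above spells out the contract: a discard temporarily
borrows part of the iovec and the undo puts it back. A hedged sketch of the
intended call pattern (the header size and handler are hypothetical):

    #include "qemu/osdep.h"
    #include "qemu/iov.h"

    /* Strip a 4-byte header, process the payload, then restore the iovec
     * so the caller still sees the original request layout. */
    static void handle_request(struct iovec *iov, unsigned int iov_cnt)
    {
        IOVDiscardUndo undo;
        struct iovec *payload = iov;
        unsigned int payload_cnt = iov_cnt;

        iov_discard_front_undoable(&payload, &payload_cnt, 4, &undo);
        /* ... consume payload/payload_cnt ... */
        iov_discard_undo(&undo);  /* reverse order if several discards */
    }
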
diff --git a/include/qemu/log.h b/include/qemu/log.h
index f4724f7330..9b80660207 100644
--- a/include/qemu/log.h
+++ b/include/qemu/log.h
@@ -36,7 +36,7 @@ static inline bool qemu_log_separate(void)
     bool res = false;
 
     rcu_read_lock();
-    logfile = atomic_rcu_read(&qemu_logfile);
+    logfile = qatomic_rcu_read(&qemu_logfile);
     if (logfile && logfile->fd != stderr) {
         res = true;
     }
@@ -75,7 +75,7 @@ static inline FILE *qemu_log_lock(void)
 {
     QemuLogFile *logfile;
     rcu_read_lock();
-    logfile = atomic_rcu_read(&qemu_logfile);
+    logfile = qatomic_rcu_read(&qemu_logfile);
     if (logfile) {
         qemu_flockfile(logfile->fd);
         return logfile->fd;
@@ -102,7 +102,7 @@ qemu_log_vprintf(const char *fmt, va_list va)
     QemuLogFile *logfile;
 
     rcu_read_lock();
-    logfile = atomic_rcu_read(&qemu_logfile);
+    logfile = qatomic_rcu_read(&qemu_logfile);
     if (logfile) {
         vfprintf(logfile->fd, fmt, va);
     }
diff --git a/include/qemu/queue.h b/include/qemu/queue.h
index 456a5b01ee..e029e7bf66 100644
--- a/include/qemu/queue.h
+++ b/include/qemu/queue.h
@@ -218,12 +218,12 @@ struct {                                                                \
         typeof(elm) save_sle_next;                                           \
         do {                                                                 \
             save_sle_next = (elm)->field.sle_next = (head)->slh_first;       \
-        } while (atomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) != \
+        } while (qatomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) !=\
                  save_sle_next);                                             \
 } while (/*CONSTCOND*/0)
 
 #define QSLIST_MOVE_ATOMIC(dest, src) do {                               \
-        (dest)->slh_first = atomic_xchg(&(src)->slh_first, NULL);        \
+        (dest)->slh_first = qatomic_xchg(&(src)->slh_first, NULL);       \
 } while (/*CONSTCOND*/0)
 
 #define QSLIST_REMOVE_HEAD(head, field) do {                             \
@@ -376,7 +376,8 @@ struct {                                                                \
 /*
  * Simple queue access methods.
  */
-#define QSIMPLEQ_EMPTY_ATOMIC(head) (atomic_read(&((head)->sqh_first)) == NULL)
+#define QSIMPLEQ_EMPTY_ATOMIC(head) \
+    (qatomic_read(&((head)->sqh_first)) == NULL)
 #define QSIMPLEQ_EMPTY(head)        ((head)->sqh_first == NULL)
 #define QSIMPLEQ_FIRST(head)        ((head)->sqh_first)
 #define QSIMPLEQ_NEXT(elm, field)   ((elm)->field.sqe_next)
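
QSLIST_INSERT_HEAD_ATOMIC retries the qatomic_cmpxchg() until it wins the
race for the head, so any number of producers can push concurrently, and a
consumer can detach the whole list in one qatomic_xchg() via
QSLIST_MOVE_ATOMIC. A minimal sketch using the stock qemu/queue.h helpers
(the Work type is illustrative):

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"
    #include "qemu/queue.h"

    typedef struct Work {
        QSLIST_ENTRY(Work) next;
        int payload;
    } Work;

    static QSLIST_HEAD(, Work) pending = QSLIST_HEAD_INITIALIZER(pending);

    static void push(Work *w)        /* safe from any thread */
    {
        QSLIST_INSERT_HEAD_ATOMIC(&pending, w, next);
    }

    static void drain(void)          /* one consumer at a time */
    {
        QSLIST_HEAD(, Work) batch;

        QSLIST_MOVE_ATOMIC(&batch, &pending);
        while (!QSLIST_EMPTY(&batch)) {
            Work *w = QSLIST_FIRST(&batch);
            QSLIST_REMOVE_HEAD(&batch, next);
            /* ... process w ... */
        }
    }
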
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index 0e375ebe13..515d327cf1 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -79,8 +79,8 @@ static inline void rcu_read_lock(void)
         return;
     }
 
-    ctr = atomic_read(&rcu_gp_ctr);
-    atomic_set(&p_rcu_reader->ctr, ctr);
+    ctr = qatomic_read(&rcu_gp_ctr);
+    qatomic_set(&p_rcu_reader->ctr, ctr);
 
     /* Write p_rcu_reader->ctr before reading RCU-protected pointers.  */
     smp_mb_placeholder();
@@ -100,12 +100,12 @@ static inline void rcu_read_unlock(void)
      * smp_mb_placeholder(), this ensures writes to p_rcu_reader->ctr
      * are sequentially consistent.
      */
-    atomic_store_release(&p_rcu_reader->ctr, 0);
+    qatomic_store_release(&p_rcu_reader->ctr, 0);
 
     /* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting.  */
     smp_mb_placeholder();
-    if (unlikely(atomic_read(&p_rcu_reader->waiting))) {
-        atomic_set(&p_rcu_reader->waiting, false);
+    if (unlikely(qatomic_read(&p_rcu_reader->waiting))) {
+        qatomic_set(&p_rcu_reader->waiting, false);
         qemu_event_set(&rcu_gp_event);
     }
 }
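
As the code above shows, a read-side critical section costs one counter
store plus barriers on entry and exit. The usual reader pairs it with
qatomic_rcu_read() on a pointer published elsewhere via qatomic_rcu_set();
a sketch (the Config type is illustrative):

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"
    #include "qemu/rcu.h"

    typedef struct Config { int value; } Config;

    static Config *current_config;  /* updated with qatomic_rcu_set() */

    static int read_config_value(void)
    {
        Config *cfg;
        int val = 0;

        rcu_read_lock();
        cfg = qatomic_rcu_read(&current_config);
        if (cfg) {
            val = cfg->value;   /* cfg stays valid until rcu_read_unlock() */
        }
        rcu_read_unlock();
        return val;
    }
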
diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h
index 558961cc27..0e53ddd530 100644
--- a/include/qemu/rcu_queue.h
+++ b/include/qemu/rcu_queue.h
@@ -36,9 +36,9 @@ extern "C" {
 /*
  * List access methods.
  */
-#define QLIST_EMPTY_RCU(head) (atomic_read(&(head)->lh_first) == NULL)
-#define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first))
-#define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next))
+#define QLIST_EMPTY_RCU(head) (qatomic_read(&(head)->lh_first) == NULL)
+#define QLIST_FIRST_RCU(head) (qatomic_rcu_read(&(head)->lh_first))
+#define QLIST_NEXT_RCU(elm, field) (qatomic_rcu_read(&(elm)->field.le_next))
 
 /*
  * List functions.
@@ -46,7 +46,7 @@ extern "C" {
 
 
 /*
- *  The difference between atomic_read/set and atomic_rcu_read/set
+ *  The difference between qatomic_read/set and qatomic_rcu_read/set
 *  is the inclusion of a read/write memory barrier in the volatile
 *  access. The atomic_rcu_* macros include the memory barrier; the
  *  plain atomic macros do not. Therefore, it should be correct to
@@ -66,7 +66,7 @@ extern "C" {
 #define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do {    \
     (elm)->field.le_next = (listelm)->field.le_next;        \
     (elm)->field.le_prev = &(listelm)->field.le_next;       \
-    atomic_rcu_set(&(listelm)->field.le_next, (elm));       \
+    qatomic_rcu_set(&(listelm)->field.le_next, (elm));      \
     if ((elm)->field.le_next != NULL) {                     \
        (elm)->field.le_next->field.le_prev =                \
         &(elm)->field.le_next;                              \
@@ -82,7 +82,7 @@ extern "C" {
 #define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do {   \
     (elm)->field.le_prev = (listelm)->field.le_prev;        \
     (elm)->field.le_next = (listelm);                       \
-    atomic_rcu_set((listelm)->field.le_prev, (elm));        \
+    qatomic_rcu_set((listelm)->field.le_prev, (elm));       \
     (listelm)->field.le_prev = &(elm)->field.le_next;       \
 } while (/*CONSTCOND*/0)
 
@@ -95,7 +95,7 @@ extern "C" {
 #define QLIST_INSERT_HEAD_RCU(head, elm, field) do {    \
     (elm)->field.le_prev = &(head)->lh_first;           \
     (elm)->field.le_next = (head)->lh_first;            \
-    atomic_rcu_set((&(head)->lh_first), (elm));         \
+    qatomic_rcu_set((&(head)->lh_first), (elm));        \
     if ((elm)->field.le_next != NULL) {                 \
        (elm)->field.le_next->field.le_prev =            \
         &(elm)->field.le_next;                          \
@@ -112,20 +112,20 @@ extern "C" {
        (elm)->field.le_next->field.le_prev =        \
         (elm)->field.le_prev;                       \
     }                                               \
-    atomic_set((elm)->field.le_prev, (elm)->field.le_next); \
+    qatomic_set((elm)->field.le_prev, (elm)->field.le_next); \
 } while (/*CONSTCOND*/0)
 
 /* List traversal must occur within an RCU critical section.  */
 #define QLIST_FOREACH_RCU(var, head, field)                 \
-        for ((var) = atomic_rcu_read(&(head)->lh_first);    \
+        for ((var) = qatomic_rcu_read(&(head)->lh_first);   \
                 (var);                                      \
-                (var) = atomic_rcu_read(&(var)->field.le_next))
+                (var) = qatomic_rcu_read(&(var)->field.le_next))
 
 /* List traversal must occur within an RCU critical section.  */
 #define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var)           \
-    for ((var) = (atomic_rcu_read(&(head)->lh_first));               \
+    for ((var) = (qatomic_rcu_read(&(head)->lh_first));              \
       (var) &&                                                       \
-          ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1);  \
+          ((next_var) = qatomic_rcu_read(&(var)->field.le_next), 1); \
            (var) = (next_var))
 
 /*
@@ -133,9 +133,10 @@ extern "C" {
  */
 
 /* Simple queue access methods */
-#define QSIMPLEQ_EMPTY_RCU(head)      (atomic_read(&(head)->sqh_first) == NULL)
-#define QSIMPLEQ_FIRST_RCU(head)       atomic_rcu_read(&(head)->sqh_first)
-#define QSIMPLEQ_NEXT_RCU(elm, field)  atomic_rcu_read(&(elm)->field.sqe_next)
+#define QSIMPLEQ_EMPTY_RCU(head) \
+    (qatomic_read(&(head)->sqh_first) == NULL)
+#define QSIMPLEQ_FIRST_RCU(head)       qatomic_rcu_read(&(head)->sqh_first)
+#define QSIMPLEQ_NEXT_RCU(elm, field)  qatomic_rcu_read(&(elm)->field.sqe_next)
 
 /* Simple queue functions */
 #define QSIMPLEQ_INSERT_HEAD_RCU(head, elm, field) do {         \
@@ -143,12 +144,12 @@ extern "C" {
     if ((elm)->field.sqe_next == NULL) {                        \
         (head)->sqh_last = &(elm)->field.sqe_next;              \
     }                                                           \
-    atomic_rcu_set(&(head)->sqh_first, (elm));                  \
+    qatomic_rcu_set(&(head)->sqh_first, (elm));                 \
 } while (/*CONSTCOND*/0)
 
 #define QSIMPLEQ_INSERT_TAIL_RCU(head, elm, field) do {    \
     (elm)->field.sqe_next = NULL;                          \
-    atomic_rcu_set((head)->sqh_last, (elm));               \
+    qatomic_rcu_set((head)->sqh_last, (elm));              \
     (head)->sqh_last = &(elm)->field.sqe_next;             \
 } while (/*CONSTCOND*/0)
 
@@ -157,11 +158,11 @@ extern "C" {
     if ((elm)->field.sqe_next == NULL) {                                \
         (head)->sqh_last = &(elm)->field.sqe_next;                      \
     }                                                                   \
-    atomic_rcu_set(&(listelm)->field.sqe_next, (elm));                  \
+    qatomic_rcu_set(&(listelm)->field.sqe_next, (elm));                 \
 } while (/*CONSTCOND*/0)
 
 #define QSIMPLEQ_REMOVE_HEAD_RCU(head, field) do {                     \
-    atomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next); \
+    qatomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next);\
     if ((head)->sqh_first == NULL) {                                   \
         (head)->sqh_last = &(head)->sqh_first;                         \
     }                                                                  \
@@ -175,7 +176,7 @@ extern "C" {
         while (curr->field.sqe_next != (elm)) {                     \
             curr = curr->field.sqe_next;                            \
         }                                                           \
-        atomic_set(&curr->field.sqe_next,                           \
+        qatomic_set(&curr->field.sqe_next,                          \
                    curr->field.sqe_next->field.sqe_next);           \
         if (curr->field.sqe_next == NULL) {                         \
             (head)->sqh_last = &(curr)->field.sqe_next;             \
@@ -184,13 +185,13 @@ extern "C" {
 } while (/*CONSTCOND*/0)
 
 #define QSIMPLEQ_FOREACH_RCU(var, head, field)                          \
-    for ((var) = atomic_rcu_read(&(head)->sqh_first);                   \
+    for ((var) = qatomic_rcu_read(&(head)->sqh_first);                  \
          (var);                                                         \
-         (var) = atomic_rcu_read(&(var)->field.sqe_next))
+         (var) = qatomic_rcu_read(&(var)->field.sqe_next))
 
 #define QSIMPLEQ_FOREACH_SAFE_RCU(var, head, field, next)                \
-    for ((var) = atomic_rcu_read(&(head)->sqh_first);                    \
-         (var) && ((next) = atomic_rcu_read(&(var)->field.sqe_next), 1); \
+    for ((var) = qatomic_rcu_read(&(head)->sqh_first);                   \
+         (var) && ((next) = qatomic_rcu_read(&(var)->field.sqe_next), 1);\
          (var) = (next))
 
 /*
@@ -198,9 +199,9 @@ extern "C" {
  */
 
 /* Tail queue access methods */
-#define QTAILQ_EMPTY_RCU(head)      (atomic_read(&(head)->tqh_first) == NULL)
-#define QTAILQ_FIRST_RCU(head)       atomic_rcu_read(&(head)->tqh_first)
-#define QTAILQ_NEXT_RCU(elm, field)  atomic_rcu_read(&(elm)->field.tqe_next)
+#define QTAILQ_EMPTY_RCU(head)      (qatomic_read(&(head)->tqh_first) == NULL)
+#define QTAILQ_FIRST_RCU(head)       qatomic_rcu_read(&(head)->tqh_first)
+#define QTAILQ_NEXT_RCU(elm, field)  qatomic_rcu_read(&(elm)->field.tqe_next)
 
 /* Tail queue functions */
 #define QTAILQ_INSERT_HEAD_RCU(head, elm, field) do {                   \
@@ -211,14 +212,14 @@ extern "C" {
     } else {                                                            \
         (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;             \
     }                                                                   \
-    atomic_rcu_set(&(head)->tqh_first, (elm));                          \
+    qatomic_rcu_set(&(head)->tqh_first, (elm));                         \
     (elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ;                 \
 } while (/*CONSTCOND*/0)
 
 #define QTAILQ_INSERT_TAIL_RCU(head, elm, field) do {                   \
     (elm)->field.tqe_next = NULL;                                       \
     (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev;         \
-    atomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm));        \
+    qatomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm));       \
     (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;                 \
 } while (/*CONSTCOND*/0)
 
@@ -230,14 +231,14 @@ extern "C" {
     } else {                                                            \
         (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;             \
     }                                                                   \
-    atomic_rcu_set(&(listelm)->field.tqe_next, (elm));                  \
+    qatomic_rcu_set(&(listelm)->field.tqe_next, (elm));                 \
     (elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ;        \
 } while (/*CONSTCOND*/0)
 
 #define QTAILQ_INSERT_BEFORE_RCU(listelm, elm, field) do {                \
     (elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev;  \
     (elm)->field.tqe_next = (listelm);                                    \
-    atomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm)); \
+    qatomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm));\
     (listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ;          \
 } while (/*CONSTCOND*/0)
 
@@ -248,18 +249,19 @@ extern "C" {
     } else {                                                            \
         (head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev;     \
     }                                                                   \
-    atomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next, (elm)->field.tqe_next); \
+    qatomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next,              \
+                (elm)->field.tqe_next);                                 \
     (elm)->field.tqe_circ.tql_prev = NULL;                              \
 } while (/*CONSTCOND*/0)
 
 #define QTAILQ_FOREACH_RCU(var, head, field)                            \
-    for ((var) = atomic_rcu_read(&(head)->tqh_first);                   \
+    for ((var) = qatomic_rcu_read(&(head)->tqh_first);                  \
          (var);                                                         \
-         (var) = atomic_rcu_read(&(var)->field.tqe_next))
+         (var) = qatomic_rcu_read(&(var)->field.tqe_next))
 
 #define QTAILQ_FOREACH_SAFE_RCU(var, head, field, next)                  \
-    for ((var) = atomic_rcu_read(&(head)->tqh_first);                    \
-         (var) && ((next) = atomic_rcu_read(&(var)->field.tqe_next), 1); \
+    for ((var) = qatomic_rcu_read(&(head)->tqh_first);                   \
+         (var) && ((next) = qatomic_rcu_read(&(var)->field.tqe_next), 1);\
          (var) = (next))
 
 /*
@@ -267,23 +269,23 @@ extern "C" {
  */
 
 /* Singly-linked list access methods */
-#define QSLIST_EMPTY_RCU(head)      (atomic_read(&(head)->slh_first) == NULL)
-#define QSLIST_FIRST_RCU(head)       atomic_rcu_read(&(head)->slh_first)
-#define QSLIST_NEXT_RCU(elm, field)  atomic_rcu_read(&(elm)->field.sle_next)
+#define QSLIST_EMPTY_RCU(head)      (qatomic_read(&(head)->slh_first) == NULL)
+#define QSLIST_FIRST_RCU(head)       qatomic_rcu_read(&(head)->slh_first)
+#define QSLIST_NEXT_RCU(elm, field)  qatomic_rcu_read(&(elm)->field.sle_next)
 
 /* Singly-linked list functions */
 #define QSLIST_INSERT_HEAD_RCU(head, elm, field) do {           \
     (elm)->field.sle_next = (head)->slh_first;                  \
-    atomic_rcu_set(&(head)->slh_first, (elm));                  \
+    qatomic_rcu_set(&(head)->slh_first, (elm));                 \
 } while (/*CONSTCOND*/0)
 
 #define QSLIST_INSERT_AFTER_RCU(head, listelm, elm, field) do {         \
     (elm)->field.sle_next = (listelm)->field.sle_next;                  \
-    atomic_rcu_set(&(listelm)->field.sle_next, (elm));                  \
+    qatomic_rcu_set(&(listelm)->field.sle_next, (elm));                 \
 } while (/*CONSTCOND*/0)
 
 #define QSLIST_REMOVE_HEAD_RCU(head, field) do {                       \
-    atomic_set(&(head)->slh_first, (head)->slh_first->field.sle_next); \
+    qatomic_set(&(head)->slh_first, (head)->slh_first->field.sle_next);\
 } while (/*CONSTCOND*/0)
 
 #define QSLIST_REMOVE_RCU(head, elm, type, field) do {              \
@@ -294,19 +296,19 @@ extern "C" {
         while (curr->field.sle_next != (elm)) {                     \
             curr = curr->field.sle_next;                            \
         }                                                           \
-        atomic_set(&curr->field.sle_next,                           \
+        qatomic_set(&curr->field.sle_next,                          \
                    curr->field.sle_next->field.sle_next);           \
     }                                                               \
 } while (/*CONSTCOND*/0)
 
 #define QSLIST_FOREACH_RCU(var, head, field)                          \
-    for ((var) = atomic_rcu_read(&(head)->slh_first);                   \
-         (var);                                                         \
-         (var) = atomic_rcu_read(&(var)->field.sle_next))
+    for ((var) = qatomic_rcu_read(&(head)->slh_first);                \
+         (var);                                                       \
+         (var) = qatomic_rcu_read(&(var)->field.sle_next))
 
-#define QSLIST_FOREACH_SAFE_RCU(var, head, field, next)                \
-    for ((var) = atomic_rcu_read(&(head)->slh_first);                    \
-         (var) && ((next) = atomic_rcu_read(&(var)->field.sle_next), 1); \
+#define QSLIST_FOREACH_SAFE_RCU(var, head, field, next)                   \
+    for ((var) = qatomic_rcu_read(&(head)->slh_first);                    \
+         (var) && ((next) = qatomic_rcu_read(&(var)->field.sle_next), 1); \
          (var) = (next))
 
 #ifdef __cplusplus
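
The renamed macros keep the usual RCU list discipline: writers mutate the
list under their own lock, while readers traverse inside an RCU critical
section. A hedged sketch of a reader (Node and the key are illustrative):

    #include "qemu/osdep.h"
    #include "qemu/rcu.h"
    #include "qemu/rcu_queue.h"

    typedef struct Node {
        int key;
        QLIST_ENTRY(Node) link;
    } Node;

    static QLIST_HEAD(, Node) nodes = QLIST_HEAD_INITIALIZER(nodes);

    static bool contains(int key)
    {
        Node *n;
        bool found = false;

        rcu_read_lock();
        QLIST_FOREACH_RCU(n, &nodes, link) {
            if (n->key == key) {
                found = true;
                break;
            }
        }
        rcu_read_unlock();
        return found;
    }
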
diff --git a/include/qemu/seqlock.h b/include/qemu/seqlock.h
index 8b6b4ee4bb..ecb7d2c864 100644
--- a/include/qemu/seqlock.h
+++ b/include/qemu/seqlock.h
@@ -32,7 +32,7 @@ static inline void seqlock_init(QemuSeqLock *sl)
 /* Lock out other writers and update the count.  */
 static inline void seqlock_write_begin(QemuSeqLock *sl)
 {
-    atomic_set(&sl->sequence, sl->sequence + 1);
+    qatomic_set(&sl->sequence, sl->sequence + 1);
 
     /* Write sequence before updating other fields.  */
     smp_wmb();
@@ -43,7 +43,7 @@ static inline void seqlock_write_end(QemuSeqLock *sl)
     /* Write other fields before finalizing sequence.  */
     smp_wmb();
 
-    atomic_set(&sl->sequence, sl->sequence + 1);
+    qatomic_set(&sl->sequence, sl->sequence + 1);
 }
 
 /* Lock out other writers and update the count.  */
@@ -68,7 +68,7 @@ static inline void seqlock_write_unlock_impl(QemuSeqLock *sl, QemuLockable *lock
 static inline unsigned seqlock_read_begin(const QemuSeqLock *sl)
 {
     /* Always fail if a write is in progress.  */
-    unsigned ret = atomic_read(&sl->sequence);
+    unsigned ret = qatomic_read(&sl->sequence);
 
     /* Read sequence before reading other fields.  */
     smp_rmb();
@@ -79,7 +79,7 @@ static inline int seqlock_read_retry(const QemuSeqLock *sl, unsigned start)
 {
     /* Read other fields before reading final sequence.  */
     smp_rmb();
-    return unlikely(atomic_read(&sl->sequence) != start);
+    return unlikely(qatomic_read(&sl->sequence) != start);
 }
 
 #endif
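
The reader side these two functions implement is the classic retry loop:
sample the sequence, copy the protected fields, and retry if a writer ran
in between. A sketch, assuming the seqlock guards a pair of time fields
(the field names are illustrative):

    #include "qemu/osdep.h"
    #include "qemu/seqlock.h"

    static QemuSeqLock lock;
    static int64_t t_base, t_offset;   /* protected by the seqlock */

    static int64_t read_time(void)
    {
        unsigned start;
        int64_t base, offset;

        do {
            start = seqlock_read_begin(&lock);
            base = t_base;
            offset = t_offset;
        } while (seqlock_read_retry(&lock, start));
        return base + offset;
    }
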
diff --git a/include/qemu/stats64.h b/include/qemu/stats64.h
index 19a5ac4c56..fdd3d1b8f9 100644
--- a/include/qemu/stats64.h
+++ b/include/qemu/stats64.h
@@ -37,27 +37,27 @@ static inline void stat64_init(Stat64 *s, uint64_t value)
 
 static inline uint64_t stat64_get(const Stat64 *s)
 {
-    return atomic_read__nocheck(&s->value);
+    return qatomic_read__nocheck(&s->value);
 }
 
 static inline void stat64_add(Stat64 *s, uint64_t value)
 {
-    atomic_add(&s->value, value);
+    qatomic_add(&s->value, value);
 }
 
 static inline void stat64_min(Stat64 *s, uint64_t value)
 {
-    uint64_t orig = atomic_read__nocheck(&s->value);
+    uint64_t orig = qatomic_read__nocheck(&s->value);
     while (orig > value) {
-        orig = atomic_cmpxchg__nocheck(&s->value, orig, value);
+        orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
     }
 }
 
 static inline void stat64_max(Stat64 *s, uint64_t value)
 {
-    uint64_t orig = atomic_read__nocheck(&s->value);
+    uint64_t orig = qatomic_read__nocheck(&s->value);
     while (orig < value) {
-        orig = atomic_cmpxchg__nocheck(&s->value, orig, value);
+        orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
     }
 }
 #else
@@ -79,7 +79,7 @@ static inline void stat64_add(Stat64 *s, uint64_t value)
     low = (uint32_t) value;
     if (!low) {
         if (high) {
-            atomic_add(&s->high, high);
+            qatomic_add(&s->high, high);
         }
         return;
     }
@@ -101,7 +101,7 @@ static inline void stat64_add(Stat64 *s, uint64_t value)
          * the high 32 bits, so it can race just fine with stat64_add32_carry
          * and even stat64_get!
          */
-        old = atomic_cmpxchg(&s->low, orig, result);
+        old = qatomic_cmpxchg(&s->low, orig, result);
         if (orig == old) {
             return;
         }
@@ -116,7 +116,7 @@ static inline void stat64_min(Stat64 *s, uint64_t value)
     high = value >> 32;
     low = (uint32_t) value;
     do {
-        orig_high = atomic_read(&s->high);
+        orig_high = qatomic_read(&s->high);
         if (orig_high < high) {
             return;
         }
@@ -128,7 +128,7 @@ static inline void stat64_min(Stat64 *s, uint64_t value)
              * the write barrier in stat64_min_slow.
              */
             smp_rmb();
-            orig_low = atomic_read(&s->low);
+            orig_low = qatomic_read(&s->low);
             if (orig_low <= low) {
                 return;
             }
@@ -138,7 +138,7 @@ static inline void stat64_min(Stat64 *s, uint64_t value)
              * we may miss being lucky.
              */
             smp_rmb();
-            orig_high = atomic_read(&s->high);
+            orig_high = qatomic_read(&s->high);
             if (orig_high < high) {
                 return;
             }
@@ -156,7 +156,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value)
     high = value >> 32;
     low = (uint32_t) value;
     do {
-        orig_high = atomic_read(&s->high);
+        orig_high = qatomic_read(&s->high);
         if (orig_high > high) {
             return;
         }
@@ -168,7 +168,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value)
              * the write barrier in stat64_max_slow.
              */
             smp_rmb();
-            orig_low = atomic_read(&s->low);
+            orig_low = qatomic_read(&s->low);
             if (orig_low >= low) {
                 return;
             }
@@ -178,7 +178,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value)
              * we may miss being lucky.
              */
             smp_rmb();
-            orig_high = atomic_read(&s->high);
+            orig_high = qatomic_read(&s->high);
             if (orig_high > high) {
                 return;
             }
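
Stat64 exists so hot paths can bump 64-bit counters without locks even on
32-bit hosts, where the value is split into high/low halves and updated
with the cmpxchg loops above. Typical use is an increment on the fast path
and an occasional aggregate read; a minimal sketch (the counter name is
illustrative):

    #include "qemu/osdep.h"
    #include "qemu/stats64.h"

    static Stat64 bytes_written;

    static void account_write(uint64_t len)
    {
        stat64_add(&bytes_written, len);   /* safe from any thread */
    }

    static uint64_t query_bytes_written(void)
    {
        return stat64_get(&bytes_written); /* read the current value */
    }
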
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index 4baf4d1715..5435763184 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -70,33 +70,33 @@ extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
             qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
 #else
 #define qemu_mutex_lock(m) ({                                           \
-            QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func);  \
+            QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func); \
             _f(m, __FILE__, __LINE__);                                  \
         })
 
-#define qemu_mutex_trylock(m) ({                                        \
-            QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func); \
-            _f(m, __FILE__, __LINE__);                                  \
+#define qemu_mutex_trylock(m) ({                                              \
+            QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func); \
+            _f(m, __FILE__, __LINE__);                                        \
         })
 
-#define qemu_rec_mutex_lock(m) ({                                       \
-            QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \
-            _f(m, __FILE__, __LINE__);                                  \
+#define qemu_rec_mutex_lock(m) ({                                             \
+            QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func);\
+            _f(m, __FILE__, __LINE__);                                        \
         })
 
 #define qemu_rec_mutex_trylock(m) ({                            \
             QemuRecMutexTrylockFunc _f;                         \
-            _f = atomic_read(&qemu_rec_mutex_trylock_func);     \
+            _f = qatomic_read(&qemu_rec_mutex_trylock_func);    \
             _f(m, __FILE__, __LINE__);                          \
         })
 
 #define qemu_cond_wait(c, m) ({                                         \
-            QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func);    \
+            QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func);   \
             _f(c, m, __FILE__, __LINE__);                               \
         })
 
 #define qemu_cond_timedwait(c, m, ms) ({                                       \
-            QemuCondTimedWaitFunc _f = atomic_read(&qemu_cond_timedwait_func); \
+            QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func);\
             _f(c, m, ms, __FILE__, __LINE__);                                  \
         })
 #endif
@@ -236,7 +236,7 @@ static inline void qemu_spin_lock(QemuSpin *spin)
     __tsan_mutex_pre_lock(spin, 0);
 #endif
     while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
-        while (atomic_read(&spin->value)) {
+        while (qatomic_read(&spin->value)) {
             cpu_relax();
         }
     }
@@ -261,7 +261,7 @@ static inline bool qemu_spin_trylock(QemuSpin *spin)
 
 static inline bool qemu_spin_locked(QemuSpin *spin)
 {
-    return atomic_read(&spin->value);
+    return qatomic_read(&spin->value);
 }
 
 static inline void qemu_spin_unlock(QemuSpin *spin)
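
qemu_spin_lock() above is a test-and-set lock whose waiters spin on a plain
qatomic_read(), so they load from cache instead of hammering the bus with
read-modify-writes. A usage sketch (the counter is illustrative, and
qemu_spin_init() is assumed to have run at startup):

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    static QemuSpin lock;      /* initialized once with qemu_spin_init() */
    static unsigned events;

    static void record_event(void)
    {
        qemu_spin_lock(&lock); /* keep the critical section short */
        events++;
        qemu_spin_unlock(&lock);
    }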