Diffstat (limited to 'include/qemu')
-rw-r--r--  include/qemu/atomic.h  | 12
-rw-r--r--  include/qemu/bitmap.h  |  4
-rw-r--r--  include/qemu/bitops.h  | 14
3 files changed, 27 insertions, 3 deletions
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 98e05ca875..bd2c075343 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -99,7 +99,13 @@
 
 #ifndef smp_wmb
 #ifdef __ATOMIC_RELEASE
-#define smp_wmb()   __atomic_thread_fence(__ATOMIC_RELEASE)
+/* __atomic_thread_fence does not include a compiler barrier; instead,
+ * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
+ * semantics. If smp_wmb() is a no-op, absence of the barrier means that
+ * the compiler is free to reorder stores on each side of the barrier.
+ * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
+ */
+#define smp_wmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
 #else
 #define smp_wmb()   __sync_synchronize()
 #endif
@@ -107,7 +113,7 @@
 
 #ifndef smp_rmb
 #ifdef __ATOMIC_ACQUIRE
-#define smp_rmb()   __atomic_thread_fence(__ATOMIC_ACQUIRE)
+#define smp_rmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
 #else
 #define smp_rmb()   __sync_synchronize()
 #endif
@@ -115,7 +121,7 @@
 
 #ifndef smp_read_barrier_depends
 #ifdef __ATOMIC_CONSUME
-#define smp_read_barrier_depends()   __atomic_thread_fence(__ATOMIC_CONSUME)
+#define smp_read_barrier_depends()   ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
 #else
 #define smp_read_barrier_depends()   barrier()
 #endif
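
The rationale in the new comment is easiest to see in the usual message-passing pattern that pairs smp_wmb() with smp_rmb(). Below is a minimal, hypothetical sketch (the payload/ready variables are illustrative and not part of this patch) of the ordering the fences are meant to enforce, and which a bare __atomic_thread_fence() without a compiler barrier would not guarantee against compiler reordering:

/* Hypothetical producer/consumer sketch, not part of this patch.
 * The producer must make the payload store visible before the flag
 * store; the consumer must load the flag before the payload.
 */
static int payload;
static int ready;

static void producer(void)
{
    payload = 42;   /* write the data first...                */
    smp_wmb();      /* ...and order it before the flag store  */
    ready = 1;      /* publish                                */
}

static int consumer(void)
{
    if (ready) {    /* observe the flag...                    */
        smp_rmb();  /* ...and order the payload load after it */
        return payload;
    }
    return -1;      /* nothing published yet                  */
}

In real QEMU code the flag accesses would typically also go through atomic_read()/atomic_set(); the plain accesses here keep the sketch focused on the fences.
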
diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h
index f0273c965f..86dd9cd5fc 100644
--- a/include/qemu/bitmap.h
+++ b/include/qemu/bitmap.h
@@ -39,7 +39,9 @@
  * bitmap_empty(src, nbits)			Are all bits zero in *src?
  * bitmap_full(src, nbits)			Are all bits set in *src?
  * bitmap_set(dst, pos, nbits)			Set specified bit area
+ * bitmap_set_atomic(dst, pos, nbits)   Set specified bit area with atomic ops
  * bitmap_clear(dst, pos, nbits)		Clear specified bit area
+ * bitmap_test_and_clear_atomic(dst, pos, nbits)    Test and clear area
  * bitmap_find_next_zero_area(buf, len, pos, n, mask)	Find bit free area
  */
 
@@ -226,7 +228,9 @@ static inline int bitmap_intersects(const unsigned long *src1,
 }
 
 void bitmap_set(unsigned long *map, long i, long len);
+void bitmap_set_atomic(unsigned long *map, long i, long len);
 void bitmap_clear(unsigned long *map, long start, long nr);
+bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr);
 unsigned long bitmap_find_next_zero_area(unsigned long *map,
                                          unsigned long size,
                                          unsigned long start,
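
A hedged usage sketch of the two new entry points, in the style of dirty tracking where one thread marks ranges and another drains them. The bitmap name, its size and the two wrapper functions are illustrative, not part of this patch; BITS_PER_LONG comes from bitops.h as shown further below, and the bool return of bitmap_test_and_clear_atomic() is read here as "some bit in the range was set", matching its test-and-clear naming.

/* Illustrative only: a fixed-size bitmap shared between a writer that
 * marks ranges dirty and a reader that collects and clears them.
 */
#define DIRTY_BITS 1024

static unsigned long dirty[DIRTY_BITS / BITS_PER_LONG];

/* Writer side: set a range of bits without taking a lock. */
static void mark_dirty(long start, long len)
{
    bitmap_set_atomic(dirty, start, len);
}

/* Reader side: atomically fetch-and-clear a range; the return value
 * tells the caller whether any work was pending in that range.
 */
static bool drain_dirty(long start, long len)
{
    return bitmap_test_and_clear_atomic(dirty, start, len);
}
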
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
index 8abdcf9077..8164225152 100644
--- a/include/qemu/bitops.h
+++ b/include/qemu/bitops.h
@@ -16,6 +16,7 @@
 #include <assert.h>
 
 #include "host-utils.h"
+#include "atomic.h"
 
 #define BITS_PER_BYTE           CHAR_BIT
 #define BITS_PER_LONG           (sizeof (unsigned long) * BITS_PER_BYTE)
@@ -39,6 +40,19 @@ static inline void set_bit(long nr, unsigned long *addr)
 }
 
 /**
+ * set_bit_atomic - Set a bit in memory atomically
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+static inline void set_bit_atomic(long nr, unsigned long *addr)
+{
+    unsigned long mask = BIT_MASK(nr);
+    unsigned long *p = addr + BIT_WORD(nr);
+
+    atomic_or(p, mask);
+}
+
+/**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
  * @addr: Address to start counting from
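
A brief note on why the atomic variant exists: with plain set_bit(), two threads setting different bits that fall into the same long can lose an update, because the non-atomic |= is a read-modify-write. A minimal, hypothetical sketch (the flags word and bit numbers are illustrative, not part of this patch):

/* Illustrative only: both bits live in the same word of 'flags'. */
static unsigned long flags[1];

static void thread_a(void)
{
    set_bit_atomic(0, flags);   /* safe against a concurrent thread_b  */
}

static void thread_b(void)
{
    set_bit_atomic(1, flags);   /* both bits end up set, no lost update */
}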