Diffstat (limited to 'include/qemu')
-rw-r--r--  include/qemu/rcu.h             16
-rw-r--r--  include/qemu/sys_membarrier.h  27
-rw-r--r--  include/qemu/timer.h           14
3 files changed, 55 insertions, 2 deletions
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index f19413d649..22876d1428 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -27,6 +27,7 @@
 #include "qemu/thread.h"
 #include "qemu/queue.h"
 #include "qemu/atomic.h"
+#include "qemu/sys_membarrier.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -79,7 +80,10 @@ static inline void rcu_read_lock(void)
     }
 
     ctr = atomic_read(&rcu_gp_ctr);
-    atomic_xchg(&p_rcu_reader->ctr, ctr);
+    atomic_set(&p_rcu_reader->ctr, ctr);
+
+    /* Write p_rcu_reader->ctr before reading RCU-protected pointers.  */
+    smp_mb_placeholder();
 }
 
 static inline void rcu_read_unlock(void)
@@ -91,7 +95,15 @@ static inline void rcu_read_unlock(void)
         return;
     }
 
-    atomic_xchg(&p_rcu_reader->ctr, 0);
+    /* Ensure that the critical section is seen to precede the
+     * store to p_rcu_reader->ctr.  Together with the following
+     * smp_mb_placeholder(), this ensures writes to p_rcu_reader->ctr
+     * are sequentially consistent.
+     */
+    atomic_store_release(&p_rcu_reader->ctr, 0);
+
+    /* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting.  */
+    smp_mb_placeholder();
     if (unlikely(atomic_read(&p_rcu_reader->waiting))) {
         atomic_set(&p_rcu_reader->waiting, false);
         qemu_event_set(&rcu_gp_event);
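
The change above replaces atomic_xchg(), whose implied full memory barrier made every rcu_read_lock()/rcu_read_unlock() pay for a barrier instruction, with a plain store followed by smp_mb_placeholder(). With CONFIG_MEMBARRIER the placeholder is only a compiler barrier, so the reader fast path costs nothing at the processor level; ordering against a concurrent updater is recovered by the updater issuing a process-global barrier (see sys_membarrier.h below). A rough C11 analogue of the two flavours of the reader fast path, using standard atomics instead of QEMU's wrappers (all names here are illustrative, not QEMU code):

#include <stdatomic.h>

extern _Atomic unsigned long gp_ctr;                 /* global period counter */
static _Thread_local _Atomic unsigned long my_ctr;   /* per-reader snapshot */

/* CONFIG_MEMBARRIER flavour: atomic_signal_fence() constrains only the
 * compiler and emits no instruction, mirroring barrier().
 */
static inline void reader_lock_fast(void)
{
    unsigned long ctr = atomic_load_explicit(&gp_ctr, memory_order_relaxed);
    atomic_store_explicit(&my_ctr, ctr, memory_order_relaxed);
    atomic_signal_fence(memory_order_seq_cst);
}

/* Fallback flavour: atomic_thread_fence() is a real processor-level
 * fence, mirroring smp_mb().
 */
static inline void reader_lock_slow(void)
{
    unsigned long ctr = atomic_load_explicit(&gp_ctr, memory_order_relaxed);
    atomic_store_explicit(&my_ctr, ctr, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
}
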
diff --git a/include/qemu/sys_membarrier.h b/include/qemu/sys_membarrier.h
new file mode 100644
index 0000000000..316e3dc4a2
--- /dev/null
+++ b/include/qemu/sys_membarrier.h
@@ -0,0 +1,27 @@
+/*
+ * Process-global memory barriers
+ *
+ * Copyright (c) 2018 Red Hat, Inc.
+ *
+ * Author: Paolo Bonzini <pbonzini@redhat.com>
+ */
+
+#ifndef QEMU_SYS_MEMBARRIER_H
+#define QEMU_SYS_MEMBARRIER_H 1
+
+#ifdef CONFIG_MEMBARRIER
+/* Only block reordering at the compiler level in the performance-critical
+ * side.  The slow side forces processor-level ordering on all other cores
+ * through a system call.
+ */
+extern void smp_mb_global_init(void);
+extern void smp_mb_global(void);
+#define smp_mb_placeholder()       barrier()
+#else
+/* Keep it simple, execute a real memory barrier on both sides.  */
+static inline void smp_mb_global_init(void) {}
+#define smp_mb_global()            smp_mb()
+#define smp_mb_placeholder()       smp_mb()
+#endif
+
+#endif
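
This header declares the slow side but does not implement it; since the diff is limited to include/qemu, the implementation is not shown here. A minimal sketch of how smp_mb_global() and smp_mb_global_init() could be backed by the Linux membarrier(2) system call (kernel 4.3+), assuming MEMBARRIER_CMD_SHARED is available; this is a hypothetical stand-in, not necessarily what QEMU ships:

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdlib.h>

static int membarrier_syscall(int cmd, int flags)
{
    /* No libc wrapper is guaranteed for membarrier(2); call it directly. */
    return syscall(__NR_membarrier, cmd, flags);
}

void smp_mb_global_init(void)
{
    /* Callers built with CONFIG_MEMBARRIER rely on process-global
     * barriers, so fail hard if the kernel cannot provide them.
     */
    int cmds = membarrier_syscall(MEMBARRIER_CMD_QUERY, 0);
    if (cmds < 0 || !(cmds & MEMBARRIER_CMD_SHARED)) {
        abort();
    }
}

void smp_mb_global(void)
{
    /* The kernel executes a full memory barrier on every running
     * thread, pairing with the compiler-only smp_mb_placeholder()
     * on the fast side.
     */
    if (membarrier_syscall(MEMBARRIER_CMD_SHARED, 0) < 0) {
        abort();
    }
}
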
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index 3b5a54b014..39ea907e65 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -251,6 +251,20 @@ bool qemu_clock_run_timers(QEMUClockType type);
  */
 bool qemu_clock_run_all_timers(void);
 
+/**
+ * qemu_clock_get_last:
+ *
+ * Returns last clock query time.
+ */
+uint64_t qemu_clock_get_last(QEMUClockType type);
+/**
+ * qemu_clock_set_last:
+ *
+ * Sets last clock query time.
+ */
+void qemu_clock_set_last(QEMUClockType type, uint64_t last);
+
+
 /*
  * QEMUTimerList
  */
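
For context, a hypothetical caller of the two new accessors: stashing the last QEMU_CLOCK_HOST reading around a snapshot so the "last query time" survives a save/load cycle. The save/load wrappers below are invented for illustration and are not QEMU API:

#include "qemu/timer.h"

static uint64_t saved_host_clock_last;

static void clock_state_save_sketch(void)
{
    /* Record the clock's most recent reading at snapshot time. */
    saved_host_clock_last = qemu_clock_get_last(QEMU_CLOCK_HOST);
}

static void clock_state_load_sketch(void)
{
    /* Restore it so later comparisons against the host clock resume
     * from the saved point rather than from zero.
     */
    qemu_clock_set_last(QEMU_CLOCK_HOST, saved_host_clock_last);
}
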