-rw-r--r--  block/mirror.c               4
-rw-r--r--  include/block/graph-lock.h   4
2 files changed, 4 insertions, 4 deletions
diff --git a/block/mirror.c b/block/mirror.c
index af9bbd23d4..80fa345071 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -270,8 +270,8 @@ static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
 
 /* Round offset and/or bytes to target cluster if COW is needed, and
  * return the offset of the adjusted tail against original. */
-static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
-                            uint64_t *bytes)
+static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
+                                         uint64_t *bytes)
 {
     bool need_cow;
     int ret = 0;
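
[Note: mirror_cow_align() is only reached from the mirror job's request coroutines, so the coroutine_fn marker documents that it must run in coroutine context. The sketch below is illustrative only and not part of this commit; demo_align() and demo_caller() are hypothetical names, and coroutine_fn is assumed to come from qemu/coroutine.h.]

#include "qemu/osdep.h"
#include "qemu/coroutine.h"

/* Hypothetical callee: marked coroutine_fn because it may yield,
 * directly or through other coroutine_fn helpers it calls. */
static int coroutine_fn demo_align(int64_t *offset, uint64_t *bytes)
{
    /* ... adjust *offset / *bytes, possibly yielding ... */
    return 0;
}

/* The marker propagates up the call chain: a caller of a coroutine_fn
 * function must itself be coroutine_fn, so the whole path is known to
 * execute in coroutine context and can be checked by tooling. */
static int coroutine_fn demo_caller(void)
{
    int64_t offset = 0;
    uint64_t bytes = 65536;

    return demo_align(&offset, &bytes);
}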
diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h
index 18cc14de22..ac0fef8605 100644
--- a/include/block/graph-lock.h
+++ b/include/block/graph-lock.h
@@ -208,14 +208,14 @@ typedef struct GraphLockable { } GraphLockable;
  * unlocked. TSA_ASSERT() makes sure that the following calls know that we
  * hold the lock while unlocking is left unchecked.
  */
-static inline GraphLockable * TSA_ASSERT(graph_lock) TSA_NO_TSA
+static inline GraphLockable * TSA_ASSERT(graph_lock) TSA_NO_TSA coroutine_fn
 graph_lockable_auto_lock(GraphLockable *x)
 {
     bdrv_graph_co_rdlock();
     return x;
 }
 
-static inline void TSA_NO_TSA
+static inline void TSA_NO_TSA coroutine_fn
 graph_lockable_auto_unlock(GraphLockable *x)
 {
     bdrv_graph_co_rdunlock();
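
[Note: these two helpers are what QEMU's WITH_GRAPH_RDLOCK_GUARD() macro in this same header expands to, so once they are coroutine_fn, every user of the guard must run in a coroutine as well. The sketch below is a minimal, hypothetical illustration and not part of this commit; demo_locked_flush() is an invented name, and bdrv_co_flush() from block/block-io.h is assumed as the guarded operation.]

#include "qemu/osdep.h"
#include "block/block-io.h"
#include "block/graph-lock.h"

/* Hypothetical coroutine: WITH_GRAPH_RDLOCK_GUARD() calls
 * graph_lockable_auto_lock() on entry and graph_lockable_auto_unlock()
 * when the block is left, keeping the graph read-locked in between. */
static int coroutine_fn demo_locked_flush(BlockDriverState *bs)
{
    int ret = 0;

    WITH_GRAPH_RDLOCK_GUARD() {
        /* The block graph is read-locked here. */
        ret = bdrv_co_flush(bs);
    }

    return ret;
}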