author    Stefan Hajnoczi <stefanha@redhat.com>  2014-03-03 11:30:04 +0100
committer Stefan Hajnoczi <stefanha@redhat.com>  2014-03-13 14:42:24 +0100
commit    98563fc3ec44c1becce6f1720ad6b0a82ed101b4 (patch)
tree      18adf9c0239707158d5f95ef2e0702c5eeaa29b8 /async.c
parent    2da61b671eb89fcaa306738f44eed472977d6587 (diff)
aio: add aio_context_acquire() and aio_context_release()
It can be useful to run an AioContext from a thread which normally does
not "own" the AioContext.  For example, request draining can be
implemented by acquiring the AioContext and looping aio_poll() until all
requests have been completed.

The following pattern should work:

  /* Event loop thread */
  while (running) {
      aio_context_acquire(ctx);
      aio_poll(ctx, true);
      aio_context_release(ctx);
  }

  /* Another thread */
  aio_context_acquire(ctx);
  bdrv_read(bs, 0x1000, buf, 1);
  aio_context_release(ctx);
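
The request draining mentioned above then falls out of this pattern.
A minimal sketch, assuming a hypothetical bdrv_requests_pending()
predicate (not part of this patch) that reports whether any requests
are still in flight:

  /* Another thread: drain by running the event loop ourselves.
   * bdrv_requests_pending() stands in for whatever "work left?"
   * test the caller has available. */
  aio_context_acquire(ctx);
  while (bdrv_requests_pending(bs)) {
      aio_poll(ctx, true);    /* block until at least one event fires */
  }
  aio_context_release(ctx);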

This patch implements aio_context_acquire() and aio_context_release().

Note that existing aio_poll() callers do not need to worry about
acquiring and releasing - it is only needed when multiple threads will
call aio_poll() on the same AioContext.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'async.c')
 async.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/async.c b/async.c
index 5fb3fa61df..6930185e64 100644
--- a/async.c
+++ b/async.c
@@ -214,6 +214,7 @@ aio_ctx_finalize(GSource     *source)
     thread_pool_free(ctx->thread_pool);
     aio_set_event_notifier(ctx, &ctx->notifier, NULL);
     event_notifier_cleanup(&ctx->notifier);
+    rfifolock_destroy(&ctx->lock);
     qemu_mutex_destroy(&ctx->bh_lock);
     g_array_free(ctx->pollfds, TRUE);
     timerlistgroup_deinit(&ctx->tlg);
@@ -250,6 +251,12 @@ static void aio_timerlist_notify(void *opaque)
     aio_notify(opaque);
 }
 
+static void aio_rfifolock_cb(void *opaque)
+{
+    /* Kick owner thread in case they are blocked in aio_poll() */
+    aio_notify(opaque);
+}
+
 AioContext *aio_context_new(void)
 {
     AioContext *ctx;
@@ -257,6 +264,7 @@ AioContext *aio_context_new(void)
     ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
     ctx->thread_pool = NULL;
     qemu_mutex_init(&ctx->bh_lock);
+    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
     event_notifier_init(&ctx->notifier, false);
     aio_set_event_notifier(ctx, &ctx->notifier, 
                            (EventNotifierHandler *)
@@ -275,3 +283,13 @@ void aio_context_unref(AioContext *ctx)
 {
     g_source_unref(&ctx->source);
 }
+
+void aio_context_acquire(AioContext *ctx)
+{
+    rfifolock_lock(&ctx->lock);
+}
+
+void aio_context_release(AioContext *ctx)
+{
+    rfifolock_unlock(&ctx->lock);
+}
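
The acquire/release wrappers above delegate to RFifoLock, the
recursive FIFO lock introduced alongside this patch.  Its actual
implementation is not part of this diff; what follows is a minimal
pthreads-based sketch (all names illustrative, not QEMU's
util/rfifolock.c) of the three properties the wrappers depend on:
recursion, FIFO fairness among waiters, and a contention callback,
which aio_rfifolock_cb() above wires to aio_notify() so that an owner
blocked in aio_poll() wakes up and reaches its release point.

  #include <pthread.h>

  /* Illustrative ticket lock with RFifoLock-style semantics. */
  typedef struct {
      pthread_mutex_t lock;     /* protects the fields below */
      pthread_cond_t cond;      /* signalled whenever head advances */
      unsigned int head, tail;  /* ticket counters: head = now serving */
      pthread_t owner;          /* valid only while nesting > 0 */
      unsigned int nesting;     /* recursion depth of the owner */
      void (*cb)(void *);       /* contention callback, may be NULL */
      void *cb_opaque;
  } SketchRFifoLock;

  static void sketch_lock(SketchRFifoLock *r)
  {
      pthread_mutex_lock(&r->lock);

      /* Recursive acquisition: the owner just bumps its nesting count */
      if (r->nesting > 0 && pthread_equal(r->owner, pthread_self())) {
          r->nesting++;
          pthread_mutex_unlock(&r->lock);
          return;
      }

      /* Take a ticket; tickets are served in FIFO order.  While
       * waiting, kick the owner (e.g. out of a blocking aio_poll()). */
      unsigned int ticket = r->tail++;
      while (ticket != r->head) {
          if (r->cb) {
              r->cb(r->cb_opaque);
          }
          pthread_cond_wait(&r->cond, &r->lock);
      }

      r->owner = pthread_self();
      r->nesting = 1;
      pthread_mutex_unlock(&r->lock);
  }

  static void sketch_unlock(SketchRFifoLock *r)
  {
      pthread_mutex_lock(&r->lock);
      if (--r->nesting == 0) {
          r->head++;                         /* serve the next ticket */
          pthread_cond_broadcast(&r->cond);
      }
      pthread_mutex_unlock(&r->lock);
  }

The FIFO property is what makes the commit message's pattern safe:
when another thread is queued on the lock, the event loop's next
aio_context_acquire() joins the back of the queue, so the waiter is
guaranteed a turn between two aio_poll() iterations rather than being
starved by the loop re-acquiring immediately.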