-rw-r--r--  block/blkio.c                     2
-rw-r--r--  hw/block/dataplane/virtio-blk.c  17
-rw-r--r--  hw/block/virtio-blk.c            44
-rw-r--r--  include/hw/virtio/virtio-blk.h    2
-rw-r--r--  util/aio-posix.c                 21
5 files changed, 42 insertions, 44 deletions
diff --git a/block/blkio.c b/block/blkio.c
index 5eae3adfaf..6ad86b23d1 100644
--- a/block/blkio.c
+++ b/block/blkio.c
@@ -19,6 +19,8 @@
 #include "qemu/module.h"
 #include "exec/memory.h" /* for ram_block_discard_disable() */
 
+#include "block/block-io.h"
+
 /*
  * Keep the QEMU BlockDriver names identical to the libblkio driver names.
  * Using macros instead of typing out the string literals avoids typos.
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 26f965cabc..b28d81737e 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -237,9 +237,6 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
         goto fail_aio_context;
     }
 
-    /* Process queued requests before the ones in vring */
-    virtio_blk_process_queued_requests(vblk, false);
-
     /* Kick right away to begin processing requests already in vring */
     for (i = 0; i < nvqs; i++) {
         VirtQueue *vq = virtio_get_queue(s->vdev, i);
@@ -272,11 +269,6 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
   fail_host_notifiers:
     k->set_guest_notifiers(qbus->parent, nvqs, false);
   fail_guest_notifiers:
-    /*
-     * If we failed to set up the guest notifiers queued requests will be
-     * processed on the main context.
-     */
-    virtio_blk_process_queued_requests(vblk, false);
     vblk->dataplane_disabled = true;
     s->starting = false;
     vblk->dataplane_started = true;
@@ -325,8 +317,13 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
     aio_context_acquire(s->ctx);
     aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
 
-    /* Drain and try to switch bs back to the QEMU main loop. If other users
-     * keep the BlockBackend in the iothread, that's ok */
+    /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
+    blk_drain(s->conf->conf.blk);
+
+    /*
+     * Try to switch bs back to the QEMU main loop. If other users keep the
+     * BlockBackend in the iothread, that's ok
+     */
     blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);
 
     aio_context_release(s->ctx);
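
The reworked stop path depends on ordering: blk_drain() runs while the BlockBackend is still bound to the IOThread's AioContext, so the oneshot restart BH and any in-flight requests finish before the context is handed back to the main loop. Below is a minimal sketch of that sequence, assuming only the QEMU block-layer calls visible in the hunk above (aio_context_acquire()/aio_context_release(), blk_drain(), blk_set_aio_context(), qemu_get_aio_context()); the wrapper function and its parameters are hypothetical.

#include "qemu/osdep.h"
#include "block/aio.h"
#include "sysemu/block-backend.h"

/* Hypothetical stop helper; only the block-layer calls are real QEMU APIs */
static void my_dataplane_stop(BlockBackend *blk, AioContext *iothread_ctx)
{
    aio_context_acquire(iothread_ctx);

    /* Also waits for requests submitted by the oneshot restart BH */
    blk_drain(blk);

    /*
     * Try to move blk back to the QEMU main loop; it is fine if other
     * users keep it in the IOThread.
     */
    blk_set_aio_context(blk, qemu_get_aio_context(), NULL);

    aio_context_release(iothread_ctx);
}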
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index f717550fdc..1762517878 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -806,8 +806,10 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
     virtio_blk_handle_vq(s, vq);
 }
 
-void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
+static void virtio_blk_dma_restart_bh(void *opaque)
 {
+    VirtIOBlock *s = opaque;
+
     VirtIOBlockReq *req = s->rq;
     MultiReqBuffer mrb = {};
 
@@ -834,43 +836,27 @@ void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
     if (mrb.num_reqs) {
         virtio_blk_submit_multireq(s, &mrb);
     }
-    if (is_bh) {
-        blk_dec_in_flight(s->conf.conf.blk);
-    }
-    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
-}
-
-static void virtio_blk_dma_restart_bh(void *opaque)
-{
-    VirtIOBlock *s = opaque;
 
-    qemu_bh_delete(s->bh);
-    s->bh = NULL;
+    /* Paired with inc in virtio_blk_dma_restart_cb() */
+    blk_dec_in_flight(s->conf.conf.blk);
 
-    virtio_blk_process_queued_requests(s, true);
+    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
 }
 
 static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                       RunState state)
 {
     VirtIOBlock *s = opaque;
-    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
-    VirtioBusState *bus = VIRTIO_BUS(qbus);
 
     if (!running) {
         return;
     }
 
-    /*
-     * If ioeventfd is enabled, don't schedule the BH here as queued
-     * requests will be processed while starting the data plane.
-     */
-    if (!s->bh && !virtio_bus_ioeventfd_enabled(bus)) {
-        s->bh = aio_bh_new(blk_get_aio_context(s->conf.conf.blk),
-                           virtio_blk_dma_restart_bh, s);
-        blk_inc_in_flight(s->conf.conf.blk);
-        qemu_bh_schedule(s->bh);
-    }
+    /* Paired with dec in virtio_blk_dma_restart_bh() */
+    blk_inc_in_flight(s->conf.conf.blk);
+
+    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.conf.blk),
+            virtio_blk_dma_restart_bh, s);
 }
 
 static void virtio_blk_reset(VirtIODevice *vdev)
@@ -1213,7 +1199,13 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
         return;
     }
 
-    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
+    /*
+     * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets
+     * called after ->start_ioeventfd() has already set blk's AioContext.
+     */
+    s->change =
+        qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);
+
     blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
     blk_set_dev_ops(s->blk, &virtio_block_ops, s);
 
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 7f589b4146..dafec432ce 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -55,7 +55,6 @@ struct VirtIOBlock {
     VirtIODevice parent_obj;
     BlockBackend *blk;
     void *rq;
-    QEMUBH *bh;
     VirtIOBlkConf conf;
     unsigned short sector_mask;
     bool original_wce;
@@ -93,6 +92,5 @@ typedef struct MultiReqBuffer {
 } MultiReqBuffer;
 
 void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
-void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh);
 
 #endif
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 731f3826c0..6cc6256d53 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -585,18 +585,16 @@ static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
 
     max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
     if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
+        /*
+         * Enable poll mode. This pairs with the poll_set_started() call in
+         * aio_poll() that disables poll mode.
+         */
         poll_set_started(ctx, ready_list, true);
 
         if (run_poll_handlers(ctx, ready_list, max_ns, timeout)) {
             return true;
         }
     }
-
-    if (poll_set_started(ctx, ready_list, false)) {
-        *timeout = 0;
-        return true;
-    }
-
     return false;
 }
 
@@ -657,6 +655,17 @@ bool aio_poll(AioContext *ctx, bool blocking)
      * system call---a single round of run_poll_handlers_once suffices.
      */
     if (timeout || ctx->fdmon_ops->need_wait(ctx)) {
+        /*
+         * Disable poll mode. Poll mode must be disabled before calling
+         * ctx->fdmon_ops->wait() so that a guest notification can wake up
+         * the IOThread when work becomes pending; otherwise we risk hangs
+         * or unnecessary latency.
+         */
+        if (poll_set_started(ctx, &ready_list, false)) {
+            timeout = 0;
+            progress = true;
+        }
+
         ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
     }