author     Dr. David Alan Gilbert <dgilbert@redhat.com>  2017-07-17 12:09:34 +0100
committer  Juan Quintela <quintela@redhat.com>           2017-07-18 17:36:17 +0200
commit     9c98cfbe72b21d9d84b9ea8d231bde103b9fb7ae (patch)
tree       d8bd7049b08e2de3267d674e3c27a4fa682cd503
parent     0b3c15f09715acd78063e720444cc86ac357bab4 (diff)
download   focaccia-qemu-9c98cfbe72b21d9d84b9ea8d231bde103b9fb7ae.tar.gz
           focaccia-qemu-9c98cfbe72b21d9d84b9ea8d231bde103b9fb7ae.zip
migration/rdma: Allow cancelling while waiting for wrid
When waiting for a WRID, if the other side dies we end up waiting
forever with no way to cancel the migration.
Cure this by poll()ing the fd first with a timeout, and checking
the error flags and migration state between polls.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Message-Id: <20170717110936.23314-5-dgilbert@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
-rw-r--r--  migration/rdma.c  59
1 file changed, 53 insertions(+), 6 deletions(-)
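
The patch below polls the completion channel's fd with a short timeout instead of
blocking, so a 'cancel' request or an error on the RDMA context is noticed within
about 0.1s. As a rough standalone illustration of that pattern (not QEMU code:
poll_fd_cancellable() and the 'cancelled' flag are hypothetical names), a minimal
sketch using plain poll(2) might look like:

#include <errno.h>
#include <poll.h>
#include <stdbool.h>

/* Sketch of the pattern used by the patch: poll an fd with a short
 * timeout so a cancel request or error can be noticed instead of
 * blocking forever.  'cancelled' stands in for the checks the patch
 * makes on rdma->error_state and the migration state.
 */
static int poll_fd_cancellable(int fd, const volatile bool *cancelled)
{
    while (!*cancelled) {
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLHUP | POLLERR };

        switch (poll(&pfd, 1, 100 /* ms, matching the patch's 0.1s */)) {
        case 1:             /* fd active */
            return 0;
        case 0:             /* timeout, go around and re-check the flag */
            break;
        default:            /* poll error */
            return -EPIPE;
        }
    }
    return -EPIPE;          /* cancelled */
}
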
diff --git a/migration/rdma.c b/migration/rdma.c
index 59810aec2e..0cf55a6d5b 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -1466,6 +1466,56 @@ static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out,
     return  0;
 }
 
+/* Wait for activity on the completion channel.
+ * Returns 0 on success, non-0 on error.
+ */
+static int qemu_rdma_wait_comp_channel(RDMAContext *rdma)
+{
+    /*
+     * Coroutine doesn't start until migration_fd_process_incoming()
+     * so don't yield unless we know we're running inside of a coroutine.
+     */
+    if (rdma->migration_started_on_destination) {
+        yield_until_fd_readable(rdma->comp_channel->fd);
+    } else {
+        /* This is the source side (a separate thread), or the
+         * destination prior to migration_fd_process_incoming();
+         * either way we can't yield, so we have to poll the fd.
+         * But we still need to handle 'cancel' or an error
+         * without hanging forever.
+         */
+        while (!rdma->error_state && !rdma->received_error) {
+            GPollFD pfds[1];
+            pfds[0].fd = rdma->comp_channel->fd;
+            pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;
+            /* 0.1s timeout, should be fine for a 'cancel' */
+            switch (qemu_poll_ns(pfds, 1, 100 * 1000 * 1000)) {
+            case 1: /* fd active */
+                return 0;
+
+            case 0: /* Timeout, go around again */
+                break;
+
+            default: /* Error of some type -
+                      * I don't trust errno from qemu_poll_ns
+                      */
+                error_report("%s: poll failed", __func__);
+                return -EPIPE;
+            }
+
+            if (migrate_get_current()->state == MIGRATION_STATUS_CANCELLING) {
+                /* Bail out and let the cancellation happen */
+                return -EPIPE;
+            }
+        }
+    }
+
+    if (rdma->received_error) {
+        return -EPIPE;
+    }
+    return rdma->error_state;
+}
+
 /*
  * Block until the next work request has completed.
  *
@@ -1513,12 +1563,9 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested,
     }
 
     while (1) {
-        /*
-         * Coroutine doesn't start until migration_fd_process_incoming()
-         * so don't yield unless we know we're running inside of a coroutine.
-         */
-        if (rdma->migration_started_on_destination) {
-            yield_until_fd_readable(rdma->comp_channel->fd);
+        ret = qemu_rdma_wait_comp_channel(rdma);
+        if (ret) {
+            goto err_block_for_wrid;
         }
 
         ret = ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx);
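
Once the cancellable wait reports activity, the caller in qemu_rdma_block_for_wrid()
picks up the completion-queue event via ibv_get_cq_event() as before. For readers
unfamiliar with the libibverbs completion-channel idiom, here is a minimal, hedged
sketch of that step using only the standard verbs API (not QEMU's exact control
flow; drain_one_cq_event() is a hypothetical helper):

#include <infiniband/verbs.h>

/* After the cancellable wait reports the channel fd is readable, a
 * caller typically fetches the event, acks it, and re-arms completion
 * notification before polling the CQ itself.
 */
static int drain_one_cq_event(struct ibv_comp_channel *channel)
{
    struct ibv_cq *cq;
    void *cq_ctx;

    if (ibv_get_cq_event(channel, &cq, &cq_ctx)) {
        return -1;                   /* channel error */
    }
    ibv_ack_cq_events(cq, 1);        /* events must be acked eventually */
    if (ibv_req_notify_cq(cq, 0)) {  /* re-arm for the next completion */
        return -1;
    }
    /* ... then drain completions with ibv_poll_cq() until it returns 0 ... */
    return 0;
}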