-rw-r--r--  MAINTAINERS                            4
-rw-r--r--  Makefile                               9
-rw-r--r--  Makefile.objs                          7
-rw-r--r--  block.c                                2
-rw-r--r--  block/backup.c                        59
-rw-r--r--  block/commit.c                        44
-rw-r--r--  block/mirror.c                       113
-rw-r--r--  block/replication.c                   10
-rw-r--r--  block/sheepdog.c                       4
-rw-r--r--  block/stream.c                        39
-rw-r--r--  block/trace-events                     5
-rw-r--r--  blockdev.c                            68
-rw-r--r--  blockjob.c                          1094
-rwxr-xr-x  configure                              2
-rw-r--r--  hw/9pfs/xen-9p-backend.c              32
-rw-r--r--  hw/block/xen_disk.c                  614
-rw-r--r--  hw/char/xen_console.c                  9
-rw-r--r--  hw/i386/xen/xen-hvm.c                 76
-rw-r--r--  hw/i386/xen/xen_pvdevice.c            11
-rw-r--r--  hw/net/xen_nic.c                      33
-rw-r--r--  hw/usb/xen-usb.c                      37
-rw-r--r--  hw/xen/xen_backend.c                 178
-rw-r--r--  hw/xen/xen_pt.c                        2
-rw-r--r--  hw/xen/xen_pt_config_init.c            2
-rw-r--r--  include/block/block_int.h              2
-rw-r--r--  include/block/blockjob.h             324
-rw-r--r--  include/block/blockjob_int.h         176
-rw-r--r--  include/hw/xen/xen_backend.h          34
-rw-r--r--  include/hw/xen/xen_common.h           17
-rw-r--r--  include/qemu/job.h                   562
-rw-r--r--  job-qmp.c                            188
-rw-r--r--  job.c                               1000
-rw-r--r--  qapi/block-core.json                 116
-rw-r--r--  qapi/job.json                        253
-rw-r--r--  qapi/qapi-schema.json                  1
-rw-r--r--  qemu-img.c                            22
-rw-r--r--  qemu-nbd.c                             8
-rwxr-xr-x  scripts/checkpatch.pl                  2
-rw-r--r--  target/lm32/op_helper.c                4
-rwxr-xr-x  tests/qemu-iotests/030                17
-rwxr-xr-x  tests/qemu-iotests/040                 2
-rwxr-xr-x  tests/qemu-iotests/041                23
-rwxr-xr-x  tests/qemu-iotests/086                 2
-rw-r--r--  tests/qemu-iotests/094.out             7
-rwxr-xr-x  tests/qemu-iotests/095                 2
-rw-r--r--  tests/qemu-iotests/095.out             6
-rwxr-xr-x  tests/qemu-iotests/109                 2
-rw-r--r--  tests/qemu-iotests/109.out           178
-rwxr-xr-x  tests/qemu-iotests/124                 8
-rw-r--r--  tests/qemu-iotests/126.out             2
-rw-r--r--  tests/qemu-iotests/127.out             7
-rwxr-xr-x  tests/qemu-iotests/141                13
-rw-r--r--  tests/qemu-iotests/141.out            29
-rwxr-xr-x  tests/qemu-iotests/144                 2
-rw-r--r--  tests/qemu-iotests/144.out             7
-rwxr-xr-x  tests/qemu-iotests/155                 2
-rwxr-xr-x  tests/qemu-iotests/156                 2
-rw-r--r--  tests/qemu-iotests/156.out             7
-rwxr-xr-x  tests/qemu-iotests/185                14
-rw-r--r--  tests/qemu-iotests/185.out            10
-rwxr-xr-x  tests/qemu-iotests/191                 6
-rw-r--r--  tests/qemu-iotests/191.out           132
-rwxr-xr-x  tests/qemu-iotests/219               209
-rw-r--r--  tests/qemu-iotests/219.out           327
-rw-r--r--  tests/qemu-iotests/common.filter       6
-rw-r--r--  tests/qemu-iotests/common.rc          12
-rw-r--r--  tests/qemu-iotests/group              11
-rw-r--r--  tests/qemu-iotests/iotests.py         50
-rw-r--r--  tests/test-bdrv-drain.c               63
-rw-r--r--  tests/test-blockjob-txn.c             74
-rw-r--r--  tests/test-blockjob.c                141
-rw-r--r--  trace-events                          14
-rw-r--r--  vl.c                                   1
73 files changed, 4032 insertions(+), 2519 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index f07fcee72e..5b335aa948 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1369,10 +1369,14 @@ L: qemu-block@nongnu.org
 S: Supported
 F: blockjob.c
 F: include/block/blockjob.h
+F: job.c
+F: job-qmp.c
+F: include/qemu/job.h
 F: block/backup.c
 F: block/commit.c
 F: block/stream.c
 F: block/mirror.c
+F: qapi/job.json
 T: git git://github.com/codyprime/qemu-kvm-jtc.git block
 
 Block QAPI, monitor, command line
diff --git a/Makefile b/Makefile
index 35554b5bef..6d588d1f71 100644
--- a/Makefile
+++ b/Makefile
@@ -98,6 +98,7 @@ GENERATED_FILES += qapi/qapi-types-char.h qapi/qapi-types-char.c
 GENERATED_FILES += qapi/qapi-types-common.h qapi/qapi-types-common.c
 GENERATED_FILES += qapi/qapi-types-crypto.h qapi/qapi-types-crypto.c
 GENERATED_FILES += qapi/qapi-types-introspect.h qapi/qapi-types-introspect.c
+GENERATED_FILES += qapi/qapi-types-job.h qapi/qapi-types-job.c
 GENERATED_FILES += qapi/qapi-types-migration.h qapi/qapi-types-migration.c
 GENERATED_FILES += qapi/qapi-types-misc.h qapi/qapi-types-misc.c
 GENERATED_FILES += qapi/qapi-types-net.h qapi/qapi-types-net.c
@@ -116,6 +117,7 @@ GENERATED_FILES += qapi/qapi-visit-char.h qapi/qapi-visit-char.c
 GENERATED_FILES += qapi/qapi-visit-common.h qapi/qapi-visit-common.c
 GENERATED_FILES += qapi/qapi-visit-crypto.h qapi/qapi-visit-crypto.c
 GENERATED_FILES += qapi/qapi-visit-introspect.h qapi/qapi-visit-introspect.c
+GENERATED_FILES += qapi/qapi-visit-job.h qapi/qapi-visit-job.c
 GENERATED_FILES += qapi/qapi-visit-migration.h qapi/qapi-visit-migration.c
 GENERATED_FILES += qapi/qapi-visit-misc.h qapi/qapi-visit-misc.c
 GENERATED_FILES += qapi/qapi-visit-net.h qapi/qapi-visit-net.c
@@ -133,6 +135,7 @@ GENERATED_FILES += qapi/qapi-commands-char.h qapi/qapi-commands-char.c
 GENERATED_FILES += qapi/qapi-commands-common.h qapi/qapi-commands-common.c
 GENERATED_FILES += qapi/qapi-commands-crypto.h qapi/qapi-commands-crypto.c
 GENERATED_FILES += qapi/qapi-commands-introspect.h qapi/qapi-commands-introspect.c
+GENERATED_FILES += qapi/qapi-commands-job.h qapi/qapi-commands-job.c
 GENERATED_FILES += qapi/qapi-commands-migration.h qapi/qapi-commands-migration.c
 GENERATED_FILES += qapi/qapi-commands-misc.h qapi/qapi-commands-misc.c
 GENERATED_FILES += qapi/qapi-commands-net.h qapi/qapi-commands-net.c
@@ -150,6 +153,7 @@ GENERATED_FILES += qapi/qapi-events-char.h qapi/qapi-events-char.c
 GENERATED_FILES += qapi/qapi-events-common.h qapi/qapi-events-common.c
 GENERATED_FILES += qapi/qapi-events-crypto.h qapi/qapi-events-crypto.c
 GENERATED_FILES += qapi/qapi-events-introspect.h qapi/qapi-events-introspect.c
+GENERATED_FILES += qapi/qapi-events-job.h qapi/qapi-events-job.c
 GENERATED_FILES += qapi/qapi-events-migration.h qapi/qapi-events-migration.c
 GENERATED_FILES += qapi/qapi-events-misc.h qapi/qapi-events-misc.c
 GENERATED_FILES += qapi/qapi-events-net.h qapi/qapi-events-net.c
@@ -582,6 +586,7 @@ qapi-modules = $(SRC_PATH)/qapi/qapi-schema.json $(SRC_PATH)/qapi/common.json \
                $(SRC_PATH)/qapi/char.json \
                $(SRC_PATH)/qapi/crypto.json \
                $(SRC_PATH)/qapi/introspect.json \
+               $(SRC_PATH)/qapi/job.json \
                $(SRC_PATH)/qapi/migration.json \
                $(SRC_PATH)/qapi/misc.json \
                $(SRC_PATH)/qapi/net.json \
@@ -601,6 +606,7 @@ qapi/qapi-types-char.c qapi/qapi-types-char.h \
 qapi/qapi-types-common.c qapi/qapi-types-common.h \
 qapi/qapi-types-crypto.c qapi/qapi-types-crypto.h \
 qapi/qapi-types-introspect.c qapi/qapi-types-introspect.h \
+qapi/qapi-types-job.c qapi/qapi-types-job.h \
 qapi/qapi-types-migration.c qapi/qapi-types-migration.h \
 qapi/qapi-types-misc.c qapi/qapi-types-misc.h \
 qapi/qapi-types-net.c qapi/qapi-types-net.h \
@@ -619,6 +625,7 @@ qapi/qapi-visit-char.c qapi/qapi-visit-char.h \
 qapi/qapi-visit-common.c qapi/qapi-visit-common.h \
 qapi/qapi-visit-crypto.c qapi/qapi-visit-crypto.h \
 qapi/qapi-visit-introspect.c qapi/qapi-visit-introspect.h \
+qapi/qapi-visit-job.c qapi/qapi-visit-job.h \
 qapi/qapi-visit-migration.c qapi/qapi-visit-migration.h \
 qapi/qapi-visit-misc.c qapi/qapi-visit-misc.h \
 qapi/qapi-visit-net.c qapi/qapi-visit-net.h \
@@ -636,6 +643,7 @@ qapi/qapi-commands-char.c qapi/qapi-commands-char.h \
 qapi/qapi-commands-common.c qapi/qapi-commands-common.h \
 qapi/qapi-commands-crypto.c qapi/qapi-commands-crypto.h \
 qapi/qapi-commands-introspect.c qapi/qapi-commands-introspect.h \
+qapi/qapi-commands-job.c qapi/qapi-commands-job.h \
 qapi/qapi-commands-migration.c qapi/qapi-commands-migration.h \
 qapi/qapi-commands-misc.c qapi/qapi-commands-misc.h \
 qapi/qapi-commands-net.c qapi/qapi-commands-net.h \
@@ -653,6 +661,7 @@ qapi/qapi-events-char.c qapi/qapi-events-char.h \
 qapi/qapi-events-common.c qapi/qapi-events-common.h \
 qapi/qapi-events-crypto.c qapi/qapi-events-crypto.h \
 qapi/qapi-events-introspect.c qapi/qapi-events-introspect.h \
+qapi/qapi-events-job.c qapi/qapi-events-job.h \
 qapi/qapi-events-migration.c qapi/qapi-events-migration.h \
 qapi/qapi-events-misc.c qapi/qapi-events-misc.h \
 qapi/qapi-events-net.c qapi/qapi-events-net.h \
diff --git a/Makefile.objs b/Makefile.objs
index c6c9b8fc21..c6c3554203 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -10,6 +10,7 @@ util-obj-y += qapi/qapi-types-char.o
 util-obj-y += qapi/qapi-types-common.o
 util-obj-y += qapi/qapi-types-crypto.o
 util-obj-y += qapi/qapi-types-introspect.o
+util-obj-y += qapi/qapi-types-job.o
 util-obj-y += qapi/qapi-types-migration.o
 util-obj-y += qapi/qapi-types-misc.o
 util-obj-y += qapi/qapi-types-net.o
@@ -28,6 +29,7 @@ util-obj-y += qapi/qapi-visit-char.o
 util-obj-y += qapi/qapi-visit-common.o
 util-obj-y += qapi/qapi-visit-crypto.o
 util-obj-y += qapi/qapi-visit-introspect.o
+util-obj-y += qapi/qapi-visit-job.o
 util-obj-y += qapi/qapi-visit-migration.o
 util-obj-y += qapi/qapi-visit-misc.o
 util-obj-y += qapi/qapi-visit-net.o
@@ -45,6 +47,7 @@ util-obj-y += qapi/qapi-events-char.o
 util-obj-y += qapi/qapi-events-common.o
 util-obj-y += qapi/qapi-events-crypto.o
 util-obj-y += qapi/qapi-events-introspect.o
+util-obj-y += qapi/qapi-events-job.o
 util-obj-y += qapi/qapi-events-migration.o
 util-obj-y += qapi/qapi-events-misc.o
 util-obj-y += qapi/qapi-events-net.o
@@ -63,7 +66,7 @@ chardev-obj-y = chardev/
 # block-obj-y is code used by both qemu system emulation and qemu-img
 
 block-obj-y += nbd/
-block-obj-y += block.o blockjob.o
+block-obj-y += block.o blockjob.o job.o
 block-obj-y += block/ scsi/
 block-obj-y += qemu-io-cmds.o
 block-obj-$(CONFIG_REPLICATION) += replication.o
@@ -94,6 +97,7 @@ io-obj-y = io/
 ifeq ($(CONFIG_SOFTMMU),y)
 common-obj-y = blockdev.o blockdev-nbd.o block/
 common-obj-y += bootdevice.o iothread.o
+common-obj-y += job-qmp.o
 common-obj-y += net/
 common-obj-y += qdev-monitor.o device-hotplug.o
 common-obj-$(CONFIG_WIN32) += os-win32.o
@@ -140,6 +144,7 @@ common-obj-y += qapi/qapi-commands-char.o
 common-obj-y += qapi/qapi-commands-common.o
 common-obj-y += qapi/qapi-commands-crypto.o
 common-obj-y += qapi/qapi-commands-introspect.o
+common-obj-y += qapi/qapi-commands-job.o
 common-obj-y += qapi/qapi-commands-migration.o
 common-obj-y += qapi/qapi-commands-misc.o
 common-obj-y += qapi/qapi-commands-net.o
diff --git a/block.c b/block.c
index 676e57f562..501b64c819 100644
--- a/block.c
+++ b/block.c
@@ -3362,7 +3362,7 @@ static void bdrv_close(BlockDriverState *bs)
 
 void bdrv_close_all(void)
 {
-    block_job_cancel_sync_all();
+    assert(job_next(NULL) == NULL);
     nbd_export_close_all();
 
     /* Drop references from requests still in flight, such as canceled block
diff --git a/block/backup.c b/block/backup.c
index e14d99560d..4e228e959b 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -160,7 +160,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
          * offset field is an opaque progress value, it is not a disk offset.
          */
         job->bytes_read += n;
-        block_job_progress_update(&job->common, n);
+        job_progress_update(&job->common.job, n);
     }
 
 out:
@@ -207,25 +207,25 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
     }
 }
 
-static void backup_commit(BlockJob *job)
+static void backup_commit(Job *job)
 {
-    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     if (s->sync_bitmap) {
         backup_cleanup_sync_bitmap(s, 0);
     }
 }
 
-static void backup_abort(BlockJob *job)
+static void backup_abort(Job *job)
 {
-    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     if (s->sync_bitmap) {
         backup_cleanup_sync_bitmap(s, -1);
     }
 }
 
-static void backup_clean(BlockJob *job)
+static void backup_clean(Job *job)
 {
-    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     assert(s->target);
     blk_unref(s->target);
     s->target = NULL;
@@ -317,11 +317,11 @@ typedef struct {
     int ret;
 } BackupCompleteData;
 
-static void backup_complete(BlockJob *job, void *opaque)
+static void backup_complete(Job *job, void *opaque)
 {
     BackupCompleteData *data = opaque;
 
-    block_job_completed(job, data->ret);
+    job_completed(job, data->ret);
     g_free(data);
 }
 
@@ -329,7 +329,7 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
 {
     uint64_t delay_ns;
 
-    if (block_job_is_cancelled(&job->common)) {
+    if (job_is_cancelled(&job->common.job)) {
         return true;
     }
 
@@ -337,9 +337,9 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
      * return. Without a yield, the VM would not reboot. */
     delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
     job->bytes_read = 0;
-    block_job_sleep_ns(&job->common, delay_ns);
+    job_sleep_ns(&job->common.job, delay_ns);
 
-    if (block_job_is_cancelled(&job->common)) {
+    if (job_is_cancelled(&job->common.job)) {
         return true;
     }
 
@@ -406,8 +406,8 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
         bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
     }
 
-    /* TODO block_job_progress_set_remaining() would make more sense */
-    block_job_progress_update(&job->common,
+    /* TODO job_progress_set_remaining() would make more sense */
+    job_progress_update(&job->common.job,
         job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);
 
     bdrv_dirty_iter_free(dbi);
@@ -425,7 +425,7 @@ static void coroutine_fn backup_run(void *opaque)
     qemu_co_rwlock_init(&job->flush_rwlock);
 
     nb_clusters = DIV_ROUND_UP(job->len, job->cluster_size);
-    block_job_progress_set_remaining(&job->common, job->len);
+    job_progress_set_remaining(&job->common.job, job->len);
 
     job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
     if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
@@ -441,10 +441,10 @@ static void coroutine_fn backup_run(void *opaque)
     if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
         /* All bits are set in copy_bitmap to allow any cluster to be copied.
          * This does not actually require them to be copied. */
-        while (!block_job_is_cancelled(&job->common)) {
+        while (!job_is_cancelled(&job->common.job)) {
             /* Yield until the job is cancelled.  We just let our before_write
              * notify callback service CoW requests. */
-            block_job_yield(&job->common);
+            job_yield(&job->common.job);
         }
     } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
         ret = backup_run_incremental(job);
@@ -519,16 +519,21 @@ static void coroutine_fn backup_run(void *opaque)
 
     data = g_malloc(sizeof(*data));
     data->ret = ret;
-    block_job_defer_to_main_loop(&job->common, backup_complete, data);
+    job_defer_to_main_loop(&job->common.job, backup_complete, data);
 }
 
 static const BlockJobDriver backup_job_driver = {
-    .instance_size          = sizeof(BackupBlockJob),
-    .job_type               = BLOCK_JOB_TYPE_BACKUP,
-    .start                  = backup_run,
-    .commit                 = backup_commit,
-    .abort                  = backup_abort,
-    .clean                  = backup_clean,
+    .job_driver = {
+        .instance_size          = sizeof(BackupBlockJob),
+        .job_type               = JOB_TYPE_BACKUP,
+        .free                   = block_job_free,
+        .user_resume            = block_job_user_resume,
+        .drain                  = block_job_drain,
+        .start                  = backup_run,
+        .commit                 = backup_commit,
+        .abort                  = backup_abort,
+        .clean                  = backup_clean,
+    },
     .attached_aio_context   = backup_attached_aio_context,
     .drain                  = backup_drain,
 };
@@ -541,7 +546,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                   BlockdevOnError on_target_error,
                   int creation_flags,
                   BlockCompletionFunc *cb, void *opaque,
-                  BlockJobTxn *txn, Error **errp)
+                  JobTxn *txn, Error **errp)
 {
     int64_t len;
     BlockDriverInfo bdi;
@@ -673,8 +678,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
         bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
     }
     if (job) {
-        backup_clean(&job->common);
-        block_job_early_fail(&job->common);
+        backup_clean(&job->common.job);
+        job_early_fail(&job->common.job);
     }
 
     return NULL;
diff --git a/block/commit.c b/block/commit.c
index ba5df6aa0a..620666161b 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -72,9 +72,10 @@ typedef struct {
     int ret;
 } CommitCompleteData;
 
-static void commit_complete(BlockJob *job, void *opaque)
+static void commit_complete(Job *job, void *opaque)
 {
-    CommitBlockJob *s = container_of(job, CommitBlockJob, common);
+    CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
+    BlockJob *bjob = &s->common;
     CommitCompleteData *data = opaque;
     BlockDriverState *top = blk_bs(s->top);
     BlockDriverState *base = blk_bs(s->base);
@@ -90,7 +91,7 @@ static void commit_complete(BlockJob *job, void *opaque)
      * the normal backing chain can be restored. */
     blk_unref(s->base);
 
-    if (!block_job_is_cancelled(&s->common) && ret == 0) {
+    if (!job_is_cancelled(job) && ret == 0) {
         /* success */
         ret = bdrv_drop_intermediate(s->commit_top_bs, base,
                                      s->backing_file_str);
@@ -111,12 +112,12 @@ static void commit_complete(BlockJob *job, void *opaque)
     blk_unref(s->top);
 
     /* If there is more than one reference to the job (e.g. if called from
-     * block_job_finish_sync()), block_job_completed() won't free it and
-     * therefore the blockers on the intermediate nodes remain. This would
-     * cause bdrv_set_backing_hd() to fail. */
-    block_job_remove_all_bdrv(job);
+     * job_finish_sync()), job_completed() won't free it and therefore the
+     * blockers on the intermediate nodes remain. This would cause
+     * bdrv_set_backing_hd() to fail. */
+    block_job_remove_all_bdrv(bjob);
 
-    block_job_completed(&s->common, ret);
+    job_completed(job, ret);
     g_free(data);
 
     /* If bdrv_drop_intermediate() didn't already do that, remove the commit
@@ -149,7 +150,7 @@ static void coroutine_fn commit_run(void *opaque)
     if (len < 0) {
         goto out;
     }
-    block_job_progress_set_remaining(&s->common, len);
+    job_progress_set_remaining(&s->common.job, len);
 
     ret = base_len = blk_getlength(s->base);
     if (base_len < 0) {
@@ -171,8 +172,8 @@ static void coroutine_fn commit_run(void *opaque)
         /* Note that even when no rate limit is applied we need to yield
          * with no pending I/O here so that bdrv_drain_all() returns.
          */
-        block_job_sleep_ns(&s->common, delay_ns);
-        if (block_job_is_cancelled(&s->common)) {
+        job_sleep_ns(&s->common.job, delay_ns);
+        if (job_is_cancelled(&s->common.job)) {
             break;
         }
         /* Copy if allocated above the base */
@@ -195,7 +196,7 @@ static void coroutine_fn commit_run(void *opaque)
             }
         }
         /* Publish progress */
-        block_job_progress_update(&s->common, n);
+        job_progress_update(&s->common.job, n);
 
         if (copy) {
             delay_ns = block_job_ratelimit_get_delay(&s->common, n);
@@ -211,13 +212,18 @@ out:
 
     data = g_malloc(sizeof(*data));
     data->ret = ret;
-    block_job_defer_to_main_loop(&s->common, commit_complete, data);
+    job_defer_to_main_loop(&s->common.job, commit_complete, data);
 }
 
 static const BlockJobDriver commit_job_driver = {
-    .instance_size = sizeof(CommitBlockJob),
-    .job_type      = BLOCK_JOB_TYPE_COMMIT,
-    .start         = commit_run,
+    .job_driver = {
+        .instance_size = sizeof(CommitBlockJob),
+        .job_type      = JOB_TYPE_COMMIT,
+        .free          = block_job_free,
+        .user_resume   = block_job_user_resume,
+        .drain         = block_job_drain,
+        .start         = commit_run,
+    },
 };
 
 static int coroutine_fn bdrv_commit_top_preadv(BlockDriverState *bs,
@@ -277,7 +283,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
     }
 
     s = block_job_create(job_id, &commit_job_driver, NULL, bs, 0, BLK_PERM_ALL,
-                         speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp);
+                         speed, JOB_DEFAULT, NULL, NULL, errp);
     if (!s) {
         return;
     }
@@ -367,7 +373,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
     s->on_error = on_error;
 
     trace_commit_start(bs, base, top, s);
-    block_job_start(&s->common);
+    job_start(&s->common.job);
     return;
 
 fail:
@@ -380,7 +386,7 @@ fail:
     if (commit_top_bs) {
         bdrv_replace_node(commit_top_bs, top, &error_abort);
     }
-    block_job_early_fail(&s->common);
+    job_early_fail(&s->common.job);
 }
 
 
diff --git a/block/mirror.c b/block/mirror.c
index a4197bb975..dcb66ec3be 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -119,14 +119,14 @@ static void mirror_iteration_done(MirrorOp *op, int ret)
             bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
         }
         if (!s->initial_zeroing_ongoing) {
-            block_job_progress_update(&s->common, op->bytes);
+            job_progress_update(&s->common.job, op->bytes);
         }
     }
     qemu_iovec_destroy(&op->qiov);
     g_free(op);
 
     if (s->waiting_for_io) {
-        qemu_coroutine_enter(s->common.co);
+        qemu_coroutine_enter(s->common.job.co);
     }
 }
 
@@ -345,7 +345,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
         mirror_wait_for_io(s);
     }
 
-    block_job_pause_point(&s->common);
+    job_pause_point(&s->common.job);
 
     /* Find the number of consective dirty chunks following the first dirty
      * one, and wait for in flight requests in them. */
@@ -484,9 +484,10 @@ typedef struct {
     int ret;
 } MirrorExitData;
 
-static void mirror_exit(BlockJob *job, void *opaque)
+static void mirror_exit(Job *job, void *opaque)
 {
-    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
+    BlockJob *bjob = &s->common;
     MirrorExitData *data = opaque;
     AioContext *replace_aio_context = NULL;
     BlockDriverState *src = s->source;
@@ -497,7 +498,7 @@ static void mirror_exit(BlockJob *job, void *opaque)
     bdrv_release_dirty_bitmap(src, s->dirty_bitmap);
 
     /* Make sure that the source BDS doesn't go away before we called
-     * block_job_completed(). */
+     * job_completed(). */
     bdrv_ref(src);
     bdrv_ref(mirror_top_bs);
     bdrv_ref(target_bs);
@@ -568,7 +569,7 @@ static void mirror_exit(BlockJob *job, void *opaque)
      * the blockers on the intermediate nodes so that the resulting state is
      * valid. Also give up permissions on mirror_top_bs->backing, which might
      * block the removal. */
-    block_job_remove_all_bdrv(job);
+    block_job_remove_all_bdrv(bjob);
     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                             &error_abort);
     bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
@@ -576,11 +577,11 @@ static void mirror_exit(BlockJob *job, void *opaque)
     /* We just changed the BDS the job BB refers to (with either or both of the
      * bdrv_replace_node() calls), so switch the BB back so the cleanup does
      * the right thing. We don't need any permissions any more now. */
-    blk_remove_bs(job->blk);
-    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
-    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);
+    blk_remove_bs(bjob->blk);
+    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
+    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);
 
-    block_job_completed(&s->common, data->ret);
+    job_completed(job, data->ret);
 
     g_free(data);
     bdrv_drained_end(src);
@@ -594,9 +595,9 @@ static void mirror_throttle(MirrorBlockJob *s)
 
     if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
         s->last_pause_ns = now;
-        block_job_sleep_ns(&s->common, 0);
+        job_sleep_ns(&s->common.job, 0);
     } else {
-        block_job_pause_point(&s->common);
+        job_pause_point(&s->common.job);
     }
 }
 
@@ -622,7 +623,7 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
 
             mirror_throttle(s);
 
-            if (block_job_is_cancelled(&s->common)) {
+            if (job_is_cancelled(&s->common.job)) {
                 s->initial_zeroing_ongoing = false;
                 return 0;
             }
@@ -650,7 +651,7 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
 
         mirror_throttle(s);
 
-        if (block_job_is_cancelled(&s->common)) {
+        if (job_is_cancelled(&s->common.job)) {
             return 0;
         }
 
@@ -695,7 +696,7 @@ static void coroutine_fn mirror_run(void *opaque)
                                  checking for a NULL string */
     int ret = 0;
 
-    if (block_job_is_cancelled(&s->common)) {
+    if (job_is_cancelled(&s->common.job)) {
         goto immediate_exit;
     }
 
@@ -726,13 +727,13 @@ static void coroutine_fn mirror_run(void *opaque)
     }
 
     if (s->bdev_length == 0) {
-        /* Report BLOCK_JOB_READY and wait for complete. */
-        block_job_event_ready(&s->common);
+        /* Transition to the READY state and wait for complete. */
+        job_transition_to_ready(&s->common.job);
         s->synced = true;
-        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
-            block_job_yield(&s->common);
+        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
+            job_yield(&s->common.job);
         }
-        s->common.cancelled = false;
+        s->common.job.cancelled = false;
         goto immediate_exit;
     }
 
@@ -768,7 +769,7 @@ static void coroutine_fn mirror_run(void *opaque)
     s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     if (!s->is_none_mode) {
         ret = mirror_dirty_init(s);
-        if (ret < 0 || block_job_is_cancelled(&s->common)) {
+        if (ret < 0 || job_is_cancelled(&s->common.job)) {
             goto immediate_exit;
         }
     }
@@ -785,13 +786,13 @@ static void coroutine_fn mirror_run(void *opaque)
             goto immediate_exit;
         }
 
-        block_job_pause_point(&s->common);
+        job_pause_point(&s->common.job);
 
         cnt = bdrv_get_dirty_count(s->dirty_bitmap);
         /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
          * the number of bytes currently being processed; together those are
          * the current remaining operation length */
-        block_job_progress_set_remaining(&s->common, s->bytes_in_flight + cnt);
+        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);
 
         /* Note that even when no rate limit is applied we need to yield
          * periodically with no pending I/O so that bdrv_drain_all() returns.
@@ -823,12 +824,12 @@ static void coroutine_fn mirror_run(void *opaque)
                  * report completion.  This way, block-job-cancel will leave
                  * the target in a consistent state.
                  */
-                block_job_event_ready(&s->common);
+                job_transition_to_ready(&s->common.job);
                 s->synced = true;
             }
 
             should_complete = s->should_complete ||
-                block_job_is_cancelled(&s->common);
+                job_is_cancelled(&s->common.job);
             cnt = bdrv_get_dirty_count(s->dirty_bitmap);
         }
 
@@ -856,7 +857,7 @@ static void coroutine_fn mirror_run(void *opaque)
              * completion.
              */
             assert(QLIST_EMPTY(&bs->tracked_requests));
-            s->common.cancelled = false;
+            s->common.job.cancelled = false;
             need_drain = false;
             break;
         }
@@ -868,9 +869,9 @@ static void coroutine_fn mirror_run(void *opaque)
                         cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
         }
         trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
-        block_job_sleep_ns(&s->common, delay_ns);
-        if (block_job_is_cancelled(&s->common) &&
-            (!s->synced || s->common.force))
+        job_sleep_ns(&s->common.job, delay_ns);
+        if (job_is_cancelled(&s->common.job) &&
+            (!s->synced || s->common.job.force_cancel))
         {
             break;
         }
@@ -883,8 +884,8 @@ immediate_exit:
          * or it was cancelled prematurely so that we do not guarantee that
          * the target is a copy of the source.
          */
-        assert(ret < 0 || ((s->common.force || !s->synced) &&
-               block_job_is_cancelled(&s->common)));
+        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
+               job_is_cancelled(&s->common.job)));
         assert(need_drain);
         mirror_wait_for_all_io(s);
     }
@@ -901,12 +902,12 @@ immediate_exit:
     if (need_drain) {
         bdrv_drained_begin(bs);
     }
-    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
+    job_defer_to_main_loop(&s->common.job, mirror_exit, data);
 }
 
-static void mirror_complete(BlockJob *job, Error **errp)
+static void mirror_complete(Job *job, Error **errp)
 {
-    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
     BlockDriverState *target;
 
     target = blk_bs(s->target);
@@ -953,12 +954,12 @@ static void mirror_complete(BlockJob *job, Error **errp)
     }
 
     s->should_complete = true;
-    block_job_enter(&s->common);
+    job_enter(job);
 }
 
-static void mirror_pause(BlockJob *job)
+static void mirror_pause(Job *job)
 {
-    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
 
     mirror_wait_for_all_io(s);
 }
@@ -986,21 +987,31 @@ static void mirror_drain(BlockJob *job)
 }
 
 static const BlockJobDriver mirror_job_driver = {
-    .instance_size          = sizeof(MirrorBlockJob),
-    .job_type               = BLOCK_JOB_TYPE_MIRROR,
-    .start                  = mirror_run,
-    .complete               = mirror_complete,
-    .pause                  = mirror_pause,
+    .job_driver = {
+        .instance_size          = sizeof(MirrorBlockJob),
+        .job_type               = JOB_TYPE_MIRROR,
+        .free                   = block_job_free,
+        .user_resume            = block_job_user_resume,
+        .drain                  = block_job_drain,
+        .start                  = mirror_run,
+        .pause                  = mirror_pause,
+        .complete               = mirror_complete,
+    },
     .attached_aio_context   = mirror_attached_aio_context,
     .drain                  = mirror_drain,
 };
 
 static const BlockJobDriver commit_active_job_driver = {
-    .instance_size          = sizeof(MirrorBlockJob),
-    .job_type               = BLOCK_JOB_TYPE_COMMIT,
-    .start                  = mirror_run,
-    .complete               = mirror_complete,
-    .pause                  = mirror_pause,
+    .job_driver = {
+        .instance_size          = sizeof(MirrorBlockJob),
+        .job_type               = JOB_TYPE_COMMIT,
+        .free                   = block_job_free,
+        .user_resume            = block_job_user_resume,
+        .drain                  = block_job_drain,
+        .start                  = mirror_run,
+        .pause                  = mirror_pause,
+        .complete               = mirror_complete,
+    },
     .attached_aio_context   = mirror_attached_aio_context,
     .drain                  = mirror_drain,
 };
@@ -1237,7 +1248,7 @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs,
     }
 
     trace_mirror_start(bs, s, opaque);
-    block_job_start(&s->common);
+    job_start(&s->common.job);
     return;
 
 fail:
@@ -1248,7 +1259,7 @@ fail:
 
         g_free(s->replaces);
         blk_unref(s->target);
-        block_job_early_fail(&s->common);
+        job_early_fail(&s->common.job);
     }
 
     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
@@ -1275,7 +1286,7 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
     }
     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
     base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
-    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
+    mirror_start_job(job_id, bs, JOB_DEFAULT, target, replaces,
                      speed, granularity, buf_size, backing_mode,
                      on_source_error, on_target_error, unmap, NULL, NULL,
                      &mirror_job_driver, is_none_mode, base, false,
diff --git a/block/replication.c b/block/replication.c
index 48148b884a..826db7b304 100644
--- a/block/replication.c
+++ b/block/replication.c
@@ -145,7 +145,7 @@ static void replication_close(BlockDriverState *bs)
         replication_stop(s->rs, false, NULL);
     }
     if (s->stage == BLOCK_REPLICATION_FAILOVER) {
-        block_job_cancel_sync(s->active_disk->bs->job);
+        job_cancel_sync(&s->active_disk->bs->job->job);
     }
 
     if (s->mode == REPLICATION_MODE_SECONDARY) {
@@ -568,7 +568,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
         job = backup_job_create(NULL, s->secondary_disk->bs, s->hidden_disk->bs,
                                 0, MIRROR_SYNC_MODE_NONE, NULL, false,
                                 BLOCKDEV_ON_ERROR_REPORT,
-                                BLOCKDEV_ON_ERROR_REPORT, BLOCK_JOB_INTERNAL,
+                                BLOCKDEV_ON_ERROR_REPORT, JOB_INTERNAL,
                                 backup_job_completed, bs, NULL, &local_err);
         if (local_err) {
             error_propagate(errp, local_err);
@@ -576,7 +576,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
             aio_context_release(aio_context);
             return;
         }
-        block_job_start(job);
+        job_start(&job->job);
         break;
     default:
         aio_context_release(aio_context);
@@ -681,7 +681,7 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
          * disk, secondary disk in backup_job_completed().
          */
         if (s->secondary_disk->bs->job) {
-            block_job_cancel_sync(s->secondary_disk->bs->job);
+            job_cancel_sync(&s->secondary_disk->bs->job->job);
         }
 
         if (!failover) {
@@ -693,7 +693,7 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
 
         s->stage = BLOCK_REPLICATION_FAILOVER;
         commit_active_start(NULL, s->active_disk->bs, s->secondary_disk->bs,
-                            BLOCK_JOB_INTERNAL, 0, BLOCKDEV_ON_ERROR_REPORT,
+                            JOB_INTERNAL, 0, BLOCKDEV_ON_ERROR_REPORT,
                             NULL, replication_done, bs, true, errp);
         break;
     default:
diff --git a/block/sheepdog.c b/block/sheepdog.c
index 4237132419..2a5bc0a59a 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -1859,9 +1859,7 @@ out:
         error_setg_errno(errp, -ret, "Can't pre-allocate");
     }
 out_with_err_set:
-    if (blk) {
-        blk_unref(blk);
-    }
+    blk_unref(blk);
     g_free(buf);
 
     return ret;
diff --git a/block/stream.c b/block/stream.c
index df9660d2fc..a5d6e0cf8a 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -58,16 +58,16 @@ typedef struct {
     int ret;
 } StreamCompleteData;
 
-static void stream_complete(BlockJob *job, void *opaque)
+static void stream_complete(Job *job, void *opaque)
 {
-    StreamBlockJob *s = container_of(job, StreamBlockJob, common);
+    StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
+    BlockJob *bjob = &s->common;
     StreamCompleteData *data = opaque;
-    BlockDriverState *bs = blk_bs(job->blk);
+    BlockDriverState *bs = blk_bs(bjob->blk);
     BlockDriverState *base = s->base;
     Error *local_err = NULL;
 
-    if (!block_job_is_cancelled(&s->common) && bs->backing &&
-        data->ret == 0) {
+    if (!job_is_cancelled(job) && bs->backing && data->ret == 0) {
         const char *base_id = NULL, *base_fmt = NULL;
         if (base) {
             base_id = s->backing_file_str;
@@ -88,12 +88,12 @@ out:
     /* Reopen the image back in read-only mode if necessary */
     if (s->bs_flags != bdrv_get_flags(bs)) {
         /* Give up write permissions before making it read-only */
-        blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
+        blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
         bdrv_reopen(bs, s->bs_flags, NULL);
     }
 
     g_free(s->backing_file_str);
-    block_job_completed(&s->common, data->ret);
+    job_completed(job, data->ret);
     g_free(data);
 }
 
@@ -121,7 +121,7 @@ static void coroutine_fn stream_run(void *opaque)
         ret = len;
         goto out;
     }
-    block_job_progress_set_remaining(&s->common, len);
+    job_progress_set_remaining(&s->common.job, len);
 
     buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE);
 
@@ -140,8 +140,8 @@ static void coroutine_fn stream_run(void *opaque)
         /* Note that even when no rate limit is applied we need to yield
          * with no pending I/O here so that bdrv_drain_all() returns.
          */
-        block_job_sleep_ns(&s->common, delay_ns);
-        if (block_job_is_cancelled(&s->common)) {
+        job_sleep_ns(&s->common.job, delay_ns);
+        if (job_is_cancelled(&s->common.job)) {
             break;
         }
 
@@ -184,7 +184,7 @@ static void coroutine_fn stream_run(void *opaque)
         ret = 0;
 
         /* Publish progress */
-        block_job_progress_update(&s->common, n);
+        job_progress_update(&s->common.job, n);
         if (copy) {
             delay_ns = block_job_ratelimit_get_delay(&s->common, n);
         } else {
@@ -205,13 +205,18 @@ out:
     /* Modify backing chain and close BDSes in main loop */
     data = g_malloc(sizeof(*data));
     data->ret = ret;
-    block_job_defer_to_main_loop(&s->common, stream_complete, data);
+    job_defer_to_main_loop(&s->common.job, stream_complete, data);
 }
 
 static const BlockJobDriver stream_job_driver = {
-    .instance_size = sizeof(StreamBlockJob),
-    .job_type      = BLOCK_JOB_TYPE_STREAM,
-    .start         = stream_run,
+    .job_driver = {
+        .instance_size = sizeof(StreamBlockJob),
+        .job_type      = JOB_TYPE_STREAM,
+        .free          = block_job_free,
+        .start         = stream_run,
+        .user_resume   = block_job_user_resume,
+        .drain         = block_job_drain,
+    },
 };
 
 void stream_start(const char *job_id, BlockDriverState *bs,
@@ -238,7 +243,7 @@ void stream_start(const char *job_id, BlockDriverState *bs,
                          BLK_PERM_GRAPH_MOD,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                          BLK_PERM_WRITE,
-                         speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp);
+                         speed, JOB_DEFAULT, NULL, NULL, errp);
     if (!s) {
         goto fail;
     }
@@ -259,7 +264,7 @@ void stream_start(const char *job_id, BlockDriverState *bs,
 
     s->on_error = on_error;
     trace_stream_start(bs, base, s);
-    block_job_start(&s->common);
+    job_start(&s->common.job);
     return;
 
 fail:
diff --git a/block/trace-events b/block/trace-events
index f8c50b4063..2d59b53fd3 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -4,11 +4,6 @@
 bdrv_open_common(void *bs, const char *filename, int flags, const char *format_name) "bs %p filename \"%s\" flags 0x%x format_name \"%s\""
 bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d"
 
-# blockjob.c
-block_job_completed(void *job, int ret, int jret) "job %p ret %d corrected ret %d"
-block_job_state_transition(void *job,  int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)"
-block_job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)"
-
 # block/block-backend.c
 blk_co_preadv(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x"
 blk_co_pwritev(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x"
diff --git a/blockdev.c b/blockdev.c
index 3808b1fc00..8de95be8f4 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -150,7 +150,7 @@ void blockdev_mark_auto_del(BlockBackend *blk)
         aio_context_acquire(aio_context);
 
         if (bs->job) {
-            block_job_cancel(bs->job, false);
+            job_cancel(&bs->job->job, false);
         }
 
         aio_context_release(aio_context);
@@ -1446,7 +1446,7 @@ typedef struct BlkActionOps {
 struct BlkActionState {
     TransactionAction *action;
     const BlkActionOps *ops;
-    BlockJobTxn *block_job_txn;
+    JobTxn *block_job_txn;
     TransactionProperties *txn_props;
     QSIMPLEQ_ENTRY(BlkActionState) entry;
 };
@@ -1864,7 +1864,7 @@ typedef struct DriveBackupState {
     BlockJob *job;
 } DriveBackupState;
 
-static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
+static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
                             Error **errp);
 
 static void drive_backup_prepare(BlkActionState *common, Error **errp)
@@ -1910,7 +1910,7 @@ static void drive_backup_commit(BlkActionState *common)
     aio_context_acquire(aio_context);
 
     assert(state->job);
-    block_job_start(state->job);
+    job_start(&state->job->job);
 
     aio_context_release(aio_context);
 }
@@ -1925,7 +1925,7 @@ static void drive_backup_abort(BlkActionState *common)
         aio_context = bdrv_get_aio_context(state->bs);
         aio_context_acquire(aio_context);
 
-        block_job_cancel_sync(state->job);
+        job_cancel_sync(&state->job->job);
 
         aio_context_release(aio_context);
     }
@@ -1954,7 +1954,7 @@ typedef struct BlockdevBackupState {
     BlockJob *job;
 } BlockdevBackupState;
 
-static BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
+static BlockJob *do_blockdev_backup(BlockdevBackup *backup, JobTxn *txn,
                                     Error **errp);
 
 static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
@@ -2008,7 +2008,7 @@ static void blockdev_backup_commit(BlkActionState *common)
     aio_context_acquire(aio_context);
 
     assert(state->job);
-    block_job_start(state->job);
+    job_start(&state->job->job);
 
     aio_context_release(aio_context);
 }
@@ -2023,7 +2023,7 @@ static void blockdev_backup_abort(BlkActionState *common)
         aio_context = bdrv_get_aio_context(state->bs);
         aio_context_acquire(aio_context);
 
-        block_job_cancel_sync(state->job);
+        job_cancel_sync(&state->job->job);
 
         aio_context_release(aio_context);
     }
@@ -2243,7 +2243,7 @@ void qmp_transaction(TransactionActionList *dev_list,
                      Error **errp)
 {
     TransactionActionList *dev_entry = dev_list;
-    BlockJobTxn *block_job_txn = NULL;
+    JobTxn *block_job_txn = NULL;
     BlkActionState *state, *next;
     Error *local_err = NULL;
 
@@ -2251,11 +2251,11 @@ void qmp_transaction(TransactionActionList *dev_list,
     QSIMPLEQ_INIT(&snap_bdrv_states);
 
     /* Does this transaction get canceled as a group on failure?
-     * If not, we don't really need to make a BlockJobTxn.
+     * If not, we don't really need to make a JobTxn.
      */
     props = get_transaction_properties(props);
     if (props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
-        block_job_txn = block_job_txn_new();
+        block_job_txn = job_txn_new();
     }
 
     /* drain all i/o before any operations */
@@ -2314,7 +2314,7 @@ exit:
     if (!has_props) {
         qapi_free_TransactionProperties(props);
     }
-    block_job_txn_unref(block_job_txn);
+    job_txn_unref(block_job_txn);
 }
 
 void qmp_eject(bool has_device, const char *device,
@@ -3244,7 +3244,7 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
             goto out;
         }
         commit_active_start(has_job_id ? job_id : NULL, bs, base_bs,
-                            BLOCK_JOB_DEFAULT, speed, on_error,
+                            JOB_DEFAULT, speed, on_error,
                             filter_node_name, NULL, NULL, false, &local_err);
     } else {
         BlockDriverState *overlay_bs = bdrv_find_overlay(bs, top_bs);
@@ -3264,7 +3264,7 @@ out:
     aio_context_release(aio_context);
 }
 
-static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
+static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
                                  Error **errp)
 {
     BlockDriverState *bs;
@@ -3275,7 +3275,7 @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
     AioContext *aio_context;
     QDict *options = NULL;
     Error *local_err = NULL;
-    int flags, job_flags = BLOCK_JOB_DEFAULT;
+    int flags, job_flags = JOB_DEFAULT;
     int64_t size;
     bool set_backing_hd = false;
 
@@ -3398,10 +3398,10 @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
         }
     }
     if (!backup->auto_finalize) {
-        job_flags |= BLOCK_JOB_MANUAL_FINALIZE;
+        job_flags |= JOB_MANUAL_FINALIZE;
     }
     if (!backup->auto_dismiss) {
-        job_flags |= BLOCK_JOB_MANUAL_DISMISS;
+        job_flags |= JOB_MANUAL_DISMISS;
     }
 
     job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
@@ -3425,7 +3425,7 @@ void qmp_drive_backup(DriveBackup *arg, Error **errp)
     BlockJob *job;
     job = do_drive_backup(arg, NULL, errp);
     if (job) {
-        block_job_start(job);
+        job_start(&job->job);
     }
 }
 
@@ -3434,7 +3434,7 @@ BlockDeviceInfoList *qmp_query_named_block_nodes(Error **errp)
     return bdrv_named_nodes_list(errp);
 }
 
-BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
+BlockJob *do_blockdev_backup(BlockdevBackup *backup, JobTxn *txn,
                              Error **errp)
 {
     BlockDriverState *bs;
@@ -3442,7 +3442,7 @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
     Error *local_err = NULL;
     AioContext *aio_context;
     BlockJob *job = NULL;
-    int job_flags = BLOCK_JOB_DEFAULT;
+    int job_flags = JOB_DEFAULT;
 
     if (!backup->has_speed) {
         backup->speed = 0;
@@ -3491,10 +3491,10 @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
         }
     }
     if (!backup->auto_finalize) {
-        job_flags |= BLOCK_JOB_MANUAL_FINALIZE;
+        job_flags |= JOB_MANUAL_FINALIZE;
     }
     if (!backup->auto_dismiss) {
-        job_flags |= BLOCK_JOB_MANUAL_DISMISS;
+        job_flags |= JOB_MANUAL_DISMISS;
     }
     job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
                             backup->sync, NULL, backup->compress,
@@ -3513,7 +3513,7 @@ void qmp_blockdev_backup(BlockdevBackup *arg, Error **errp)
     BlockJob *job;
     job = do_blockdev_backup(arg, NULL, errp);
     if (job) {
-        block_job_start(job);
+        job_start(&job->job);
     }
 }
 
@@ -3844,14 +3844,14 @@ void qmp_block_job_cancel(const char *device,
         force = false;
     }
 
-    if (block_job_user_paused(job) && !force) {
+    if (job_user_paused(&job->job) && !force) {
         error_setg(errp, "The block job for device '%s' is currently paused",
                    device);
         goto out;
     }
 
     trace_qmp_block_job_cancel(job);
-    block_job_user_cancel(job, force, errp);
+    job_user_cancel(&job->job, force, errp);
 out:
     aio_context_release(aio_context);
 }
@@ -3866,7 +3866,7 @@ void qmp_block_job_pause(const char *device, Error **errp)
     }
 
     trace_qmp_block_job_pause(job);
-    block_job_user_pause(job, errp);
+    job_user_pause(&job->job, errp);
     aio_context_release(aio_context);
 }
 
@@ -3880,7 +3880,7 @@ void qmp_block_job_resume(const char *device, Error **errp)
     }
 
     trace_qmp_block_job_resume(job);
-    block_job_user_resume(job, errp);
+    job_user_resume(&job->job, errp);
     aio_context_release(aio_context);
 }
 
@@ -3894,7 +3894,7 @@ void qmp_block_job_complete(const char *device, Error **errp)
     }
 
     trace_qmp_block_job_complete(job);
-    block_job_complete(job, errp);
+    job_complete(&job->job, errp);
     aio_context_release(aio_context);
 }
 
@@ -3908,21 +3908,23 @@ void qmp_block_job_finalize(const char *id, Error **errp)
     }
 
     trace_qmp_block_job_finalize(job);
-    block_job_finalize(job, errp);
+    job_finalize(&job->job, errp);
     aio_context_release(aio_context);
 }
 
 void qmp_block_job_dismiss(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(id, &aio_context, errp);
+    BlockJob *bjob = find_block_job(id, &aio_context, errp);
+    Job *job;
 
-    if (!job) {
+    if (!bjob) {
         return;
     }
 
-    trace_qmp_block_job_dismiss(job);
-    block_job_dismiss(&job, errp);
+    trace_qmp_block_job_dismiss(bjob);
+    job = &bjob->job;
+    job_dismiss(&job, errp);
     aio_context_release(aio_context);
 }
 
diff --git a/blockjob.c b/blockjob.c
index 112672a68b..0306533a2e 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -34,103 +34,8 @@
 #include "qapi/qapi-events-block-core.h"
 #include "qapi/qmp/qerror.h"
 #include "qemu/coroutine.h"
-#include "qemu/id.h"
 #include "qemu/timer.h"
 
-/* Right now, this mutex is only needed to synchronize accesses to job->busy
- * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
- * block_job_enter. */
-static QemuMutex block_job_mutex;
-
-/* BlockJob State Transition Table */
-bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = {
-                                          /* U, C, R, P, Y, S, W, D, X, E, N */
-    /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-    /* C: */ [BLOCK_JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1},
-    /* R: */ [BLOCK_JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0},
-    /* P: */ [BLOCK_JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
-    /* Y: */ [BLOCK_JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0},
-    /* S: */ [BLOCK_JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
-    /* W: */ [BLOCK_JOB_STATUS_WAITING]   = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0},
-    /* D: */ [BLOCK_JOB_STATUS_PENDING]   = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
-    /* X: */ [BLOCK_JOB_STATUS_ABORTING]  = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
-    /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
-    /* N: */ [BLOCK_JOB_STATUS_NULL]      = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-};
-
-bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = {
-                                          /* U, C, R, P, Y, S, W, D, X, E, N */
-    [BLOCK_JOB_VERB_CANCEL]               = {0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0},
-    [BLOCK_JOB_VERB_PAUSE]                = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
-    [BLOCK_JOB_VERB_RESUME]               = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
-    [BLOCK_JOB_VERB_SET_SPEED]            = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
-    [BLOCK_JOB_VERB_COMPLETE]             = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
-    [BLOCK_JOB_VERB_FINALIZE]             = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
-    [BLOCK_JOB_VERB_DISMISS]              = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
-};
-
-static void block_job_state_transition(BlockJob *job, BlockJobStatus s1)
-{
-    BlockJobStatus s0 = job->status;
-    assert(s1 >= 0 && s1 <= BLOCK_JOB_STATUS__MAX);
-    trace_block_job_state_transition(job, job->ret, BlockJobSTT[s0][s1] ?
-                                     "allowed" : "disallowed",
-                                     BlockJobStatus_str(s0),
-                                     BlockJobStatus_str(s1));
-    assert(BlockJobSTT[s0][s1]);
-    job->status = s1;
-}
-
-static int block_job_apply_verb(BlockJob *job, BlockJobVerb bv, Error **errp)
-{
-    assert(bv >= 0 && bv <= BLOCK_JOB_VERB__MAX);
-    trace_block_job_apply_verb(job, BlockJobStatus_str(job->status),
-                               BlockJobVerb_str(bv),
-                               BlockJobVerbTable[bv][job->status] ?
-                               "allowed" : "prohibited");
-    if (BlockJobVerbTable[bv][job->status]) {
-        return 0;
-    }
-    error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'",
-               job->id, BlockJobStatus_str(job->status), BlockJobVerb_str(bv));
-    return -EPERM;
-}
-
-static void block_job_lock(void)
-{
-    qemu_mutex_lock(&block_job_mutex);
-}
-
-static void block_job_unlock(void)
-{
-    qemu_mutex_unlock(&block_job_mutex);
-}
-
-static void __attribute__((__constructor__)) block_job_init(void)
-{
-    qemu_mutex_init(&block_job_mutex);
-}
-
-static void block_job_event_cancelled(BlockJob *job);
-static void block_job_event_completed(BlockJob *job, const char *msg);
-static int block_job_event_pending(BlockJob *job);
-static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));
-
-/* Transactional group of block jobs */
-struct BlockJobTxn {
-
-    /* Is this txn being cancelled? */
-    bool aborting;
-
-    /* List of jobs */
-    QLIST_HEAD(, BlockJob) jobs;
-
-    /* Reference count */
-    int refcnt;
-};
-
-static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);
-
 /*
  * The block job API is composed of two categories of functions.
  *
@@ -146,147 +51,78 @@ static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);
  * blockjob_int.h.
  */
 
-BlockJob *block_job_next(BlockJob *job)
+static bool is_block_job(Job *job)
 {
-    if (!job) {
-        return QLIST_FIRST(&block_jobs);
-    }
-    return QLIST_NEXT(job, job_list);
-}
-
-BlockJob *block_job_get(const char *id)
-{
-    BlockJob *job;
-
-    QLIST_FOREACH(job, &block_jobs, job_list) {
-        if (job->id && !strcmp(id, job->id)) {
-            return job;
-        }
-    }
-
-    return NULL;
+    return job_type(job) == JOB_TYPE_BACKUP ||
+           job_type(job) == JOB_TYPE_COMMIT ||
+           job_type(job) == JOB_TYPE_MIRROR ||
+           job_type(job) == JOB_TYPE_STREAM;
 }
 
-BlockJobTxn *block_job_txn_new(void)
+BlockJob *block_job_next(BlockJob *bjob)
 {
-    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
-    QLIST_INIT(&txn->jobs);
-    txn->refcnt = 1;
-    return txn;
-}
-
-static void block_job_txn_ref(BlockJobTxn *txn)
-{
-    txn->refcnt++;
-}
-
-void block_job_txn_unref(BlockJobTxn *txn)
-{
-    if (txn && --txn->refcnt == 0) {
-        g_free(txn);
-    }
-}
-
-void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
-{
-    if (!txn) {
-        return;
-    }
+    Job *job = bjob ? &bjob->job : NULL;
 
-    assert(!job->txn);
-    job->txn = txn;
+    do {
+        job = job_next(job);
+    } while (job && !is_block_job(job));
 
-    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
-    block_job_txn_ref(txn);
+    return job ? container_of(job, BlockJob, job) : NULL;
 }
 
-static void block_job_txn_del_job(BlockJob *job)
-{
-    if (job->txn) {
-        QLIST_REMOVE(job, txn_list);
-        block_job_txn_unref(job->txn);
-        job->txn = NULL;
-    }
-}
-
-/* Assumes the block_job_mutex is held */
-static bool block_job_timer_pending(BlockJob *job)
-{
-    return timer_pending(&job->sleep_timer);
-}
-
-/* Assumes the block_job_mutex is held */
-static bool block_job_timer_not_pending(BlockJob *job)
-{
-    return !block_job_timer_pending(job);
-}
-
-static void block_job_pause(BlockJob *job)
+BlockJob *block_job_get(const char *id)
 {
-    job->pause_count++;
-}
+    Job *job = job_get(id);
 
-static void block_job_resume(BlockJob *job)
-{
-    assert(job->pause_count > 0);
-    job->pause_count--;
-    if (job->pause_count) {
-        return;
+    if (job && is_block_job(job)) {
+        return container_of(job, BlockJob, job);
+    } else {
+        return NULL;
     }
-
-    /* kick only if no timer is pending */
-    block_job_enter_cond(job, block_job_timer_not_pending);
-}
-
-void block_job_ref(BlockJob *job)
-{
-    ++job->refcnt;
 }
 
 static void block_job_attached_aio_context(AioContext *new_context,
                                            void *opaque);
 static void block_job_detach_aio_context(void *opaque);
 
-void block_job_unref(BlockJob *job)
+void block_job_free(Job *job)
 {
-    if (--job->refcnt == 0) {
-        assert(job->status == BLOCK_JOB_STATUS_NULL);
-        assert(!job->txn);
-        BlockDriverState *bs = blk_bs(job->blk);
-        QLIST_REMOVE(job, job_list);
-        bs->job = NULL;
-        block_job_remove_all_bdrv(job);
-        blk_remove_aio_context_notifier(job->blk,
-                                        block_job_attached_aio_context,
-                                        block_job_detach_aio_context, job);
-        blk_unref(job->blk);
-        error_free(job->blocker);
-        g_free(job->id);
-        assert(!timer_pending(&job->sleep_timer));
-        g_free(job);
-    }
+    BlockJob *bjob = container_of(job, BlockJob, job);
+    BlockDriverState *bs = blk_bs(bjob->blk);
+
+    bs->job = NULL;
+    block_job_remove_all_bdrv(bjob);
+    blk_remove_aio_context_notifier(bjob->blk,
+                                    block_job_attached_aio_context,
+                                    block_job_detach_aio_context, bjob);
+    blk_unref(bjob->blk);
+    error_free(bjob->blocker);
 }
 
 static void block_job_attached_aio_context(AioContext *new_context,
                                            void *opaque)
 {
     BlockJob *job = opaque;
+    const JobDriver *drv = job->job.driver;
+    BlockJobDriver *bjdrv = container_of(drv, BlockJobDriver, job_driver);
 
-    if (job->driver->attached_aio_context) {
-        job->driver->attached_aio_context(job, new_context);
+    job->job.aio_context = new_context;
+    if (bjdrv->attached_aio_context) {
+        bjdrv->attached_aio_context(job, new_context);
     }
 
-    block_job_resume(job);
+    job_resume(&job->job);
 }
 
-static void block_job_drain(BlockJob *job)
+void block_job_drain(Job *job)
 {
-    /* If job is !job->busy this kicks it into the next pause point. */
-    block_job_enter(job);
+    BlockJob *bjob = container_of(job, BlockJob, job);
+    const JobDriver *drv = job->driver;
+    BlockJobDriver *bjdrv = container_of(drv, BlockJobDriver, job_driver);
 
-    blk_drain(job->blk);
-    if (job->driver->drain) {
-        job->driver->drain(job);
+    blk_drain(bjob->blk);
+    if (bjdrv->drain) {
+        bjdrv->drain(bjob);
     }
 }
 
@@ -295,35 +131,34 @@ static void block_job_detach_aio_context(void *opaque)
     BlockJob *job = opaque;
 
     /* In case the job terminates during aio_poll()... */
-    block_job_ref(job);
+    job_ref(&job->job);
 
-    block_job_pause(job);
+    job_pause(&job->job);
 
-    while (!job->paused && !job->completed) {
-        block_job_drain(job);
+    while (!job->job.paused && !job_is_completed(&job->job)) {
+        job_drain(&job->job);
     }
 
-    block_job_unref(job);
+    job->job.aio_context = NULL;
+    job_unref(&job->job);
 }
 
 static char *child_job_get_parent_desc(BdrvChild *c)
 {
     BlockJob *job = c->opaque;
-    return g_strdup_printf("%s job '%s'",
-                           BlockJobType_str(job->driver->job_type),
-                           job->id);
+    return g_strdup_printf("%s job '%s'", job_type_str(&job->job), job->job.id);
 }
 
 static void child_job_drained_begin(BdrvChild *c)
 {
     BlockJob *job = c->opaque;
-    block_job_pause(job);
+    job_pause(&job->job);
 }
 
 static void child_job_drained_end(BdrvChild *c)
 {
     BlockJob *job = c->opaque;
-    block_job_resume(job);
+    job_resume(&job->job);
 }
 
 static const BdrvChildRole child_job = {
@@ -365,316 +200,25 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
 
 bool block_job_is_internal(BlockJob *job)
 {
-    return (job->id == NULL);
-}
-
-static bool block_job_started(BlockJob *job)
-{
-    return job->co;
+    return (job->job.id == NULL);
 }
 
 const BlockJobDriver *block_job_driver(BlockJob *job)
 {
-    return job->driver;
-}
-
-/**
- * All jobs must allow a pause point before entering their job proper. This
- * ensures that jobs can be paused prior to being started, then resumed later.
- */
-static void coroutine_fn block_job_co_entry(void *opaque)
-{
-    BlockJob *job = opaque;
-
-    assert(job && job->driver && job->driver->start);
-    block_job_pause_point(job);
-    job->driver->start(job);
-}
-
-static void block_job_sleep_timer_cb(void *opaque)
-{
-    BlockJob *job = opaque;
-
-    block_job_enter(job);
-}
-
-void block_job_start(BlockJob *job)
-{
-    assert(job && !block_job_started(job) && job->paused &&
-           job->driver && job->driver->start);
-    job->co = qemu_coroutine_create(block_job_co_entry, job);
-    job->pause_count--;
-    job->busy = true;
-    job->paused = false;
-    block_job_state_transition(job, BLOCK_JOB_STATUS_RUNNING);
-    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
-}
-
-static void block_job_decommission(BlockJob *job)
-{
-    assert(job);
-    job->completed = true;
-    job->busy = false;
-    job->paused = false;
-    job->deferred_to_main_loop = true;
-    block_job_txn_del_job(job);
-    block_job_state_transition(job, BLOCK_JOB_STATUS_NULL);
-    block_job_unref(job);
+    return container_of(job->job.driver, BlockJobDriver, job_driver);
 }
 
-static void block_job_do_dismiss(BlockJob *job)
+/* Assumes the job_mutex is held */
+static bool job_timer_pending(Job *job)
 {
-    block_job_decommission(job);
-}
-
-static void block_job_conclude(BlockJob *job)
-{
-    block_job_state_transition(job, BLOCK_JOB_STATUS_CONCLUDED);
-    if (job->auto_dismiss || !block_job_started(job)) {
-        block_job_do_dismiss(job);
-    }
-}
-
-static void block_job_update_rc(BlockJob *job)
-{
-    if (!job->ret && block_job_is_cancelled(job)) {
-        job->ret = -ECANCELED;
-    }
-    if (job->ret) {
-        block_job_state_transition(job, BLOCK_JOB_STATUS_ABORTING);
-    }
-}
-
-static int block_job_prepare(BlockJob *job)
-{
-    if (job->ret == 0 && job->driver->prepare) {
-        job->ret = job->driver->prepare(job);
-    }
-    return job->ret;
-}
-
-static void block_job_commit(BlockJob *job)
-{
-    assert(!job->ret);
-    if (job->driver->commit) {
-        job->driver->commit(job);
-    }
-}
-
-static void block_job_abort(BlockJob *job)
-{
-    assert(job->ret);
-    if (job->driver->abort) {
-        job->driver->abort(job);
-    }
-}
-
-static void block_job_clean(BlockJob *job)
-{
-    if (job->driver->clean) {
-        job->driver->clean(job);
-    }
-}
-
-static int block_job_finalize_single(BlockJob *job)
-{
-    assert(job->completed);
-
-    /* Ensure abort is called for late-transactional failures */
-    block_job_update_rc(job);
-
-    if (!job->ret) {
-        block_job_commit(job);
-    } else {
-        block_job_abort(job);
-    }
-    block_job_clean(job);
-
-    if (job->cb) {
-        job->cb(job->opaque, job->ret);
-    }
-
-    /* Emit events only if we actually started */
-    if (block_job_started(job)) {
-        if (block_job_is_cancelled(job)) {
-            block_job_event_cancelled(job);
-        } else {
-            const char *msg = NULL;
-            if (job->ret < 0) {
-                msg = strerror(-job->ret);
-            }
-            block_job_event_completed(job, msg);
-        }
-    }
-
-    block_job_txn_del_job(job);
-    block_job_conclude(job);
-    return 0;
-}
-
-static void block_job_cancel_async(BlockJob *job, bool force)
-{
-    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
-        block_job_iostatus_reset(job);
-    }
-    if (job->user_paused) {
-        /* Do not call block_job_enter here, the caller will handle it.  */
-        job->user_paused = false;
-        job->pause_count--;
-    }
-    job->cancelled = true;
-    /* To prevent 'force == false' overriding a previous 'force == true' */
-    job->force |= force;
-}
-
-static int block_job_txn_apply(BlockJobTxn *txn, int fn(BlockJob *), bool lock)
-{
-    AioContext *ctx;
-    BlockJob *job, *next;
-    int rc = 0;
-
-    QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
-        if (lock) {
-            ctx = blk_get_aio_context(job->blk);
-            aio_context_acquire(ctx);
-        }
-        rc = fn(job);
-        if (lock) {
-            aio_context_release(ctx);
-        }
-        if (rc) {
-            break;
-        }
-    }
-    return rc;
-}
-
-static int block_job_finish_sync(BlockJob *job,
-                                 void (*finish)(BlockJob *, Error **errp),
-                                 Error **errp)
-{
-    Error *local_err = NULL;
-    int ret;
-
-    assert(blk_bs(job->blk)->job == job);
-
-    block_job_ref(job);
-
-    if (finish) {
-        finish(job, &local_err);
-    }
-    if (local_err) {
-        error_propagate(errp, local_err);
-        block_job_unref(job);
-        return -EBUSY;
-    }
-    /* block_job_drain calls block_job_enter, and it should be enough to
-     * induce progress until the job completes or moves to the main thread.
-    */
-    while (!job->deferred_to_main_loop && !job->completed) {
-        block_job_drain(job);
-    }
-    while (!job->completed) {
-        aio_poll(qemu_get_aio_context(), true);
-    }
-    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
-    block_job_unref(job);
-    return ret;
-}
-
-static void block_job_completed_txn_abort(BlockJob *job)
-{
-    AioContext *ctx;
-    BlockJobTxn *txn = job->txn;
-    BlockJob *other_job;
-
-    if (txn->aborting) {
-        /*
-         * We are cancelled by another job, which will handle everything.
-         */
-        return;
-    }
-    txn->aborting = true;
-    block_job_txn_ref(txn);
-
-    /* We are the first failed job. Cancel other jobs. */
-    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
-        ctx = blk_get_aio_context(other_job->blk);
-        aio_context_acquire(ctx);
-    }
-
-    /* Other jobs are effectively cancelled by us, set the status for
-     * them; this job, however, may or may not be cancelled, depending
-     * on the caller, so leave it. */
-    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
-        if (other_job != job) {
-            block_job_cancel_async(other_job, false);
-        }
-    }
-    while (!QLIST_EMPTY(&txn->jobs)) {
-        other_job = QLIST_FIRST(&txn->jobs);
-        ctx = blk_get_aio_context(other_job->blk);
-        if (!other_job->completed) {
-            assert(other_job->cancelled);
-            block_job_finish_sync(other_job, NULL, NULL);
-        }
-        block_job_finalize_single(other_job);
-        aio_context_release(ctx);
-    }
-
-    block_job_txn_unref(txn);
-}
-
-static int block_job_needs_finalize(BlockJob *job)
-{
-    return !job->auto_finalize;
-}
-
-static void block_job_do_finalize(BlockJob *job)
-{
-    int rc;
-    assert(job && job->txn);
-
-    /* prepare the transaction to complete */
-    rc = block_job_txn_apply(job->txn, block_job_prepare, true);
-    if (rc) {
-        block_job_completed_txn_abort(job);
-    } else {
-        block_job_txn_apply(job->txn, block_job_finalize_single, true);
-    }
-}
-
-static void block_job_completed_txn_success(BlockJob *job)
-{
-    BlockJobTxn *txn = job->txn;
-    BlockJob *other_job;
-
-    block_job_state_transition(job, BLOCK_JOB_STATUS_WAITING);
-
-    /*
-     * Successful completion, see if there are other running jobs in this
-     * txn.
-     */
-    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
-        if (!other_job->completed) {
-            return;
-        }
-        assert(other_job->ret == 0);
-    }
-
-    block_job_txn_apply(txn, block_job_event_pending, false);
-
-    /* If no jobs need manual finalization, automatically do so */
-    if (block_job_txn_apply(txn, block_job_needs_finalize, false) == 0) {
-        block_job_do_finalize(job);
-    }
+    return timer_pending(&job->sleep_timer);
 }
 
 void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
 {
     int64_t old_speed = job->speed;
 
-    if (block_job_apply_verb(job, BLOCK_JOB_VERB_SET_SPEED, errp)) {
+    if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp)) {
         return;
     }
     if (speed < 0) {
@@ -690,7 +234,7 @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
     }
 
     /* kick only if a timer is pending */
-    block_job_enter_cond(job, block_job_timer_pending);
+    job_enter_cond(&job->job, job_timer_pending);
 }
 
 int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
@@ -702,142 +246,6 @@ int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
     return ratelimit_calculate_delay(&job->limit, n);
 }
 
-void block_job_complete(BlockJob *job, Error **errp)
-{
-    /* Should not be reachable via external interface for internal jobs */
-    assert(job->id);
-    if (block_job_apply_verb(job, BLOCK_JOB_VERB_COMPLETE, errp)) {
-        return;
-    }
-    if (job->pause_count || job->cancelled || !job->driver->complete) {
-        error_setg(errp, "The active block job '%s' cannot be completed",
-                   job->id);
-        return;
-    }
-
-    job->driver->complete(job, errp);
-}
-
-void block_job_finalize(BlockJob *job, Error **errp)
-{
-    assert(job && job->id);
-    if (block_job_apply_verb(job, BLOCK_JOB_VERB_FINALIZE, errp)) {
-        return;
-    }
-    block_job_do_finalize(job);
-}
-
-void block_job_dismiss(BlockJob **jobptr, Error **errp)
-{
-    BlockJob *job = *jobptr;
-    /* similarly to _complete, this is QMP-interface only. */
-    assert(job->id);
-    if (block_job_apply_verb(job, BLOCK_JOB_VERB_DISMISS, errp)) {
-        return;
-    }
-
-    block_job_do_dismiss(job);
-    *jobptr = NULL;
-}
-
-void block_job_user_pause(BlockJob *job, Error **errp)
-{
-    if (block_job_apply_verb(job, BLOCK_JOB_VERB_PAUSE, errp)) {
-        return;
-    }
-    if (job->user_paused) {
-        error_setg(errp, "Job is already paused");
-        return;
-    }
-    job->user_paused = true;
-    block_job_pause(job);
-}
-
-bool block_job_user_paused(BlockJob *job)
-{
-    return job->user_paused;
-}
-
-void block_job_user_resume(BlockJob *job, Error **errp)
-{
-    assert(job);
-    if (!job->user_paused || job->pause_count <= 0) {
-        error_setg(errp, "Can't resume a job that was not paused");
-        return;
-    }
-    if (block_job_apply_verb(job, BLOCK_JOB_VERB_RESUME, errp)) {
-        return;
-    }
-    block_job_iostatus_reset(job);
-    job->user_paused = false;
-    block_job_resume(job);
-}
-
-void block_job_cancel(BlockJob *job, bool force)
-{
-    if (job->status == BLOCK_JOB_STATUS_CONCLUDED) {
-        block_job_do_dismiss(job);
-        return;
-    }
-    block_job_cancel_async(job, force);
-    if (!block_job_started(job)) {
-        block_job_completed(job, -ECANCELED);
-    } else if (job->deferred_to_main_loop) {
-        block_job_completed_txn_abort(job);
-    } else {
-        block_job_enter(job);
-    }
-}
-
-void block_job_user_cancel(BlockJob *job, bool force, Error **errp)
-{
-    if (block_job_apply_verb(job, BLOCK_JOB_VERB_CANCEL, errp)) {
-        return;
-    }
-    block_job_cancel(job, force);
-}
-
-/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
- * used with block_job_finish_sync() without the need for (rather nasty)
- * function pointer casts there. */
-static void block_job_cancel_err(BlockJob *job, Error **errp)
-{
-    block_job_cancel(job, false);
-}
-
-int block_job_cancel_sync(BlockJob *job)
-{
-    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
-}
-
-void block_job_cancel_sync_all(void)
-{
-    BlockJob *job;
-    AioContext *aio_context;
-
-    while ((job = QLIST_FIRST(&block_jobs))) {
-        aio_context = blk_get_aio_context(job->blk);
-        aio_context_acquire(aio_context);
-        block_job_cancel_sync(job);
-        aio_context_release(aio_context);
-    }
-}
-
-int block_job_complete_sync(BlockJob *job, Error **errp)
-{
-    return block_job_finish_sync(job, &block_job_complete, errp);
-}
-
-void block_job_progress_update(BlockJob *job, uint64_t done)
-{
-    job->offset += done;
-}
-
-void block_job_progress_set_remaining(BlockJob *job, uint64_t remaining)
-{
-    job->len = job->offset + remaining;
-}
-
 BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
 {
     BlockJobInfo *info;
@@ -847,20 +255,20 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
         return NULL;
     }
     info = g_new0(BlockJobInfo, 1);
-    info->type      = g_strdup(BlockJobType_str(job->driver->job_type));
-    info->device    = g_strdup(job->id);
-    info->len       = job->len;
-    info->busy      = atomic_read(&job->busy);
-    info->paused    = job->pause_count > 0;
-    info->offset    = job->offset;
+    info->type      = g_strdup(job_type_str(&job->job));
+    info->device    = g_strdup(job->job.id);
+    info->busy      = atomic_read(&job->job.busy);
+    info->paused    = job->job.pause_count > 0;
+    info->offset    = job->job.progress_current;
+    info->len       = job->job.progress_total;
     info->speed     = job->speed;
     info->io_status = job->iostatus;
-    info->ready     = job->ready;
-    info->status    = job->status;
-    info->auto_finalize = job->auto_finalize;
-    info->auto_dismiss  = job->auto_dismiss;
-    info->has_error = job->ret != 0;
-    info->error     = job->ret ? g_strdup(strerror(-job->ret)) : NULL;
+    info->ready     = job_is_ready(&job->job);
+    info->status    = job->job.status;
+    info->auto_finalize = job->job.auto_finalize;
+    info->auto_dismiss  = job->job.auto_dismiss;
+    info->has_error = job->job.ret != 0;
+    info->error     = job->job.ret ? g_strdup(strerror(-job->job.ret)) : NULL;
     return info;
 }
 
@@ -872,54 +280,81 @@ static void block_job_iostatus_set_err(BlockJob *job, int error)
     }
 }
 
-static void block_job_event_cancelled(BlockJob *job)
+static void block_job_event_cancelled(Notifier *n, void *opaque)
 {
+    BlockJob *job = opaque;
+
     if (block_job_is_internal(job)) {
         return;
     }
 
-    qapi_event_send_block_job_cancelled(job->driver->job_type,
-                                        job->id,
-                                        job->len,
-                                        job->offset,
+    qapi_event_send_block_job_cancelled(job_type(&job->job),
+                                        job->job.id,
+                                        job->job.progress_total,
+                                        job->job.progress_current,
                                         job->speed,
                                         &error_abort);
 }
 
-static void block_job_event_completed(BlockJob *job, const char *msg)
+static void block_job_event_completed(Notifier *n, void *opaque)
 {
+    BlockJob *job = opaque;
+    const char *msg = NULL;
+
     if (block_job_is_internal(job)) {
         return;
     }
 
-    qapi_event_send_block_job_completed(job->driver->job_type,
-                                        job->id,
-                                        job->len,
-                                        job->offset,
+    if (job->job.ret < 0) {
+        msg = strerror(-job->job.ret);
+    }
+
+    qapi_event_send_block_job_completed(job_type(&job->job),
+                                        job->job.id,
+                                        job->job.progress_total,
+                                        job->job.progress_current,
                                         job->speed,
                                         !!msg,
                                         msg,
                                         &error_abort);
 }
 
-static int block_job_event_pending(BlockJob *job)
+static void block_job_event_pending(Notifier *n, void *opaque)
 {
-    block_job_state_transition(job, BLOCK_JOB_STATUS_PENDING);
-    if (!job->auto_finalize && !block_job_is_internal(job)) {
-        qapi_event_send_block_job_pending(job->driver->job_type,
-                                          job->id,
-                                          &error_abort);
+    BlockJob *job = opaque;
+
+    if (block_job_is_internal(job)) {
+        return;
     }
-    return 0;
+
+    qapi_event_send_block_job_pending(job_type(&job->job),
+                                      job->job.id,
+                                      &error_abort);
 }
 
+static void block_job_event_ready(Notifier *n, void *opaque)
+{
+    BlockJob *job = opaque;
+
+    if (block_job_is_internal(job)) {
+        return;
+    }
+
+    qapi_event_send_block_job_ready(job_type(&job->job),
+                                    job->job.id,
+                                    job->job.progress_total,
+                                    job->job.progress_current,
+                                    job->speed, &error_abort);
+}
+
+
 /*
  * API for block job drivers and the block layer.  These functions are
  * declared in blockjob_int.h.
  */
 
 void *block_job_create(const char *job_id, const BlockJobDriver *driver,
-                       BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm,
+                       JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                        uint64_t shared_perm, int64_t speed, int flags,
                        BlockCompletionFunc *cb, void *opaque, Error **errp)
 {
@@ -932,29 +367,8 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
         return NULL;
     }
 
-    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
+    if (job_id == NULL && !(flags & JOB_INTERNAL)) {
         job_id = bdrv_get_device_name(bs);
-        if (!*job_id) {
-            error_setg(errp, "An explicit job ID is required for this node");
-            return NULL;
-        }
-    }
-
-    if (job_id) {
-        if (flags & BLOCK_JOB_INTERNAL) {
-            error_setg(errp, "Cannot specify job ID for internal block job");
-            return NULL;
-        }
-
-        if (!id_wellformed(job_id)) {
-            error_setg(errp, "Invalid job ID '%s'", job_id);
-            return NULL;
-        }
-
-        if (block_job_get(job_id)) {
-            error_setg(errp, "Job ID '%s' already in use", job_id);
-            return NULL;
-        }
     }
 
     blk = blk_new(perm, shared_perm);
@@ -964,32 +378,39 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
         return NULL;
     }
 
-    job = g_malloc0(driver->instance_size);
-    job->driver        = driver;
-    job->id            = g_strdup(job_id);
-    job->blk           = blk;
-    job->cb            = cb;
-    job->opaque        = opaque;
-    job->busy          = false;
-    job->paused        = true;
-    job->pause_count   = 1;
-    job->refcnt        = 1;
-    job->auto_finalize = !(flags & BLOCK_JOB_MANUAL_FINALIZE);
-    job->auto_dismiss  = !(flags & BLOCK_JOB_MANUAL_DISMISS);
-    block_job_state_transition(job, BLOCK_JOB_STATUS_CREATED);
-    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
-                   QEMU_CLOCK_REALTIME, SCALE_NS,
-                   block_job_sleep_timer_cb, job);
+    job = job_create(job_id, &driver->job_driver, txn, blk_get_aio_context(blk),
+                     flags, cb, opaque, errp);
+    if (job == NULL) {
+        blk_unref(blk);
+        return NULL;
+    }
+
+    assert(is_block_job(&job->job));
+    assert(job->job.driver->free == &block_job_free);
+    assert(job->job.driver->user_resume == &block_job_user_resume);
+    assert(job->job.driver->drain == &block_job_drain);
+
+    job->blk = blk;
+
+    job->finalize_cancelled_notifier.notify = block_job_event_cancelled;
+    job->finalize_completed_notifier.notify = block_job_event_completed;
+    job->pending_notifier.notify = block_job_event_pending;
+    job->ready_notifier.notify = block_job_event_ready;
+
+    notifier_list_add(&job->job.on_finalize_cancelled,
+                      &job->finalize_cancelled_notifier);
+    notifier_list_add(&job->job.on_finalize_completed,
+                      &job->finalize_completed_notifier);
+    notifier_list_add(&job->job.on_pending, &job->pending_notifier);
+    notifier_list_add(&job->job.on_ready, &job->ready_notifier);
 
     error_setg(&job->blocker, "block device is in use by block job: %s",
-               BlockJobType_str(driver->job_type));
+               job_type_str(&job->job));
     block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
     bs->job = job;
 
     bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
 
-    QLIST_INSERT_HEAD(&block_jobs, job, job_list);
-
     blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                  block_job_detach_aio_context, job);
 
@@ -999,198 +420,28 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
 
         block_job_set_speed(job, speed, &local_err);
         if (local_err) {
-            block_job_early_fail(job);
+            job_early_fail(&job->job);
             error_propagate(errp, local_err);
             return NULL;
         }
     }
 
-    /* Single jobs are modeled as single-job transactions for sake of
-     * consolidating the job management logic */
-    if (!txn) {
-        txn = block_job_txn_new();
-        block_job_txn_add_job(txn, job);
-        block_job_txn_unref(txn);
-    } else {
-        block_job_txn_add_job(txn, job);
-    }
-
     return job;
 }
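
The new block_job_create() above no longer raises QMP events from the completion paths directly; instead it hooks block_job_event_cancelled/completed/pending/ready into the generic job's notifier lists (on_finalize_cancelled, on_finalize_completed, on_pending, on_ready). Below is a minimal, self-contained sketch of that callback pattern; the Sketch* types are simplified stand-ins for QEMU's Notifier/NotifierList, not the real API.

    #include <stdio.h>

    /* Simplified stand-ins for QEMU's Notifier/NotifierList (illustrative only). */
    typedef struct SketchNotifier {
        void (*notify)(struct SketchNotifier *n, void *data);
        struct SketchNotifier *next;
    } SketchNotifier;

    typedef struct {
        SketchNotifier *head;
    } SketchNotifierList;

    static void sketch_list_add(SketchNotifierList *list, SketchNotifier *n)
    {
        n->next = list->head;
        list->head = n;
    }

    static void sketch_list_notify(SketchNotifierList *list, void *data)
    {
        for (SketchNotifier *n = list->head; n; n = n->next) {
            n->notify(n, data);
        }
    }

    /* One event callback, hooked the way block_job_event_ready is hooked above. */
    static void on_ready(SketchNotifier *n, void *opaque)
    {
        (void)n;
        printf("job '%s' is ready\n", (const char *)opaque);
    }

    int main(void)
    {
        SketchNotifierList ready_list = { NULL };
        SketchNotifier ready_notifier = { .notify = on_ready, .next = NULL };

        sketch_list_add(&ready_list, &ready_notifier);  /* done once at creation */
        sketch_list_notify(&ready_list, "mirror0");     /* fired on state change */
        return 0;
    }
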
 
-void block_job_early_fail(BlockJob *job)
-{
-    assert(job->status == BLOCK_JOB_STATUS_CREATED);
-    block_job_decommission(job);
-}
-
-void block_job_completed(BlockJob *job, int ret)
-{
-    assert(job && job->txn && !job->completed);
-    assert(blk_bs(job->blk)->job == job);
-    job->completed = true;
-    job->ret = ret;
-    block_job_update_rc(job);
-    trace_block_job_completed(job, ret, job->ret);
-    if (job->ret) {
-        block_job_completed_txn_abort(job);
-    } else {
-        block_job_completed_txn_success(job);
-    }
-}
-
-static bool block_job_should_pause(BlockJob *job)
-{
-    return job->pause_count > 0;
-}
-
-/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
- * Reentering the job coroutine with block_job_enter() before the timer has
- * expired is allowed and cancels the timer.
- *
- * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
- * called explicitly. */
-static void block_job_do_yield(BlockJob *job, uint64_t ns)
-{
-    block_job_lock();
-    if (ns != -1) {
-        timer_mod(&job->sleep_timer, ns);
-    }
-    job->busy = false;
-    block_job_unlock();
-    qemu_coroutine_yield();
-
-    /* Set by block_job_enter before re-entering the coroutine.  */
-    assert(job->busy);
-}
-
-void coroutine_fn block_job_pause_point(BlockJob *job)
-{
-    assert(job && block_job_started(job));
-
-    if (!block_job_should_pause(job)) {
-        return;
-    }
-    if (block_job_is_cancelled(job)) {
-        return;
-    }
-
-    if (job->driver->pause) {
-        job->driver->pause(job);
-    }
-
-    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
-        BlockJobStatus status = job->status;
-        block_job_state_transition(job, status == BLOCK_JOB_STATUS_READY ? \
-                                   BLOCK_JOB_STATUS_STANDBY :           \
-                                   BLOCK_JOB_STATUS_PAUSED);
-        job->paused = true;
-        block_job_do_yield(job, -1);
-        job->paused = false;
-        block_job_state_transition(job, status);
-    }
-
-    if (job->driver->resume) {
-        job->driver->resume(job);
-    }
-}
-
-/*
- * Conditionally enter a block_job pending a call to fn() while
- * under the block_job_lock critical section.
- */
-static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
-{
-    if (!block_job_started(job)) {
-        return;
-    }
-    if (job->deferred_to_main_loop) {
-        return;
-    }
-
-    block_job_lock();
-    if (job->busy) {
-        block_job_unlock();
-        return;
-    }
-
-    if (fn && !fn(job)) {
-        block_job_unlock();
-        return;
-    }
-
-    assert(!job->deferred_to_main_loop);
-    timer_del(&job->sleep_timer);
-    job->busy = true;
-    block_job_unlock();
-    aio_co_wake(job->co);
-}
-
-void block_job_enter(BlockJob *job)
-{
-    block_job_enter_cond(job, NULL);
-}
-
-bool block_job_is_cancelled(BlockJob *job)
-{
-    return job->cancelled;
-}
-
-void block_job_sleep_ns(BlockJob *job, int64_t ns)
-{
-    assert(job->busy);
-
-    /* Check cancellation *before* setting busy = false, too!  */
-    if (block_job_is_cancelled(job)) {
-        return;
-    }
-
-    if (!block_job_should_pause(job)) {
-        block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
-    }
-
-    block_job_pause_point(job);
-}
-
-void block_job_yield(BlockJob *job)
-{
-    assert(job->busy);
-
-    /* Check cancellation *before* setting busy = false, too!  */
-    if (block_job_is_cancelled(job)) {
-        return;
-    }
-
-    if (!block_job_should_pause(job)) {
-        block_job_do_yield(job, -1);
-    }
-
-    block_job_pause_point(job);
-}
-
 void block_job_iostatus_reset(BlockJob *job)
 {
     if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
         return;
     }
-    assert(job->user_paused && job->pause_count > 0);
+    assert(job->job.user_paused && job->job.pause_count > 0);
     job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
 }
 
-void block_job_event_ready(BlockJob *job)
+void block_job_user_resume(Job *job)
 {
-    block_job_state_transition(job, BLOCK_JOB_STATUS_READY);
-    job->ready = true;
-
-    if (block_job_is_internal(job)) {
-        return;
-    }
-
-    qapi_event_send_block_job_ready(job->driver->job_type,
-                                    job->id,
-                                    job->len,
-                                    job->offset,
-                                    job->speed, &error_abort);
+    BlockJob *bjob = container_of(job, BlockJob, job);
+    block_job_iostatus_reset(bjob);
 }
 
 BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
@@ -1217,63 +468,16 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
         abort();
     }
     if (!block_job_is_internal(job)) {
-        qapi_event_send_block_job_error(job->id,
+        qapi_event_send_block_job_error(job->job.id,
                                         is_read ? IO_OPERATION_TYPE_READ :
                                         IO_OPERATION_TYPE_WRITE,
                                         action, &error_abort);
     }
     if (action == BLOCK_ERROR_ACTION_STOP) {
-        block_job_pause(job);
+        job_pause(&job->job);
         /* make the pause user visible, which will be resumed from QMP. */
-        job->user_paused = true;
+        job->job.user_paused = true;
         block_job_iostatus_set_err(job, error);
     }
     return action;
 }
-
-typedef struct {
-    BlockJob *job;
-    AioContext *aio_context;
-    BlockJobDeferToMainLoopFn *fn;
-    void *opaque;
-} BlockJobDeferToMainLoopData;
-
-static void block_job_defer_to_main_loop_bh(void *opaque)
-{
-    BlockJobDeferToMainLoopData *data = opaque;
-    AioContext *aio_context;
-
-    /* Prevent race with block_job_defer_to_main_loop() */
-    aio_context_acquire(data->aio_context);
-
-    /* Fetch BDS AioContext again, in case it has changed */
-    aio_context = blk_get_aio_context(data->job->blk);
-    if (aio_context != data->aio_context) {
-        aio_context_acquire(aio_context);
-    }
-
-    data->fn(data->job, data->opaque);
-
-    if (aio_context != data->aio_context) {
-        aio_context_release(aio_context);
-    }
-
-    aio_context_release(data->aio_context);
-
-    g_free(data);
-}
-
-void block_job_defer_to_main_loop(BlockJob *job,
-                                  BlockJobDeferToMainLoopFn *fn,
-                                  void *opaque)
-{
-    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
-    data->job = job;
-    data->aio_context = blk_get_aio_context(job->blk);
-    data->fn = fn;
-    data->opaque = opaque;
-    job->deferred_to_main_loop = true;
-
-    aio_bh_schedule_oneshot(qemu_get_aio_context(),
-                            block_job_defer_to_main_loop_bh, data);
-}
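
Throughout the blockjob.c hunks above, BlockJob now embeds a generic Job, and helpers such as block_job_next(), block_job_get() and block_job_free() recover the outer structure with container_of(). A minimal, standalone illustration of that embedding pattern follows; the struct fields and the container_of() definition here are simplified stand-ins, not QEMU's headers.

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified restatement of the usual container_of() macro. */
    #define container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    typedef struct Job {        /* generic job state (illustrative subset) */
        const char *id;
    } Job;

    typedef struct BlockJob {   /* block-layer specialization embedding a Job */
        Job job;
        long speed;
    } BlockJob;

    int main(void)
    {
        BlockJob bjob = { .job = { .id = "stream0" }, .speed = 0 };

        Job *job = &bjob.job;                               /* generic view */
        BlockJob *back = container_of(job, BlockJob, job);  /* outer object back */

        printf("%s: %p == %p\n", job->id, (void *) back, (void *) &bjob);
        return 0;
    }
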
diff --git a/configure b/configure
index 59f91ab3f9..a8498ab393 100755
--- a/configure
+++ b/configure
@@ -1588,7 +1588,7 @@ disabled with --disable-FEATURE, default is enabled if available:
   virtfs          VirtFS
   mpath           Multipath persistent reservation passthrough
   xen             xen backend driver support
-  xen-pci-passthrough
+  xen-pci-passthrough    PCI passthrough support for Xen
   brlapi          BrlAPI (Braille)
   curl            curl connectivity
   membarrier      membarrier system call (for Linux 4.14+ or Windows)
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index 95e50c4dfc..6026780f95 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -331,14 +331,14 @@ static int xen_9pfs_free(struct XenDevice *xendev)
 
     for (i = 0; i < xen_9pdev->num_rings; i++) {
         if (xen_9pdev->rings[i].data != NULL) {
-            xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
-                    xen_9pdev->rings[i].data,
-                    (1 << xen_9pdev->rings[i].ring_order));
+            xen_be_unmap_grant_refs(&xen_9pdev->xendev,
+                                    xen_9pdev->rings[i].data,
+                                    (1 << xen_9pdev->rings[i].ring_order));
         }
         if (xen_9pdev->rings[i].intf != NULL) {
-            xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
-                    xen_9pdev->rings[i].intf,
-                    1);
+            xen_be_unmap_grant_refs(&xen_9pdev->xendev,
+                                    xen_9pdev->rings[i].intf,
+                                    1);
         }
         if (xen_9pdev->rings[i].bh != NULL) {
             qemu_bh_delete(xen_9pdev->rings[i].bh);
@@ -390,11 +390,10 @@ static int xen_9pfs_connect(struct XenDevice *xendev)
         }
         g_free(str);
 
-        xen_9pdev->rings[i].intf =  xengnttab_map_grant_ref(
-                xen_9pdev->xendev.gnttabdev,
-                xen_9pdev->xendev.dom,
-                xen_9pdev->rings[i].ref,
-                PROT_READ | PROT_WRITE);
+        xen_9pdev->rings[i].intf =
+            xen_be_map_grant_ref(&xen_9pdev->xendev,
+                                 xen_9pdev->rings[i].ref,
+                                 PROT_READ | PROT_WRITE);
         if (!xen_9pdev->rings[i].intf) {
             goto out;
         }
@@ -403,12 +402,11 @@ static int xen_9pfs_connect(struct XenDevice *xendev)
             goto out;
         }
         xen_9pdev->rings[i].ring_order = ring_order;
-        xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs(
-                xen_9pdev->xendev.gnttabdev,
-                (1 << ring_order),
-                xen_9pdev->xendev.dom,
-                xen_9pdev->rings[i].intf->ref,
-                PROT_READ | PROT_WRITE);
+        xen_9pdev->rings[i].data =
+            xen_be_map_grant_refs(&xen_9pdev->xendev,
+                                  xen_9pdev->rings[i].intf->ref,
+                                  (1 << ring_order),
+                                  PROT_READ | PROT_WRITE);
         if (!xen_9pdev->rings[i].data) {
             goto out;
         }
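
The xen-9p-backend.c hunks above drop the open-coded xengnttab_* calls in favour of xen_be_map_grant_ref()/xen_be_map_grant_refs()/xen_be_unmap_grant_refs(), which take the XenDevice and supply the grant-table handle and frontend domid themselves. The following standalone sketch shows the shape of such a wrapper; every type and function in it is a hypothetical stand-in, not the Xen or QEMU API.

    #include <stdio.h>

    typedef struct GrantHandle GrantHandle;   /* opaque stand-in, never dereferenced */

    typedef struct SketchXenDevice {
        GrantHandle *gnttabdev;               /* owned by the backend */
        unsigned int dom;                     /* frontend domain id */
    } SketchXenDevice;

    /* Stand-in for the low-level mapping call; a real backend would map the page. */
    static void *lowlevel_map_grant(GrantHandle *h, unsigned int dom,
                                    unsigned int ref, int prot)
    {
        (void)h; (void)prot;
        printf("mapping grant ref %u from domain %u\n", ref, dom);
        return NULL;                          /* no real mapping in this sketch */
    }

    /* The wrapper hides the handle and domid, so call sites only pass ref/prot. */
    static void *sketch_map_grant_ref(SketchXenDevice *xendev,
                                      unsigned int ref, int prot)
    {
        return lowlevel_map_grant(xendev->gnttabdev, xendev->dom, ref, prot);
    }

    int main(void)
    {
        SketchXenDevice dev = { .gnttabdev = NULL, .dom = 7 };

        sketch_map_grant_ref(&dev, 42, 0);    /* compare with the call sites above */
        return 0;
    }
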
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index f74fcd42d1..9fbc0cdb87 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -36,27 +36,9 @@
 
 /* ------------------------------------------------------------- */
 
-static int batch_maps   = 0;
-
-/* ------------------------------------------------------------- */
-
 #define BLOCK_SIZE  512
 #define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
 
-struct PersistentGrant {
-    void *page;
-    struct XenBlkDev *blkdev;
-};
-
-typedef struct PersistentGrant PersistentGrant;
-
-struct PersistentRegion {
-    void *addr;
-    int num;
-};
-
-typedef struct PersistentRegion PersistentRegion;
-
 struct ioreq {
     blkif_request_t     req;
     int16_t             status;
@@ -64,16 +46,9 @@ struct ioreq {
     /* parsed request */
     off_t               start;
     QEMUIOVector        v;
+    void                *buf;
+    size_t              size;
     int                 presync;
-    uint8_t             mapped;
-
-    /* grant mapping */
-    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    int                 prot;
-    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    void                *pages;
-    int                 num_unmap;
 
     /* aio status */
     int                 aio_inflight;
@@ -104,7 +79,6 @@ struct XenBlkDev {
     int                 protocol;
     blkif_back_rings_t  rings;
     int                 more_work;
-    int                 cnt_map;
 
     /* request lists */
     QLIST_HEAD(inflight_head, ioreq) inflight;
@@ -115,13 +89,7 @@ struct XenBlkDev {
     int                 requests_finished;
     unsigned int        max_requests;
 
-    /* Persistent grants extension */
     gboolean            feature_discard;
-    gboolean            feature_persistent;
-    GTree               *persistent_gnts;
-    GSList              *persistent_regions;
-    unsigned int        persistent_gnt_count;
-    unsigned int        max_grants;
 
     /* qemu block driver */
     DriveInfo           *dinfo;
@@ -139,14 +107,9 @@ static void ioreq_reset(struct ioreq *ioreq)
     memset(&ioreq->req, 0, sizeof(ioreq->req));
     ioreq->status = 0;
     ioreq->start = 0;
+    ioreq->buf = NULL;
+    ioreq->size = 0;
     ioreq->presync = 0;
-    ioreq->mapped = 0;
-
-    memset(ioreq->domids, 0, sizeof(ioreq->domids));
-    memset(ioreq->refs, 0, sizeof(ioreq->refs));
-    ioreq->prot = 0;
-    memset(ioreq->page, 0, sizeof(ioreq->page));
-    ioreq->pages = NULL;
 
     ioreq->aio_inflight = 0;
     ioreq->aio_errors = 0;
@@ -158,46 +121,6 @@ static void ioreq_reset(struct ioreq *ioreq)
     qemu_iovec_reset(&ioreq->v);
 }
 
-static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
-{
-    uint ua = GPOINTER_TO_UINT(a);
-    uint ub = GPOINTER_TO_UINT(b);
-    return (ua > ub) - (ua < ub);
-}
-
-static void destroy_grant(gpointer pgnt)
-{
-    PersistentGrant *grant = pgnt;
-    xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;
-
-    if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
-        xen_pv_printf(&grant->blkdev->xendev, 0,
-                      "xengnttab_unmap failed: %s\n",
-                      strerror(errno));
-    }
-    grant->blkdev->persistent_gnt_count--;
-    xen_pv_printf(&grant->blkdev->xendev, 3,
-                  "unmapped grant %p\n", grant->page);
-    g_free(grant);
-}
-
-static void remove_persistent_region(gpointer data, gpointer dev)
-{
-    PersistentRegion *region = data;
-    struct XenBlkDev *blkdev = dev;
-    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;
-
-    if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
-        xen_pv_printf(&blkdev->xendev, 0,
-                      "xengnttab_unmap region %p failed: %s\n",
-                      region->addr, strerror(errno));
-    }
-    xen_pv_printf(&blkdev->xendev, 3,
-                  "unmapped grant region %p with %d pages\n",
-                  region->addr, region->num);
-    g_free(region);
-}
-
 static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
 {
     struct ioreq *ioreq = NULL;
@@ -210,7 +133,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
         ioreq = g_malloc0(sizeof(*ioreq));
         ioreq->blkdev = blkdev;
         blkdev->requests_total++;
-        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+        qemu_iovec_init(&ioreq->v, 1);
     } else {
         /* get one from freelist */
         ioreq = QLIST_FIRST(&blkdev->freelist);
@@ -255,17 +178,16 @@ static void ioreq_release(struct ioreq *ioreq, bool finish)
 static int ioreq_parse(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
-    uintptr_t mem;
+    struct XenDevice *xendev = &blkdev->xendev;
     size_t len;
     int i;
 
-    xen_pv_printf(&blkdev->xendev, 3,
+    xen_pv_printf(xendev, 3,
                   "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                   ioreq->req.operation, ioreq->req.nr_segments,
                   ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
     switch (ioreq->req.operation) {
     case BLKIF_OP_READ:
-        ioreq->prot = PROT_WRITE; /* to memory */
         break;
     case BLKIF_OP_FLUSH_DISKCACHE:
         ioreq->presync = 1;
@@ -274,45 +196,40 @@ static int ioreq_parse(struct ioreq *ioreq)
         }
         /* fall through */
     case BLKIF_OP_WRITE:
-        ioreq->prot = PROT_READ; /* from memory */
         break;
     case BLKIF_OP_DISCARD:
         return 0;
     default:
-        xen_pv_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
+        xen_pv_printf(xendev, 0, "error: unknown operation (%d)\n",
                       ioreq->req.operation);
         goto err;
     };
 
     if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
-        xen_pv_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
+        xen_pv_printf(xendev, 0, "error: write req for ro device\n");
         goto err;
     }
 
     ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
     for (i = 0; i < ioreq->req.nr_segments; i++) {
         if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-            xen_pv_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
+            xen_pv_printf(xendev, 0, "error: nr_segments too big\n");
             goto err;
         }
         if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
-            xen_pv_printf(&blkdev->xendev, 0, "error: first > last sector\n");
+            xen_pv_printf(xendev, 0, "error: first > last sector\n");
             goto err;
         }
         if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
-            xen_pv_printf(&blkdev->xendev, 0, "error: page crossing\n");
+            xen_pv_printf(xendev, 0, "error: page crossing\n");
             goto err;
         }
 
-        ioreq->domids[i] = blkdev->xendev.dom;
-        ioreq->refs[i]   = ioreq->req.seg[i].gref;
-
-        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
         len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
-        qemu_iovec_add(&ioreq->v, (void*)mem, len);
+        ioreq->size += len;
     }
-    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
-        xen_pv_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
+    if (ioreq->start + ioreq->size > blkdev->file_size) {
+        xen_pv_printf(xendev, 0, "error: access beyond end of file\n");
         goto err;
     }
     return 0;
@@ -322,279 +239,48 @@ err:
     return -1;
 }
 
-static void ioreq_unmap(struct ioreq *ioreq)
-{
-    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
-    int i;
-
-    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
-        return;
-    }
-    if (batch_maps) {
-        if (!ioreq->pages) {
-            return;
-        }
-        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
-            xen_pv_printf(&ioreq->blkdev->xendev, 0,
-                          "xengnttab_unmap failed: %s\n",
-                          strerror(errno));
-        }
-        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
-        ioreq->pages = NULL;
-    } else {
-        for (i = 0; i < ioreq->num_unmap; i++) {
-            if (!ioreq->page[i]) {
-                continue;
-            }
-            if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
-                xen_pv_printf(&ioreq->blkdev->xendev, 0,
-                              "xengnttab_unmap failed: %s\n",
-                              strerror(errno));
-            }
-            ioreq->blkdev->cnt_map--;
-            ioreq->page[i] = NULL;
-        }
-    }
-    ioreq->mapped = 0;
-}
-
-static int ioreq_map(struct ioreq *ioreq)
-{
-    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
-    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    int i, j, new_maps = 0;
-    PersistentGrant *grant;
-    PersistentRegion *region;
-    /* domids and refs variables will contain the information necessary
-     * to map the grants that are needed to fulfill this request.
-     *
-     * After mapping the needed grants, the page array will contain the
-     * memory address of each granted page in the order specified in ioreq
-     * (disregarding if it's a persistent grant or not).
-     */
-
-    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
-        return 0;
-    }
-    if (ioreq->blkdev->feature_persistent) {
-        for (i = 0; i < ioreq->v.niov; i++) {
-            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
-                                    GUINT_TO_POINTER(ioreq->refs[i]));
-
-            if (grant != NULL) {
-                page[i] = grant->page;
-                xen_pv_printf(&ioreq->blkdev->xendev, 3,
-                              "using persistent-grant %" PRIu32 "\n",
-                              ioreq->refs[i]);
-            } else {
-                    /* Add the grant to the list of grants that
-                     * should be mapped
-                     */
-                    domids[new_maps] = ioreq->domids[i];
-                    refs[new_maps] = ioreq->refs[i];
-                    page[i] = NULL;
-                    new_maps++;
-            }
-        }
-        /* Set the protection to RW, since grants may be reused later
-         * with a different protection than the one needed for this request
-         */
-        ioreq->prot = PROT_WRITE | PROT_READ;
-    } else {
-        /* All grants in the request should be mapped */
-        memcpy(refs, ioreq->refs, sizeof(refs));
-        memcpy(domids, ioreq->domids, sizeof(domids));
-        memset(page, 0, sizeof(page));
-        new_maps = ioreq->v.niov;
-    }
-
-    if (batch_maps && new_maps) {
-        ioreq->pages = xengnttab_map_grant_refs
-            (gnt, new_maps, domids, refs, ioreq->prot);
-        if (ioreq->pages == NULL) {
-            xen_pv_printf(&ioreq->blkdev->xendev, 0,
-                          "can't map %d grant refs (%s, %d maps)\n",
-                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
-            return -1;
-        }
-        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
-            if (page[i] == NULL) {
-                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
-            }
-        }
-        ioreq->blkdev->cnt_map += new_maps;
-    } else if (new_maps)  {
-        for (i = 0; i < new_maps; i++) {
-            ioreq->page[i] = xengnttab_map_grant_ref
-                (gnt, domids[i], refs[i], ioreq->prot);
-            if (ioreq->page[i] == NULL) {
-                xen_pv_printf(&ioreq->blkdev->xendev, 0,
-                              "can't map grant ref %d (%s, %d maps)\n",
-                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
-                ioreq->mapped = 1;
-                ioreq_unmap(ioreq);
-                return -1;
-            }
-            ioreq->blkdev->cnt_map++;
-        }
-        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
-            if (page[i] == NULL) {
-                page[i] = ioreq->page[j++];
-            }
-        }
-    }
-    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
-        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
-        ioreq->blkdev->max_grants))) {
-        /*
-         * If we are using persistent grants and batch mappings only
-         * add the new maps to the list of persistent grants if the whole
-         * area can be persistently mapped.
-         */
-        if (batch_maps) {
-            region = g_malloc0(sizeof(*region));
-            region->addr = ioreq->pages;
-            region->num = new_maps;
-            ioreq->blkdev->persistent_regions = g_slist_append(
-                                            ioreq->blkdev->persistent_regions,
-                                            region);
-        }
-        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
-              && new_maps) {
-            /* Go through the list of newly mapped grants and add as many
-             * as possible to the list of persistently mapped grants.
-             *
-             * Since we start at the end of ioreq->page(s), we only need
-             * to decrease new_maps to prevent this granted pages from
-             * being unmapped in ioreq_unmap.
-             */
-            grant = g_malloc0(sizeof(*grant));
-            new_maps--;
-            if (batch_maps) {
-                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
-            } else {
-                grant->page = ioreq->page[new_maps];
-            }
-            grant->blkdev = ioreq->blkdev;
-            xen_pv_printf(&ioreq->blkdev->xendev, 3,
-                          "adding grant %" PRIu32 " page: %p\n",
-                          refs[new_maps], grant->page);
-            g_tree_insert(ioreq->blkdev->persistent_gnts,
-                          GUINT_TO_POINTER(refs[new_maps]),
-                          grant);
-            ioreq->blkdev->persistent_gnt_count++;
-        }
-        assert(!batch_maps || new_maps == 0);
-    }
-    for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
-    }
-    ioreq->mapped = 1;
-    ioreq->num_unmap = new_maps;
-    return 0;
-}
-
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800
-
-static void ioreq_free_copy_buffers(struct ioreq *ioreq)
-{
-    int i;
-
-    for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->page[i] = NULL;
-    }
-
-    qemu_vfree(ioreq->pages);
-}
-
-static int ioreq_init_copy_buffers(struct ioreq *ioreq)
-{
-    int i;
-
-    if (ioreq->v.niov == 0) {
-        return 0;
-    }
-
-    ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);
-
-    for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
-        ioreq->v.iov[i].iov_base = ioreq->page[i];
-    }
-
-    return 0;
-}
-
 static int ioreq_grant_copy(struct ioreq *ioreq)
 {
-    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
-    xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    struct XenBlkDev *blkdev = ioreq->blkdev;
+    struct XenDevice *xendev = &blkdev->xendev;
+    XenGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     int i, count, rc;
-    int64_t file_blk = ioreq->blkdev->file_blk;
+    int64_t file_blk = blkdev->file_blk;
+    bool to_domain = (ioreq->req.operation == BLKIF_OP_READ);
+    void *virt = ioreq->buf;
 
-    if (ioreq->v.niov == 0) {
+    if (ioreq->req.nr_segments == 0) {
         return 0;
     }
 
-    count = ioreq->v.niov;
+    count = ioreq->req.nr_segments;
 
     for (i = 0; i < count; i++) {
-        if (ioreq->req.operation == BLKIF_OP_READ) {
-            segs[i].flags = GNTCOPY_dest_gref;
-            segs[i].dest.foreign.ref = ioreq->refs[i];
-            segs[i].dest.foreign.domid = ioreq->domids[i];
+        if (to_domain) {
+            segs[i].dest.foreign.ref = ioreq->req.seg[i].gref;
             segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
-            segs[i].source.virt = ioreq->v.iov[i].iov_base;
+            segs[i].source.virt = virt;
         } else {
-            segs[i].flags = GNTCOPY_source_gref;
-            segs[i].source.foreign.ref = ioreq->refs[i];
-            segs[i].source.foreign.domid = ioreq->domids[i];
+            segs[i].source.foreign.ref = ioreq->req.seg[i].gref;
             segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
-            segs[i].dest.virt = ioreq->v.iov[i].iov_base;
+            segs[i].dest.virt = virt;
         }
         segs[i].len = (ioreq->req.seg[i].last_sect
                        - ioreq->req.seg[i].first_sect + 1) * file_blk;
+        virt += segs[i].len;
     }
 
-    rc = xengnttab_grant_copy(gnt, count, segs);
+    rc = xen_be_copy_grant_refs(xendev, to_domain, segs, count);
 
     if (rc) {
-        xen_pv_printf(&ioreq->blkdev->xendev, 0,
+        xen_pv_printf(xendev, 0,
                       "failed to copy data %d\n", rc);
         ioreq->aio_errors++;
         return -1;
     }
 
-    for (i = 0; i < count; i++) {
-        if (segs[i].status != GNTST_okay) {
-            xen_pv_printf(&ioreq->blkdev->xendev, 3,
-                          "failed to copy data %d for gref %d, domid %d\n",
-                          segs[i].status, ioreq->refs[i], ioreq->domids[i]);
-            ioreq->aio_errors++;
-            rc = -1;
-        }
-    }
-
     return rc;
 }
-#else
-static void ioreq_free_copy_buffers(struct ioreq *ioreq)
-{
-    abort();
-}
-
-static int ioreq_init_copy_buffers(struct ioreq *ioreq)
-{
-    abort();
-}
-
-static int ioreq_grant_copy(struct ioreq *ioreq)
-{
-    abort();
-}
-#endif
 
 static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
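
In the rewritten ioreq_grant_copy() above, each request segment becomes one copy segment over a single contiguous bounce buffer: the in-page offset is first_sect * file_blk, the length is (last_sect - first_sect + 1) * file_blk, and a local pointer advances through ioreq->buf by that length. A small standalone sketch of that arithmetic follows; the types and constants below are illustrative, not QEMU's.

    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_BLOCK_SIZE 512             /* stands in for blkdev->file_blk */

    struct sketch_seg {
        unsigned int first_sect;
        unsigned int last_sect;
    };

    int main(void)
    {
        struct sketch_seg req[2] = { { 0, 3 }, { 2, 7 } };  /* example request */
        uint8_t buf[8192];                                  /* bounce buffer */
        uint8_t *virt = buf;

        for (unsigned int i = 0; i < 2; i++) {
            size_t offset = req[i].first_sect * SKETCH_BLOCK_SIZE;
            size_t len = (req[i].last_sect - req[i].first_sect + 1)
                         * SKETCH_BLOCK_SIZE;

            printf("segment %u: page offset %zu, len %zu, buffer offset %td\n",
                   i, offset, len, virt - buf);
            virt += len;                      /* next slice of the bounce buffer */
        }
        return 0;
    }
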
 
@@ -602,11 +288,12 @@ static void qemu_aio_complete(void *opaque, int ret)
 {
     struct ioreq *ioreq = opaque;
     struct XenBlkDev *blkdev = ioreq->blkdev;
+    struct XenDevice *xendev = &blkdev->xendev;
 
     aio_context_acquire(blkdev->ctx);
 
     if (ret != 0) {
-        xen_pv_printf(&blkdev->xendev, 0, "%s I/O error\n",
+        xen_pv_printf(xendev, 0, "%s I/O error\n",
                       ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
         ioreq->aio_errors++;
     }
@@ -621,32 +308,28 @@ static void qemu_aio_complete(void *opaque, int ret)
         goto done;
     }
 
-    if (xen_feature_grant_copy) {
-        switch (ioreq->req.operation) {
-        case BLKIF_OP_READ:
-            /* in case of failure ioreq->aio_errors is increased */
-            if (ret == 0) {
-                ioreq_grant_copy(ioreq);
-            }
-            ioreq_free_copy_buffers(ioreq);
-            break;
-        case BLKIF_OP_WRITE:
-        case BLKIF_OP_FLUSH_DISKCACHE:
-            if (!ioreq->req.nr_segments) {
-                break;
-            }
-            ioreq_free_copy_buffers(ioreq);
-            break;
-        default:
+    switch (ioreq->req.operation) {
+    case BLKIF_OP_READ:
+        /* in case of failure ioreq->aio_errors is increased */
+        if (ret == 0) {
+            ioreq_grant_copy(ioreq);
+        }
+        qemu_vfree(ioreq->buf);
+        break;
+    case BLKIF_OP_WRITE:
+    case BLKIF_OP_FLUSH_DISKCACHE:
+        if (!ioreq->req.nr_segments) {
             break;
         }
+        qemu_vfree(ioreq->buf);
+        break;
+    default:
+        break;
     }
 
     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
-    if (!xen_feature_grant_copy) {
-        ioreq_unmap(ioreq);
-    }
     ioreq_finish(ioreq);
+
     switch (ioreq->req.operation) {
     case BLKIF_OP_WRITE:
     case BLKIF_OP_FLUSH_DISKCACHE:
@@ -706,18 +389,13 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
 
-    if (xen_feature_grant_copy) {
-        ioreq_init_copy_buffers(ioreq);
-        if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
-            ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
-            ioreq_grant_copy(ioreq)) {
-                ioreq_free_copy_buffers(ioreq);
-                goto err;
-        }
-    } else {
-        if (ioreq->req.nr_segments && ioreq_map(ioreq)) {
-            goto err;
-        }
+    ioreq->buf = qemu_memalign(XC_PAGE_SIZE, ioreq->size);
+    if (ioreq->req.nr_segments &&
+        (ioreq->req.operation == BLKIF_OP_WRITE ||
+         ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
+        ioreq_grant_copy(ioreq)) {
+        qemu_vfree(ioreq->buf);
+        goto err;
     }
 
     ioreq->aio_inflight++;
@@ -728,6 +406,7 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 
     switch (ioreq->req.operation) {
     case BLKIF_OP_READ:
+        qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
         block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                          ioreq->v.size, BLOCK_ACCT_READ);
         ioreq->aio_inflight++;
@@ -740,6 +419,7 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
             break;
         }
 
+        qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
         block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                          ioreq->v.size,
                          ioreq->req.operation == BLKIF_OP_WRITE ?
@@ -758,9 +438,6 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
     }
     default:
         /* unknown operation (shouldn't happen -- parse catches this) */
-        if (!xen_feature_grant_copy) {
-            ioreq_unmap(ioreq);
-        }
         goto err;
     }
 
@@ -946,24 +623,21 @@ static void blk_alloc(struct XenDevice *xendev)
 
     blkdev->ctx = iothread_get_aio_context(blkdev->iothread);
     blkdev->bh = aio_bh_new(blkdev->ctx, blk_bh, blkdev);
-
-    if (xen_mode != XEN_EMULATE) {
-        batch_maps = 1;
-    }
 }
 
 static void blk_parse_discard(struct XenBlkDev *blkdev)
 {
+    struct XenDevice *xendev = &blkdev->xendev;
     int enable;
 
     blkdev->feature_discard = true;
 
-    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
+    if (xenstore_read_be_int(xendev, "discard-enable", &enable) == 0) {
         blkdev->feature_discard = !!enable;
     }
 
     if (blkdev->feature_discard) {
-        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
+        xenstore_write_be_int(xendev, "feature-discard", 1);
     }
 }
 
@@ -978,7 +652,7 @@ static int blk_init(struct XenDevice *xendev)
     /* read xenstore entries */
     if (blkdev->params == NULL) {
         char *h = NULL;
-        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
+        blkdev->params = xenstore_read_be_str(xendev, "params");
         if (blkdev->params != NULL) {
             h = strchr(blkdev->params, ':');
         }
@@ -998,18 +672,18 @@ static int blk_init(struct XenDevice *xendev)
         blkdev->fileproto = "vpc";
     }
     if (blkdev->mode == NULL) {
-        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
+        blkdev->mode = xenstore_read_be_str(xendev, "mode");
     }
     if (blkdev->type == NULL) {
-        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
+        blkdev->type = xenstore_read_be_str(xendev, "type");
     }
     if (blkdev->dev == NULL) {
-        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
+        blkdev->dev = xenstore_read_be_str(xendev, "dev");
     }
     if (blkdev->devtype == NULL) {
-        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
+        blkdev->devtype = xenstore_read_be_str(xendev, "device-type");
     }
-    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
+    directiosafe = xenstore_read_be_str(xendev, "direct-io-safe");
     blkdev->directiosafe = (directiosafe && atoi(directiosafe));
 
     /* do we have all we need? */
@@ -1032,18 +706,13 @@ static int blk_init(struct XenDevice *xendev)
 
     blkdev->file_blk  = BLOCK_SIZE;
 
-    xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
-                  xen_feature_grant_copy ? "enabled" : "disabled");
-
     /* fill info
      * blk_connect supplies sector-size and sectors
      */
-    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
-    xenstore_write_be_int(&blkdev->xendev, "feature-persistent",
-                          !xen_feature_grant_copy);
-    xenstore_write_be_int(&blkdev->xendev, "info", info);
+    xenstore_write_be_int(xendev, "feature-flush-cache", 1);
+    xenstore_write_be_int(xendev, "info", info);
 
-    xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
+    xenstore_write_be_int(xendev, "max-ring-page-order",
                           MAX_RING_PAGE_ORDER);
 
     blk_parse_discard(blkdev);
@@ -1067,25 +736,15 @@ out_error:
     return -1;
 }
 
-/*
- * We need to account for the grant allocations requiring contiguous
- * chunks; the worst case number would be
- *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
- * but in order to keep things simple just use
- *     2 * max_req * max_seg.
- */
-#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
-
 static int blk_connect(struct XenDevice *xendev)
 {
     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
-    int pers, index, qflags;
+    int index, qflags;
     bool readonly = true;
     bool writethrough = true;
     int order, ring_ref;
     unsigned int ring_size, max_grants;
     unsigned int i;
-    uint32_t *domids;
 
     trace_xen_disk_connect(xendev->name);
 
@@ -1105,7 +764,7 @@ static int blk_connect(struct XenDevice *xendev)
     }
 
     /* init qemu block driver */
-    index = (blkdev->xendev.dev - 202 * 256) / 16;
+    index = (xendev->dev - 202 * 256) / 16;
     blkdev->dinfo = drive_get(IF_XEN, 0, index);
     if (!blkdev->dinfo) {
         Error *local_err = NULL;
@@ -1117,11 +776,11 @@ static int blk_connect(struct XenDevice *xendev)
         }
 
         /* setup via xenbus -> create new block driver instance */
-        xen_pv_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
+        xen_pv_printf(xendev, 2, "create new bdrv (xenbus setup)\n");
         blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
                                    qflags, &local_err);
         if (!blkdev->blk) {
-            xen_pv_printf(&blkdev->xendev, 0, "error: %s\n",
+            xen_pv_printf(xendev, 0, "error: %s\n",
                           error_get_pretty(local_err));
             error_free(local_err);
             return -1;
@@ -1129,11 +788,11 @@ static int blk_connect(struct XenDevice *xendev)
         blk_set_enable_write_cache(blkdev->blk, !writethrough);
     } else {
         /* setup via qemu cmdline -> already setup for us */
-        xen_pv_printf(&blkdev->xendev, 2,
+        xen_pv_printf(xendev, 2,
                       "get configured bdrv (cmdline setup)\n");
         blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
         if (blk_is_read_only(blkdev->blk) && !readonly) {
-            xen_pv_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
+            xen_pv_printf(xendev, 0, "Unexpected read-only drive");
             blkdev->blk = NULL;
             return -1;
         }
@@ -1146,7 +805,7 @@ static int blk_connect(struct XenDevice *xendev)
     if (blkdev->file_size < 0) {
         BlockDriverState *bs = blk_bs(blkdev->blk);
         const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
-        xen_pv_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
+        xen_pv_printf(xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                       (int)blkdev->file_size, strerror(-blkdev->file_size),
                       drv_name ?: "-");
         blkdev->file_size = 0;
@@ -1158,15 +817,15 @@ static int blk_connect(struct XenDevice *xendev)
                   blkdev->file_size, blkdev->file_size >> 20);
 
     /* Fill in number of sector size and number of sectors */
-    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
-    xenstore_write_be_int64(&blkdev->xendev, "sectors",
+    xenstore_write_be_int(xendev, "sector-size", blkdev->file_blk);
+    xenstore_write_be_int64(xendev, "sectors",
                             blkdev->file_size / blkdev->file_blk);
 
-    if (xenstore_read_fe_int(&blkdev->xendev, "ring-page-order",
+    if (xenstore_read_fe_int(xendev, "ring-page-order",
                              &order) == -1) {
         blkdev->nr_ring_ref = 1;
 
-        if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref",
+        if (xenstore_read_fe_int(xendev, "ring-ref",
                                  &ring_ref) == -1) {
             return -1;
         }
@@ -1183,7 +842,7 @@ static int blk_connect(struct XenDevice *xendev)
                 return -1;
             }
 
-            if (xenstore_read_fe_int(&blkdev->xendev, key,
+            if (xenstore_read_fe_int(xendev, key,
                                      &ring_ref) == -1) {
                 g_free(key);
                 return -1;
@@ -1198,23 +857,18 @@ static int blk_connect(struct XenDevice *xendev)
         return -1;
     }
 
-    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
-                             &blkdev->xendev.remote_port) == -1) {
+    if (xenstore_read_fe_int(xendev, "event-channel",
+                             &xendev->remote_port) == -1) {
         return -1;
     }
-    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
-        blkdev->feature_persistent = FALSE;
-    } else {
-        blkdev->feature_persistent = !!pers;
-    }
 
-    if (!blkdev->xendev.protocol) {
+    if (!xendev->protocol) {
         blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
-    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_NATIVE) == 0) {
+    } else if (strcmp(xendev->protocol, XEN_IO_PROTO_ABI_NATIVE) == 0) {
         blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
-    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
+    } else if (strcmp(xendev->protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
         blkdev->protocol = BLKIF_PROTOCOL_X86_32;
-    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
+    } else if (strcmp(xendev->protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
         blkdev->protocol = BLKIF_PROTOCOL_X86_64;
     } else {
         blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
@@ -1241,43 +895,17 @@ static int blk_connect(struct XenDevice *xendev)
         return -1;
     }
 
-    /* Calculate the maximum number of grants needed by ioreqs */
-    max_grants = MAX_GRANTS(blkdev->max_requests,
-                            BLKIF_MAX_SEGMENTS_PER_REQUEST);
     /* Add on the number needed for the ring pages */
-    max_grants += blkdev->nr_ring_ref;
-
-    blkdev->xendev.gnttabdev = xengnttab_open(NULL, 0);
-    if (blkdev->xendev.gnttabdev == NULL) {
-        xen_pv_printf(xendev, 0, "xengnttab_open failed: %s\n",
-                      strerror(errno));
-        return -1;
-    }
-    if (xengnttab_set_max_grants(blkdev->xendev.gnttabdev, max_grants)) {
-        xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
-                      strerror(errno));
-        return -1;
-    }
-
-    domids = g_new0(uint32_t, blkdev->nr_ring_ref);
-    for (i = 0; i < blkdev->nr_ring_ref; i++) {
-        domids[i] = blkdev->xendev.dom;
-    }
-
-    blkdev->sring = xengnttab_map_grant_refs(blkdev->xendev.gnttabdev,
-                                             blkdev->nr_ring_ref,
-                                             domids,
-                                             blkdev->ring_ref,
-                                             PROT_READ | PROT_WRITE);
-
-    g_free(domids);
+    max_grants = blkdev->nr_ring_ref;
 
+    xen_be_set_max_grant_refs(xendev, max_grants);
+    blkdev->sring = xen_be_map_grant_refs(xendev, blkdev->ring_ref,
+                                          blkdev->nr_ring_ref,
+                                          PROT_READ | PROT_WRITE);
     if (!blkdev->sring) {
         return -1;
     }
 
-    blkdev->cnt_map++;
-
     switch (blkdev->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
     {
@@ -1301,27 +929,14 @@ static int blk_connect(struct XenDevice *xendev)
     }
     }
 
-    if (blkdev->feature_persistent) {
-        /* Init persistent grants */
-        blkdev->max_grants = blkdev->max_requests *
-            BLKIF_MAX_SEGMENTS_PER_REQUEST;
-        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
-                                             NULL, NULL,
-                                             batch_maps ?
-                                             (GDestroyNotify)g_free :
-                                             (GDestroyNotify)destroy_grant);
-        blkdev->persistent_regions = NULL;
-        blkdev->persistent_gnt_count = 0;
-    }
-
     blk_set_aio_context(blkdev->blk, blkdev->ctx);
 
-    xen_be_bind_evtchn(&blkdev->xendev);
+    xen_be_bind_evtchn(xendev);
 
-    xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, nr-ring-ref %u, "
+    xen_pv_printf(xendev, 1, "ok: proto %s, nr-ring-ref %u, "
                   "remote port %d, local port %d\n",
-                  blkdev->xendev.protocol, blkdev->nr_ring_ref,
-                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
+                  xendev->protocol, blkdev->nr_ring_ref,
+                  xendev->remote_port, xendev->local_port);
     return 0;
 }
 
@@ -1339,41 +954,15 @@ static void blk_disconnect(struct XenDevice *xendev)
         blk_unref(blkdev->blk);
         blkdev->blk = NULL;
     }
-    xen_pv_unbind_evtchn(&blkdev->xendev);
+    xen_pv_unbind_evtchn(xendev);
 
     aio_context_release(blkdev->ctx);
 
     if (blkdev->sring) {
-        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring,
-                        blkdev->nr_ring_ref);
-        blkdev->cnt_map--;
+        xen_be_unmap_grant_refs(xendev, blkdev->sring,
+                                blkdev->nr_ring_ref);
         blkdev->sring = NULL;
     }
-
-    /*
-     * Unmap persistent grants before switching to the closed state
-     * so the frontend can free them.
-     *
-     * In the !batch_maps case g_tree_destroy will take care of unmapping
-     * the grant, but in the batch_maps case we need to iterate over every
-     * region in persistent_regions and unmap it.
-     */
-    if (blkdev->feature_persistent) {
-        g_tree_destroy(blkdev->persistent_gnts);
-        assert(batch_maps || blkdev->persistent_gnt_count == 0);
-        if (batch_maps) {
-            blkdev->persistent_gnt_count = 0;
-            g_slist_foreach(blkdev->persistent_regions,
-                            (GFunc)remove_persistent_region, blkdev);
-            g_slist_free(blkdev->persistent_regions);
-        }
-        blkdev->feature_persistent = false;
-    }
-
-    if (blkdev->xendev.gnttabdev) {
-        xengnttab_close(blkdev->xendev.gnttabdev);
-        blkdev->xendev.gnttabdev = NULL;
-    }
 }
 
 static int blk_free(struct XenDevice *xendev)
@@ -1410,10 +999,11 @@ static void blk_event(struct XenDevice *xendev)
 }
 
 struct XenDevOps xen_blkdev_ops = {
+    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
     .size       = sizeof(struct XenBlkDev),
     .alloc      = blk_alloc,
     .init       = blk_init,
-    .initialise    = blk_connect,
+    .initialise = blk_connect,
     .disconnect = blk_disconnect,
     .event      = blk_event,
     .free       = blk_free,
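
The xen_disk hunks above drop per-request grant mapping (and the persistent-grant machinery) in favour of one contiguous, page-aligned bounce buffer per request, filled or drained by grant copies. A condensed sketch of the resulting read path follows, using the lines visible in this hunk; blk_aio_preadv(), ioreq->start and the internals of ioreq_grant_copy() are assumed from context rather than shown here, so treat this as an illustration, not the literal code:

    /* Submission: allocate a bounce buffer and hand it to the block layer
     * as a single iovec (mirrors ioreq_runio_qemu_aio() above). */
    ioreq->buf = qemu_memalign(XC_PAGE_SIZE, ioreq->size);
    qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
    block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                     ioreq->v.size, BLOCK_ACCT_READ);
    blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
                   qemu_aio_complete, ioreq);       /* assumed call site */

    /* Completion (qemu_aio_complete() above): only a successful read is
     * copied out to the guest's grant references; the bounce buffer is
     * freed unconditionally. */
    if (ret == 0) {
        ioreq_grant_copy(ioreq);
    }
    qemu_vfree(ioreq->buf);

Writes go the other way round: the guest data is grant-copied into ioreq->buf before the AIO is submitted, and only the free remains to be done on completion.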
diff --git a/hw/char/xen_console.c b/hw/char/xen_console.c
index bdfaa40ed3..8b4b4bf523 100644
--- a/hw/char/xen_console.c
+++ b/hw/char/xen_console.c
@@ -233,12 +233,11 @@ static int con_initialise(struct XenDevice *xendev)
     if (!xendev->dev) {
         xen_pfn_t mfn = con->ring_ref;
         con->sring = xenforeignmemory_map(xen_fmem, con->xendev.dom,
-                                          PROT_READ|PROT_WRITE,
+                                          PROT_READ | PROT_WRITE,
                                           1, &mfn, NULL);
     } else {
-        con->sring = xengnttab_map_grant_ref(xendev->gnttabdev, con->xendev.dom,
-                                             con->ring_ref,
-                                             PROT_READ|PROT_WRITE);
+        con->sring = xen_be_map_grant_ref(xendev, con->ring_ref,
+                                          PROT_READ | PROT_WRITE);
     }
     if (!con->sring)
 	return -1;
@@ -267,7 +266,7 @@ static void con_disconnect(struct XenDevice *xendev)
         if (!xendev->dev) {
             xenforeignmemory_unmap(xen_fmem, con->sring, 1);
         } else {
-            xengnttab_unmap(xendev->gnttabdev, con->sring, 1);
+            xen_be_unmap_grant_ref(xendev, con->sring);
         }
         con->sring = NULL;
     }
diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
index caa563be3d..6ffa3c22cc 100644
--- a/hw/i386/xen/xen-hvm.c
+++ b/hw/i386/xen/xen-hvm.c
@@ -95,7 +95,8 @@ typedef struct XenIOState {
     CPUState **cpu_by_vcpu_id;
     /* the evtchn port for polling the notification, */
     evtchn_port_t *ioreq_local_port;
-    /* evtchn local port for buffered io */
+    /* evtchn remote and local ports for buffered io */
+    evtchn_port_t bufioreq_remote_port;
     evtchn_port_t bufioreq_local_port;
     /* the evtchn fd for polling */
     xenevtchn_handle *xce_handle;
@@ -1236,12 +1237,52 @@ static void xen_wakeup_notifier(Notifier *notifier, void *data)
     xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
 }
 
-void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
+static int xen_map_ioreq_server(XenIOState *state)
 {
-    int i, rc;
     xen_pfn_t ioreq_pfn;
     xen_pfn_t bufioreq_pfn;
     evtchn_port_t bufioreq_evtchn;
+    int rc;
+
+    rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
+                                   &ioreq_pfn, &bufioreq_pfn,
+                                   &bufioreq_evtchn);
+    if (rc < 0) {
+        error_report("failed to get ioreq server info: error %d handle=%p",
+                     errno, xen_xc);
+        return rc;
+    }
+
+    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
+    DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
+    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);
+
+    state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
+                                              PROT_READ | PROT_WRITE,
+                                              1, &ioreq_pfn, NULL);
+    if (state->shared_page == NULL) {
+        error_report("map shared IO page returned error %d handle=%p",
+                     errno, xen_xc);
+        return -1;
+    }
+
+    state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
+                                                   PROT_READ | PROT_WRITE,
+                                                   1, &bufioreq_pfn, NULL);
+    if (state->buffered_io_page == NULL) {
+        error_report("map buffered IO page returned error %d", errno);
+        return -1;
+    }
+
+    state->bufioreq_remote_port = bufioreq_evtchn;
+
+    return 0;
+}
+
+void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
+{
+    int i, rc;
+    xen_pfn_t ioreq_pfn;
     XenIOState *state;
 
     state = g_malloc0(sizeof (XenIOState));
@@ -1269,25 +1310,8 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
     state->wakeup.notify = xen_wakeup_notifier;
     qemu_register_wakeup_notifier(&state->wakeup);
 
-    rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
-                                   &ioreq_pfn, &bufioreq_pfn,
-                                   &bufioreq_evtchn);
+    rc = xen_map_ioreq_server(state);
     if (rc < 0) {
-        error_report("failed to get ioreq server info: error %d handle=%p",
-                     errno, xen_xc);
-        goto err;
-    }
-
-    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
-    DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
-    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);
-
-    state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
-                                              PROT_READ|PROT_WRITE,
-                                              1, &ioreq_pfn, NULL);
-    if (state->shared_page == NULL) {
-        error_report("map shared IO page returned error %d handle=%p",
-                     errno, xen_xc);
         goto err;
     }
 
@@ -1308,14 +1332,6 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
         goto err;
     }
 
-    state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
-                                                   PROT_READ|PROT_WRITE,
-                                                   1, &bufioreq_pfn, NULL);
-    if (state->buffered_io_page == NULL) {
-        error_report("map buffered IO page returned error %d", errno);
-        goto err;
-    }
-
     /* Note: cpus is empty at this point in init */
     state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));
 
@@ -1340,7 +1356,7 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
     }
 
     rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
-                                    bufioreq_evtchn);
+                                    state->bufioreq_remote_port);
     if (rc == -1) {
         error_report("buffered evtchn bind error %d", errno);
         goto err;
diff --git a/hw/i386/xen/xen_pvdevice.c b/hw/i386/xen/xen_pvdevice.c
index f748823658..a146f1883a 100644
--- a/hw/i386/xen/xen_pvdevice.c
+++ b/hw/i386/xen/xen_pvdevice.c
@@ -71,6 +71,16 @@ static const MemoryRegionOps xen_pv_mmio_ops = {
     .endianness = DEVICE_LITTLE_ENDIAN,
 };
 
+static const VMStateDescription vmstate_xen_pvdevice = {
+    .name = "xen-pvdevice",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_PCI_DEVICE(parent_obj, XenPVDevice),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static void xen_pv_realize(PCIDevice *pci_dev, Error **errp)
 {
     XenPVDevice *d = XEN_PV_DEVICE(pci_dev);
@@ -120,6 +130,7 @@ static void xen_pv_class_init(ObjectClass *klass, void *data)
     k->class_id = PCI_CLASS_SYSTEM_OTHER;
     dc->desc = "Xen PV Device";
     dc->props = xen_pv_props;
+    dc->vmsd = &vmstate_xen_pvdevice;
 }
 
 static const TypeInfo xen_pv_type_info = {
diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c
index 20c43a61b3..46a8dbfc90 100644
--- a/hw/net/xen_nic.c
+++ b/hw/net/xen_nic.c
@@ -160,9 +160,8 @@ static void net_tx_packets(struct XenNetDev *netdev)
                           (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                           (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");
 
-            page = xengnttab_map_grant_ref(netdev->xendev.gnttabdev,
-                                           netdev->xendev.dom,
-                                           txreq.gref, PROT_READ);
+            page = xen_be_map_grant_ref(&netdev->xendev, txreq.gref,
+                                        PROT_READ);
             if (page == NULL) {
                 xen_pv_printf(&netdev->xendev, 0,
                               "error: tx gref dereference failed (%d)\n",
@@ -183,7 +182,7 @@ static void net_tx_packets(struct XenNetDev *netdev)
                 qemu_send_packet(qemu_get_queue(netdev->nic),
                                  page + txreq.offset, txreq.size);
             }
-            xengnttab_unmap(netdev->xendev.gnttabdev, page, 1);
+            xen_be_unmap_grant_ref(&netdev->xendev, page);
             net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
         }
         if (!netdev->tx_work) {
@@ -254,9 +253,7 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
     memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
     netdev->rx_ring.req_cons = ++rc;
 
-    page = xengnttab_map_grant_ref(netdev->xendev.gnttabdev,
-                                   netdev->xendev.dom,
-                                   rxreq.gref, PROT_WRITE);
+    page = xen_be_map_grant_ref(&netdev->xendev, rxreq.gref, PROT_WRITE);
     if (page == NULL) {
         xen_pv_printf(&netdev->xendev, 0,
                       "error: rx gref dereference failed (%d)\n",
@@ -265,7 +262,7 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
         return -1;
     }
     memcpy(page + NET_IP_ALIGN, buf, size);
-    xengnttab_unmap(netdev->xendev.gnttabdev, page, 1);
+    xen_be_unmap_grant_ref(&netdev->xendev, page);
     net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);
 
     return size;
@@ -338,19 +335,17 @@ static int net_connect(struct XenDevice *xendev)
         return -1;
     }
 
-    netdev->txs = xengnttab_map_grant_ref(netdev->xendev.gnttabdev,
-                                          netdev->xendev.dom,
-                                          netdev->tx_ring_ref,
-                                          PROT_READ | PROT_WRITE);
+    netdev->txs = xen_be_map_grant_ref(&netdev->xendev,
+                                       netdev->tx_ring_ref,
+                                       PROT_READ | PROT_WRITE);
     if (!netdev->txs) {
         return -1;
     }
-    netdev->rxs = xengnttab_map_grant_ref(netdev->xendev.gnttabdev,
-                                          netdev->xendev.dom,
-                                          netdev->rx_ring_ref,
-                                          PROT_READ | PROT_WRITE);
+    netdev->rxs = xen_be_map_grant_ref(&netdev->xendev,
+                                       netdev->rx_ring_ref,
+                                       PROT_READ | PROT_WRITE);
     if (!netdev->rxs) {
-        xengnttab_unmap(netdev->xendev.gnttabdev, netdev->txs, 1);
+        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
         netdev->txs = NULL;
         return -1;
     }
@@ -375,11 +370,11 @@ static void net_disconnect(struct XenDevice *xendev)
     xen_pv_unbind_evtchn(&netdev->xendev);
 
     if (netdev->txs) {
-        xengnttab_unmap(netdev->xendev.gnttabdev, netdev->txs, 1);
+        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
         netdev->txs = NULL;
     }
     if (netdev->rxs) {
-        xengnttab_unmap(netdev->xendev.gnttabdev, netdev->rxs, 1);
+        xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs);
         netdev->rxs = NULL;
     }
 }
diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c
index b3a90c0e68..5b2e21ed18 100644
--- a/hw/usb/xen-usb.c
+++ b/hw/usb/xen-usb.c
@@ -173,8 +173,9 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
         for (i = 0; i < usbback_req->nr_buffer_segs; i++) {
             ref[i] = usbback_req->req.seg[i].gref;
         }
-        usbback_req->buffer = xengnttab_map_domain_grant_refs(xendev->gnttabdev,
-            usbback_req->nr_buffer_segs, xendev->dom, ref, prot);
+        usbback_req->buffer =
+            xen_be_map_grant_refs(xendev, ref, usbback_req->nr_buffer_segs,
+                                  prot);
 
         if (!usbback_req->buffer) {
             return -ENOMEM;
@@ -206,8 +207,9 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
     for (i = 0; i < usbback_req->nr_extra_segs; i++) {
         ref[i] = usbback_req->req.seg[i + usbback_req->req.nr_buffer_segs].gref;
     }
-    usbback_req->isoc_buffer = xengnttab_map_domain_grant_refs(
-         xendev->gnttabdev, usbback_req->nr_extra_segs, xendev->dom, ref, prot);
+    usbback_req->isoc_buffer =
+        xen_be_map_grant_refs(xendev, ref, usbback_req->nr_extra_segs,
+                              prot);
 
     if (!usbback_req->isoc_buffer) {
         return -ENOMEM;
@@ -291,14 +293,14 @@ static void usbback_do_response(struct usbback_req *usbback_req, int32_t status,
     }
 
     if (usbback_req->buffer) {
-        xengnttab_unmap(xendev->gnttabdev, usbback_req->buffer,
-                        usbback_req->nr_buffer_segs);
+        xen_be_unmap_grant_refs(xendev, usbback_req->buffer,
+                                usbback_req->nr_buffer_segs);
         usbback_req->buffer = NULL;
     }
 
     if (usbback_req->isoc_buffer) {
-        xengnttab_unmap(xendev->gnttabdev, usbback_req->isoc_buffer,
-                        usbback_req->nr_extra_segs);
+        xen_be_unmap_grant_refs(xendev, usbback_req->isoc_buffer,
+                                usbback_req->nr_extra_segs);
         usbback_req->isoc_buffer = NULL;
     }
 
@@ -834,11 +836,11 @@ static void usbback_disconnect(struct XenDevice *xendev)
     xen_pv_unbind_evtchn(xendev);
 
     if (usbif->urb_sring) {
-        xengnttab_unmap(xendev->gnttabdev, usbif->urb_sring, 1);
+        xen_be_unmap_grant_ref(xendev, usbif->urb_sring);
         usbif->urb_sring = NULL;
     }
     if (usbif->conn_sring) {
-        xengnttab_unmap(xendev->gnttabdev, usbif->conn_sring, 1);
+        xen_be_unmap_grant_ref(xendev, usbif->conn_sring);
         usbif->conn_sring = NULL;
     }
 
@@ -877,12 +879,10 @@ static int usbback_connect(struct XenDevice *xendev)
         return -1;
     }
 
-    usbif->urb_sring = xengnttab_map_grant_ref(xendev->gnttabdev, xendev->dom,
-                                               urb_ring_ref,
-                                               PROT_READ | PROT_WRITE);
-    usbif->conn_sring = xengnttab_map_grant_ref(xendev->gnttabdev, xendev->dom,
-                                                conn_ring_ref,
-                                                PROT_READ | PROT_WRITE);
+    usbif->urb_sring = xen_be_map_grant_ref(xendev, urb_ring_ref,
+                                            PROT_READ | PROT_WRITE);
+    usbif->conn_sring = xen_be_map_grant_ref(xendev, conn_ring_ref,
+                                             PROT_READ | PROT_WRITE);
     if (!usbif->urb_sring || !usbif->conn_sring) {
         xen_pv_printf(xendev, 0, "error mapping rings\n");
         usbback_disconnect(xendev);
@@ -1024,10 +1024,7 @@ static void usbback_alloc(struct XenDevice *xendev)
 
     /* max_grants: for each request and for the rings (request and connect). */
     max_grants = USBIF_MAX_SEGMENTS_PER_REQUEST * USB_URB_RING_SIZE + 2;
-    if (xengnttab_set_max_grants(xendev->gnttabdev, max_grants) < 0) {
-        xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
-                      strerror(errno));
-    }
+    xen_be_set_max_grant_refs(xendev, max_grants);
 }
 
 static int usbback_free(struct XenDevice *xendev)
diff --git a/hw/xen/xen_backend.c b/hw/xen/xen_backend.c
index 7445b506ac..9a8e8771ec 100644
--- a/hw/xen/xen_backend.c
+++ b/hw/xen/xen_backend.c
@@ -44,9 +44,9 @@ BusState *xen_sysbus;
 /* public */
 struct xs_handle *xenstore = NULL;
 const char *xen_protocol;
-bool xen_feature_grant_copy;
 
 /* private */
+static bool xen_feature_grant_copy;
 static int debug;
 
 int xenstore_write_be_str(struct XenDevice *xendev, const char *node, const char *val)
@@ -106,6 +106,156 @@ int xen_be_set_state(struct XenDevice *xendev, enum xenbus_state state)
     return 0;
 }
 
+void xen_be_set_max_grant_refs(struct XenDevice *xendev,
+                               unsigned int nr_refs)
+{
+    assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
+
+    if (xengnttab_set_max_grants(xendev->gnttabdev, nr_refs)) {
+        xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
+                      strerror(errno));
+    }
+}
+
+void *xen_be_map_grant_refs(struct XenDevice *xendev, uint32_t *refs,
+                            unsigned int nr_refs, int prot)
+{
+    void *ptr;
+
+    assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
+
+    ptr = xengnttab_map_domain_grant_refs(xendev->gnttabdev, nr_refs,
+                                          xen_domid, refs, prot);
+    if (!ptr) {
+        xen_pv_printf(xendev, 0,
+                      "xengnttab_map_domain_grant_refs failed: %s\n",
+                      strerror(errno));
+    }
+
+    return ptr;
+}
+
+void xen_be_unmap_grant_refs(struct XenDevice *xendev, void *ptr,
+                             unsigned int nr_refs)
+{
+    assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
+
+    if (xengnttab_unmap(xendev->gnttabdev, ptr, nr_refs)) {
+        xen_pv_printf(xendev, 0, "xengnttab_unmap failed: %s\n",
+                      strerror(errno));
+    }
+}
+
+static int compat_copy_grant_refs(struct XenDevice *xendev,
+                                  bool to_domain,
+                                  XenGrantCopySegment segs[],
+                                  unsigned int nr_segs)
+{
+    uint32_t *refs = g_new(uint32_t, nr_segs);
+    int prot = to_domain ? PROT_WRITE : PROT_READ;
+    void *pages;
+    unsigned int i;
+
+    for (i = 0; i < nr_segs; i++) {
+        XenGrantCopySegment *seg = &segs[i];
+
+        refs[i] = to_domain ?
+            seg->dest.foreign.ref : seg->source.foreign.ref;
+    }
+
+    pages = xengnttab_map_domain_grant_refs(xendev->gnttabdev, nr_segs,
+                                            xen_domid, refs, prot);
+    if (!pages) {
+        xen_pv_printf(xendev, 0,
+                      "xengnttab_map_domain_grant_refs failed: %s\n",
+                      strerror(errno));
+        g_free(refs);
+        return -1;
+    }
+
+    for (i = 0; i < nr_segs; i++) {
+        XenGrantCopySegment *seg = &segs[i];
+        void *page = pages + (i * XC_PAGE_SIZE);
+
+        if (to_domain) {
+            memcpy(page + seg->dest.foreign.offset, seg->source.virt,
+                   seg->len);
+        } else {
+            memcpy(seg->dest.virt, page + seg->source.foreign.offset,
+                   seg->len);
+        }
+    }
+
+    if (xengnttab_unmap(xendev->gnttabdev, pages, nr_segs)) {
+        xen_pv_printf(xendev, 0, "xengnttab_unmap failed: %s\n",
+                      strerror(errno));
+    }
+
+    g_free(refs);
+    return 0;
+}
+
+int xen_be_copy_grant_refs(struct XenDevice *xendev,
+                           bool to_domain,
+                           XenGrantCopySegment segs[],
+                           unsigned int nr_segs)
+{
+    xengnttab_grant_copy_segment_t *xengnttab_segs;
+    unsigned int i;
+    int rc;
+
+    assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
+
+    if (!xen_feature_grant_copy) {
+        return compat_copy_grant_refs(xendev, to_domain, segs, nr_segs);
+    }
+
+    xengnttab_segs = g_new0(xengnttab_grant_copy_segment_t, nr_segs);
+
+    for (i = 0; i < nr_segs; i++) {
+        XenGrantCopySegment *seg = &segs[i];
+        xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i];
+
+        if (to_domain) {
+            xengnttab_seg->flags = GNTCOPY_dest_gref;
+            xengnttab_seg->dest.foreign.domid = xen_domid;
+            xengnttab_seg->dest.foreign.ref = seg->dest.foreign.ref;
+            xengnttab_seg->dest.foreign.offset = seg->dest.foreign.offset;
+            xengnttab_seg->source.virt = seg->source.virt;
+        } else {
+            xengnttab_seg->flags = GNTCOPY_source_gref;
+            xengnttab_seg->source.foreign.domid = xen_domid;
+            xengnttab_seg->source.foreign.ref = seg->source.foreign.ref;
+            xengnttab_seg->source.foreign.offset =
+                seg->source.foreign.offset;
+            xengnttab_seg->dest.virt = seg->dest.virt;
+        }
+
+        xengnttab_seg->len = seg->len;
+    }
+
+    rc = xengnttab_grant_copy(xendev->gnttabdev, nr_segs, xengnttab_segs);
+
+    if (rc) {
+        xen_pv_printf(xendev, 0, "xengnttab_copy failed: %s\n",
+                      strerror(errno));
+    }
+
+    for (i = 0; i < nr_segs; i++) {
+        xengnttab_grant_copy_segment_t *xengnttab_seg =
+            &xengnttab_segs[i];
+
+        if (xengnttab_seg->status != GNTST_okay) {
+            xen_pv_printf(xendev, 0, "segment[%u] status: %d\n", i,
+                          xengnttab_seg->status);
+            rc = -1;
+        }
+    }
+
+    g_free(xengnttab_segs);
+    return rc;
+}
+
 /*
  * get xen backend device, allocate a new one if it doesn't exist.
  */
@@ -149,18 +299,6 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev,
     }
     qemu_set_cloexec(xenevtchn_fd(xendev->evtchndev));
 
-    if (ops->flags & DEVOPS_FLAG_NEED_GNTDEV) {
-        xendev->gnttabdev = xengnttab_open(NULL, 0);
-        if (xendev->gnttabdev == NULL) {
-            xen_pv_printf(NULL, 0, "can't open gnttab device\n");
-            xenevtchn_close(xendev->evtchndev);
-            qdev_unplug(DEVICE(xendev), NULL);
-            return NULL;
-        }
-    } else {
-        xendev->gnttabdev = NULL;
-    }
-
     xen_pv_insert_xendev(xendev);
 
     if (xendev->ops->alloc) {
@@ -322,6 +460,16 @@ static int xen_be_try_initialise(struct XenDevice *xendev)
         }
     }
 
+    if (xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV) {
+        xendev->gnttabdev = xengnttab_open(NULL, 0);
+        if (xendev->gnttabdev == NULL) {
+            xen_pv_printf(NULL, 0, "can't open gnttab device\n");
+            return -1;
+        }
+    } else {
+        xendev->gnttabdev = NULL;
+    }
+
     if (xendev->ops->initialise) {
         rc = xendev->ops->initialise(xendev);
     }
@@ -369,6 +517,10 @@ static void xen_be_disconnect(struct XenDevice *xendev, enum xenbus_state state)
         xendev->ops->disconnect) {
         xendev->ops->disconnect(xendev);
     }
+    if (xendev->gnttabdev) {
+        xengnttab_close(xendev->gnttabdev);
+        xendev->gnttabdev = NULL;
+    }
     if (xendev->be_state != state) {
         xen_be_set_state(xendev, state);
     }
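
Taken together, the backend conversions above all follow one pattern: declare DEVOPS_FLAG_NEED_GNTDEV, let xen_be_try_initialise() open the per-device gnttab handle, use the xen_be_* wrappers for every grant operation, and rely on xen_be_disconnect() to close the handle. A minimal sketch of a backend connect/disconnect written against the new helpers (struct example_dev and its ring_ref/sring fields are illustrative, not an existing driver):

    static int example_connect(struct XenDevice *xendev)
    {
        struct example_dev *dev =
            container_of(xendev, struct example_dev, xendev);

        xen_be_set_max_grant_refs(xendev, 1);        /* just the ring page */
        dev->sring = xen_be_map_grant_ref(xendev, dev->ring_ref,
                                          PROT_READ | PROT_WRITE);
        return dev->sring ? 0 : -1;
    }

    static void example_disconnect(struct XenDevice *xendev)
    {
        struct example_dev *dev =
            container_of(xendev, struct example_dev, xendev);

        if (dev->sring) {
            xen_be_unmap_grant_ref(xendev, dev->sring);
            dev->sring = NULL;
        }
        /* xendev->gnttabdev itself is closed by xen_be_disconnect() */
    }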
diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c
index 9b7a960de1..e5a6eff44f 100644
--- a/hw/xen/xen_pt.c
+++ b/hw/xen/xen_pt.c
@@ -907,7 +907,7 @@ out:
         }
     }
 
-    memory_listener_register(&s->memory_listener, &s->dev.bus_master_as);
+    memory_listener_register(&s->memory_listener, &address_space_memory);
     memory_listener_register(&s->io_listener, &address_space_io);
     s->listener_set = true;
     XEN_PT_LOG(d,
diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c
index a3ce33e78b..aee31c62bb 100644
--- a/hw/xen/xen_pt_config_init.c
+++ b/hw/xen/xen_pt_config_init.c
@@ -504,6 +504,8 @@ static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
         bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
         break;
     case XEN_PT_BAR_FLAG_UPPER:
+        assert(index > 0);
+        r_size = d->io_regions[index - 1].size >> 32;
         bar_emu_mask = XEN_PT_BAR_ALLF;
         bar_ro_mask = r_size ? r_size - 1 : 0;
         break;
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 76b589da57..6c0927bce3 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -1029,7 +1029,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                             BlockdevOnError on_target_error,
                             int creation_flags,
                             BlockCompletionFunc *cb, void *opaque,
-                            BlockJobTxn *txn, Error **errp);
+                            JobTxn *txn, Error **errp);
 
 void hmp_drive_add_node(Monitor *mon, const char *optstr);
 
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index 0f56f723de..32c00b7dc0 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -26,13 +26,13 @@
 #ifndef BLOCKJOB_H
 #define BLOCKJOB_H
 
+#include "qemu/job.h"
 #include "block/block.h"
 #include "qemu/ratelimit.h"
 
 #define BLOCK_JOB_SLICE_TIME 100000000ULL /* ns */
 
 typedef struct BlockJobDriver BlockJobDriver;
-typedef struct BlockJobTxn BlockJobTxn;
 
 /**
  * BlockJob:
@@ -40,141 +40,40 @@ typedef struct BlockJobTxn BlockJobTxn;
  * Long-running operation on a BlockDriverState.
  */
 typedef struct BlockJob {
-    /** The job type, including the job vtable.  */
-    const BlockJobDriver *driver;
+    /** Data belonging to the generic Job infrastructure */
+    Job job;
 
     /** The block device on which the job is operating.  */
     BlockBackend *blk;
 
-    /**
-     * The ID of the block job. May be NULL for internal jobs.
-     */
-    char *id;
-
-    /**
-     * The coroutine that executes the job.  If not NULL, it is
-     * reentered when busy is false and the job is cancelled.
-     */
-    Coroutine *co;
-
-    /**
-     * Set to true if the job should cancel itself.  The flag must
-     * always be tested just before toggling the busy flag from false
-     * to true.  After a job has been cancelled, it should only yield
-     * if #aio_poll will ("sooner or later") reenter the coroutine.
-     */
-    bool cancelled;
-
-    /**
-     * Set to true if the job should abort immediately without waiting
-     * for data to be in sync.
-     */
-    bool force;
-
-    /**
-     * Counter for pause request. If non-zero, the block job is either paused,
-     * or if busy == true will pause itself as soon as possible.
-     */
-    int pause_count;
-
-    /**
-     * Set to true if the job is paused by user.  Can be unpaused with the
-     * block-job-resume QMP command.
-     */
-    bool user_paused;
-
-    /**
-     * Set to false by the job while the coroutine has yielded and may be
-     * re-entered by block_job_enter().  There may still be I/O or event loop
-     * activity pending.  Accessed under block_job_mutex (in blockjob.c).
-     */
-    bool busy;
-
-    /**
-     * Set to true by the job while it is in a quiescent state, where
-     * no I/O or event loop activity is pending.
-     */
-    bool paused;
-
-    /**
-     * Set to true when the job is ready to be completed.
-     */
-    bool ready;
-
-    /**
-     * Set to true when the job has deferred work to the main loop.
-     */
-    bool deferred_to_main_loop;
-
-    /** Element of the list of block jobs */
-    QLIST_ENTRY(BlockJob) job_list;
-
     /** Status that is published by the query-block-jobs QMP API */
     BlockDeviceIoStatus iostatus;
 
-    /** Offset that is published by the query-block-jobs QMP API */
-    int64_t offset;
-
-    /** Length that is published by the query-block-jobs QMP API */
-    int64_t len;
-
     /** Speed that was set with @block_job_set_speed.  */
     int64_t speed;
 
     /** Rate limiting data structure for implementing @speed. */
     RateLimit limit;
 
-    /** The completion function that will be called when the job completes.  */
-    BlockCompletionFunc *cb;
-
     /** Block other operations when block job is running */
     Error *blocker;
 
-    /** BlockDriverStates that are involved in this block job */
-    GSList *nodes;
-
-    /** The opaque value that is passed to the completion function.  */
-    void *opaque;
-
-    /** Reference count of the block job */
-    int refcnt;
-
-    /** True when job has reported completion by calling block_job_completed. */
-    bool completed;
-
-    /** ret code passed to block_job_completed. */
-    int ret;
-
-    /**
-     * Timer that is used by @block_job_sleep_ns. Accessed under
-     * block_job_mutex (in blockjob.c).
-     */
-    QEMUTimer sleep_timer;
+    /** Called when a cancelled job is finalised. */
+    Notifier finalize_cancelled_notifier;
 
-    /** Current state; See @BlockJobStatus for details. */
-    BlockJobStatus status;
+    /** Called when a successfully completed job is finalised. */
+    Notifier finalize_completed_notifier;
 
-    /** True if this job should automatically finalize itself */
-    bool auto_finalize;
+    /** Called when the job transitions to PENDING */
+    Notifier pending_notifier;
 
-    /** True if this job should automatically dismiss itself */
-    bool auto_dismiss;
+    /** Called when the job transitions to READY */
+    Notifier ready_notifier;
 
-    BlockJobTxn *txn;
-    QLIST_ENTRY(BlockJob) txn_list;
+    /** BlockDriverStates that are involved in this block job */
+    GSList *nodes;
 } BlockJob;
 
-typedef enum BlockJobCreateFlags {
-    /* Default behavior */
-    BLOCK_JOB_DEFAULT = 0x00,
-    /* BlockJob is not QMP-created and should not send QMP events */
-    BLOCK_JOB_INTERNAL = 0x01,
-    /* BlockJob requires manual finalize step */
-    BLOCK_JOB_MANUAL_FINALIZE = 0x02,
-    /* BlockJob requires manual dismiss step */
-    BLOCK_JOB_MANUAL_DISMISS = 0x04,
-} BlockJobCreateFlags;
-
 /**
  * block_job_next:
  * @job: A block job, or %NULL.
@@ -231,78 +130,6 @@ void block_job_remove_all_bdrv(BlockJob *job);
 void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp);
 
 /**
- * block_job_start:
- * @job: A job that has not yet been started.
- *
- * Begins execution of a block job.
- * Takes ownership of one reference to the job object.
- */
-void block_job_start(BlockJob *job);
-
-/**
- * block_job_cancel:
- * @job: The job to be canceled.
- * @force: Quit a job without waiting for data to be in sync.
- *
- * Asynchronously cancel the specified job.
- */
-void block_job_cancel(BlockJob *job, bool force);
-
-/**
- * block_job_complete:
- * @job: The job to be completed.
- * @errp: Error object.
- *
- * Asynchronously complete the specified job.
- */
-void block_job_complete(BlockJob *job, Error **errp);
-
-
-/**
- * block_job_finalize:
- * @job: The job to fully commit and finish.
- * @errp: Error object.
- *
- * For jobs that have finished their work and are pending
- * awaiting explicit acknowledgement to commit their work,
- * This will commit that work.
- *
- * FIXME: Make the below statement universally true:
- * For jobs that support the manual workflow mode, all graph
- * changes that occur as a result will occur after this command
- * and before a successful reply.
- */
-void block_job_finalize(BlockJob *job, Error **errp);
-
-/**
- * block_job_dismiss:
- * @job: The job to be dismissed.
- * @errp: Error object.
- *
- * Remove a concluded job from the query list.
- */
-void block_job_dismiss(BlockJob **job, Error **errp);
-
-/**
- * block_job_progress_update:
- * @job: The job that has made progress
- * @done: How much progress the job made
- *
- * Updates the progress counter of the job.
- */
-void block_job_progress_update(BlockJob *job, uint64_t done);
-
-/**
- * block_job_progress_set_remaining:
- * @job: The job whose expected progress end value is set
- * @remaining: Expected end value of the progress counter of the job
- *
- * Sets the expected end value of the progress counter of a job so that a
- * completion percentage can be calculated when the progress is updated.
- */
-void block_job_progress_set_remaining(BlockJob *job, uint64_t remaining);
-
-/**
  * block_job_query:
  * @job: The job to get information about.
  *
@@ -311,78 +138,6 @@ void block_job_progress_set_remaining(BlockJob *job, uint64_t remaining);
 BlockJobInfo *block_job_query(BlockJob *job, Error **errp);
 
 /**
- * block_job_user_pause:
- * @job: The job to be paused.
- *
- * Asynchronously pause the specified job.
- * Do not allow a resume until a matching call to block_job_user_resume.
- */
-void block_job_user_pause(BlockJob *job, Error **errp);
-
-/**
- * block_job_paused:
- * @job: The job to query.
- *
- * Returns true if the job is user-paused.
- */
-bool block_job_user_paused(BlockJob *job);
-
-/**
- * block_job_user_resume:
- * @job: The job to be resumed.
- *
- * Resume the specified job.
- * Must be paired with a preceding block_job_user_pause.
- */
-void block_job_user_resume(BlockJob *job, Error **errp);
-
-/**
- * block_job_user_cancel:
- * @job: The job to be cancelled.
- * @force: Quit a job without waiting for data to be in sync.
- *
- * Cancels the specified job, but may refuse to do so if the
- * operation isn't currently meaningful.
- */
-void block_job_user_cancel(BlockJob *job, bool force, Error **errp);
-
-/**
- * block_job_cancel_sync:
- * @job: The job to be canceled.
- *
- * Synchronously cancel the job.  The completion callback is called
- * before the function returns.  The job may actually complete
- * instead of canceling itself; the circumstances under which this
- * happens depend on the kind of job that is active.
- *
- * Returns the return value from the job if the job actually completed
- * during the call, or -ECANCELED if it was canceled.
- */
-int block_job_cancel_sync(BlockJob *job);
-
-/**
- * block_job_cancel_sync_all:
- *
- * Synchronously cancels all jobs using block_job_cancel_sync().
- */
-void block_job_cancel_sync_all(void);
-
-/**
- * block_job_complete_sync:
- * @job: The job to be completed.
- * @errp: Error object which may be set by block_job_complete(); this is not
- *        necessarily set on every error, the job return value has to be
- *        checked as well.
- *
- * Synchronously complete the job.  The completion callback is called before the
- * function returns, unless it is NULL (which is permissible when using this
- * function).
- *
- * Returns the return value from the job.
- */
-int block_job_complete_sync(BlockJob *job, Error **errp);
-
-/**
  * block_job_iostatus_reset:
  * @job: The job whose I/O status should be reset.
  *
@@ -392,59 +147,6 @@ int block_job_complete_sync(BlockJob *job, Error **errp);
 void block_job_iostatus_reset(BlockJob *job);
 
 /**
- * block_job_txn_new:
- *
- * Allocate and return a new block job transaction.  Jobs can be added to the
- * transaction using block_job_txn_add_job().
- *
- * The transaction is automatically freed when the last job completes or is
- * cancelled.
- *
- * All jobs in the transaction either complete successfully or fail/cancel as a
- * group.  Jobs wait for each other before completing.  Cancelling one job
- * cancels all jobs in the transaction.
- */
-BlockJobTxn *block_job_txn_new(void);
-
-/**
- * block_job_ref:
- *
- * Add a reference to BlockJob refcnt, it will be decreased with
- * block_job_unref, and then be freed if it comes to be the last
- * reference.
- */
-void block_job_ref(BlockJob *job);
-
-/**
- * block_job_unref:
- *
- * Release a reference that was previously acquired with block_job_ref
- * or block_job_create. If it's the last reference to the object, it will be
- * freed.
- */
-void block_job_unref(BlockJob *job);
-
-/**
- * block_job_txn_unref:
- *
- * Release a reference that was previously acquired with block_job_txn_add_job
- * or block_job_txn_new. If it's the last reference to the object, it will be
- * freed.
- */
-void block_job_txn_unref(BlockJobTxn *txn);
-
-/**
- * block_job_txn_add_job:
- * @txn: The transaction (may be NULL)
- * @job: Job to add to the transaction
- *
- * Add @job to the transaction.  The @job must not already be in a transaction.
- * The caller must call either block_job_txn_unref() or block_job_completed()
- * to release the reference that is automatically grabbed here.
- */
-void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job);
-
-/**
  * block_job_is_internal:
  * @job: The job to determine if it is user-visible or not.
  *
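
The four Notifier members added to BlockJob give the block layer per-transition hooks into the generic job core: the job core invokes them on the corresponding state changes, and the block layer recovers its BlockJob with container_of(). A sketch of that shape (the callback name and the event it would emit are illustrative, and registration is presumed to happen in block_job_create()):

    static void example_ready_notified(Notifier *n, void *opaque)
    {
        BlockJob *bjob = container_of(n, BlockJob, ready_notifier);

        /* e.g. emit the BLOCK_JOB_READY QMP event for bjob */
        (void)bjob;
    }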
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
index 62ec964d09..5cd50c6639 100644
--- a/include/block/blockjob_int.h
+++ b/include/block/blockjob_int.h
@@ -35,72 +35,8 @@
  * A class type for block job driver.
  */
 struct BlockJobDriver {
-    /** Derived BlockJob struct size */
-    size_t instance_size;
-
-    /** String describing the operation, part of query-block-jobs QMP API */
-    BlockJobType job_type;
-
-    /** Mandatory: Entrypoint for the Coroutine. */
-    CoroutineEntry *start;
-
-    /**
-     * Optional callback for job types whose completion must be triggered
-     * manually.
-     */
-    void (*complete)(BlockJob *job, Error **errp);
-
-    /**
-     * If the callback is not NULL, prepare will be invoked when all the jobs
-     * belonging to the same transaction complete; or upon this job's completion
-     * if it is not in a transaction.
-     *
-     * This callback will not be invoked if the job has already failed.
-     * If it fails, abort and then clean will be called.
-     */
-    int (*prepare)(BlockJob *job);
-
-    /**
-     * If the callback is not NULL, it will be invoked when all the jobs
-     * belonging to the same transaction complete; or upon this job's
-     * completion if it is not in a transaction. Skipped if NULL.
-     *
-     * All jobs will complete with a call to either .commit() or .abort() but
-     * never both.
-     */
-    void (*commit)(BlockJob *job);
-
-    /**
-     * If the callback is not NULL, it will be invoked when any job in the
-     * same transaction fails; or upon this job's failure (due to error or
-     * cancellation) if it is not in a transaction. Skipped if NULL.
-     *
-     * All jobs will complete with a call to either .commit() or .abort() but
-     * never both.
-     */
-    void (*abort)(BlockJob *job);
-
-    /**
-     * If the callback is not NULL, it will be invoked after a call to either
-     * .commit() or .abort(). Regardless of which callback is invoked after
-     * completion, .clean() will always be called, even if the job does not
-     * belong to a transaction group.
-     */
-    void (*clean)(BlockJob *job);
-
-    /**
-     * If the callback is not NULL, it will be invoked when the job transitions
-     * into the paused state.  Paused jobs must not perform any asynchronous
-     * I/O or event loop activity.  This callback is used to quiesce jobs.
-     */
-    void coroutine_fn (*pause)(BlockJob *job);
-
-    /**
-     * If the callback is not NULL, it will be invoked when the job transitions
-     * out of the paused state.  Any asynchronous I/O or event loop activity
-     * should be restarted from this callback.
-     */
-    void coroutine_fn (*resume)(BlockJob *job);
+    /** Generic JobDriver callbacks and settings */
+    JobDriver job_driver;
 
     /*
      * If the callback is not NULL, it will be invoked before the job is
@@ -113,6 +49,10 @@ struct BlockJobDriver {
      * If the callback is not NULL, it will be invoked when the job has to be
      * synchronously cancelled or completed; it should drain BlockDriverStates
      * as required to ensure progress.
+     *
+     * Block jobs must use the default implementation for job_driver.drain,
+     * which will in turn call this callback after doing generic block job
+     * stuff.
      */
     void (*drain)(BlockJob *job);
 };
@@ -126,8 +66,7 @@ struct BlockJobDriver {
  * @bs: The block
  * @perm, @shared_perm: Permissions to request for @bs
  * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
- * @flags: Creation flags for the Block Job.
- *         See @BlockJobCreateFlags
+ * @flags: Creation flags for the Block Job. See @JobCreateFlags.
  * @cb: Completion function for the job.
  * @opaque: Opaque pointer value passed to @cb.
  * @errp: Error object.
@@ -142,28 +81,31 @@ struct BlockJobDriver {
  * called from a wrapper that is specific to the job type.
  */
 void *block_job_create(const char *job_id, const BlockJobDriver *driver,
-                       BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm,
+                       JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                        uint64_t shared_perm, int64_t speed, int flags,
                        BlockCompletionFunc *cb, void *opaque, Error **errp);
 
 /**
- * block_job_sleep_ns:
- * @job: The job that calls the function.
- * @ns: How many nanoseconds to stop for.
- *
- * Put the job to sleep (assuming that it wasn't canceled) for @ns
- * %QEMU_CLOCK_REALTIME nanoseconds.  Canceling the job will immediately
- * interrupt the wait.
+ * block_job_free:
+ * Callback to be used for JobDriver.free in all block jobs. Frees block job
+ * specific resources in @job.
  */
-void block_job_sleep_ns(BlockJob *job, int64_t ns);
+void block_job_free(Job *job);
 
 /**
- * block_job_yield:
- * @job: The job that calls the function.
- *
- * Yield the block job coroutine.
+ * block_job_user_resume:
+ * Callback to be used for JobDriver.user_resume in all block jobs. Resets the
+ * iostatus when the user resumes @job.
+ */
+void block_job_user_resume(Job *job);
+
+/**
+ * block_job_drain:
+ * Callback to be used for JobDriver.drain in all block jobs. Drains the main
+ * block node associated with the block jobs and calls BlockJobDriver.drain for
+ * job-specific actions.
  */
-void block_job_yield(BlockJob *job);
+void block_job_drain(Job *job);
 
 /**
  * block_job_ratelimit_get_delay:
@@ -174,57 +116,6 @@ void block_job_yield(BlockJob *job);
 int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n);
 
 /**
- * block_job_early_fail:
- * @bs: The block device.
- *
- * The block job could not be started, free it.
- */
-void block_job_early_fail(BlockJob *job);
-
-/**
- * block_job_completed:
- * @job: The job being completed.
- * @ret: The status code.
- *
- * Call the completion function that was registered at creation time, and
- * free @job.
- */
-void block_job_completed(BlockJob *job, int ret);
-
-/**
- * block_job_is_cancelled:
- * @job: The job being queried.
- *
- * Returns whether the job is scheduled for cancellation.
- */
-bool block_job_is_cancelled(BlockJob *job);
-
-/**
- * block_job_pause_point:
- * @job: The job that is ready to pause.
- *
- * Pause now if block_job_pause() has been called.  Block jobs that perform
- * lots of I/O must call this between requests so that the job can be paused.
- */
-void coroutine_fn block_job_pause_point(BlockJob *job);
-
-/**
- * block_job_enter:
- * @job: The job to enter.
- *
- * Continue the specified job by entering the coroutine.
- */
-void block_job_enter(BlockJob *job);
-
-/**
- * block_job_event_ready:
- * @job: The job which is now ready to be completed.
- *
- * Send a BLOCK_JOB_READY event for the specified job.
- */
-void block_job_event_ready(BlockJob *job);
-
-/**
  * block_job_error_action:
  * @job: The job to signal an error for.
  * @on_err: The error action setting.
@@ -237,23 +128,4 @@ void block_job_event_ready(BlockJob *job);
 BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                         int is_read, int error);
 
-typedef void BlockJobDeferToMainLoopFn(BlockJob *job, void *opaque);
-
-/**
- * block_job_defer_to_main_loop:
- * @job: The job
- * @fn: The function to run in the main loop
- * @opaque: The opaque value that is passed to @fn
- *
- * This function must be called by the main job coroutine just before it
- * returns.  @fn is executed in the main loop with the BlockDriverState
- * AioContext acquired.  Block jobs must call bdrv_unref(), bdrv_close(), and
- * anything that uses bdrv_drain_all() in the main loop.
- *
- * The @job AioContext is held while @fn executes.
- */
-void block_job_defer_to_main_loop(BlockJob *job,
-                                  BlockJobDeferToMainLoopFn *fn,
-                                  void *opaque);
-
 #endif
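
The three callbacks declared above are designed to be plugged into the generic
JobDriver that each BlockJobDriver now embeds. A minimal sketch of the wiring,
assuming the .job_driver embedding used elsewhere in this series; the example_*
identifiers are illustrative and not part of this patch:

    static void coroutine_fn example_run(void *opaque);  /* hypothetical entry point */

    static const BlockJobDriver example_job_driver = {
        .job_driver = {
            .instance_size = sizeof(BlockJob),        /* or a derived job struct */
            .job_type      = JOB_TYPE_STREAM,
            .start         = example_run,
            .free          = block_job_free,          /* generic block job cleanup */
            .user_resume   = block_job_user_resume,   /* resets the iostatus */
            .drain         = block_job_drain,         /* drains the main block node */
        },
    };
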
diff --git a/include/hw/xen/xen_backend.h b/include/hw/xen/xen_backend.h
index 3a27692407..9c17fdd85d 100644
--- a/include/hw/xen/xen_backend.h
+++ b/include/hw/xen/xen_backend.h
@@ -16,7 +16,6 @@
 /* variables */
 extern struct xs_handle *xenstore;
 extern const char *xen_protocol;
-extern bool xen_feature_grant_copy;
 extern DeviceState *xen_sysdev;
 extern BusState *xen_sysbus;
 
@@ -42,6 +41,39 @@ void xen_be_register_common(void);
 int xen_be_register(const char *type, struct XenDevOps *ops);
 int xen_be_set_state(struct XenDevice *xendev, enum xenbus_state state);
 int xen_be_bind_evtchn(struct XenDevice *xendev);
+void xen_be_set_max_grant_refs(struct XenDevice *xendev,
+                               unsigned int nr_refs);
+void *xen_be_map_grant_refs(struct XenDevice *xendev, uint32_t *refs,
+                            unsigned int nr_refs, int prot);
+void xen_be_unmap_grant_refs(struct XenDevice *xendev, void *ptr,
+                             unsigned int nr_refs);
+
+typedef struct XenGrantCopySegment {
+    union {
+        void *virt;
+        struct {
+            uint32_t ref;
+            off_t offset;
+        } foreign;
+    } source, dest;
+    size_t len;
+} XenGrantCopySegment;
+
+int xen_be_copy_grant_refs(struct XenDevice *xendev,
+                           bool to_domain, XenGrantCopySegment segs[],
+                           unsigned int nr_segs);
+
+static inline void *xen_be_map_grant_ref(struct XenDevice *xendev,
+                                         uint32_t ref, int prot)
+{
+    return xen_be_map_grant_refs(xendev, &ref, 1, prot);
+}
+
+static inline void xen_be_unmap_grant_ref(struct XenDevice *xendev,
+                                          void *ptr)
+{
+    return xen_be_unmap_grant_refs(xendev, ptr, 1);
+}
 
 /* actual backend drivers */
 extern struct XenDevOps xen_console_ops;      /* xen_console.c     */
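
xen_be_copy_grant_refs() lets a backend move data to or from guest grant
references without mapping them first. A hedged single-segment sketch, copying
a locally prepared buffer into a guest page; xendev, local_buf, gref and len
are assumed to be provided by the caller:

    XenGrantCopySegment seg = {
        .source.virt  = local_buf,
        .dest.foreign = { .ref = gref, .offset = 0 },
        .len          = len,
    };

    if (xen_be_copy_grant_refs(xendev, true /* to_domain */, &seg, 1)) {
        /* fall back to xen_be_map_grant_ref()/memcpy/unmap, or fail the request */
    }
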
diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h
index 5f1402b494..bbf207dcef 100644
--- a/include/hw/xen/xen_common.h
+++ b/include/hw/xen/xen_common.h
@@ -667,8 +667,21 @@ static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
 
 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
 
-
-typedef void *xengnttab_grant_copy_segment_t;
+struct xengnttab_grant_copy_segment {
+    union xengnttab_copy_ptr {
+        void *virt;
+        struct {
+            uint32_t ref;
+            uint16_t offset;
+            uint16_t domid;
+        } foreign;
+    } source, dest;
+    uint16_t len;
+    uint16_t flags;
+    int16_t status;
+};
+
+typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;
 
 static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                        xengnttab_grant_copy_segment_t *segs)
diff --git a/include/qemu/job.h b/include/qemu/job.h
new file mode 100644
index 0000000000..8c8badf75e
--- /dev/null
+++ b/include/qemu/job.h
@@ -0,0 +1,562 @@
+/*
+ * Declarations for background jobs
+ *
+ * Copyright (c) 2011 IBM Corp.
+ * Copyright (c) 2012, 2018 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef JOB_H
+#define JOB_H
+
+#include "qapi/qapi-types-block-core.h"
+#include "qemu/queue.h"
+#include "qemu/coroutine.h"
+#include "block/aio.h"
+
+typedef struct JobDriver JobDriver;
+typedef struct JobTxn JobTxn;
+
+
+/**
+ * Long-running operation.
+ */
+typedef struct Job {
+    /** The ID of the job. May be NULL for internal jobs. */
+    char *id;
+
+    /** The type of this job. */
+    const JobDriver *driver;
+
+    /** Reference count of the job */
+    int refcnt;
+
+    /** Current state; See @JobStatus for details. */
+    JobStatus status;
+
+    /** AioContext to run the job coroutine in */
+    AioContext *aio_context;
+
+    /**
+     * The coroutine that executes the job.  If not NULL, it is reentered when
+     * busy is false and the job is cancelled.
+     */
+    Coroutine *co;
+
+    /**
+     * Timer that is used by @job_sleep_ns. Accessed under job_mutex (in
+     * job.c).
+     */
+    QEMUTimer sleep_timer;
+
+    /**
+     * Counter for pause requests. If non-zero, the job is either paused, or
+     * if busy == true will pause itself as soon as possible.
+     */
+    int pause_count;
+
+    /**
+     * Set to false by the job while the coroutine has yielded and may be
+     * re-entered by job_enter(). There may still be I/O or event loop activity
+     * pending. Accessed under job_mutex (in job.c).
+     */
+    bool busy;
+
+    /**
+     * Set to true by the job while it is in a quiescent state, where
+     * no I/O or event loop activity is pending.
+     */
+    bool paused;
+
+    /**
+     * Set to true if the job is paused by the user.  Can be unpaused with the
+     * job-resume or block-job-resume QMP commands.
+     */
+    bool user_paused;
+
+    /**
+     * Set to true if the job should cancel itself.  The flag must
+     * always be tested just before toggling the busy flag from false
+     * to true.  After a job has been cancelled, it should only yield
+     * if #aio_poll will ("sooner or later") reenter the coroutine.
+     */
+    bool cancelled;
+
+    /**
+     * Set to true if the job should abort immediately without waiting
+     * for data to be in sync.
+     */
+    bool force_cancel;
+
+    /** Set to true when the job has deferred work to the main loop. */
+    bool deferred_to_main_loop;
+
+    /** True if this job should automatically finalize itself */
+    bool auto_finalize;
+
+    /** True if this job should automatically dismiss itself */
+    bool auto_dismiss;
+
+    /**
+     * Current progress. The unit is arbitrary as long as the ratio between
+     * progress_current and progress_total represents the estimated percentage
+     * of work already done.
+     */
+    int64_t progress_current;
+
+    /** Estimated progress_current value at the completion of the job */
+    int64_t progress_total;
+
+    /** Return code passed to job_completed(). */
+    int ret;
+
+    /** The completion function that will be called when the job completes.  */
+    BlockCompletionFunc *cb;
+
+    /** The opaque value that is passed to the completion function.  */
+    void *opaque;
+
+    /** Notifiers called when a cancelled job is finalised */
+    NotifierList on_finalize_cancelled;
+
+    /** Notifiers called when a successfully completed job is finalised */
+    NotifierList on_finalize_completed;
+
+    /** Notifiers called when the job transitions to PENDING */
+    NotifierList on_pending;
+
+    /** Notifiers called when the job transitions to READY */
+    NotifierList on_ready;
+
+    /** Element of the list of jobs */
+    QLIST_ENTRY(Job) job_list;
+
+    /** Transaction this job is part of */
+    JobTxn *txn;
+
+    /** Element of the list of jobs in a job transaction */
+    QLIST_ENTRY(Job) txn_list;
+} Job;
+
+/**
+ * Callbacks and other information about a Job driver.
+ */
+struct JobDriver {
+    /** Derived Job struct size */
+    size_t instance_size;
+
+    /** Enum describing the operation */
+    JobType job_type;
+
+    /** Mandatory: Entrypoint for the Coroutine. */
+    CoroutineEntry *start;
+
+    /**
+     * If the callback is not NULL, it will be invoked when the job transitions
+     * into the paused state.  Paused jobs must not perform any asynchronous
+     * I/O or event loop activity.  This callback is used to quiesce jobs.
+     */
+    void coroutine_fn (*pause)(Job *job);
+
+    /**
+     * If the callback is not NULL, it will be invoked when the job transitions
+     * out of the paused state.  Any asynchronous I/O or event loop activity
+     * should be restarted from this callback.
+     */
+    void coroutine_fn (*resume)(Job *job);
+
+    /**
+     * Called when the job is resumed by the user (i.e. user_paused becomes
+     * false). .user_resume is called before .resume.
+     */
+    void (*user_resume)(Job *job);
+
+    /**
+     * Optional callback for job types whose completion must be triggered
+     * manually.
+     */
+    void (*complete)(Job *job, Error **errp);
+
+    /**
+     * If the callback is not NULL, it will be invoked when the job has to be
+     * synchronously cancelled or completed; it should drain any activities
+     * as required to ensure progress.
+     */
+    void (*drain)(Job *job);
+
+    /**
+     * If the callback is not NULL, prepare will be invoked when all the jobs
+     * belonging to the same transaction complete; or upon this job's completion
+     * if it is not in a transaction.
+     *
+     * This callback will not be invoked if the job has already failed.
+     * If it fails, abort and then clean will be called.
+     */
+    int (*prepare)(Job *job);
+
+    /**
+     * If the callback is not NULL, it will be invoked when all the jobs
+     * belonging to the same transaction complete; or upon this job's
+     * completion if it is not in a transaction. Skipped if NULL.
+     *
+     * All jobs will complete with a call to either .commit() or .abort() but
+     * never both.
+     */
+    void (*commit)(Job *job);
+
+    /**
+     * If the callback is not NULL, it will be invoked when any job in the
+     * same transaction fails; or upon this job's failure (due to error or
+     * cancellation) if it is not in a transaction. Skipped if NULL.
+     *
+     * All jobs will complete with a call to either .commit() or .abort() but
+     * never both.
+     */
+    void (*abort)(Job *job);
+
+    /**
+     * If the callback is not NULL, it will be invoked after a call to either
+     * .commit() or .abort(). Regardless of which callback is invoked after
+     * completion, .clean() will always be called, even if the job does not
+     * belong to a transaction group.
+     */
+    void (*clean)(Job *job);
+
+
+    /** Called when the job is freed */
+    void (*free)(Job *job);
+};
+
+typedef enum JobCreateFlags {
+    /* Default behavior */
+    JOB_DEFAULT = 0x00,
+    /* Job is not QMP-created and should not send QMP events */
+    JOB_INTERNAL = 0x01,
+    /* Job requires manual finalize step */
+    JOB_MANUAL_FINALIZE = 0x02,
+    /* Job requires manual dismiss step */
+    JOB_MANUAL_DISMISS = 0x04,
+} JobCreateFlags;
+
+/**
+ * Allocate and return a new job transaction. Jobs can be added to the
+ * transaction using job_txn_add_job().
+ *
+ * The transaction is automatically freed when the last job completes or is
+ * cancelled.
+ *
+ * All jobs in the transaction either complete successfully or fail/cancel as a
+ * group.  Jobs wait for each other before completing.  Cancelling one job
+ * cancels all jobs in the transaction.
+ */
+JobTxn *job_txn_new(void);
+
+/**
+ * Release a reference that was previously acquired with job_txn_add_job or
+ * job_txn_new. If it's the last reference to the object, it will be freed.
+ */
+void job_txn_unref(JobTxn *txn);
+
+/**
+ * @txn: The transaction (may be NULL)
+ * @job: Job to add to the transaction
+ *
+ * Add @job to the transaction.  The @job must not already be in a transaction.
+ * The caller must call either job_txn_unref() or job_completed() to release
+ * the reference that is automatically grabbed here.
+ *
+ * If @txn is NULL, the function does nothing.
+ */
+void job_txn_add_job(JobTxn *txn, Job *job);
+
+/**
+ * Create a new long-running job and return it.
+ *
+ * @job_id: The id of the newly-created job, or %NULL for internal jobs
+ * @driver: The class object for the newly-created job.
+ * @txn: The transaction this job belongs to, if any. %NULL otherwise.
+ * @ctx: The AioContext to run the job coroutine in.
+ * @flags: Creation flags for the job. See @JobCreateFlags.
+ * @cb: Completion function for the job.
+ * @opaque: Opaque pointer value passed to @cb.
+ * @errp: Error object.
+ */
+void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
+                 AioContext *ctx, int flags, BlockCompletionFunc *cb,
+                 void *opaque, Error **errp);
+
+/**
+ * Add a reference to the Job's refcount. It is released with job_unref(), and
+ * the job is freed once the last reference is dropped.
+ */
+void job_ref(Job *job);
+
+/**
+ * Release a reference that was previously acquired with job_ref() or
+ * job_create(). If it's the last reference to the object, it will be freed.
+ */
+void job_unref(Job *job);
+
+/**
+ * @job: The job that has made progress
+ * @done: How much progress the job made since the last call
+ *
+ * Updates the progress counter of the job.
+ */
+void job_progress_update(Job *job, uint64_t done);
+
+/**
+ * @job: The job whose expected progress end value is set
+ * @remaining: Missing progress (on top of the current progress counter value)
+ *             until the new expected end value is reached
+ *
+ * Sets the expected end value of the progress counter of a job so that a
+ * completion percentage can be calculated when the progress is updated.
+ */
+void job_progress_set_remaining(Job *job, uint64_t remaining);
+
+/** To be called when a cancelled job is finalised. */
+void job_event_cancelled(Job *job);
+
+/** To be called when a successfully completed job is finalised. */
+void job_event_completed(Job *job);
+
+/**
+ * Conditionally enter the job coroutine if the job is ready to run, not
+ * already busy, and fn() returns true. fn() is called inside the job_lock
+ * critical section.
+ */
+void job_enter_cond(Job *job, bool(*fn)(Job *job));
+
+/**
+ * @job: A job that has not yet been started.
+ *
+ * Begins execution of a job.
+ * Takes ownership of one reference to the job object.
+ */
+void job_start(Job *job);
+
+/**
+ * @job: The job to enter.
+ *
+ * Continue the specified job by entering the coroutine.
+ */
+void job_enter(Job *job);
+
+/**
+ * @job: The job that is ready to pause.
+ *
+ * Pause now if job_pause() has been called. Jobs that perform lots of I/O
+ * must call this between requests so that the job can be paused.
+ */
+void coroutine_fn job_pause_point(Job *job);
+
+/**
+ * @job: The job that calls the function.
+ *
+ * Yield the job coroutine.
+ */
+void job_yield(Job *job);
+
+/**
+ * @job: The job that calls the function.
+ * @ns: How many nanoseconds to stop for.
+ *
+ * Put the job to sleep (assuming that it wasn't canceled) for @ns
+ * %QEMU_CLOCK_REALTIME nanoseconds.  Canceling the job will immediately
+ * interrupt the wait.
+ */
+void coroutine_fn job_sleep_ns(Job *job, int64_t ns);
+
+
+/** Returns the JobType of a given Job. */
+JobType job_type(const Job *job);
+
+/** Returns the enum string for the JobType of a given Job. */
+const char *job_type_str(const Job *job);
+
+/** Returns true if the job should not be visible to the management layer. */
+bool job_is_internal(Job *job);
+
+/** Returns whether the job is scheduled for cancellation. */
+bool job_is_cancelled(Job *job);
+
+/** Returns whether the job is in a completed state. */
+bool job_is_completed(Job *job);
+
+/** Returns whether the job is ready to be completed. */
+bool job_is_ready(Job *job);
+
+/**
+ * Request @job to pause at the next pause point. Must be paired with
+ * job_resume(). If the job is supposed to be resumed by user action, call
+ * job_user_pause() instead.
+ */
+void job_pause(Job *job);
+
+/** Resumes a @job paused with job_pause. */
+void job_resume(Job *job);
+
+/**
+ * Asynchronously pause the specified @job.
+ * Do not allow a resume until a matching call to job_user_resume.
+ */
+void job_user_pause(Job *job, Error **errp);
+
+/** Returns true if the job is user-paused. */
+bool job_user_paused(Job *job);
+
+/**
+ * Resume the specified @job.
+ * Must be paired with a preceding job_user_pause.
+ */
+void job_user_resume(Job *job, Error **errp);
+
+/**
+ * Drain any activities as required to ensure progress. This can be called in a
+ * loop to synchronously complete a job.
+ */
+void job_drain(Job *job);
+
+/**
+ * Get the next element from the list of jobs after @job, or the
+ * first one if @job is %NULL.
+ *
+ * Returns the requested job, or %NULL if there are no more jobs left.
+ */
+Job *job_next(Job *job);
+
+/**
+ * Get the job identified by @id (which must not be %NULL).
+ *
+ * Returns the requested job, or %NULL if it doesn't exist.
+ */
+Job *job_get(const char *id);
+
+/**
+ * Check whether the verb @verb can be applied to @job in its current state.
+ * Returns 0 if the verb can be applied; otherwise errp is set and -EPERM is
+ * returned.
+ */
+int job_apply_verb(Job *job, JobVerb verb, Error **errp);
+
+/** The @job could not be started, free it. */
+void job_early_fail(Job *job);
+
+/** Moves the @job from RUNNING to READY */
+void job_transition_to_ready(Job *job);
+
+/**
+ * @job: The job being completed.
+ * @ret: The status code.
+ *
+ * Marks @job as completed. If @ret is non-zero, the job transaction it is part
+ * of is aborted. If @ret is zero, the job moves into the WAITING state. If it
+ * is the last job to complete in its transaction, all jobs in the transaction
+ * move from WAITING to PENDING.
+ */
+void job_completed(Job *job, int ret);
+
+/** Asynchronously complete the specified @job. */
+void job_complete(Job *job, Error **errp);
+
+/**
+ * Asynchronously cancel the specified @job. If @force is true, the job should
+ * be cancelled immediately without waiting for a consistent state.
+ */
+void job_cancel(Job *job, bool force);
+
+/**
+ * Cancels the specified job like job_cancel(), but may refuse to do so if the
+ * operation isn't meaningful in the current state of the job.
+ */
+void job_user_cancel(Job *job, bool force, Error **errp);
+
+/**
+ * Synchronously cancel the @job.  The completion callback is called
+ * before the function returns.  The job may actually complete
+ * instead of canceling itself; the circumstances under which this
+ * happens depend on the kind of job that is active.
+ *
+ * Returns the return value from the job if the job actually completed
+ * during the call, or -ECANCELED if it was canceled.
+ */
+int job_cancel_sync(Job *job);
+
+/** Synchronously cancels all jobs using job_cancel_sync(). */
+void job_cancel_sync_all(void);
+
+/**
+ * @job: The job to be completed.
+ * @errp: Error object which may be set by job_complete(); this is not
+ *        necessarily set on every error; the job return value has to be
+ *        checked as well.
+ *
+ * Synchronously complete the job.  The completion callback is called before the
+ * function returns, unless it is NULL (which is permissible when using this
+ * function).
+ *
+ * Returns the return value from the job.
+ */
+int job_complete_sync(Job *job, Error **errp);
+
+/**
+ * For a @job that has finished its work and is pending awaiting explicit
+ * acknowledgement to commit its work, this will commit that work.
+ *
+ * FIXME: Make the below statement universally true:
+ * For jobs that support the manual workflow mode, all graph changes that occur
+ * as a result will occur after this command and before a successful reply.
+ */
+void job_finalize(Job *job, Error **errp);
+
+/**
+ * Remove the concluded @job from the query list and reset the passed pointer
+ * to %NULL. Returns an error if the job is not actually concluded.
+ */
+void job_dismiss(Job **job, Error **errp);
+
+typedef void JobDeferToMainLoopFn(Job *job, void *opaque);
+
+/**
+ * @job: The job
+ * @fn: The function to run in the main loop
+ * @opaque: The opaque value that is passed to @fn
+ *
+ * This function must be called by the main job coroutine just before it
+ * returns.  @fn is executed in the main loop with the job AioContext acquired.
+ *
+ * Block jobs must call bdrv_unref(), bdrv_close(), and anything that uses
+ * bdrv_drain_all() in the main loop.
+ *
+ * The @job AioContext is held while @fn executes.
+ */
+void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque);
+
+/**
+ * Synchronously finishes the given @job. If @finish is given, it is called to
+ * trigger completion or cancellation of the job.
+ *
+ * Returns 0 if the job is successfully completed, -ECANCELED if the job was
+ * cancelled before completing, and -errno in other error cases.
+ */
+int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp);
+
+#endif
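
The header above describes the whole contract a driver has to fulfil: supply a
JobDriver with a coroutine entry point, create the job with job_create(), start
it with job_start(), report progress, hit pause/cancellation points regularly,
and defer completion to the main loop. A minimal hypothetical driver sketched
under those assumptions; every demo_* identifier is illustrative and not part
of this patch:

    typedef struct DemoJob {
        Job common;     /* must be the first member */
        int64_t done;
    } DemoJob;

    static void demo_exit(Job *job, void *opaque)
    {
        /* Runs in the main loop with the job's AioContext acquired */
        job_completed(job, 0);
    }

    static void coroutine_fn demo_run(void *opaque)
    {
        DemoJob *s = opaque;    /* .start receives the Job pointer */

        job_progress_set_remaining(&s->common, 100);
        while (s->done < 100 && !job_is_cancelled(&s->common)) {
            /* ... perform one unit of work here ... */
            job_progress_update(&s->common, 1);
            s->done++;
            /* sleep briefly; this doubles as a pause/cancellation point */
            job_sleep_ns(&s->common, 100000);
        }
        job_defer_to_main_loop(&s->common, demo_exit, NULL);
    }

    static const JobDriver demo_job_driver = {
        .instance_size = sizeof(DemoJob),
        .job_type      = JOB_TYPE_STREAM,   /* reusing an existing JobType value */
        .start         = demo_run,
    };

Such a job would be instantiated with job_create("demo0", &demo_job_driver,
NULL, ctx, JOB_DEFAULT, NULL, NULL, errp) and then kicked off with job_start().
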
diff --git a/job-qmp.c b/job-qmp.c
new file mode 100644
index 0000000000..7f38f63336
--- /dev/null
+++ b/job-qmp.c
@@ -0,0 +1,188 @@
+/*
+ * QMP interface for background jobs
+ *
+ * Copyright (c) 2011 IBM Corp.
+ * Copyright (c) 2012, 2018 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/job.h"
+#include "qapi/qapi-commands-job.h"
+#include "qapi/error.h"
+#include "trace-root.h"
+
+/* Get a job using its ID and acquire its AioContext */
+static Job *find_job(const char *id, AioContext **aio_context, Error **errp)
+{
+    Job *job;
+
+    *aio_context = NULL;
+
+    job = job_get(id);
+    if (!job) {
+        error_setg(errp, "Job not found");
+        return NULL;
+    }
+
+    *aio_context = job->aio_context;
+    aio_context_acquire(*aio_context);
+
+    return job;
+}
+
+void qmp_job_cancel(const char *id, Error **errp)
+{
+    AioContext *aio_context;
+    Job *job = find_job(id, &aio_context, errp);
+
+    if (!job) {
+        return;
+    }
+
+    trace_qmp_job_cancel(job);
+    job_user_cancel(job, true, errp);
+    aio_context_release(aio_context);
+}
+
+void qmp_job_pause(const char *id, Error **errp)
+{
+    AioContext *aio_context;
+    Job *job = find_job(id, &aio_context, errp);
+
+    if (!job) {
+        return;
+    }
+
+    trace_qmp_job_pause(job);
+    job_user_pause(job, errp);
+    aio_context_release(aio_context);
+}
+
+void qmp_job_resume(const char *id, Error **errp)
+{
+    AioContext *aio_context;
+    Job *job = find_job(id, &aio_context, errp);
+
+    if (!job) {
+        return;
+    }
+
+    trace_qmp_job_resume(job);
+    job_user_resume(job, errp);
+    aio_context_release(aio_context);
+}
+
+void qmp_job_complete(const char *id, Error **errp)
+{
+    AioContext *aio_context;
+    Job *job = find_job(id, &aio_context, errp);
+
+    if (!job) {
+        return;
+    }
+
+    trace_qmp_job_complete(job);
+    job_complete(job, errp);
+    aio_context_release(aio_context);
+}
+
+void qmp_job_finalize(const char *id, Error **errp)
+{
+    AioContext *aio_context;
+    Job *job = find_job(id, &aio_context, errp);
+
+    if (!job) {
+        return;
+    }
+
+    trace_qmp_job_finalize(job);
+    job_finalize(job, errp);
+    aio_context_release(aio_context);
+}
+
+void qmp_job_dismiss(const char *id, Error **errp)
+{
+    AioContext *aio_context;
+    Job *job = find_job(id, &aio_context, errp);
+
+    if (!job) {
+        return;
+    }
+
+    trace_qmp_job_dismiss(job);
+    job_dismiss(&job, errp);
+    aio_context_release(aio_context);
+}
+
+static JobInfo *job_query_single(Job *job, Error **errp)
+{
+    JobInfo *info;
+    const char *errmsg = NULL;
+
+    assert(!job_is_internal(job));
+
+    if (job->ret < 0) {
+        errmsg = strerror(-job->ret);
+    }
+
+    info = g_new(JobInfo, 1);
+    *info = (JobInfo) {
+        .id                 = g_strdup(job->id),
+        .type               = job_type(job),
+        .status             = job->status,
+        .current_progress   = job->progress_current,
+        .total_progress     = job->progress_total,
+        .has_error          = !!errmsg,
+        .error              = g_strdup(errmsg),
+    };
+
+    return info;
+}
+
+JobInfoList *qmp_query_jobs(Error **errp)
+{
+    JobInfoList *head = NULL, **p_next = &head;
+    Job *job;
+
+    for (job = job_next(NULL); job; job = job_next(job)) {
+        JobInfoList *elem;
+        AioContext *aio_context;
+
+        if (job_is_internal(job)) {
+            continue;
+        }
+        elem = g_new0(JobInfoList, 1);
+        aio_context = job->aio_context;
+        aio_context_acquire(aio_context);
+        elem->value = job_query_single(job, errp);
+        aio_context_release(aio_context);
+        if (!elem->value) {
+            g_free(elem);
+            qapi_free_JobInfoList(head);
+            return NULL;
+        }
+        *p_next = elem;
+        p_next = &elem->next;
+    }
+
+    return head;
+}
diff --git a/job.c b/job.c
new file mode 100644
index 0000000000..f026661b0f
--- /dev/null
+++ b/job.c
@@ -0,0 +1,1000 @@
+/*
+ * Background jobs (long-running operations)
+ *
+ * Copyright (c) 2011 IBM Corp.
+ * Copyright (c) 2012, 2018 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qapi/error.h"
+#include "qemu/job.h"
+#include "qemu/id.h"
+#include "qemu/main-loop.h"
+#include "trace-root.h"
+#include "qapi/qapi-events-job.h"
+
+static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);
+
+/* Job State Transition Table */
+bool JobSTT[JOB_STATUS__MAX][JOB_STATUS__MAX] = {
+                                    /* U, C, R, P, Y, S, W, D, X, E, N */
+    /* U: */ [JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+    /* C: */ [JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1},
+    /* R: */ [JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0},
+    /* P: */ [JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
+    /* Y: */ [JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0},
+    /* S: */ [JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
+    /* W: */ [JOB_STATUS_WAITING]   = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0},
+    /* D: */ [JOB_STATUS_PENDING]   = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
+    /* X: */ [JOB_STATUS_ABORTING]  = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
+    /* E: */ [JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+    /* N: */ [JOB_STATUS_NULL]      = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+};
+
+bool JobVerbTable[JOB_VERB__MAX][JOB_STATUS__MAX] = {
+                                    /* U, C, R, P, Y, S, W, D, X, E, N */
+    [JOB_VERB_CANCEL]               = {0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0},
+    [JOB_VERB_PAUSE]                = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
+    [JOB_VERB_RESUME]               = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
+    [JOB_VERB_SET_SPEED]            = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
+    [JOB_VERB_COMPLETE]             = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
+    [JOB_VERB_FINALIZE]             = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
+    [JOB_VERB_DISMISS]              = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
+};
+
+/* Transactional group of jobs */
+struct JobTxn {
+
+    /* Is this txn being cancelled? */
+    bool aborting;
+
+    /* List of jobs */
+    QLIST_HEAD(, Job) jobs;
+
+    /* Reference count */
+    int refcnt;
+};
+
+/* Right now, this mutex is only needed to synchronize accesses to job->busy
+ * and job->sleep_timer, such as concurrent calls to job_do_yield and
+ * job_enter. */
+static QemuMutex job_mutex;
+
+static void job_lock(void)
+{
+    qemu_mutex_lock(&job_mutex);
+}
+
+static void job_unlock(void)
+{
+    qemu_mutex_unlock(&job_mutex);
+}
+
+static void __attribute__((__constructor__)) job_init(void)
+{
+    qemu_mutex_init(&job_mutex);
+}
+
+JobTxn *job_txn_new(void)
+{
+    JobTxn *txn = g_new0(JobTxn, 1);
+    QLIST_INIT(&txn->jobs);
+    txn->refcnt = 1;
+    return txn;
+}
+
+static void job_txn_ref(JobTxn *txn)
+{
+    txn->refcnt++;
+}
+
+void job_txn_unref(JobTxn *txn)
+{
+    if (txn && --txn->refcnt == 0) {
+        g_free(txn);
+    }
+}
+
+void job_txn_add_job(JobTxn *txn, Job *job)
+{
+    if (!txn) {
+        return;
+    }
+
+    assert(!job->txn);
+    job->txn = txn;
+
+    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
+    job_txn_ref(txn);
+}
+
+static void job_txn_del_job(Job *job)
+{
+    if (job->txn) {
+        QLIST_REMOVE(job, txn_list);
+        job_txn_unref(job->txn);
+        job->txn = NULL;
+    }
+}
+
+static int job_txn_apply(JobTxn *txn, int fn(Job *), bool lock)
+{
+    AioContext *ctx;
+    Job *job, *next;
+    int rc = 0;
+
+    QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
+        if (lock) {
+            ctx = job->aio_context;
+            aio_context_acquire(ctx);
+        }
+        rc = fn(job);
+        if (lock) {
+            aio_context_release(ctx);
+        }
+        if (rc) {
+            break;
+        }
+    }
+    return rc;
+}
+
+bool job_is_internal(Job *job)
+{
+    return (job->id == NULL);
+}
+
+static void job_state_transition(Job *job, JobStatus s1)
+{
+    JobStatus s0 = job->status;
+    assert(s1 >= 0 && s1 <= JOB_STATUS__MAX);
+    trace_job_state_transition(job, job->ret,
+                               JobSTT[s0][s1] ? "allowed" : "disallowed",
+                               JobStatus_str(s0), JobStatus_str(s1));
+    assert(JobSTT[s0][s1]);
+    job->status = s1;
+
+    if (!job_is_internal(job) && s1 != s0) {
+        qapi_event_send_job_status_change(job->id, job->status, &error_abort);
+    }
+}
+
+int job_apply_verb(Job *job, JobVerb verb, Error **errp)
+{
+    JobStatus s0 = job->status;
+    assert(verb >= 0 && verb <= JOB_VERB__MAX);
+    trace_job_apply_verb(job, JobStatus_str(s0), JobVerb_str(verb),
+                         JobVerbTable[verb][s0] ? "allowed" : "prohibited");
+    if (JobVerbTable[verb][s0]) {
+        return 0;
+    }
+    error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'",
+               job->id, JobStatus_str(s0), JobVerb_str(verb));
+    return -EPERM;
+}
+
+JobType job_type(const Job *job)
+{
+    return job->driver->job_type;
+}
+
+const char *job_type_str(const Job *job)
+{
+    return JobType_str(job_type(job));
+}
+
+bool job_is_cancelled(Job *job)
+{
+    return job->cancelled;
+}
+
+bool job_is_ready(Job *job)
+{
+    switch (job->status) {
+    case JOB_STATUS_UNDEFINED:
+    case JOB_STATUS_CREATED:
+    case JOB_STATUS_RUNNING:
+    case JOB_STATUS_PAUSED:
+    case JOB_STATUS_WAITING:
+    case JOB_STATUS_PENDING:
+    case JOB_STATUS_ABORTING:
+    case JOB_STATUS_CONCLUDED:
+    case JOB_STATUS_NULL:
+        return false;
+    case JOB_STATUS_READY:
+    case JOB_STATUS_STANDBY:
+        return true;
+    default:
+        g_assert_not_reached();
+    }
+    return false;
+}
+
+bool job_is_completed(Job *job)
+{
+    switch (job->status) {
+    case JOB_STATUS_UNDEFINED:
+    case JOB_STATUS_CREATED:
+    case JOB_STATUS_RUNNING:
+    case JOB_STATUS_PAUSED:
+    case JOB_STATUS_READY:
+    case JOB_STATUS_STANDBY:
+        return false;
+    case JOB_STATUS_WAITING:
+    case JOB_STATUS_PENDING:
+    case JOB_STATUS_ABORTING:
+    case JOB_STATUS_CONCLUDED:
+    case JOB_STATUS_NULL:
+        return true;
+    default:
+        g_assert_not_reached();
+    }
+    return false;
+}
+
+static bool job_started(Job *job)
+{
+    return job->co;
+}
+
+static bool job_should_pause(Job *job)
+{
+    return job->pause_count > 0;
+}
+
+Job *job_next(Job *job)
+{
+    if (!job) {
+        return QLIST_FIRST(&jobs);
+    }
+    return QLIST_NEXT(job, job_list);
+}
+
+Job *job_get(const char *id)
+{
+    Job *job;
+
+    QLIST_FOREACH(job, &jobs, job_list) {
+        if (job->id && !strcmp(id, job->id)) {
+            return job;
+        }
+    }
+
+    return NULL;
+}
+
+static void job_sleep_timer_cb(void *opaque)
+{
+    Job *job = opaque;
+
+    job_enter(job);
+}
+
+void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
+                 AioContext *ctx, int flags, BlockCompletionFunc *cb,
+                 void *opaque, Error **errp)
+{
+    Job *job;
+
+    if (job_id) {
+        if (flags & JOB_INTERNAL) {
+            error_setg(errp, "Cannot specify job ID for internal job");
+            return NULL;
+        }
+        if (!id_wellformed(job_id)) {
+            error_setg(errp, "Invalid job ID '%s'", job_id);
+            return NULL;
+        }
+        if (job_get(job_id)) {
+            error_setg(errp, "Job ID '%s' already in use", job_id);
+            return NULL;
+        }
+    } else if (!(flags & JOB_INTERNAL)) {
+        error_setg(errp, "An explicit job ID is required");
+        return NULL;
+    }
+
+    job = g_malloc0(driver->instance_size);
+    job->driver        = driver;
+    job->id            = g_strdup(job_id);
+    job->refcnt        = 1;
+    job->aio_context   = ctx;
+    job->busy          = false;
+    job->paused        = true;
+    job->pause_count   = 1;
+    job->auto_finalize = !(flags & JOB_MANUAL_FINALIZE);
+    job->auto_dismiss  = !(flags & JOB_MANUAL_DISMISS);
+    job->cb            = cb;
+    job->opaque        = opaque;
+
+    notifier_list_init(&job->on_finalize_cancelled);
+    notifier_list_init(&job->on_finalize_completed);
+    notifier_list_init(&job->on_pending);
+    notifier_list_init(&job->on_ready);
+
+    job_state_transition(job, JOB_STATUS_CREATED);
+    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
+                   QEMU_CLOCK_REALTIME, SCALE_NS,
+                   job_sleep_timer_cb, job);
+
+    QLIST_INSERT_HEAD(&jobs, job, job_list);
+
+    /* Single jobs are modeled as single-job transactions for the sake of
+     * consolidating the job management logic */
+    if (!txn) {
+        txn = job_txn_new();
+        job_txn_add_job(txn, job);
+        job_txn_unref(txn);
+    } else {
+        job_txn_add_job(txn, job);
+    }
+
+    return job;
+}
+
+void job_ref(Job *job)
+{
+    ++job->refcnt;
+}
+
+void job_unref(Job *job)
+{
+    if (--job->refcnt == 0) {
+        assert(job->status == JOB_STATUS_NULL);
+        assert(!timer_pending(&job->sleep_timer));
+        assert(!job->txn);
+
+        if (job->driver->free) {
+            job->driver->free(job);
+        }
+
+        QLIST_REMOVE(job, job_list);
+
+        g_free(job->id);
+        g_free(job);
+    }
+}
+
+void job_progress_update(Job *job, uint64_t done)
+{
+    job->progress_current += done;
+}
+
+void job_progress_set_remaining(Job *job, uint64_t remaining)
+{
+    job->progress_total = job->progress_current + remaining;
+}
+
+void job_event_cancelled(Job *job)
+{
+    notifier_list_notify(&job->on_finalize_cancelled, job);
+}
+
+void job_event_completed(Job *job)
+{
+    notifier_list_notify(&job->on_finalize_completed, job);
+}
+
+static void job_event_pending(Job *job)
+{
+    notifier_list_notify(&job->on_pending, job);
+}
+
+static void job_event_ready(Job *job)
+{
+    notifier_list_notify(&job->on_ready, job);
+}
+
+void job_enter_cond(Job *job, bool(*fn)(Job *job))
+{
+    if (!job_started(job)) {
+        return;
+    }
+    if (job->deferred_to_main_loop) {
+        return;
+    }
+
+    job_lock();
+    if (job->busy) {
+        job_unlock();
+        return;
+    }
+
+    if (fn && !fn(job)) {
+        job_unlock();
+        return;
+    }
+
+    assert(!job->deferred_to_main_loop);
+    timer_del(&job->sleep_timer);
+    job->busy = true;
+    job_unlock();
+    aio_co_wake(job->co);
+}
+
+void job_enter(Job *job)
+{
+    job_enter_cond(job, NULL);
+}
+
+/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
+ * Reentering the job coroutine with job_enter() before the timer has expired
+ * is allowed and cancels the timer.
+ *
+ * If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be
+ * called explicitly. */
+static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
+{
+    job_lock();
+    if (ns != -1) {
+        timer_mod(&job->sleep_timer, ns);
+    }
+    job->busy = false;
+    job_unlock();
+    qemu_coroutine_yield();
+
+    /* Set by job_enter_cond() before re-entering the coroutine.  */
+    assert(job->busy);
+}
+
+void coroutine_fn job_pause_point(Job *job)
+{
+    assert(job && job_started(job));
+
+    if (!job_should_pause(job)) {
+        return;
+    }
+    if (job_is_cancelled(job)) {
+        return;
+    }
+
+    if (job->driver->pause) {
+        job->driver->pause(job);
+    }
+
+    if (job_should_pause(job) && !job_is_cancelled(job)) {
+        JobStatus status = job->status;
+        job_state_transition(job, status == JOB_STATUS_READY
+                                  ? JOB_STATUS_STANDBY
+                                  : JOB_STATUS_PAUSED);
+        job->paused = true;
+        job_do_yield(job, -1);
+        job->paused = false;
+        job_state_transition(job, status);
+    }
+
+    if (job->driver->resume) {
+        job->driver->resume(job);
+    }
+}
+
+void job_yield(Job *job)
+{
+    assert(job->busy);
+
+    /* Check cancellation *before* setting busy = false, too!  */
+    if (job_is_cancelled(job)) {
+        return;
+    }
+
+    if (!job_should_pause(job)) {
+        job_do_yield(job, -1);
+    }
+
+    job_pause_point(job);
+}
+
+void coroutine_fn job_sleep_ns(Job *job, int64_t ns)
+{
+    assert(job->busy);
+
+    /* Check cancellation *before* setting busy = false, too!  */
+    if (job_is_cancelled(job)) {
+        return;
+    }
+
+    if (!job_should_pause(job)) {
+        job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
+    }
+
+    job_pause_point(job);
+}
+
+void job_drain(Job *job)
+{
+    /* If job is !busy this kicks it into the next pause point. */
+    job_enter(job);
+
+    if (job->driver->drain) {
+        job->driver->drain(job);
+    }
+}
+
+
+/**
+ * All jobs must allow a pause point before entering their job proper. This
+ * ensures that jobs can be paused prior to being started, then resumed later.
+ */
+static void coroutine_fn job_co_entry(void *opaque)
+{
+    Job *job = opaque;
+
+    assert(job && job->driver && job->driver->start);
+    job_pause_point(job);
+    job->driver->start(job);
+}
+
+
+void job_start(Job *job)
+{
+    assert(job && !job_started(job) && job->paused &&
+           job->driver && job->driver->start);
+    job->co = qemu_coroutine_create(job_co_entry, job);
+    job->pause_count--;
+    job->busy = true;
+    job->paused = false;
+    job_state_transition(job, JOB_STATUS_RUNNING);
+    aio_co_enter(job->aio_context, job->co);
+}
+
+/* Assumes the job_mutex is held */
+static bool job_timer_not_pending(Job *job)
+{
+    return !timer_pending(&job->sleep_timer);
+}
+
+void job_pause(Job *job)
+{
+    job->pause_count++;
+}
+
+void job_resume(Job *job)
+{
+    assert(job->pause_count > 0);
+    job->pause_count--;
+    if (job->pause_count) {
+        return;
+    }
+
+    /* kick only if no timer is pending */
+    job_enter_cond(job, job_timer_not_pending);
+}
+
+void job_user_pause(Job *job, Error **errp)
+{
+    if (job_apply_verb(job, JOB_VERB_PAUSE, errp)) {
+        return;
+    }
+    if (job->user_paused) {
+        error_setg(errp, "Job is already paused");
+        return;
+    }
+    job->user_paused = true;
+    job_pause(job);
+}
+
+bool job_user_paused(Job *job)
+{
+    return job->user_paused;
+}
+
+void job_user_resume(Job *job, Error **errp)
+{
+    assert(job);
+    if (!job->user_paused || job->pause_count <= 0) {
+        error_setg(errp, "Can't resume a job that was not paused");
+        return;
+    }
+    if (job_apply_verb(job, JOB_VERB_RESUME, errp)) {
+        return;
+    }
+    if (job->driver->user_resume) {
+        job->driver->user_resume(job);
+    }
+    job->user_paused = false;
+    job_resume(job);
+}
+
+static void job_do_dismiss(Job *job)
+{
+    assert(job);
+    job->busy = false;
+    job->paused = false;
+    job->deferred_to_main_loop = true;
+
+    job_txn_del_job(job);
+
+    job_state_transition(job, JOB_STATUS_NULL);
+    job_unref(job);
+}
+
+void job_dismiss(Job **jobptr, Error **errp)
+{
+    Job *job = *jobptr;
+    /* similarly to _complete, this is QMP-interface only. */
+    assert(job->id);
+    if (job_apply_verb(job, JOB_VERB_DISMISS, errp)) {
+        return;
+    }
+
+    job_do_dismiss(job);
+    *jobptr = NULL;
+}
+
+void job_early_fail(Job *job)
+{
+    assert(job->status == JOB_STATUS_CREATED);
+    job_do_dismiss(job);
+}
+
+static void job_conclude(Job *job)
+{
+    job_state_transition(job, JOB_STATUS_CONCLUDED);
+    if (job->auto_dismiss || !job_started(job)) {
+        job_do_dismiss(job);
+    }
+}
+
+static void job_update_rc(Job *job)
+{
+    if (!job->ret && job_is_cancelled(job)) {
+        job->ret = -ECANCELED;
+    }
+    if (job->ret) {
+        job_state_transition(job, JOB_STATUS_ABORTING);
+    }
+}
+
+static void job_commit(Job *job)
+{
+    assert(!job->ret);
+    if (job->driver->commit) {
+        job->driver->commit(job);
+    }
+}
+
+static void job_abort(Job *job)
+{
+    assert(job->ret);
+    if (job->driver->abort) {
+        job->driver->abort(job);
+    }
+}
+
+static void job_clean(Job *job)
+{
+    if (job->driver->clean) {
+        job->driver->clean(job);
+    }
+}
+
+static int job_finalize_single(Job *job)
+{
+    assert(job_is_completed(job));
+
+    /* Ensure abort is called for late-transactional failures */
+    job_update_rc(job);
+
+    if (!job->ret) {
+        job_commit(job);
+    } else {
+        job_abort(job);
+    }
+    job_clean(job);
+
+    if (job->cb) {
+        job->cb(job->opaque, job->ret);
+    }
+
+    /* Emit events only if we actually started */
+    if (job_started(job)) {
+        if (job_is_cancelled(job)) {
+            job_event_cancelled(job);
+        } else {
+            job_event_completed(job);
+        }
+    }
+
+    job_txn_del_job(job);
+    job_conclude(job);
+    return 0;
+}
+
+static void job_cancel_async(Job *job, bool force)
+{
+    if (job->user_paused) {
+        /* Do not call job_enter here, the caller will handle it.  */
+        job->user_paused = false;
+        if (job->driver->user_resume) {
+            job->driver->user_resume(job);
+        }
+        assert(job->pause_count > 0);
+        job->pause_count--;
+    }
+    job->cancelled = true;
+    /* To prevent 'force == false' overriding a previous 'force == true' */
+    job->force_cancel |= force;
+}
+
+static void job_completed_txn_abort(Job *job)
+{
+    AioContext *ctx;
+    JobTxn *txn = job->txn;
+    Job *other_job;
+
+    if (txn->aborting) {
+        /*
+         * We are cancelled by another job, which will handle everything.
+         */
+        return;
+    }
+    txn->aborting = true;
+    job_txn_ref(txn);
+
+    /* We are the first failed job. Cancel other jobs. */
+    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
+        ctx = other_job->aio_context;
+        aio_context_acquire(ctx);
+    }
+
+    /* Other jobs are effectively cancelled by us, set the status for
+     * them; this job, however, may or may not be cancelled, depending
+     * on the caller, so leave it. */
+    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
+        if (other_job != job) {
+            job_cancel_async(other_job, false);
+        }
+    }
+    while (!QLIST_EMPTY(&txn->jobs)) {
+        other_job = QLIST_FIRST(&txn->jobs);
+        ctx = other_job->aio_context;
+        if (!job_is_completed(other_job)) {
+            assert(job_is_cancelled(other_job));
+            job_finish_sync(other_job, NULL, NULL);
+        }
+        job_finalize_single(other_job);
+        aio_context_release(ctx);
+    }
+
+    job_txn_unref(txn);
+}
+
+static int job_prepare(Job *job)
+{
+    if (job->ret == 0 && job->driver->prepare) {
+        job->ret = job->driver->prepare(job);
+    }
+    return job->ret;
+}
+
+static int job_needs_finalize(Job *job)
+{
+    return !job->auto_finalize;
+}
+
+static void job_do_finalize(Job *job)
+{
+    int rc;
+    assert(job && job->txn);
+
+    /* prepare the transaction to complete */
+    rc = job_txn_apply(job->txn, job_prepare, true);
+    if (rc) {
+        job_completed_txn_abort(job);
+    } else {
+        job_txn_apply(job->txn, job_finalize_single, true);
+    }
+}
+
+void job_finalize(Job *job, Error **errp)
+{
+    assert(job && job->id);
+    if (job_apply_verb(job, JOB_VERB_FINALIZE, errp)) {
+        return;
+    }
+    job_do_finalize(job);
+}
+
+static int job_transition_to_pending(Job *job)
+{
+    job_state_transition(job, JOB_STATUS_PENDING);
+    if (!job->auto_finalize) {
+        job_event_pending(job);
+    }
+    return 0;
+}
+
+void job_transition_to_ready(Job *job)
+{
+    job_state_transition(job, JOB_STATUS_READY);
+    job_event_ready(job);
+}
+
+static void job_completed_txn_success(Job *job)
+{
+    JobTxn *txn = job->txn;
+    Job *other_job;
+
+    job_state_transition(job, JOB_STATUS_WAITING);
+
+    /*
+     * Successful completion, see if there are other running jobs in this
+     * txn.
+     */
+    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
+        if (!job_is_completed(other_job)) {
+            return;
+        }
+        assert(other_job->ret == 0);
+    }
+
+    job_txn_apply(txn, job_transition_to_pending, false);
+
+    /* If no jobs need manual finalization, automatically do so */
+    if (job_txn_apply(txn, job_needs_finalize, false) == 0) {
+        job_do_finalize(job);
+    }
+}
+
+void job_completed(Job *job, int ret)
+{
+    assert(job && job->txn && !job_is_completed(job));
+    job->ret = ret;
+    job_update_rc(job);
+    trace_job_completed(job, ret, job->ret);
+    if (job->ret) {
+        job_completed_txn_abort(job);
+    } else {
+        job_completed_txn_success(job);
+    }
+}
+
+void job_cancel(Job *job, bool force)
+{
+    if (job->status == JOB_STATUS_CONCLUDED) {
+        job_do_dismiss(job);
+        return;
+    }
+    job_cancel_async(job, force);
+    if (!job_started(job)) {
+        job_completed(job, -ECANCELED);
+    } else if (job->deferred_to_main_loop) {
+        job_completed_txn_abort(job);
+    } else {
+        job_enter(job);
+    }
+}
+
+void job_user_cancel(Job *job, bool force, Error **errp)
+{
+    if (job_apply_verb(job, JOB_VERB_CANCEL, errp)) {
+        return;
+    }
+    job_cancel(job, force);
+}
+
+/* A wrapper around job_cancel() taking an Error ** parameter so it may be
+ * used with job_finish_sync() without the need for (rather nasty) function
+ * pointer casts there. */
+static void job_cancel_err(Job *job, Error **errp)
+{
+    job_cancel(job, false);
+}
+
+int job_cancel_sync(Job *job)
+{
+    return job_finish_sync(job, &job_cancel_err, NULL);
+}
+
+void job_cancel_sync_all(void)
+{
+    Job *job;
+    AioContext *aio_context;
+
+    while ((job = job_next(NULL))) {
+        aio_context = job->aio_context;
+        aio_context_acquire(aio_context);
+        job_cancel_sync(job);
+        aio_context_release(aio_context);
+    }
+}
+
+int job_complete_sync(Job *job, Error **errp)
+{
+    return job_finish_sync(job, job_complete, errp);
+}
+
+void job_complete(Job *job, Error **errp)
+{
+    /* Should not be reachable via external interface for internal jobs */
+    assert(job->id);
+    if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) {
+        return;
+    }
+    if (job->pause_count || job_is_cancelled(job) || !job->driver->complete) {
+        error_setg(errp, "The active block job '%s' cannot be completed",
+                   job->id);
+        return;
+    }
+
+    job->driver->complete(job, errp);
+}
+
+
+typedef struct {
+    Job *job;
+    JobDeferToMainLoopFn *fn;
+    void *opaque;
+} JobDeferToMainLoopData;
+
+static void job_defer_to_main_loop_bh(void *opaque)
+{
+    JobDeferToMainLoopData *data = opaque;
+    Job *job = data->job;
+    AioContext *aio_context = job->aio_context;
+
+    aio_context_acquire(aio_context);
+    data->fn(data->job, data->opaque);
+    aio_context_release(aio_context);
+
+    g_free(data);
+}
+
+void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque)
+{
+    JobDeferToMainLoopData *data = g_malloc(sizeof(*data));
+    data->job = job;
+    data->fn = fn;
+    data->opaque = opaque;
+    job->deferred_to_main_loop = true;
+
+    aio_bh_schedule_oneshot(qemu_get_aio_context(),
+                            job_defer_to_main_loop_bh, data);
+}
+
+int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
+{
+    Error *local_err = NULL;
+    int ret;
+
+    job_ref(job);
+
+    if (finish) {
+        finish(job, &local_err);
+    }
+    if (local_err) {
+        error_propagate(errp, local_err);
+        job_unref(job);
+        return -EBUSY;
+    }
+    /* job_drain calls job_enter, and it should be enough to induce progress
+     * until the job completes or moves to the main thread. */
+    while (!job->deferred_to_main_loop && !job_is_completed(job)) {
+        job_drain(job);
+    }
+    while (!job_is_completed(job)) {
+        aio_poll(qemu_get_aio_context(), true);
+    }
+    ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
+    job_unref(job);
+    return ret;
+}
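
Because job_create() attaches the new job to the given transaction (or to a
private single-job transaction when @txn is NULL), grouping jobs so that they
complete or abort together only requires passing one JobTxn to every creation
call and dropping the caller's reference afterwards. A hedged sketch;
create_backup_job() is a hypothetical helper that forwards @txn to
job_create():

    void start_grouped_jobs(AioContext *ctx, Error **errp)
    {
        JobTxn *txn = job_txn_new();
        Job *a = create_backup_job("backup0", txn, ctx, errp);
        Job *b = create_backup_job("backup1", txn, ctx, errp);

        job_txn_unref(txn);     /* each job keeps its own reference */

        if (a && b) {
            job_start(a);
            job_start(b);
        }
    }
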
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 55728cb823..7dfa77a05c 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -6,6 +6,7 @@
 
 { 'include': 'common.json' }
 { 'include': 'crypto.json' }
+{ 'include': 'job.json' }
 { 'include': 'sockets.json' }
 
 ##
@@ -1050,95 +1051,6 @@
   'data': ['top', 'full', 'none', 'incremental'] }
 
 ##
-# @BlockJobType:
-#
-# Type of a block job.
-#
-# @commit: block commit job type, see "block-commit"
-#
-# @stream: block stream job type, see "block-stream"
-#
-# @mirror: drive mirror job type, see "drive-mirror"
-#
-# @backup: drive backup job type, see "drive-backup"
-#
-# Since: 1.7
-##
-{ 'enum': 'BlockJobType',
-  'data': ['commit', 'stream', 'mirror', 'backup'] }
-
-##
-# @BlockJobVerb:
-#
-# Represents command verbs that can be applied to a blockjob.
-#
-# @cancel: see @block-job-cancel
-#
-# @pause: see @block-job-pause
-#
-# @resume: see @block-job-resume
-#
-# @set-speed: see @block-job-set-speed
-#
-# @complete: see @block-job-complete
-#
-# @dismiss: see @block-job-dismiss
-#
-# @finalize: see @block-job-finalize
-#
-# Since: 2.12
-##
-{ 'enum': 'BlockJobVerb',
-  'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete', 'dismiss',
-           'finalize' ] }
-
-##
-# @BlockJobStatus:
-#
-# Indicates the present state of a given blockjob in its lifetime.
-#
-# @undefined: Erroneous, default state. Should not ever be visible.
-#
-# @created: The job has been created, but not yet started.
-#
-# @running: The job is currently running.
-#
-# @paused: The job is running, but paused. The pause may be requested by
-#          either the QMP user or by internal processes.
-#
-# @ready: The job is running, but is ready for the user to signal completion.
-#         This is used for long-running jobs like mirror that are designed to
-#         run indefinitely.
-#
-# @standby: The job is ready, but paused. This is nearly identical to @paused.
-#           The job may return to @ready or otherwise be canceled.
-#
-# @waiting: The job is waiting for other jobs in the transaction to converge
-#           to the waiting state. This status will likely not be visible for
-#           the last job in a transaction.
-#
-# @pending: The job has finished its work, but has finalization steps that it
-#           needs to make prior to completing. These changes may require
-#           manual intervention by the management process if manual was set
-#           to true. These changes may still fail.
-#
-# @aborting: The job is in the process of being aborted, and will finish with
-#            an error. The job will afterwards report that it is @concluded.
-#            This status may not be visible to the management process.
-#
-# @concluded: The job has finished all work. If manual was set to true, the job
-#             will remain in the query list until it is dismissed.
-#
-# @null: The job is in the process of being dismantled. This state should not
-#        ever be visible externally.
-#
-# Since: 2.12
-##
-{ 'enum': 'BlockJobStatus',
-  'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby',
-           'waiting', 'pending', 'aborting', 'concluded', 'null' ] }
-
-##
 # @BlockJobInfo:
 #
 # Information about a long-running block device operation.
@@ -1148,7 +1060,12 @@
 # @device: The job identifier. Originally the device name but other
 #          values are allowed since QEMU 2.7
 #
-# @len: the maximum progress value
+# @len: Estimated @offset value at the completion of the job. This value can
+#       arbitrarily change while the job is running, in both directions.
+#
+# @offset: Progress made until now. The unit is arbitrary and the value can
+#          only meaningfully be used for the ratio of @offset to @len. The
+#          value is monotonically increasing.
 #
 # @busy: false if the job is known to be in a quiescent state, with
 #        no pending I/O.  Since 1.3.
@@ -1156,8 +1073,6 @@
 # @paused: whether the job is paused or, if @busy is true, will
 #          pause itself as soon as possible.  Since 1.3.
 #
-# @offset: the current progress value
-#
 # @speed: the rate limit, bytes per second
 #
 # @io-status: the status of the job (since 1.3)
@@ -1181,7 +1096,7 @@
   'data': {'type': 'str', 'device': 'str', 'len': 'int',
            'offset': 'int', 'busy': 'bool', 'paused': 'bool', 'speed': 'int',
            'io-status': 'BlockDeviceIoStatus', 'ready': 'bool',
-           'status': 'BlockJobStatus',
+           'status': 'JobStatus',
            'auto-finalize': 'bool', 'auto-dismiss': 'bool',
            '*error': 'str' } }
 
@@ -2338,8 +2253,7 @@
 #
 # This command returns immediately after marking the active background block
 # operation for pausing.  It is an error to call this command if no
-# operation is in progress.  Pausing an already paused job has no cumulative
-# effect; a single block-job-resume command will resume the job.
+# operation is in progress or if the job is already paused.
 #
 # The operation will pause as soon as possible.  No event is emitted when
 # the operation is actually paused.  Cancelling a paused job automatically
@@ -2363,7 +2277,7 @@
 #
 # This command returns immediately after resuming a paused background block
 # operation.  It is an error to call this command if no operation is in
-# progress.  Resuming an already running job is not an error.
+# progress or if the job is not paused.
 #
 # This command also clears the error status of the job.
 #
@@ -2414,7 +2328,7 @@
 # QEMU 2.12+ job lifetime management semantics.
 #
 # This command will refuse to operate on any job that has not yet reached
-# its terminal state, BLOCK_JOB_STATUS_CONCLUDED. For jobs that make use of
+# its terminal state, JOB_STATUS_CONCLUDED. For jobs that make use of the
 # BLOCK_JOB_READY event, block-job-cancel or block-job-complete will still need
 # to be used as appropriate.
 #
@@ -4497,7 +4411,7 @@
 #
 ##
 { 'event': 'BLOCK_JOB_COMPLETED',
-  'data': { 'type'  : 'BlockJobType',
+  'data': { 'type'  : 'JobType',
             'device': 'str',
             'len'   : 'int',
             'offset': 'int',
@@ -4533,7 +4447,7 @@
 #
 ##
 { 'event': 'BLOCK_JOB_CANCELLED',
-  'data': { 'type'  : 'BlockJobType',
+  'data': { 'type'  : 'JobType',
             'device': 'str',
             'len'   : 'int',
             'offset': 'int',
@@ -4598,7 +4512,7 @@
 #
 ##
 { 'event': 'BLOCK_JOB_READY',
-  'data': { 'type'  : 'BlockJobType',
+  'data': { 'type'  : 'JobType',
             'device': 'str',
             'len'   : 'int',
             'offset': 'int',
@@ -4625,7 +4539,7 @@
 #
 ##
 { 'event': 'BLOCK_JOB_PENDING',
-  'data': { 'type'  : 'BlockJobType',
+  'data': { 'type'  : 'JobType',
             'id'    : 'str' } }
 
 ##
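Note: the block-job-pause/block-job-resume documentation above now describes stricter semantics (pausing an already paused job, or resuming a job that is not paused, is an error). A minimal iotests-style sketch of that behaviour follows; it is illustrative only, and the image path and job id are placeholders, not part of this patch.

    # Illustrative sketch only: exercises the stricter pause/resume semantics
    # using the qemu-iotests helpers seen elsewhere in this series.
    import iotests

    vm = iotests.VM()
    vm.add_drive('/tmp/test.qcow2')   # hypothetical test image
    vm.launch()

    vm.qmp('block-stream', device='drive0', job_id='job0')

    result = vm.qmp('block-job-pause', device='job0')
    assert 'error' not in result      # first pause succeeds

    result = vm.qmp('block-job-pause', device='job0')
    assert 'error' in result          # pausing an already paused job is an error

    result = vm.qmp('block-job-resume', device='job0')
    assert 'error' not in result      # resume succeeds

    result = vm.qmp('block-job-resume', device='job0')
    assert 'error' in result          # resuming a running job is an error

    vm.shutdown()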
diff --git a/qapi/job.json b/qapi/job.json
new file mode 100644
index 0000000000..970124de76
--- /dev/null
+++ b/qapi/job.json
@@ -0,0 +1,253 @@
+# -*- Mode: Python -*-
+
+##
+# == Background jobs
+##
+
+##
+# @JobType:
+#
+# Type of a background job.
+#
+# @commit: block commit job type, see "block-commit"
+#
+# @stream: block stream job type, see "block-stream"
+#
+# @mirror: drive mirror job type, see "drive-mirror"
+#
+# @backup: drive backup job type, see "drive-backup"
+#
+# Since: 1.7
+##
+{ 'enum': 'JobType',
+  'data': ['commit', 'stream', 'mirror', 'backup'] }
+
+##
+# @JobStatus:
+#
+# Indicates the present state of a given job in its lifetime.
+#
+# @undefined: Erroneous, default state. Should not ever be visible.
+#
+# @created: The job has been created, but not yet started.
+#
+# @running: The job is currently running.
+#
+# @paused: The job is running, but paused. The pause may be requested by
+#          either the QMP user or by internal processes.
+#
+# @ready: The job is running, but is ready for the user to signal completion.
+#         This is used for long-running jobs like mirror that are designed to
+#         run indefinitely.
+#
+# @standby: The job is ready, but paused. This is nearly identical to @paused.
+#           The job may return to @ready or otherwise be canceled.
+#
+# @waiting: The job is waiting for other jobs in the transaction to converge
+#           to the waiting state. This status will likely not be visible for
+#           the last job in a transaction.
+#
+# @pending: The job has finished its work, but has finalization steps that it
+#           needs to take before completing. These changes may require manual
+#           intervention by the management process if manual was set to true.
+#           These changes may still fail.
+#
+# @aborting: The job is in the process of being aborted, and will finish with
+#            an error. The job will afterwards report that it is @concluded.
+#            This status may not be visible to the management process.
+#
+# @concluded: The job has finished all work. If manual was set to true, the job
+#             will remain in the query list until it is dismissed.
+#
+# @null: The job is in the process of being dismantled. This state should not
+#        ever be visible externally.
+#
+# Since: 2.12
+##
+{ 'enum': 'JobStatus',
+  'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby',
+           'waiting', 'pending', 'aborting', 'concluded', 'null' ] }
+
+##
+# @JobVerb:
+#
+# Represents command verbs that can be applied to a job.
+#
+# @cancel: see @block-job-cancel
+#
+# @pause: see @block-job-pause
+#
+# @resume: see @block-job-resume
+#
+# @set-speed: see @block-job-set-speed
+#
+# @complete: see @block-job-complete
+#
+# @dismiss: see @block-job-dismiss
+#
+# @finalize: see @block-job-finalize
+#
+# Since: 2.12
+##
+{ 'enum': 'JobVerb',
+  'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete', 'dismiss',
+           'finalize' ] }
+
+##
+# @JOB_STATUS_CHANGE:
+#
+# Emitted when a job transitions to a different status.
+#
+# @id: The job identifier
+# @status: The new job status
+#
+# Since: 2.13
+##
+{ 'event': 'JOB_STATUS_CHANGE',
+  'data': { 'id': 'str',
+            'status': 'JobStatus' } }
+
+##
+# @job-pause:
+#
+# Pause an active job.
+#
+# This command returns immediately after marking the active job for pausing.
+# Pausing an already paused job is an error.
+#
+# The job will pause as soon as possible, which means transitioning into the
+# PAUSED state if it was RUNNING, or into STANDBY if it was READY. The
+# corresponding JOB_STATUS_CHANGE event will be emitted.
+#
+# Cancelling a paused job automatically resumes it.
+#
+# @id: The job identifier.
+#
+# Since: 2.13
+##
+{ 'command': 'job-pause', 'data': { 'id': 'str' } }
+
+##
+# @job-resume:
+#
+# Resume a paused job.
+#
+# This command returns immediately after resuming a paused job. Resuming an
+# already running job is an error.
+#
+# @id: The job identifier.
+#
+# Since: 2.13
+##
+{ 'command': 'job-resume', 'data': { 'id': 'str' } }
+
+##
+# @job-cancel:
+#
+# Instruct an active background job to cancel at the next opportunity.
+# This command returns immediately after marking the active job for
+# cancellation.
+#
+# The job will cancel as soon as possible and then emit a JOB_STATUS_CHANGE
+# event. Usually, the status will change to ABORTING, but it is possible that
+# a job successfully completes (e.g. because it was almost done and there was
+# no opportunity to cancel before it completed) and transitions to PENDING
+# instead.
+#
+# @id: The job identifier.
+#
+# Since: 2.13
+##
+{ 'command': 'job-cancel', 'data': { 'id': 'str' } }
+
+
+##
+# @job-complete:
+#
+# Manually trigger completion of an active job in the READY state.
+#
+# @id: The job identifier.
+#
+# Since: 2.13
+##
+{ 'command': 'job-complete', 'data': { 'id': 'str' } }
+
+##
+# @job-dismiss:
+#
+# Deletes a job that is in the CONCLUDED state. This command only needs to be
+# run explicitly for jobs that don't have automatic dismiss enabled.
+#
+# This command will refuse to operate on any job that has not yet reached its
+# terminal state, JOB_STATUS_CONCLUDED. For jobs that make use of the JOB_READY
+# event, job-cancel or job-complete will still need to be used as appropriate.
+#
+# @id: The job identifier.
+#
+# Since: 2.13
+##
+{ 'command': 'job-dismiss', 'data': { 'id': 'str' } }
+
+##
+# @job-finalize:
+#
+# Instructs all jobs in a transaction (or a single job if it is not part of any
+# transaction) to finalize any graph changes and do any necessary cleanup. This
+# command requires that all involved jobs are in the PENDING state.
+#
+# For jobs in a transaction, instructing one job to finalize will force
+# ALL jobs in the transaction to finalize, so it is only necessary to instruct
+# a single member job to finalize.
+#
+# @id: The identifier of any job in the transaction, or of a job that is not
+#      part of any transaction.
+#
+# Since: 2.13
+##
+{ 'command': 'job-finalize', 'data': { 'id': 'str' } }
+
+##
+# @JobInfo:
+#
+# Information about a job.
+#
+# @id:                  The job identifier
+#
+# @type:                The kind of job that is being performed
+#
+# @status:              Current job state/status
+#
+# @current-progress:    Progress made until now. The unit is arbitrary and the
+#                       value can only meaningfully be used for the ratio of
+#                       @current-progress to @total-progress. The value is
+#                       monotonically increasing.
+#
+# @total-progress:      Estimated @current-progress value at the completion of
+#                       the job. This value can arbitrarily change while the
+#                       job is running, in both directions.
+#
+# @error:               If this field is present, the job failed; if it is
+#                       still missing in the CONCLUDED state, this indicates
+#                       successful completion.
+#
+#                       The value is a human-readable error message to describe
+#                       the reason for the job failure. It should not be parsed
+#                       by applications.
+#
+# Since: 2.13
+##
+{ 'struct': 'JobInfo',
+  'data': { 'id': 'str', 'type': 'JobType', 'status': 'JobStatus',
+            'current-progress': 'int', 'total-progress': 'int',
+            '*error': 'str' } }
+
+##
+# @query-jobs:
+#
+# Return information about jobs.
+#
+# Returns: a list with a @JobInfo for each active job
+#
+# Since: 2.13
+##
+{ 'command': 'query-jobs', 'returns': ['JobInfo'] }
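Note: the new qapi/job.json interface above can be driven with the generic job-* commands and observed through JOB_STATUS_CHANGE and query-jobs. The sketch below is illustrative only and assumes the qemu-iotests helpers used in the tests later in this series; the image path and job id are placeholders.

    # Illustrative sketch of the new generic job commands; not part of this patch.
    import iotests

    vm = iotests.VM()
    vm.add_drive('/tmp/test.qcow2')   # hypothetical test image
    vm.launch()

    # A block job started with a job_id is also visible through the job layer.
    vm.qmp('block-stream', device='drive0', job_id='job0')
    vm.event_wait(name='JOB_STATUS_CHANGE')   # e.g. 'created', then 'running'

    # JobInfo exposes current-progress/total-progress instead of offset/len.
    print(vm.qmp('query-jobs'))

    vm.qmp('job-pause', id='job0')    # error if the job is already paused
    vm.qmp('job-resume', id='job0')   # error if the job is not paused
    vm.qmp('job-cancel', id='job0')   # usually transitions to ABORTING

    # With auto-dismiss disabled, a CONCLUDED job would additionally need:
    # vm.qmp('job-dismiss', id='job0')

    vm.shutdown()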
diff --git a/qapi/qapi-schema.json b/qapi/qapi-schema.json
index 25bce78352..65b6dc2f6f 100644
--- a/qapi/qapi-schema.json
+++ b/qapi/qapi-schema.json
@@ -84,6 +84,7 @@
 { 'include': 'crypto.json' }
 { 'include': 'block.json' }
 { 'include': 'char.json' }
+{ 'include': 'job.json' }
 { 'include': 'net.json' }
 { 'include': 'rocker.json' }
 { 'include': 'tpm.json' }
diff --git a/qemu-img.c b/qemu-img.c
index 2b5a5706b6..976b437da0 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -861,19 +861,23 @@ static void run_block_job(BlockJob *job, Error **errp)
     int ret = 0;
 
     aio_context_acquire(aio_context);
-    block_job_ref(job);
+    job_ref(&job->job);
     do {
+        float progress = 0.0f;
         aio_poll(aio_context, true);
-        qemu_progress_print(job->len ?
-                            ((float)job->offset / job->len * 100.f) : 0.0f, 0);
-    } while (!job->ready && !job->completed);
+        if (job->job.progress_total) {
+            progress = (float)job->job.progress_current /
+                       job->job.progress_total * 100.f;
+        }
+        qemu_progress_print(progress, 0);
+    } while (!job_is_ready(&job->job) && !job_is_completed(&job->job));
 
-    if (!job->completed) {
-        ret = block_job_complete_sync(job, errp);
+    if (!job_is_completed(&job->job)) {
+        ret = job_complete_sync(&job->job, errp);
     } else {
-        ret = job->ret;
+        ret = job->job.ret;
     }
-    block_job_unref(job);
+    job_unref(&job->job);
     aio_context_release(aio_context);
 
     /* publish completion progress only when success */
@@ -1014,7 +1018,7 @@ static int img_commit(int argc, char **argv)
 
     aio_context = bdrv_get_aio_context(bs);
     aio_context_acquire(aio_context);
-    commit_active_start("commit", bs, base_bs, BLOCK_JOB_DEFAULT, 0,
+    commit_active_start("commit", bs, base_bs, JOB_DEFAULT, 0,
                         BLOCKDEV_ON_ERROR_REPORT, NULL, common_block_job_cb,
                         &cbi, false, &local_err);
     aio_context_release(aio_context);
diff --git a/qemu-nbd.c b/qemu-nbd.c
index 0af0560ad1..51b9d38c72 100644
--- a/qemu-nbd.c
+++ b/qemu-nbd.c
@@ -482,6 +482,12 @@ static const char *socket_activation_validate_opts(const char *device,
     return NULL;
 }
 
+static void qemu_nbd_shutdown(void)
+{
+    job_cancel_sync_all();
+    bdrv_close_all();
+}
+
 int main(int argc, char **argv)
 {
     BlockBackend *blk;
@@ -928,7 +934,7 @@ int main(int argc, char **argv)
         exit(EXIT_FAILURE);
     }
     bdrv_init();
-    atexit(bdrv_close_all);
+    atexit(qemu_nbd_shutdown);
 
     srcpath = argv[optind];
     if (imageOpts) {
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index cb1b652388..e3d8c2cdfc 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -271,7 +271,7 @@ our @typeList = (
 	qr{hwaddr},
         # external libraries
 	qr{xml${Ident}},
-	qr{xendevicemodel_handle},
+	qr{xen\w+_handle},
 	# Glib definitions
 	qr{gchar},
 	qr{gshort},
diff --git a/target/lm32/op_helper.c b/target/lm32/op_helper.c
index 577f8306e3..234d55e056 100644
--- a/target/lm32/op_helper.c
+++ b/target/lm32/op_helper.c
@@ -102,12 +102,16 @@ void HELPER(wcsr_dc)(CPULM32State *env, uint32_t dc)
 
 void HELPER(wcsr_im)(CPULM32State *env, uint32_t im)
 {
+    qemu_mutex_lock_iothread();
     lm32_pic_set_im(env->pic_state, im);
+    qemu_mutex_unlock_iothread();
 }
 
 void HELPER(wcsr_ip)(CPULM32State *env, uint32_t im)
 {
+    qemu_mutex_lock_iothread();
     lm32_pic_set_ip(env->pic_state, im);
+    qemu_mutex_unlock_iothread();
 }
 
 void HELPER(wcsr_jtx)(CPULM32State *env, uint32_t jtx)
diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030
index 640a6dfd10..1dbc2ddc49 100755
--- a/tests/qemu-iotests/030
+++ b/tests/qemu-iotests/030
@@ -304,8 +304,7 @@ class TestParallelOps(iotests.QMPTestCase):
         result = self.vm.qmp('block-stream', device='node5', base=self.imgs[3], job_id='stream-node6')
         self.assert_qmp(result, 'error/class', 'GenericError')
 
-        event = self.vm.get_qmp_event(wait=True)
-        self.assertEqual(event['event'], 'BLOCK_JOB_READY')
+        event = self.vm.event_wait(name='BLOCK_JOB_READY')
         self.assert_qmp(event, 'data/device', 'commit-drive0')
         self.assert_qmp(event, 'data/type', 'commit')
         self.assert_qmp_absent(event, 'data/error')
@@ -565,6 +564,8 @@ class TestEIO(TestErrors):
                     self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                     self.assert_qmp(event, 'data/len', self.image_len)
                     completed = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
 
         self.assert_no_active_block_jobs()
         self.vm.shutdown()
@@ -596,6 +597,8 @@ class TestEIO(TestErrors):
                     self.assert_qmp(event, 'data/offset', self.image_len)
                     self.assert_qmp(event, 'data/len', self.image_len)
                     completed = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
 
         self.assert_no_active_block_jobs()
         self.vm.shutdown()
@@ -637,6 +640,8 @@ class TestEIO(TestErrors):
                     self.assert_qmp(event, 'data/offset', self.image_len)
                     self.assert_qmp(event, 'data/len', self.image_len)
                     completed = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
 
         self.assert_no_active_block_jobs()
         self.vm.shutdown()
@@ -663,6 +668,8 @@ class TestEIO(TestErrors):
                     self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                     self.assert_qmp(event, 'data/len', self.image_len)
                     completed = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
 
         self.assert_no_active_block_jobs()
         self.vm.shutdown()
@@ -722,6 +729,8 @@ class TestENOSPC(TestErrors):
                     self.assert_qmp(event, 'data/offset', self.image_len)
                     self.assert_qmp(event, 'data/len', self.image_len)
                     completed = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
 
         self.assert_no_active_block_jobs()
         self.vm.shutdown()
@@ -751,7 +760,9 @@ class TestStreamStop(iotests.QMPTestCase):
 
         time.sleep(0.1)
         events = self.vm.get_qmp_events(wait=False)
-        self.assertEqual(events, [], 'unexpected QMP event: %s' % events)
+        for e in events:
+            self.assert_qmp(e, 'event', 'JOB_STATUS_CHANGE')
+            self.assert_qmp(e, 'data/id', 'drive0')
 
         self.cancel_and_wait(resume=True)
 
diff --git a/tests/qemu-iotests/040 b/tests/qemu-iotests/040
index 90b5b4f2ad..1beb5e6dab 100755
--- a/tests/qemu-iotests/040
+++ b/tests/qemu-iotests/040
@@ -162,6 +162,8 @@ class TestSingleDrive(ImageCommitTestCase):
                 elif event['event'] == 'BLOCK_JOB_CANCELLED':
                     self.assert_qmp(event, 'data/device', 'drive0')
                     cancelled = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
                 else:
                     self.fail("Unexpected event %s" % (event['event']))
 
diff --git a/tests/qemu-iotests/041 b/tests/qemu-iotests/041
index a860a31e9a..c20ac7da87 100755
--- a/tests/qemu-iotests/041
+++ b/tests/qemu-iotests/041
@@ -445,6 +445,8 @@ new_state = "1"
                     self.assert_qmp(event, 'data/device', 'drive0')
                     self.assert_qmp(event, 'data/error', 'Input/output error')
                     completed = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
 
         self.assert_no_active_block_jobs()
         self.vm.shutdown()
@@ -457,6 +459,10 @@ new_state = "1"
         self.assert_qmp(result, 'return', {})
 
         event = self.vm.get_qmp_event(wait=True)
+        while event['event'] == 'JOB_STATUS_CHANGE':
+            self.assert_qmp(event, 'data/id', 'drive0')
+            event = self.vm.get_qmp_event(wait=True)
+
         self.assertEquals(event['event'], 'BLOCK_JOB_ERROR')
         self.assert_qmp(event, 'data/device', 'drive0')
         self.assert_qmp(event, 'data/operation', 'read')
@@ -478,6 +484,10 @@ new_state = "1"
         self.assert_qmp(result, 'return', {})
 
         event = self.vm.get_qmp_event(wait=True)
+        while event['event'] == 'JOB_STATUS_CHANGE':
+            self.assert_qmp(event, 'data/id', 'drive0')
+            event = self.vm.get_qmp_event(wait=True)
+
         self.assertEquals(event['event'], 'BLOCK_JOB_ERROR')
         self.assert_qmp(event, 'data/device', 'drive0')
         self.assert_qmp(event, 'data/operation', 'read')
@@ -608,7 +618,7 @@ new_state = "1"
                              on_target_error='ignore')
         self.assert_qmp(result, 'return', {})
 
-        event = self.vm.get_qmp_event(wait=True)
+        event = self.vm.event_wait(name='BLOCK_JOB_ERROR')
         self.assertEquals(event['event'], 'BLOCK_JOB_ERROR')
         self.assert_qmp(event, 'data/device', 'drive0')
         self.assert_qmp(event, 'data/operation', 'write')
@@ -784,7 +794,12 @@ class TestGranularity(iotests.QMPTestCase):
                              sync='full', target=target_img,
                              mode='absolute-paths', granularity=8192)
         self.assert_qmp(result, 'return', {})
+
         event = self.vm.get_qmp_event(wait=60.0)
+        while event['event'] == 'JOB_STATUS_CHANGE':
+            self.assert_qmp(event, 'data/id', 'drive0')
+            event = self.vm.get_qmp_event(wait=60.0)
+
         # Failures will manifest as COMPLETED/ERROR.
         self.assert_qmp(event, 'event', 'BLOCK_JOB_READY')
         self.complete_and_wait(drive='drive0', wait_ready=False)
@@ -1015,9 +1030,9 @@ class TestOrphanedSource(iotests.QMPTestCase):
                  'read-only': 'on' }
 
         self.vm = iotests.VM()
-        self.vm.add_blockdev(self.qmp_to_opts(blk0))
-        self.vm.add_blockdev(self.qmp_to_opts(blk1))
-        self.vm.add_blockdev(self.qmp_to_opts(blk2))
+        self.vm.add_blockdev(self.vm.qmp_to_opts(blk0))
+        self.vm.add_blockdev(self.vm.qmp_to_opts(blk1))
+        self.vm.add_blockdev(self.vm.qmp_to_opts(blk2))
         self.vm.launch()
 
     def tearDown(self):
diff --git a/tests/qemu-iotests/086 b/tests/qemu-iotests/086
index cd4494a660..84e3835071 100755
--- a/tests/qemu-iotests/086
+++ b/tests/qemu-iotests/086
@@ -38,7 +38,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15
 . ./common.filter
 
 _supported_fmt qcow2 raw
-_supported_proto file nfs
+_supported_proto file
 _supported_os Linux
 
 function run_qemu_img()
diff --git a/tests/qemu-iotests/094.out b/tests/qemu-iotests/094.out
index f52baffe70..665b630b08 100644
--- a/tests/qemu-iotests/094.out
+++ b/tests/qemu-iotests/094.out
@@ -2,10 +2,17 @@ QA output created by 094
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/source.IMGFMT', fmt=IMGFMT size=67108864
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 67108864, "offset": 67108864, "speed": 0, "type": "mirror"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 67108864, "offset": 67108864, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 *** done
diff --git a/tests/qemu-iotests/095 b/tests/qemu-iotests/095
index 030adb22e1..72ecc22e1b 100755
--- a/tests/qemu-iotests/095
+++ b/tests/qemu-iotests/095
@@ -72,7 +72,7 @@ _send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" "return"
 
 _send_qemu_cmd $h "{ 'execute': 'block-commit',
                                  'arguments': { 'device': 'test',
-                                 'top': '"${TEST_IMG}.snp1"' } }" "BLOCK_JOB_COMPLETED"
+                                 'top': '"${TEST_IMG}.snp1"' } }" '"status": "null"'
 
 _cleanup_qemu
 
diff --git a/tests/qemu-iotests/095.out b/tests/qemu-iotests/095.out
index 73875cab40..8c093dfff3 100644
--- a/tests/qemu-iotests/095.out
+++ b/tests/qemu-iotests/095.out
@@ -11,8 +11,14 @@ virtual size: 5.0M (5242880 bytes)
 === Running QEMU Live Commit Test ===
 
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "test"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "test"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "test"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "test"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "test", "len": 104857600, "offset": 104857600, "speed": 0, "type": "commit"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "test"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "test"}}
 
 === Base image info after commit and resize ===
 image: TEST_DIR/t.IMGFMT.base
diff --git a/tests/qemu-iotests/109 b/tests/qemu-iotests/109
index d70b574d88..acbd079136 100755
--- a/tests/qemu-iotests/109
+++ b/tests/qemu-iotests/109
@@ -64,7 +64,7 @@ function run_qemu()
 
     _send_qemu_cmd $QEMU_HANDLE '' "$qmp_event"
     if test "$qmp_event" = BLOCK_JOB_ERROR; then
-        _send_qemu_cmd $QEMU_HANDLE '' "BLOCK_JOB_COMPLETED"
+        _send_qemu_cmd $QEMU_HANDLE '' '"status": "null"'
     fi
     _send_qemu_cmd $QEMU_HANDLE '{"execute":"query-block-jobs"}' "return"
     _send_qemu_cmd $QEMU_HANDLE '{"execute":"quit"}' "return"
diff --git a/tests/qemu-iotests/109.out b/tests/qemu-iotests/109.out
index 8a9b93672b..ad0ee6fb48 100644
--- a/tests/qemu-iotests/109.out
+++ b/tests/qemu-iotests/109.out
@@ -6,23 +6,35 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
-Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
-Specify the 'raw' format explicitly to remove the restrictions.
+         Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
+         Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": 0, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -32,23 +44,35 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
-Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
-Specify the 'raw' format explicitly to remove the restrictions.
+         Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
+         Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": 512, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 197120, "offset": 197120, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -58,23 +82,35 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
-Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
-Specify the 'raw' format explicitly to remove the restrictions.
+         Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
+         Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": 262144, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -84,23 +120,35 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
-Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
-Specify the 'raw' format explicitly to remove the restrictions.
+         Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
+         Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": 0, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -110,23 +158,35 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
-Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
-Specify the 'raw' format explicitly to remove the restrictions.
+         Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
+         Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": 0, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 65536, "offset": 65536, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -136,23 +196,35 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
-Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
-Specify the 'raw' format explicitly to remove the restrictions.
+         Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
+         Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": 0, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -161,23 +233,35 @@ Images are identical.
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
-Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
-Specify the 'raw' format explicitly to remove the restrictions.
+         Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
+         Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -186,23 +270,35 @@ Images are identical.
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
-Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
-Specify the 'raw' format explicitly to remove the restrictions.
+         Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
+         Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 31457280, "offset": 31457280, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -211,23 +307,35 @@ Images are identical.
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
-Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
-Specify the 'raw' format explicitly to remove the restrictions.
+         Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
+         Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -236,23 +344,35 @@ Images are identical.
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
-Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
-Specify the 'raw' format explicitly to remove the restrictions.
+         Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
+         Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2048, "offset": 2048, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -261,23 +381,37 @@ Images are identical.
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
-Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
-Specify the 'raw' format explicitly to remove the restrictions.
+         Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
+         Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 *** done
diff --git a/tests/qemu-iotests/124 b/tests/qemu-iotests/124
index 8e76e62f93..3ea4ac53f5 100755
--- a/tests/qemu-iotests/124
+++ b/tests/qemu-iotests/124
@@ -151,10 +151,17 @@ class TestIncrementalBackupBase(iotests.QMPTestCase):
         return self.wait_qmp_backup(kwargs['device'], error)
 
 
+    def ignore_job_status_change_events(self):
+        while True:
+            e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
+            if e['data']['status'] == 'null':
+                break
+
     def wait_qmp_backup(self, device, error='Input/output error'):
         event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                    match={'data': {'device': device}})
         self.assertNotEqual(event, None)
+        self.ignore_job_status_change_events()
 
         try:
             failure = self.dictpath(event, 'data/error')
@@ -172,6 +179,7 @@ class TestIncrementalBackupBase(iotests.QMPTestCase):
         event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                    match={'data': {'device': device}})
         self.assertNotEqual(event, None)
+        self.ignore_job_status_change_events()
 
 
     def create_anchor_backup(self, drive=None):
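
The ignore_job_status_change_events() helper above illustrates the pattern the Python-based iotests now follow: once a block job finishes, the new JOB_STATUS_CHANGE events are drained up to the terminal 'null' state so they cannot bleed into later assertions. A minimal standalone sketch of the same idea, assuming an iotests-style VM object with event_wait() (the helper name and the per-job match filter are illustrative additions, not part of this patch):

    def drain_job_events(vm, job_id):
        # Consume JOB_STATUS_CHANGE events for one job until it reaches 'null',
        # the state in which QEMU finally releases the job object.
        while True:
            event = vm.event_wait(name='JOB_STATUS_CHANGE',
                                  match={'data': {'id': job_id}})
            if event['data']['status'] == 'null':
                return event
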
diff --git a/tests/qemu-iotests/126.out b/tests/qemu-iotests/126.out
index 50d73080fa..17d03d5248 100644
--- a/tests/qemu-iotests/126.out
+++ b/tests/qemu-iotests/126.out
@@ -3,7 +3,7 @@ QA output created by 126
 === Testing plain files ===
 
 Formatting 'TEST_DIR/a:b.IMGFMT', fmt=IMGFMT size=67108864
-Formatting 'TEST_DIR/a:b.IMGFMT', fmt=IMGFMT size=67108864
+Formatting 'file:TEST_DIR/a:b.IMGFMT', fmt=IMGFMT size=67108864
 
 === Testing relative backing filename resolution ===
 
diff --git a/tests/qemu-iotests/127.out b/tests/qemu-iotests/127.out
index 543d075005..83b522d4c2 100644
--- a/tests/qemu-iotests/127.out
+++ b/tests/qemu-iotests/127.out
@@ -5,10 +5,17 @@ Formatting 'TEST_DIR/t.IMGFMT.overlay1', fmt=IMGFMT size=65536 backing_file=TEST
 wrote 42/42 bytes at offset 0
 42 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "mirror"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "mirror"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "mirror", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "mirror"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "mirror", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "mirror"}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 *** done
diff --git a/tests/qemu-iotests/141 b/tests/qemu-iotests/141
index 2f9d7b9bc2..4246d387a1 100755
--- a/tests/qemu-iotests/141
+++ b/tests/qemu-iotests/141
@@ -107,7 +107,7 @@ test_blockjob \
                     'format': '$IMGFMT',
                     'sync': 'none'}}" \
     'return' \
-    'BLOCK_JOB_CANCELLED'
+    '"status": "null"'
 
 echo
 echo '=== Testing drive-mirror ==='
@@ -124,7 +124,7 @@ test_blockjob \
                     'format': '$IMGFMT',
                     'sync': 'none'}}" \
     'BLOCK_JOB_READY' \
-    'BLOCK_JOB_COMPLETED'
+    '"status": "null"'
 
 echo
 echo '=== Testing active block-commit ==='
@@ -138,7 +138,7 @@ test_blockjob \
     "{'execute': 'block-commit',
       'arguments': {'job-id': 'job0', 'device': 'drv0'}}" \
     'BLOCK_JOB_READY' \
-    'BLOCK_JOB_COMPLETED'
+    '"status": "null"'
 
 echo
 echo '=== Testing non-active block-commit ==='
@@ -157,7 +157,7 @@ test_blockjob \
                     'top':    '$TEST_DIR/m.$IMGFMT',
                     'speed':  1}}" \
     'return' \
-    'BLOCK_JOB_CANCELLED'
+    '"status": "null"'
 
 echo
 echo '=== Testing block-stream ==='
@@ -170,8 +170,7 @@ echo
 $QEMU_IO -c 'write 0 1M' "$TEST_DIR/b.$IMGFMT" | _filter_qemu_io
 
 # With some data to stream (and @speed set to 1), block-stream will not complete
-# until we send the block-job-cancel command. Therefore, no event other than
-# BLOCK_JOB_CANCELLED will be emitted.
+# until we send the block-job-cancel command.
 
 test_blockjob \
     "{'execute': 'block-stream',
@@ -179,7 +178,7 @@ test_blockjob \
                     'device': 'drv0',
                     'speed': 1}}" \
     'return' \
-    'BLOCK_JOB_CANCELLED'
+    '"status": "null"'
 
 _cleanup_qemu
 
diff --git a/tests/qemu-iotests/141.out b/tests/qemu-iotests/141.out
index 82e763b68d..f252c86875 100644
--- a/tests/qemu-iotests/141.out
+++ b/tests/qemu-iotests/141.out
@@ -8,31 +8,50 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/m.
 
 {"return": {}}
 Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
 {"return": {}}
 {"error": {"class": "GenericError", "desc": "Node drv0 is in use"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 0, "speed": 0, "type": "backup"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
 {"return": {}}
 
 === Testing drive-mirror ===
 
 {"return": {}}
 Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job0"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "mirror"}}
 {"return": {}}
 {"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: node is used as backing hd of 'NODE_NAME'"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job0"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
 {"return": {}}
 
 === Testing active block-commit ===
 
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job0"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
 {"return": {}}
 {"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: node is used as backing hd of 'NODE_NAME'"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job0"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
 {"return": {}}
 
 === Testing non-active block-commit ===
@@ -40,10 +59,15 @@ Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.
 wrote 1048576/1048576 bytes at offset 0
 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
 {"return": {}}
 {"error": {"class": "GenericError", "desc": "Node drv0 is in use"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 524288, "speed": 1, "type": "commit"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
 {"return": {}}
 
 === Testing block-stream ===
@@ -51,9 +75,14 @@ wrote 1048576/1048576 bytes at offset 0
 wrote 1048576/1048576 bytes at offset 0
 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
 {"return": {}}
 {"error": {"class": "GenericError", "desc": "Node drv0 is in use"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 524288, "speed": 1, "type": "stream"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
 {"return": {}}
 *** done
diff --git a/tests/qemu-iotests/144 b/tests/qemu-iotests/144
index 00de3c33cf..4b915718cd 100755
--- a/tests/qemu-iotests/144
+++ b/tests/qemu-iotests/144
@@ -93,7 +93,7 @@ _send_qemu_cmd $h "{ 'execute': 'block-job-complete',
                                 'arguments': {
                                                 'device': 'virtio0'
                                               }
-                   }" "COMPLETED"
+                   }" '"status": "null"'
 
 echo
 echo === Performing Live Snapshot 2 ===
diff --git a/tests/qemu-iotests/144.out b/tests/qemu-iotests/144.out
index 014b2817ee..55299201e4 100644
--- a/tests/qemu-iotests/144.out
+++ b/tests/qemu-iotests/144.out
@@ -12,10 +12,17 @@ Formatting 'TEST_DIR/tmp.qcow2', fmt=qcow2 size=536870912 backing_file=TEST_DIR/
 
 === Performing block-commit on active layer ===
 
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "virtio0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "virtio0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "virtio0"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "virtio0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
 {"return": {}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "virtio0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "virtio0"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "virtio0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "virtio0"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "virtio0"}}
 
 === Performing Live Snapshot 2 ===
 
diff --git a/tests/qemu-iotests/155 b/tests/qemu-iotests/155
index 42dae04c83..63a5b5e2c0 100755
--- a/tests/qemu-iotests/155
+++ b/tests/qemu-iotests/155
@@ -63,7 +63,7 @@ class BaseClass(iotests.QMPTestCase):
                     'driver': iotests.imgfmt,
                     'file': {'driver': 'file',
                              'filename': source_img}}
-        self.vm.add_blockdev(self.qmp_to_opts(blockdev))
+        self.vm.add_blockdev(self.vm.qmp_to_opts(blockdev))
         self.vm.add_device('virtio-blk,id=qdev0,drive=source')
         self.vm.launch()
 
diff --git a/tests/qemu-iotests/156 b/tests/qemu-iotests/156
index e75dc4d743..0a9a09802e 100755
--- a/tests/qemu-iotests/156
+++ b/tests/qemu-iotests/156
@@ -119,7 +119,7 @@ _send_qemu_cmd $QEMU_HANDLE \
 
 _send_qemu_cmd $QEMU_HANDLE \
     '' \
-    'BLOCK_JOB_COMPLETED'
+    '"status": "null"'
 
 # Remove the source images
 rm -f "$TEST_IMG{,.backing,.overlay}"
diff --git a/tests/qemu-iotests/156.out b/tests/qemu-iotests/156.out
index f96a564c1d..34c057b626 100644
--- a/tests/qemu-iotests/156.out
+++ b/tests/qemu-iotests/156.out
@@ -12,13 +12,20 @@ wrote 131072/131072 bytes at offset 131072
 128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": ""}
 Formatting 'TEST_DIR/t.IMGFMT.target.overlay', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT.target
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "source"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "source"}}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "source"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "source", "len": 131072, "offset": 131072, "speed": 0, "type": "mirror"}}
 wrote 65536/65536 bytes at offset 196608
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": ""}
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "source"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "source"}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "source", "len": 196608, "offset": 196608, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "source"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "source"}}
 
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
diff --git a/tests/qemu-iotests/185 b/tests/qemu-iotests/185
index 9a2d317414..7dcfdeac60 100755
--- a/tests/qemu-iotests/185
+++ b/tests/qemu-iotests/185
@@ -27,8 +27,6 @@ echo "QA output created by $seq"
 here=`pwd`
 status=1 # failure is the default!
 
-MIG_SOCKET="${TEST_DIR}/migrate"
-
 _cleanup()
 {
     rm -f "${TEST_IMG}.mid"
@@ -118,8 +116,10 @@ _send_qemu_cmd $h \
 # If we don't sleep here 'quit' command races with disk I/O
 sleep 0.5
 
+# Ignore the JOB_STATUS_CHANGE events while shutting down the VM. Depending on
+# the timing, jobs may or may not transition through a paused state.
 _send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
-wait=1 _cleanup_qemu
+wait=1 _cleanup_qemu | grep -v 'JOB_STATUS_CHANGE'
 
 echo
 echo === Start active commit job and exit qemu ===
@@ -141,7 +141,7 @@ _send_qemu_cmd $h \
 sleep 0.5
 
 _send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
-wait=1 _cleanup_qemu
+wait=1 _cleanup_qemu | grep -v 'JOB_STATUS_CHANGE'
 
 echo
 echo === Start mirror job and exit qemu ===
@@ -166,7 +166,7 @@ _send_qemu_cmd $h \
 sleep 0.5
 
 _send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
-wait=1 _cleanup_qemu
+wait=1 _cleanup_qemu | grep -v 'JOB_STATUS_CHANGE'
 
 echo
 echo === Start backup job and exit qemu ===
@@ -190,7 +190,7 @@ _send_qemu_cmd $h \
 sleep 0.5
 
 _send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
-wait=1 _cleanup_qemu
+wait=1 _cleanup_qemu | grep -v 'JOB_STATUS_CHANGE'
 
 echo
 echo === Start streaming job and exit qemu ===
@@ -211,7 +211,7 @@ _send_qemu_cmd $h \
 sleep 0.5
 
 _send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
-wait=1 _cleanup_qemu
+wait=1 _cleanup_qemu | grep -v 'JOB_STATUS_CHANGE'
 
 _check_test_img
 
diff --git a/tests/qemu-iotests/185.out b/tests/qemu-iotests/185.out
index 57eaf8d699..4e0ca0dffa 100644
--- a/tests/qemu-iotests/185.out
+++ b/tests/qemu-iotests/185.out
@@ -17,6 +17,8 @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 size=67108864 backing_file=TEST_DIR/t.q
 
 === Start commit job and exit qemu ===
 
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
 {"return": {}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
@@ -25,6 +27,8 @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 size=67108864 backing_file=TEST_DIR/t.q
 === Start active commit job and exit qemu ===
 
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
 {"return": {}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
@@ -34,6 +38,8 @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 size=67108864 backing_file=TEST_DIR/t.q
 
 {"return": {}}
 Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 lazy_refcounts=off refcount_bits=16
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
 {"return": {}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
@@ -43,6 +49,8 @@ Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 l
 
 {"return": {}}
 Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 lazy_refcounts=off refcount_bits=16
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
 {"return": {}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
@@ -51,6 +59,8 @@ Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 l
 === Start streaming job and exit qemu ===
 
 {"return": {}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
+{"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
 {"return": {}}
 {"return": {}}
 {"timestamp": {"seconds":  TIMESTAMP, "microseconds":  TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
diff --git a/tests/qemu-iotests/191 b/tests/qemu-iotests/191
index dfad6555e4..d6860e72f7 100755
--- a/tests/qemu-iotests/191
+++ b/tests/qemu-iotests/191
@@ -27,8 +27,6 @@ echo "QA output created by $seq"
 here=`pwd`
 status=1 # failure is the default!
 
-MIG_SOCKET="${TEST_DIR}/migrate"
-
 _cleanup()
 {
     rm -f "${TEST_IMG}.mid"
@@ -83,7 +81,7 @@ _send_qemu_cmd $h \
                       'device': 'top',
                       'base':'$TEST_IMG.base',
                       'top': '$TEST_IMG.mid' } }" \
-    "BLOCK_JOB_COMPLETED"
+    '"status": "null"'
 _send_qemu_cmd $h "" "^}"
 
 echo
@@ -131,7 +129,7 @@ _send_qemu_cmd $h \
                       'device': 'top',
                       'base':'$TEST_IMG.base',
                       'top': '$TEST_IMG.mid' } }" \
-    "BLOCK_JOB_COMPLETED"
+    '"status": "null"'
 _send_qemu_cmd $h "" "^}"
 
 echo
diff --git a/tests/qemu-iotests/191.out b/tests/qemu-iotests/191.out
index 190c5f049a..31a0c7d4c4 100644
--- a/tests/qemu-iotests/191.out
+++ b/tests/qemu-iotests/191.out
@@ -16,6 +16,28 @@ wrote 65536/65536 bytes at offset 1048576
 === Perform commit job ===
 
 {
+    "timestamp": {
+        "seconds":  TIMESTAMP,
+        "microseconds":  TIMESTAMP
+    },
+    "event": "JOB_STATUS_CHANGE",
+    "data": {
+        "status": "created",
+        "id": "commit0"
+    }
+}
+{
+    "timestamp": {
+        "seconds":  TIMESTAMP,
+        "microseconds":  TIMESTAMP
+    },
+    "event": "JOB_STATUS_CHANGE",
+    "data": {
+        "status": "running",
+        "id": "commit0"
+    }
+}
+{
     "return": {
     }
 }
@@ -24,6 +46,28 @@ wrote 65536/65536 bytes at offset 1048576
         "seconds":  TIMESTAMP,
         "microseconds":  TIMESTAMP
     },
+    "event": "JOB_STATUS_CHANGE",
+    "data": {
+        "status": "waiting",
+        "id": "commit0"
+    }
+}
+{
+    "timestamp": {
+        "seconds":  TIMESTAMP,
+        "microseconds":  TIMESTAMP
+    },
+    "event": "JOB_STATUS_CHANGE",
+    "data": {
+        "status": "pending",
+        "id": "commit0"
+    }
+}
+{
+    "timestamp": {
+        "seconds":  TIMESTAMP,
+        "microseconds":  TIMESTAMP
+    },
     "event": "BLOCK_JOB_COMPLETED",
     "data": {
         "device": "commit0",
@@ -33,6 +77,28 @@ wrote 65536/65536 bytes at offset 1048576
         "type": "commit"
     }
 }
+{
+    "timestamp": {
+        "seconds":  TIMESTAMP,
+        "microseconds":  TIMESTAMP
+    },
+    "event": "JOB_STATUS_CHANGE",
+    "data": {
+        "status": "concluded",
+        "id": "commit0"
+    }
+}
+{
+    "timestamp": {
+        "seconds":  TIMESTAMP,
+        "microseconds":  TIMESTAMP
+    },
+    "event": "JOB_STATUS_CHANGE",
+    "data": {
+        "status": "null",
+        "id": "commit0"
+    }
+}
 
 === Check that both top and top2 point to base now ===
 
@@ -356,6 +422,28 @@ wrote 65536/65536 bytes at offset 1048576
 === Perform commit job ===
 
 {
+    "timestamp": {
+        "seconds":  TIMESTAMP,
+        "microseconds":  TIMESTAMP
+    },
+    "event": "JOB_STATUS_CHANGE",
+    "data": {
+        "status": "created",
+        "id": "commit0"
+    }
+}
+{
+    "timestamp": {
+        "seconds":  TIMESTAMP,
+        "microseconds":  TIMESTAMP
+    },
+    "event": "JOB_STATUS_CHANGE",
+    "data": {
+        "status": "running",
+        "id": "commit0"
+    }
+}
+{
     "return": {
     }
 }
@@ -364,6 +452,28 @@ wrote 65536/65536 bytes at offset 1048576
         "seconds":  TIMESTAMP,
         "microseconds":  TIMESTAMP
     },
+    "event": "JOB_STATUS_CHANGE",
+    "data": {
+        "status": "waiting",
+        "id": "commit0"
+    }
+}
+{
+    "timestamp": {
+        "seconds":  TIMESTAMP,
+        "microseconds":  TIMESTAMP
+    },
+    "event": "JOB_STATUS_CHANGE",
+    "data": {
+        "status": "pending",
+        "id": "commit0"
+    }
+}
+{
+    "timestamp": {
+        "seconds":  TIMESTAMP,
+        "microseconds":  TIMESTAMP
+    },
     "event": "BLOCK_JOB_COMPLETED",
     "data": {
         "device": "commit0",
@@ -373,6 +483,28 @@ wrote 65536/65536 bytes at offset 1048576
         "type": "commit"
     }
 }
+{
+    "timestamp": {
+        "seconds":  TIMESTAMP,
+        "microseconds":  TIMESTAMP
+    },
+    "event": "JOB_STATUS_CHANGE",
+    "data": {
+        "status": "concluded",
+        "id": "commit0"
+    }
+}
+{
+    "timestamp": {
+        "seconds":  TIMESTAMP,
+        "microseconds":  TIMESTAMP
+    },
+    "event": "JOB_STATUS_CHANGE",
+    "data": {
+        "status": "null",
+        "id": "commit0"
+    }
+}
 
 === Check that both top and top2 point to base now ===
 
diff --git a/tests/qemu-iotests/219 b/tests/qemu-iotests/219
new file mode 100755
index 0000000000..898a26eef0
--- /dev/null
+++ b/tests/qemu-iotests/219
@@ -0,0 +1,209 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Creator/Owner: Kevin Wolf <kwolf@redhat.com>
+#
+# Check using the job-* QMP commands with block jobs
+
+import iotests
+
+iotests.verify_image_format(supported_fmts=['qcow2'])
+
+def pause_wait(vm, job_id):
+    with iotests.Timeout(3, "Timeout waiting for job to pause"):
+        while True:
+            result = vm.qmp('query-jobs')
+            for job in result['return']:
+                if job['id'] == job_id and job['status'] in ['paused', 'standby']:
+                    return job
+
+# Test that block-job-pause/resume and job-pause/resume can be mixed
+def test_pause_resume(vm):
+    for pause_cmd, pause_arg in [('block-job-pause', 'device'),
+                                 ('job-pause', 'id')]:
+        for resume_cmd, resume_arg in [('block-job-resume', 'device'),
+                                       ('job-resume', 'id')]:
+            iotests.log('=== Testing %s/%s ===' % (pause_cmd, resume_cmd))
+
+            iotests.log(vm.qmp(pause_cmd, **{pause_arg: 'job0'}))
+            pause_wait(vm, 'job0')
+            iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
+            iotests.log(vm.qmp('query-jobs'))
+
+            iotests.log(vm.qmp(resume_cmd, **{resume_arg: 'job0'}))
+            iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
+            iotests.log(vm.qmp('query-jobs'))
+
+def test_job_lifecycle(vm, job, job_args, has_ready=False):
+    iotests.log('')
+    iotests.log('')
+    iotests.log('Starting block job: %s (auto-finalize: %s; auto-dismiss: %s)' %
+                (job,
+                 job_args.get('auto-finalize', True),
+                 job_args.get('auto-dismiss', True)))
+    iotests.log(vm.qmp(job, job_id='job0', **job_args))
+
+    # Depending on the storage, the first request may or may not have completed
+    # yet, so filter out the progress. Later query-jobs calls don't need the
+    # filtering because the progress is made deterministic by the block job
+    # speed.
+    result = vm.qmp('query-jobs')
+    for j in result['return']:
+        del j['current-progress']
+    iotests.log(result)
+
+    # undefined -> created -> running
+    iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
+    iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
+
+    # RUNNING state:
+    # pause/resume should work, complete/finalize/dismiss should error out
+    iotests.log('')
+    iotests.log('Pause/resume in RUNNING')
+    test_pause_resume(vm)
+
+    iotests.log(vm.qmp('job-complete', id='job0'))
+    iotests.log(vm.qmp('job-finalize', id='job0'))
+    iotests.log(vm.qmp('job-dismiss', id='job0'))
+
+    iotests.log(vm.qmp('block-job-complete', device='job0'))
+    iotests.log(vm.qmp('block-job-finalize', id='job0'))
+    iotests.log(vm.qmp('block-job-dismiss', id='job0'))
+
+    # Let the job complete (or transition to READY if it supports that)
+    iotests.log(vm.qmp('block-job-set-speed', device='job0', speed=0))
+    if has_ready:
+        iotests.log('')
+        iotests.log('Waiting for READY state...')
+        vm.event_wait('BLOCK_JOB_READY')
+        iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
+        iotests.log(vm.qmp('query-jobs'))
+
+        # READY state:
+        # pause/resume/complete should work, finalize/dismiss should error out
+        iotests.log('')
+        iotests.log('Pause/resume in READY')
+        test_pause_resume(vm)
+
+        iotests.log(vm.qmp('job-finalize', id='job0'))
+        iotests.log(vm.qmp('job-dismiss', id='job0'))
+
+        iotests.log(vm.qmp('block-job-finalize', id='job0'))
+        iotests.log(vm.qmp('block-job-dismiss', id='job0'))
+
+        # Transition to WAITING
+        iotests.log(vm.qmp('job-complete', id='job0'))
+
+    # Move to WAITING and PENDING state
+    iotests.log('')
+    iotests.log('Waiting for PENDING state...')
+    iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
+    iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
+
+    if not job_args.get('auto-finalize', True):
+        # PENDING state:
+        # finalize should work, pause/complete/dismiss should error out
+        iotests.log(vm.qmp('query-jobs'))
+
+        iotests.log(vm.qmp('job-pause', id='job0'))
+        iotests.log(vm.qmp('job-complete', id='job0'))
+        iotests.log(vm.qmp('job-dismiss', id='job0'))
+
+        iotests.log(vm.qmp('block-job-pause', device='job0'))
+        iotests.log(vm.qmp('block-job-complete', device='job0'))
+        iotests.log(vm.qmp('block-job-dismiss', id='job0'))
+
+        # Transition to CONCLUDED
+        iotests.log(vm.qmp('job-finalize', id='job0'))
+
+
+    # Move to CONCLUDED state
+    iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
+
+    if not job_args.get('auto-dismiss', True):
+        # CONCLUDED state:
+        # dismiss should work, pause/complete/finalize should error out
+        iotests.log(vm.qmp('query-jobs'))
+
+        iotests.log(vm.qmp('job-pause', id='job0'))
+        iotests.log(vm.qmp('job-complete', id='job0'))
+        iotests.log(vm.qmp('job-finalize', id='job0'))
+
+        iotests.log(vm.qmp('block-job-pause', device='job0'))
+        iotests.log(vm.qmp('block-job-complete', device='job0'))
+        iotests.log(vm.qmp('block-job-finalize', id='job0'))
+
+        # Transition to NULL
+        iotests.log(vm.qmp('job-dismiss', id='job0'))
+
+    # Move to NULL state
+    iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
+    iotests.log(vm.qmp('query-jobs'))
+
+
+with iotests.FilePath('disk.img') as disk_path, \
+     iotests.FilePath('copy.img') as copy_path, \
+     iotests.VM() as vm:
+
+    img_size = '4M'
+    iotests.qemu_img_create('-f', iotests.imgfmt, disk_path, img_size)
+    iotests.qemu_io('-c', 'write 0 %s' % (img_size),
+                    '-f', iotests.imgfmt, disk_path)
+
+    iotests.log('Launching VM...')
+    vm.add_blockdev(vm.qmp_to_opts({
+        'driver': iotests.imgfmt,
+        'node-name': 'drive0-node',
+        'file': {
+            'driver': 'file',
+            'filename': disk_path,
+        },
+    }))
+    vm.launch()
+
+    # In order to keep things deterministic (especially the progress reported
+    # by query-jobs and, related to that, automatic state transitions like job
+    # completion), while still getting pause points often enough to avoid
+    # making this test very slow, it's important to have the right ratio
+    # between speed and buf_size.
+    #
+    # For backup, buf_size is hard-coded to the source image cluster size (64k),
+    # so we pick the same for mirror. The slice time, i.e. the granularity of
+    # the rate limiting, is 100ms. With a speed of 256k per second, we get four
+    # pause points per second. That gives us 250ms per iteration, which should
+    # be enough to stay deterministic.
+
+    test_job_lifecycle(vm, 'drive-mirror', has_ready=True, job_args={
+        'device': 'drive0-node',
+        'target': copy_path,
+        'sync': 'full',
+        'speed': 262144,
+        'buf_size': 65536,
+    })
+
+    for auto_finalize in [True, False]:
+        for auto_dismiss in [True, False]:
+            test_job_lifecycle(vm, 'drive-backup', job_args={
+                'device': 'drive0-node',
+                'target': copy_path,
+                'sync': 'full',
+                'speed': 262144,
+                'auto-finalize': auto_finalize,
+                'auto-dismiss': auto_dismiss,
+            })
+
+    vm.shutdown()
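
The speed/buf_size ratio chosen above is what keeps the progress values in the 219.out reference output stable. A small sketch of the arithmetic the comment describes (illustrative only; the 100ms slice is the rate-limit granularity the comment refers to):

    # Job parameters used by test 219 above.
    speed = 262144       # bytes per second requested for the job (256k)
    buf_size = 65536     # bytes copied per iteration (the 64k cluster size)
    slice_ms = 100       # granularity of the rate limiting, per the comment

    iterations_per_second = speed / buf_size           # 4.0
    ms_per_iteration = 1000 / iterations_per_second    # 250.0

    # 250ms per iteration comfortably exceeds the 100ms slice, so each pause
    # point can be observed deterministically without slowing the test down.
    print(int(iterations_per_second), 'iterations/s,', int(ms_per_iteration), 'ms each')
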
diff --git a/tests/qemu-iotests/219.out b/tests/qemu-iotests/219.out
new file mode 100644
index 0000000000..346801b655
--- /dev/null
+++ b/tests/qemu-iotests/219.out
@@ -0,0 +1,327 @@
+Launching VM...
+
+
+Starting block job: drive-mirror (auto-finalize: True; auto-dismiss: True)
+{u'return': {}}
+{u'return': [{u'status': u'running', u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'created', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+
+Pause/resume in RUNNING
+=== Testing block-job-pause/block-job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 65536, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+=== Testing block-job-pause/job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+=== Testing job-pause/block-job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+=== Testing job-pause/job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 327680, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
+{u'return': {}}
+
+Waiting for READY state...
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'ready', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'ready', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+
+Pause/resume in READY
+=== Testing block-job-pause/block-job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'standby', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'standby', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'ready', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'ready', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+=== Testing block-job-pause/job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'standby', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'standby', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'ready', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'ready', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+=== Testing job-pause/block-job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'standby', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'standby', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'ready', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'ready', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+=== Testing job-pause/job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'standby', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'standby', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'ready', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'ready', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'ready' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'ready' cannot accept command verb 'dismiss'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'ready' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'ready' cannot accept command verb 'dismiss'"}}
+{u'return': {}}
+
+Waiting for PENDING state...
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'waiting', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'pending', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'concluded', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'null', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': []}
+
+
+Starting block job: drive-backup (auto-finalize: True; auto-dismiss: True)
+{u'return': {}}
+{u'return': [{u'status': u'running', u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'created', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+
+Pause/resume in RUNNING
+=== Testing block-job-pause/block-job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 65536, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+=== Testing block-job-pause/job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+=== Testing job-pause/block-job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+=== Testing job-pause/job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 327680, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
+{u'return': {}}
+
+Waiting for PENDING state...
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'waiting', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'pending', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'concluded', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'null', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': []}
+
+
+Starting block job: drive-backup (auto-finalize: True; auto-dismiss: False)
+{u'return': {}}
+{u'return': [{u'status': u'running', u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'created', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+
+Pause/resume in RUNNING
+=== Testing block-job-pause/block-job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 65536, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+=== Testing block-job-pause/job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+=== Testing job-pause/block-job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+=== Testing job-pause/job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 327680, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
+{u'return': {}}
+
+Waiting for PENDING state...
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'waiting', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'pending', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'concluded', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'concluded', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'pause'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'pause'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'finalize'"}}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'null', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': []}
+
+
+Starting block job: drive-backup (auto-finalize: False; auto-dismiss: True)
+{u'return': {}}
+{u'return': [{u'status': u'running', u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'created', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+
+Pause/resume in RUNNING
+=== Testing block-job-pause/block-job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 65536, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+=== Testing block-job-pause/job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+=== Testing job-pause/block-job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+=== Testing job-pause/job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 327680, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
+{u'return': {}}
+
+Waiting for PENDING state...
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'waiting', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'pending', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'pending', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'pause'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'dismiss'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'pause'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'dismiss'"}}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'concluded', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'null', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': []}
+
+
+Starting block job: drive-backup (auto-finalize: False; auto-dismiss: False)
+{u'return': {}}
+{u'return': [{u'status': u'running', u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'created', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+
+Pause/resume in RUNNING
+=== Testing block-job-pause/block-job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 65536, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+=== Testing block-job-pause/job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+=== Testing job-pause/block-job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+=== Testing job-pause/job-resume ===
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'paused', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'running', u'current-progress': 327680, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
+{u'return': {}}
+
+Waiting for PENDING state...
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'waiting', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'pending', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'pending', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'pause'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'dismiss'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'pause'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'dismiss'"}}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'concluded', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': [{u'status': u'concluded', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'pause'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'finalize'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'pause'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'complete'"}}
+{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'finalize'"}}
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'null', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
+{u'return': []}
diff --git a/tests/qemu-iotests/common.filter b/tests/qemu-iotests/common.filter
index c5f4bcf578..f08ee55046 100644
--- a/tests/qemu-iotests/common.filter
+++ b/tests/qemu-iotests/common.filter
@@ -119,7 +119,8 @@ _filter_actual_image_size()
 # replace driver-specific options in the "Formatting..." line
 _filter_img_create()
 {
-    sed -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \
+    sed -e "s#$REMOTE_TEST_DIR#TEST_DIR#g" \
+        -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \
         -e "s#$TEST_DIR#TEST_DIR#g" \
         -e "s#$IMGFMT#IMGFMT#g" \
         -e 's#nbd:127.0.0.1:10810#TEST_DIR/t.IMGFMT#g' \
@@ -154,7 +155,8 @@ _filter_img_info()
 
     discard=0
     regex_json_spec_start='^ *"format-specific": \{'
-    sed -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \
+    sed -e "s#$REMOTE_TEST_DIR#TEST_DIR#g" \
+        -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \
         -e "s#$TEST_DIR#TEST_DIR#g" \
         -e "s#$IMGFMT#IMGFMT#g" \
         -e 's#nbd://127.0.0.1:10810$#TEST_DIR/t.IMGFMT#g' \
diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc
index 9a65a11026..d054cb97fc 100644
--- a/tests/qemu-iotests/common.rc
+++ b/tests/qemu-iotests/common.rc
@@ -147,8 +147,9 @@ else
         TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
         TEST_IMG="ssh://127.0.0.1$TEST_IMG_FILE"
     elif [ "$IMGPROTO" = "nfs" ]; then
-        TEST_DIR="nfs://127.0.0.1/$TEST_DIR"
-        TEST_IMG=$TEST_DIR/t.$IMGFMT
+        TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
+        REMOTE_TEST_DIR="nfs://127.0.0.1$TEST_DIR"
+        TEST_IMG="nfs://127.0.0.1$TEST_IMG_FILE"
     elif [ "$IMGPROTO" = "vxhs" ]; then
         TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
         TEST_IMG="vxhs://127.0.0.1:9999/t.$IMGFMT"
@@ -173,6 +174,10 @@ if [ ! -d "$TEST_DIR" ]; then
     exit 1
 fi
 
+if [ -z "$REMOTE_TEST_DIR" ]; then
+    REMOTE_TEST_DIR="$TEST_DIR"
+fi
+
 if [ ! -d "$SAMPLE_IMG_DIR" ]; then
     echo "common.config: Error: \$SAMPLE_IMG_DIR ($SAMPLE_IMG_DIR) is not a directory"
     exit 1
@@ -333,7 +338,8 @@ _img_info()
     discard=0
     regex_json_spec_start='^ *"format-specific": \{'
     $QEMU_IMG info $QEMU_IMG_EXTRA_ARGS "$@" "$TEST_IMG" 2>&1 | \
-        sed -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \
+        sed -e "s#$REMOTE_TEST_DIR#TEST_DIR#g" \
+            -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \
             -e "s#$TEST_DIR#TEST_DIR#g" \
             -e "s#$IMGFMT#IMGFMT#g" \
             -e "/^disk size:/ D" \
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index cc8cd8cc8e..93f93d71ba 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -97,7 +97,7 @@
 088 rw auto quick
 089 rw auto quick
 090 rw auto quick
-091 rw auto
+091 rw auto migration
 092 rw auto quick
 093 auto
 094 rw auto quick
@@ -169,7 +169,7 @@
 162 auto quick
 163 rw auto
 165 rw auto quick
-169 rw auto quick
+169 rw auto quick migration
 170 rw auto quick
 171 rw auto quick
 172 auto
@@ -194,14 +194,14 @@
 192 rw auto quick
 194 rw auto migration quick
 195 rw auto quick
-196 rw auto quick
+196 rw auto quick migration
 197 rw auto quick
 198 rw auto
-199 rw auto
+199 rw auto migration
 200 rw auto
 201 rw auto migration
 202 rw auto quick
-203 rw auto
+203 rw auto migration
 204 rw auto quick
 205 rw auto quick
 206 rw auto
@@ -216,3 +216,4 @@
 215 rw auto quick
 216 rw auto quick
 218 rw auto quick
+219 rw auto
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index b25d48a91b..28159d837a 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -363,6 +363,27 @@ class VM(qtest.QEMUQtestMachine):
         return self.qmp('human-monitor-command',
                         command_line='qemu-io %s "%s"' % (drive, cmd))
 
+    def flatten_qmp_object(self, obj, output=None, basestr=''):
+        if output is None:
+            output = dict()
+        if isinstance(obj, list):
+            for i in range(len(obj)):
+                self.flatten_qmp_object(obj[i], output, basestr + str(i) + '.')
+        elif isinstance(obj, dict):
+            for key in obj:
+                self.flatten_qmp_object(obj[key], output, basestr + key + '.')
+        else:
+            output[basestr[:-1]] = obj # Strip trailing '.'
+        return output
+
+    def qmp_to_opts(self, obj):
+        obj = self.flatten_qmp_object(obj)
+        output_list = list()
+        for key in obj:
+            output_list += [key + '=' + obj[key]]
+        return ','.join(output_list)
+
+
 
 index_re = re.compile(r'([^\[]+)\[([^\]]+)\]')
 
@@ -390,26 +411,6 @@ class QMPTestCase(unittest.TestCase):
                     self.fail('invalid index "%s" in path "%s" in "%s"' % (idx, path, str(d)))
         return d
 
-    def flatten_qmp_object(self, obj, output=None, basestr=''):
-        if output is None:
-            output = dict()
-        if isinstance(obj, list):
-            for i in range(len(obj)):
-                self.flatten_qmp_object(obj[i], output, basestr + str(i) + '.')
-        elif isinstance(obj, dict):
-            for key in obj:
-                self.flatten_qmp_object(obj[key], output, basestr + key + '.')
-        else:
-            output[basestr[:-1]] = obj # Strip trailing '.'
-        return output
-
-    def qmp_to_opts(self, obj):
-        obj = self.flatten_qmp_object(obj)
-        output_list = list()
-        for key in obj:
-            output_list += [key + '=' + obj[key]]
-        return ','.join(output_list)
-
     def assert_qmp_absent(self, d, path):
         try:
             result = self.dictpath(d, path)
@@ -444,8 +445,8 @@ class QMPTestCase(unittest.TestCase):
         '''Asserts that the given filename is a json: filename and that its
            content is equal to the given reference object'''
         self.assertEqual(json_filename[:5], 'json:')
-        self.assertEqual(self.flatten_qmp_object(json.loads(json_filename[5:])),
-                         self.flatten_qmp_object(reference))
+        self.assertEqual(self.vm.flatten_qmp_object(json.loads(json_filename[5:])),
+                         self.vm.flatten_qmp_object(reference))
 
     def cancel_and_wait(self, drive='drive0', force=False, resume=False):
         '''Cancel a block job and wait for it to finish, returning the event'''
@@ -464,6 +465,9 @@ class QMPTestCase(unittest.TestCase):
                     self.assert_qmp(event, 'data/device', drive)
                     result = event
                     cancelled = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', drive)
+
 
         self.assert_no_active_block_jobs()
         return result
@@ -479,6 +483,8 @@ class QMPTestCase(unittest.TestCase):
                         self.assert_qmp(event, 'data/offset', event['data']['len'])
                     self.assert_no_active_block_jobs()
                     return event
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', drive)
 
     def wait_ready(self, drive='drive0'):
         '''Wait until a block job BLOCK_JOB_READY event'''
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index 7673de1062..2cba63b881 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -496,33 +496,38 @@ typedef struct TestBlockJob {
     bool should_complete;
 } TestBlockJob;
 
-static void test_job_completed(BlockJob *job, void *opaque)
+static void test_job_completed(Job *job, void *opaque)
 {
-    block_job_completed(job, 0);
+    job_completed(job, 0);
 }
 
 static void coroutine_fn test_job_start(void *opaque)
 {
     TestBlockJob *s = opaque;
 
-    block_job_event_ready(&s->common);
+    job_transition_to_ready(&s->common.job);
     while (!s->should_complete) {
-        block_job_sleep_ns(&s->common, 100000);
+        job_sleep_ns(&s->common.job, 100000);
     }
 
-    block_job_defer_to_main_loop(&s->common, test_job_completed, NULL);
+    job_defer_to_main_loop(&s->common.job, test_job_completed, NULL);
 }
 
-static void test_job_complete(BlockJob *job, Error **errp)
+static void test_job_complete(Job *job, Error **errp)
 {
-    TestBlockJob *s = container_of(job, TestBlockJob, common);
+    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
     s->should_complete = true;
 }
 
 BlockJobDriver test_job_driver = {
-    .instance_size  = sizeof(TestBlockJob),
-    .start          = test_job_start,
-    .complete       = test_job_complete,
+    .job_driver = {
+        .instance_size  = sizeof(TestBlockJob),
+        .free           = block_job_free,
+        .user_resume    = block_job_user_resume,
+        .drain          = block_job_drain,
+        .start          = test_job_start,
+        .complete       = test_job_complete,
+    },
 };
 
 static void test_blockjob_common(enum drain_type drain_type)
@@ -545,49 +550,49 @@ static void test_blockjob_common(enum drain_type drain_type)
     job = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
     block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
-    block_job_start(job);
+    job_start(&job->job);
 
-    g_assert_cmpint(job->pause_count, ==, 0);
-    g_assert_false(job->paused);
-    g_assert_false(job->busy); /* We're in block_job_sleep_ns() */
+    g_assert_cmpint(job->job.pause_count, ==, 0);
+    g_assert_false(job->job.paused);
+    g_assert_false(job->job.busy); /* We're in job_sleep_ns() */
 
     do_drain_begin(drain_type, src);
 
     if (drain_type == BDRV_DRAIN_ALL) {
         /* bdrv_drain_all() drains both src and target */
-        g_assert_cmpint(job->pause_count, ==, 2);
+        g_assert_cmpint(job->job.pause_count, ==, 2);
     } else {
-        g_assert_cmpint(job->pause_count, ==, 1);
+        g_assert_cmpint(job->job.pause_count, ==, 1);
     }
     /* XXX We don't wait until the job is actually paused. Is this okay? */
-    /* g_assert_true(job->paused); */
-    g_assert_false(job->busy); /* The job is paused */
+    /* g_assert_true(job->job.paused); */
+    g_assert_false(job->job.busy); /* The job is paused */
 
     do_drain_end(drain_type, src);
 
-    g_assert_cmpint(job->pause_count, ==, 0);
-    g_assert_false(job->paused);
-    g_assert_false(job->busy); /* We're in block_job_sleep_ns() */
+    g_assert_cmpint(job->job.pause_count, ==, 0);
+    g_assert_false(job->job.paused);
+    g_assert_false(job->job.busy); /* We're in job_sleep_ns() */
 
     do_drain_begin(drain_type, target);
 
     if (drain_type == BDRV_DRAIN_ALL) {
         /* bdrv_drain_all() drains both src and target */
-        g_assert_cmpint(job->pause_count, ==, 2);
+        g_assert_cmpint(job->job.pause_count, ==, 2);
     } else {
-        g_assert_cmpint(job->pause_count, ==, 1);
+        g_assert_cmpint(job->job.pause_count, ==, 1);
     }
     /* XXX We don't wait until the job is actually paused. Is this okay? */
-    /* g_assert_true(job->paused); */
-    g_assert_false(job->busy); /* The job is paused */
+    /* g_assert_true(job->job.paused); */
+    g_assert_false(job->job.busy); /* The job is paused */
 
     do_drain_end(drain_type, target);
 
-    g_assert_cmpint(job->pause_count, ==, 0);
-    g_assert_false(job->paused);
-    g_assert_false(job->busy); /* We're in block_job_sleep_ns() */
+    g_assert_cmpint(job->job.pause_count, ==, 0);
+    g_assert_false(job->job.paused);
+    g_assert_false(job->job.busy); /* We're in job_sleep_ns() */
 
-    ret = block_job_complete_sync(job, &error_abort);
+    ret = job_complete_sync(&job->job, &error_abort);
     g_assert_cmpint(ret, ==, 0);
 
     blk_unref(blk_src);
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
index 5789893dda..fce836639a 100644
--- a/tests/test-blockjob-txn.c
+++ b/tests/test-blockjob-txn.c
@@ -24,16 +24,17 @@ typedef struct {
     int *result;
 } TestBlockJob;
 
-static void test_block_job_complete(BlockJob *job, void *opaque)
+static void test_block_job_complete(Job *job, void *opaque)
 {
-    BlockDriverState *bs = blk_bs(job->blk);
+    BlockJob *bjob = container_of(job, BlockJob, job);
+    BlockDriverState *bs = blk_bs(bjob->blk);
     int rc = (intptr_t)opaque;
 
-    if (block_job_is_cancelled(job)) {
+    if (job_is_cancelled(job)) {
         rc = -ECANCELED;
     }
 
-    block_job_completed(job, rc);
+    job_completed(job, rc);
     bdrv_unref(bs);
 }
 
@@ -44,18 +45,18 @@ static void coroutine_fn test_block_job_run(void *opaque)
 
     while (s->iterations--) {
         if (s->use_timer) {
-            block_job_sleep_ns(job, 0);
+            job_sleep_ns(&job->job, 0);
         } else {
-            block_job_yield(job);
+            job_yield(&job->job);
         }
 
-        if (block_job_is_cancelled(job)) {
+        if (job_is_cancelled(&job->job)) {
             break;
         }
     }
 
-    block_job_defer_to_main_loop(job, test_block_job_complete,
-                                 (void *)(intptr_t)s->rc);
+    job_defer_to_main_loop(&job->job, test_block_job_complete,
+                           (void *)(intptr_t)s->rc);
 }
 
 typedef struct {
@@ -66,7 +67,7 @@ typedef struct {
 static void test_block_job_cb(void *opaque, int ret)
 {
     TestBlockJobCBData *data = opaque;
-    if (!ret && block_job_is_cancelled(&data->job->common)) {
+    if (!ret && job_is_cancelled(&data->job->common.job)) {
         ret = -ECANCELED;
     }
     *data->result = ret;
@@ -74,8 +75,13 @@ static void test_block_job_cb(void *opaque, int ret)
 }
 
 static const BlockJobDriver test_block_job_driver = {
-    .instance_size = sizeof(TestBlockJob),
-    .start = test_block_job_run,
+    .job_driver = {
+        .instance_size = sizeof(TestBlockJob),
+        .free          = block_job_free,
+        .user_resume   = block_job_user_resume,
+        .drain         = block_job_drain,
+        .start         = test_block_job_run,
+    },
 };
 
 /* Create a block job that completes with a given return code after a given
@@ -87,7 +93,7 @@ static const BlockJobDriver test_block_job_driver = {
  */
 static BlockJob *test_block_job_start(unsigned int iterations,
                                       bool use_timer,
-                                      int rc, int *result, BlockJobTxn *txn)
+                                      int rc, int *result, JobTxn *txn)
 {
     BlockDriverState *bs;
     TestBlockJob *s;
@@ -102,7 +108,7 @@ static BlockJob *test_block_job_start(unsigned int iterations,
 
     snprintf(job_id, sizeof(job_id), "job%u", counter++);
     s = block_job_create(job_id, &test_block_job_driver, txn, bs,
-                         0, BLK_PERM_ALL, 0, BLOCK_JOB_DEFAULT,
+                         0, BLK_PERM_ALL, 0, JOB_DEFAULT,
                          test_block_job_cb, data, &error_abort);
     s->iterations = iterations;
     s->use_timer = use_timer;
@@ -116,15 +122,15 @@ static BlockJob *test_block_job_start(unsigned int iterations,
 static void test_single_job(int expected)
 {
     BlockJob *job;
-    BlockJobTxn *txn;
+    JobTxn *txn;
     int result = -EINPROGRESS;
 
-    txn = block_job_txn_new();
+    txn = job_txn_new();
     job = test_block_job_start(1, true, expected, &result, txn);
-    block_job_start(job);
+    job_start(&job->job);
 
     if (expected == -ECANCELED) {
-        block_job_cancel(job, false);
+        job_cancel(&job->job, false);
     }
 
     while (result == -EINPROGRESS) {
@@ -132,7 +138,7 @@ static void test_single_job(int expected)
     }
     g_assert_cmpint(result, ==, expected);
 
-    block_job_txn_unref(txn);
+    job_txn_unref(txn);
 }
 
 static void test_single_job_success(void)
@@ -154,26 +160,26 @@ static void test_pair_jobs(int expected1, int expected2)
 {
     BlockJob *job1;
     BlockJob *job2;
-    BlockJobTxn *txn;
+    JobTxn *txn;
     int result1 = -EINPROGRESS;
     int result2 = -EINPROGRESS;
 
-    txn = block_job_txn_new();
+    txn = job_txn_new();
     job1 = test_block_job_start(1, true, expected1, &result1, txn);
     job2 = test_block_job_start(2, true, expected2, &result2, txn);
-    block_job_start(job1);
-    block_job_start(job2);
+    job_start(&job1->job);
+    job_start(&job2->job);
 
     /* Release our reference now to trigger as many nice
      * use-after-free bugs as possible.
      */
-    block_job_txn_unref(txn);
+    job_txn_unref(txn);
 
     if (expected1 == -ECANCELED) {
-        block_job_cancel(job1, false);
+        job_cancel(&job1->job, false);
     }
     if (expected2 == -ECANCELED) {
-        block_job_cancel(job2, false);
+        job_cancel(&job2->job, false);
     }
 
     while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
@@ -216,23 +222,23 @@ static void test_pair_jobs_fail_cancel_race(void)
 {
     BlockJob *job1;
     BlockJob *job2;
-    BlockJobTxn *txn;
+    JobTxn *txn;
     int result1 = -EINPROGRESS;
     int result2 = -EINPROGRESS;
 
-    txn = block_job_txn_new();
+    txn = job_txn_new();
     job1 = test_block_job_start(1, true, -ECANCELED, &result1, txn);
     job2 = test_block_job_start(2, false, 0, &result2, txn);
-    block_job_start(job1);
-    block_job_start(job2);
+    job_start(&job1->job);
+    job_start(&job2->job);
 
-    block_job_cancel(job1, false);
+    job_cancel(&job1->job, false);
 
     /* Now make job2 finish before the main loop kicks jobs.  This simulates
      * the race between a pending kick and another job completing.
      */
-    block_job_enter(job2);
-    block_job_enter(job2);
+    job_enter(&job2->job);
+    job_enter(&job2->job);
 
     while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
         aio_poll(qemu_get_aio_context(), true);
@@ -241,7 +247,7 @@ static void test_pair_jobs_fail_cancel_race(void)
     g_assert_cmpint(result1, ==, -ECANCELED);
     g_assert_cmpint(result2, ==, -ECANCELED);
 
-    block_job_txn_unref(txn);
+    job_txn_unref(txn);
 }
 
 int main(int argc, char **argv)
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
index 8946bfd37b..e408d52351 100644
--- a/tests/test-blockjob.c
+++ b/tests/test-blockjob.c
@@ -17,7 +17,12 @@
 #include "sysemu/block-backend.h"
 
 static const BlockJobDriver test_block_job_driver = {
-    .instance_size = sizeof(BlockJob),
+    .job_driver = {
+        .instance_size = sizeof(BlockJob),
+        .free          = block_job_free,
+        .user_resume   = block_job_user_resume,
+        .drain         = block_job_drain,
+    },
 };
 
 static void block_job_cb(void *opaque, int ret)
@@ -38,9 +43,9 @@ static BlockJob *mk_job(BlockBackend *blk, const char *id,
         g_assert_null(errp);
         g_assert_nonnull(job);
         if (id) {
-            g_assert_cmpstr(job->id, ==, id);
+            g_assert_cmpstr(job->job.id, ==, id);
         } else {
-            g_assert_cmpstr(job->id, ==, blk_name(blk));
+            g_assert_cmpstr(job->job.id, ==, blk_name(blk));
         }
     } else {
         g_assert_nonnull(errp);
@@ -55,7 +60,7 @@ static BlockJob *do_test_id(BlockBackend *blk, const char *id,
                             bool should_succeed)
 {
     return mk_job(blk, id, &test_block_job_driver,
-                  should_succeed, BLOCK_JOB_DEFAULT);
+                  should_succeed, JOB_DEFAULT);
 }
 
 /* This creates a BlockBackend (optionally with a name) with a
@@ -124,11 +129,11 @@ static void test_job_ids(void)
     job[1] = do_test_id(blk[1], "id0", false);
 
     /* But once job[0] finishes we can reuse its ID */
-    block_job_early_fail(job[0]);
+    job_early_fail(&job[0]->job);
     job[1] = do_test_id(blk[1], "id0", true);
 
     /* No job ID specified, defaults to the backend name ('drive1') */
-    block_job_early_fail(job[1]);
+    job_early_fail(&job[1]->job);
     job[1] = do_test_id(blk[1], NULL, true);
 
     /* Duplicate job ID */
@@ -141,9 +146,9 @@ static void test_job_ids(void)
     /* This one is valid */
     job[2] = do_test_id(blk[2], "id_2", true);
 
-    block_job_early_fail(job[0]);
-    block_job_early_fail(job[1]);
-    block_job_early_fail(job[2]);
+    job_early_fail(&job[0]->job);
+    job_early_fail(&job[1]->job);
+    job_early_fail(&job[2]->job);
 
     destroy_blk(blk[0]);
     destroy_blk(blk[1]);
@@ -158,16 +163,16 @@ typedef struct CancelJob {
     bool completed;
 } CancelJob;
 
-static void cancel_job_completed(BlockJob *job, void *opaque)
+static void cancel_job_completed(Job *job, void *opaque)
 {
     CancelJob *s = opaque;
     s->completed = true;
-    block_job_completed(job, 0);
+    job_completed(job, 0);
 }
 
-static void cancel_job_complete(BlockJob *job, Error **errp)
+static void cancel_job_complete(Job *job, Error **errp)
 {
-    CancelJob *s = container_of(job, CancelJob, common);
+    CancelJob *s = container_of(job, CancelJob, common.job);
     s->should_complete = true;
 }
 
@@ -176,25 +181,30 @@ static void coroutine_fn cancel_job_start(void *opaque)
     CancelJob *s = opaque;
 
     while (!s->should_complete) {
-        if (block_job_is_cancelled(&s->common)) {
+        if (job_is_cancelled(&s->common.job)) {
             goto defer;
         }
 
-        if (!s->common.ready && s->should_converge) {
-            block_job_event_ready(&s->common);
+        if (!job_is_ready(&s->common.job) && s->should_converge) {
+            job_transition_to_ready(&s->common.job);
         }
 
-        block_job_sleep_ns(&s->common, 100000);
+        job_sleep_ns(&s->common.job, 100000);
     }
 
  defer:
-    block_job_defer_to_main_loop(&s->common, cancel_job_completed, s);
+    job_defer_to_main_loop(&s->common.job, cancel_job_completed, s);
 }
 
 static const BlockJobDriver test_cancel_driver = {
-    .instance_size = sizeof(CancelJob),
-    .start         = cancel_job_start,
-    .complete      = cancel_job_complete,
+    .job_driver = {
+        .instance_size = sizeof(CancelJob),
+        .free          = block_job_free,
+        .user_resume   = block_job_user_resume,
+        .drain         = block_job_drain,
+        .start         = cancel_job_start,
+        .complete      = cancel_job_complete,
+    },
 };
 
 static CancelJob *create_common(BlockJob **pjob)
@@ -205,9 +215,9 @@ static CancelJob *create_common(BlockJob **pjob)
 
     blk = create_blk(NULL);
     job = mk_job(blk, "Steve", &test_cancel_driver, true,
-                 BLOCK_JOB_MANUAL_FINALIZE | BLOCK_JOB_MANUAL_DISMISS);
-    block_job_ref(job);
-    assert(job->status == BLOCK_JOB_STATUS_CREATED);
+                 JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
+    job_ref(&job->job);
+    assert(job->job.status == JOB_STATUS_CREATED);
     s = container_of(job, CancelJob, common);
     s->blk = blk;
 
@@ -219,16 +229,15 @@ static void cancel_common(CancelJob *s)
 {
     BlockJob *job = &s->common;
     BlockBackend *blk = s->blk;
-    BlockJobStatus sts = job->status;
+    JobStatus sts = job->job.status;
 
-    block_job_cancel_sync(job);
-    if ((sts != BLOCK_JOB_STATUS_CREATED) &&
-        (sts != BLOCK_JOB_STATUS_CONCLUDED)) {
-        BlockJob *dummy = job;
-        block_job_dismiss(&dummy, &error_abort);
+    job_cancel_sync(&job->job);
+    if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
+        Job *dummy = &job->job;
+        job_dismiss(&dummy, &error_abort);
     }
-    assert(job->status == BLOCK_JOB_STATUS_NULL);
-    block_job_unref(job);
+    assert(job->job.status == JOB_STATUS_NULL);
+    job_unref(&job->job);
     destroy_blk(blk);
 }
 
@@ -248,8 +257,8 @@ static void test_cancel_running(void)
 
     s = create_common(&job);
 
-    block_job_start(job);
-    assert(job->status == BLOCK_JOB_STATUS_RUNNING);
+    job_start(&job->job);
+    assert(job->job.status == JOB_STATUS_RUNNING);
 
     cancel_common(s);
 }
@@ -261,12 +270,12 @@ static void test_cancel_paused(void)
 
     s = create_common(&job);
 
-    block_job_start(job);
-    assert(job->status == BLOCK_JOB_STATUS_RUNNING);
+    job_start(&job->job);
+    assert(job->job.status == JOB_STATUS_RUNNING);
 
-    block_job_user_pause(job, &error_abort);
-    block_job_enter(job);
-    assert(job->status == BLOCK_JOB_STATUS_PAUSED);
+    job_user_pause(&job->job, &error_abort);
+    job_enter(&job->job);
+    assert(job->job.status == JOB_STATUS_PAUSED);
 
     cancel_common(s);
 }
@@ -278,12 +287,12 @@ static void test_cancel_ready(void)
 
     s = create_common(&job);
 
-    block_job_start(job);
-    assert(job->status == BLOCK_JOB_STATUS_RUNNING);
+    job_start(&job->job);
+    assert(job->job.status == JOB_STATUS_RUNNING);
 
     s->should_converge = true;
-    block_job_enter(job);
-    assert(job->status == BLOCK_JOB_STATUS_READY);
+    job_enter(&job->job);
+    assert(job->job.status == JOB_STATUS_READY);
 
     cancel_common(s);
 }
@@ -295,16 +304,16 @@ static void test_cancel_standby(void)
 
     s = create_common(&job);
 
-    block_job_start(job);
-    assert(job->status == BLOCK_JOB_STATUS_RUNNING);
+    job_start(&job->job);
+    assert(job->job.status == JOB_STATUS_RUNNING);
 
     s->should_converge = true;
-    block_job_enter(job);
-    assert(job->status == BLOCK_JOB_STATUS_READY);
+    job_enter(&job->job);
+    assert(job->job.status == JOB_STATUS_READY);
 
-    block_job_user_pause(job, &error_abort);
-    block_job_enter(job);
-    assert(job->status == BLOCK_JOB_STATUS_STANDBY);
+    job_user_pause(&job->job, &error_abort);
+    job_enter(&job->job);
+    assert(job->job.status == JOB_STATUS_STANDBY);
 
     cancel_common(s);
 }
@@ -316,19 +325,19 @@ static void test_cancel_pending(void)
 
     s = create_common(&job);
 
-    block_job_start(job);
-    assert(job->status == BLOCK_JOB_STATUS_RUNNING);
+    job_start(&job->job);
+    assert(job->job.status == JOB_STATUS_RUNNING);
 
     s->should_converge = true;
-    block_job_enter(job);
-    assert(job->status == BLOCK_JOB_STATUS_READY);
+    job_enter(&job->job);
+    assert(job->job.status == JOB_STATUS_READY);
 
-    block_job_complete(job, &error_abort);
-    block_job_enter(job);
+    job_complete(&job->job, &error_abort);
+    job_enter(&job->job);
     while (!s->completed) {
         aio_poll(qemu_get_aio_context(), true);
     }
-    assert(job->status == BLOCK_JOB_STATUS_PENDING);
+    assert(job->job.status == JOB_STATUS_PENDING);
 
     cancel_common(s);
 }
@@ -340,22 +349,22 @@ static void test_cancel_concluded(void)
 
     s = create_common(&job);
 
-    block_job_start(job);
-    assert(job->status == BLOCK_JOB_STATUS_RUNNING);
+    job_start(&job->job);
+    assert(job->job.status == JOB_STATUS_RUNNING);
 
     s->should_converge = true;
-    block_job_enter(job);
-    assert(job->status == BLOCK_JOB_STATUS_READY);
+    job_enter(&job->job);
+    assert(job->job.status == JOB_STATUS_READY);
 
-    block_job_complete(job, &error_abort);
-    block_job_enter(job);
+    job_complete(&job->job, &error_abort);
+    job_enter(&job->job);
     while (!s->completed) {
         aio_poll(qemu_get_aio_context(), true);
     }
-    assert(job->status == BLOCK_JOB_STATUS_PENDING);
+    assert(job->job.status == JOB_STATUS_PENDING);
 
-    block_job_finalize(job, &error_abort);
-    assert(job->status == BLOCK_JOB_STATUS_CONCLUDED);
+    job_finalize(&job->job, &error_abort);
+    assert(job->job.status == JOB_STATUS_CONCLUDED);
 
     cancel_common(s);
 }
diff --git a/trace-events b/trace-events
index ed71f44649..c445f54773 100644
--- a/trace-events
+++ b/trace-events
@@ -104,6 +104,20 @@ gdbstub_err_invalid_rle(void) "got invalid RLE sequence"
 gdbstub_err_checksum_invalid(uint8_t ch) "got invalid command checksum digit: 0x%02x"
 gdbstub_err_checksum_incorrect(uint8_t expected, uint8_t got) "got command packet with incorrect checksum, expected=0x%02x, received=0x%02x"
 
+# job.c
+job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)"
+job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)"
+job_completed(void *job, int ret, int jret) "job %p ret %d corrected ret %d"
+
+# job-qmp.c
+qmp_job_cancel(void *job) "job %p"
+qmp_job_pause(void *job) "job %p"
+qmp_job_resume(void *job) "job %p"
+qmp_job_complete(void *job) "job %p"
+qmp_job_finalize(void *job) "job %p"
+qmp_job_dismiss(void *job) "job %p"
+
+
 ### Guest events, keep at bottom
 
 
diff --git a/vl.c b/vl.c
index d5836c65ae..038d7f8042 100644
--- a/vl.c
+++ b/vl.c
@@ -4683,6 +4683,7 @@ int main(int argc, char **argv, char **envp)
     /* No more vcpu or device emulation activity beyond this point */
     vm_shutdown();
 
+    job_cancel_sync_all();
     bdrv_close_all();
 
     res_free();