author     Stefan Hajnoczi <stefanha@redhat.com>   2023-12-21 19:43:58 -0500
committer  Stefan Hajnoczi <stefanha@redhat.com>   2023-12-21 19:43:58 -0500
commit     6370d13c62c300826f8eb80e4ed9d2e67bad3fa7
tree       0f4409771270ba5ed28e53f284c117b8400b062d /tests
parent     191710c221f65b1542f6ea7fa4d30dde6e134fd7
parent     b6948ab01df068bef591868c22d1f873d2d05cde
Merge tag 'for-upstream' of https://repo.or.cz/qemu/kevin into staging
Block layer patches

- virtio-blk: Multiqueue support (configurable iothread per queue)
- Made NBD export and hw/scsi thread-safe without AioContext lock
- Fix crash when loading snapshot on inactive node
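
As a rough sketch of the new multiqueue interface (the iothread and node
names below are made up, and the exact mapping syntax should be checked
against the virtio-blk documentation added by this series), a configuration
with one iothread per queue might look like:

    qemu-system-x86_64 \
        -object iothread,id=iothread0 \
        -object iothread,id=iothread1 \
        -blockdev file,filename=disk.img,node-name=disk0-file \
        -blockdev qcow2,file=disk0-file,node-name=disk0 \
        -device '{"driver": "virtio-blk-pci", "drive": "disk0",
                  "iothread-vq-mapping": [{"iothread": "iothread0"},
                                          {"iothread": "iothread1"}]}'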

# -----BEGIN PGP SIGNATURE-----
#
# iQJFBAABCAAvFiEE3D3rFZqa+V09dFb+fwmycsiPL9YFAmWEw/8RHGt3b2xmQHJl
# ZGhhdC5jb20ACgkQfwmycsiPL9bX0Q/9G+Qx8mQGmbxJzvtW7/1eaeJ5CPCYT8w3
# 033S5hCil43mvX2aQKTFrh1Nz4aYlqMDyURvNu7nigyADY+kBpzzJ1MFr6WQrzYv
# QEk4jf/FOllfKn8+/A0z2NJDhtpVgqKKHBsFZl8FBUcxd79daTaoPPM3BNNsOHQD
# o7Z7hR/iEdG9dkAh/fpwctsgMO/CoN0BRRyN2OByj03zeu1TlDJ6lX0hxlcJl9Jw
# vLo81rWTCqKRu+SbjBsb0HfYE2hP54A4hvxn4I9vYGYDz8ElucluYyeqUEK+mdrX
# /DQBdb+Osl1FD6MuIaFR+Rgp9Mu5h6ZOdvUyCY0zuByti851hV8qjW9BtrTfqaMh
# LMOKoL6c5B8XJYWVGAGrJexIw1hHq5WKdXN9zp4FZA4tOyHUMRjHuR1+zScU6gnU
# WRSIQR46w75A13clWyJs9Hf/q5Fp/1KT4nfuZ/hmiXvxdsYY5x1w/W3s9tRNjYKL
# d6FVk17cFc6Ksb7lWvDCgg61BNZtGm4Clmw0kJ6V1reiQz7AvDLmduLUQbmrVt7G
# gWAY4b2L9YXJpEx5en0kE50KLAUw/E9ozbOq6ZT9nFUKeNAPC8PS5lK7vYVwebCk
# VA0t8pFzKhdB1bJaG5fMSRPBuqkvhsaDEEDABlSro8dyyjoQBaEdk5P9Kxe66hBc
# xhTmDPdv/JM=
# =E3Zh
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 21 Dec 2023 18:02:23 EST
# gpg:                using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6
# gpg:                issuer "kwolf@redhat.com"
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74  56FE 7F09 B272 C88F 2FD6

* tag 'for-upstream' of https://repo.or.cz/qemu/kevin: (33 commits)
  virtio-blk: add iothread-vq-mapping parameter
  qdev: add IOThreadVirtQueueMappingList property type
  qdev-properties: alias all object class properties
  string-output-visitor: show structs as "<omitted>"
  block-coroutine-wrapper: use qemu_get_current_aio_context()
  block: remove outdated AioContext locking comments
  job: remove outdated AioContext locking comments
  scsi: remove outdated AioContext lock comment
  docs: remove AioContext lock from IOThread docs
  aio: remove aio_context_acquire()/aio_context_release() API
  aio-wait: draw equivalence between AIO_WAIT_WHILE() and AIO_WAIT_WHILE_UNLOCKED()
  scsi: remove AioContext locking
  block: remove bdrv_co_lock()
  block: remove AioContext locking
  graph-lock: remove AioContext locking
  aio: make aio_context_acquire()/aio_context_release() a no-op
  tests: remove aio_context_acquire() tests
  scsi: assert that callbacks run in the correct AioContext
  virtio-scsi: replace AioContext lock with tmf_bh_lock
  dma-helpers: don't lock AioContext in dma_blk_cb()
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'tests')
-rwxr-xr-x  tests/qemu-iotests/202                                 |   2
-rwxr-xr-x  tests/qemu-iotests/203                                 |   3
-rwxr-xr-x  tests/qemu-iotests/tests/qcow2-internal-snapshots      | 170
-rw-r--r--  tests/qemu-iotests/tests/qcow2-internal-snapshots.out  | 107
-rw-r--r--  tests/tsan/suppressions.tsan                           |   1
-rw-r--r--  tests/unit/test-aio.c                                  |  67
-rw-r--r--  tests/unit/test-bdrv-drain.c                           |  91
-rw-r--r--  tests/unit/test-bdrv-graph-mod.c                       |  26
-rw-r--r--  tests/unit/test-block-iothread.c                       |  31
-rw-r--r--  tests/unit/test-blockjob.c                             | 137
-rw-r--r--  tests/unit/test-replication.c                          |  11
11 files changed, 313 insertions(+), 333 deletions(-)
diff --git a/tests/qemu-iotests/202 b/tests/qemu-iotests/202
index b784dcd791..13304242e5 100755
--- a/tests/qemu-iotests/202
+++ b/tests/qemu-iotests/202
@@ -21,7 +21,7 @@
 # Check that QMP 'transaction' blockdev-snapshot-sync with multiple drives on a
 # single IOThread completes successfully.  This particular command triggered a
 # hang due to recursive AioContext locking and BDRV_POLL_WHILE().  Protect
-# against regressions.
+# against regressions even though the AioContext lock no longer exists.
 
 import iotests
 
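For reference, the QMP 'transaction' command exercised by this test groups
several blockdev-snapshot-sync actions so that they succeed or fail as a
unit; a minimal sketch (device names and snapshot paths are hypothetical):

    { "execute": "transaction",
      "arguments": { "actions": [
          { "type": "blockdev-snapshot-sync",
            "data": { "device": "drive0",
                      "snapshot-file": "/tmp/drive0.snap.qcow2" } },
          { "type": "blockdev-snapshot-sync",
            "data": { "device": "drive1",
                      "snapshot-file": "/tmp/drive1.snap.qcow2" } }
      ] } }
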
diff --git a/tests/qemu-iotests/203 b/tests/qemu-iotests/203
index ab80fd0e44..1ba878522b 100755
--- a/tests/qemu-iotests/203
+++ b/tests/qemu-iotests/203
@@ -21,7 +21,8 @@
 # Check that QMP 'migrate' with multiple drives on a single IOThread completes
 # successfully.  This particular command triggered a hang in the source QEMU
 # process due to recursive AioContext locking in bdrv_invalidate_all() and
-# BDRV_POLL_WHILE().
+# BDRV_POLL_WHILE().  Protect against regressions even though the AioContext
+# lock no longer exists.
 
 import iotests
 
diff --git a/tests/qemu-iotests/tests/qcow2-internal-snapshots b/tests/qemu-iotests/tests/qcow2-internal-snapshots
new file mode 100755
index 0000000000..36523aba06
--- /dev/null
+++ b/tests/qemu-iotests/tests/qcow2-internal-snapshots
@@ -0,0 +1,170 @@
+#!/usr/bin/env bash
+# group: rw quick
+#
+# Test case for internal snapshots in qcow2
+#
+# Copyright (C) 2023 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=kwolf@redhat.com
+
+seq="$(basename $0)"
+echo "QA output created by $seq"
+
+status=1	# failure is the default!
+
+_cleanup()
+{
+	_cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ../common.rc
+. ../common.filter
+
+# This tests qcow2-specific low-level functionality
+_supported_fmt qcow2
+_supported_proto generic
+# Internal snapshots are (currently) impossible with refcount_bits=1,
+# and generally impossible with external data files
+_unsupported_imgopts 'compat=0.10' 'refcount_bits=1[^0-9]' data_file
+
+IMG_SIZE=64M
+
+_qemu()
+{
+    $QEMU -no-shutdown -nographic -monitor stdio -serial none \
+          -blockdev file,filename="$TEST_IMG",node-name=disk0-file \
+          -blockdev "$IMGFMT",file=disk0-file,node-name=disk0 \
+          -object iothread,id=iothread0 \
+          -device virtio-scsi,iothread=iothread0 \
+          -device scsi-hd,drive=disk0,share-rw=on \
+          "$@" 2>&1 |\
+    _filter_qemu | _filter_hmp | _filter_qemu_io
+}
+
+_make_test_img $IMG_SIZE
+
+echo
+echo "=== Write some data, take a snapshot and overwrite part of it ==="
+echo
+
+{
+    echo 'qemu-io disk0 "write -P0x11 0 1M"'
+    # Give qemu some time to boot before saving the VM state
+    sleep 0.5
+    echo "savevm snap0"
+    echo 'qemu-io disk0 "write -P0x22 0 512k"'
+    echo "quit"
+} | _qemu
+
+echo
+$QEMU_IMG snapshot -l "$TEST_IMG" | _filter_date | _filter_vmstate_size
+_check_test_img
+
+echo
+echo "=== Verify that loading the snapshot reverts to the old content ==="
+echo
+
+{
+    # -loadvm reverted the write from the previous QEMU instance
+    echo 'qemu-io disk0 "read -P0x11 0 1M"'
+
+    # Verify that it works without restarting QEMU, too
+    echo 'qemu-io disk0 "write -P0x33 512k 512k"'
+    echo "loadvm snap0"
+    echo 'qemu-io disk0 "read -P0x11 0 1M"'
+
+    # Verify COW by writing a partial cluster
+    echo 'qemu-io disk0 "write -P0x33 63k 2k"'
+    echo 'qemu-io disk0 "read -P0x11 0 63k"'
+    echo 'qemu-io disk0 "read -P0x33 63k 2k"'
+    echo 'qemu-io disk0 "read -P0x11 65k 63k"'
+
+    # Take a second snapshot
+    echo "savevm snap1"
+
+    echo "quit"
+} | _qemu -loadvm snap0
+
+echo
+$QEMU_IMG snapshot -l "$TEST_IMG" | _filter_date | _filter_vmstate_size
+_check_test_img
+
+echo
+echo "=== qemu-img snapshot can revert to snapshots ==="
+echo
+
+$QEMU_IMG snapshot -a snap0 "$TEST_IMG"
+$QEMU_IO -c "read -P0x11 0 1M" "$TEST_IMG" | _filter_qemu_io
+$QEMU_IMG snapshot -a snap1 "$TEST_IMG"
+$QEMU_IO \
+    -c "read -P0x11 0 63k" \
+    -c "read -P0x33 63k 2k" \
+    -c "read -P0x11 65k 63k" \
+    "$TEST_IMG" | _filter_qemu_io
+
+echo
+echo "=== Deleting snapshots ==="
+echo
+{
+    # The active layer stays unaffected by deleting the snapshot
+    echo "delvm snap1"
+    echo 'qemu-io disk0 "read -P0x11 0 63k"'
+    echo 'qemu-io disk0 "read -P0x33 63k 2k"'
+    echo 'qemu-io disk0 "read -P0x11 65k 63k"'
+
+    echo "quit"
+} | _qemu
+
+
+echo
+$QEMU_IMG snapshot -l "$TEST_IMG" | _filter_date | _filter_vmstate_size
+_check_test_img
+
+echo
+echo "=== Error cases ==="
+echo
+
+# snap1 should not exist any more
+_qemu -loadvm snap1
+
+echo
+{
+    echo "loadvm snap1"
+    echo "quit"
+} | _qemu
+
+# Snapshot operations and inactive images are incompatible
+echo
+_qemu -loadvm snap0 -incoming defer
+{
+    echo "loadvm snap0"
+    echo "delvm snap0"
+    echo "savevm snap1"
+    echo "quit"
+} | _qemu -incoming defer
+
+# -loadvm and -preconfig are incompatible
+echo
+_qemu -loadvm snap0 -preconfig
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
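
To try the new test locally, an invocation along these lines should work
from a configured build tree (paths may differ per setup):

    cd build/tests/qemu-iotests
    ./check -qcow2 tests/qcow2-internal-snapshots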
diff --git a/tests/qemu-iotests/tests/qcow2-internal-snapshots.out b/tests/qemu-iotests/tests/qcow2-internal-snapshots.out
new file mode 100644
index 0000000000..438f535e6a
--- /dev/null
+++ b/tests/qemu-iotests/tests/qcow2-internal-snapshots.out
@@ -0,0 +1,107 @@
+QA output created by qcow2-internal-snapshots
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+
+=== Write some data, take a snapshot and overwrite part of it ===
+
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) qemu-io disk0 "write -P0x11 0 1M"
+wrote 1048576/1048576 bytes at offset 0
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) savevm snap0
+(qemu) qemu-io disk0 "write -P0x22 0 512k"
+wrote 524288/524288 bytes at offset 0
+512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) quit
+
+Snapshot list:
+ID        TAG               VM SIZE                DATE     VM CLOCK     ICOUNT
+1         snap0                SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000
+No errors were found on the image.
+
+=== Verify that loading the snapshot reverts to the old content ===
+
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) qemu-io disk0 "read -P0x11 0 1M"
+read 1048576/1048576 bytes at offset 0
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) qemu-io disk0 "write -P0x33 512k 512k"
+wrote 524288/524288 bytes at offset 524288
+512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) loadvm snap0
+(qemu) qemu-io disk0 "read -P0x11 0 1M"
+read 1048576/1048576 bytes at offset 0
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) qemu-io disk0 "write -P0x33 63k 2k"
+wrote 2048/2048 bytes at offset 64512
+2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) qemu-io disk0 "read -P0x11 0 63k"
+read 64512/64512 bytes at offset 0
+63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) qemu-io disk0 "read -P0x33 63k 2k"
+read 2048/2048 bytes at offset 64512
+2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) qemu-io disk0 "read -P0x11 65k 63k"
+read 64512/64512 bytes at offset 66560
+63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) savevm snap1
+(qemu) quit
+
+Snapshot list:
+ID        TAG               VM SIZE                DATE     VM CLOCK     ICOUNT
+1         snap0                SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000
+2         snap1                SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000
+No errors were found on the image.
+
+=== qemu-img snapshot can revert to snapshots ===
+
+read 1048576/1048576 bytes at offset 0
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 64512/64512 bytes at offset 0
+63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 2048/2048 bytes at offset 64512
+2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 64512/64512 bytes at offset 66560
+63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+=== Deleting snapshots ===
+
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) delvm snap1
+(qemu) qemu-io disk0 "read -P0x11 0 63k"
+read 64512/64512 bytes at offset 0
+63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) qemu-io disk0 "read -P0x33 63k 2k"
+read 2048/2048 bytes at offset 64512
+2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) qemu-io disk0 "read -P0x11 65k 63k"
+read 64512/64512 bytes at offset 66560
+63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) quit
+
+Snapshot list:
+ID        TAG               VM SIZE                DATE     VM CLOCK     ICOUNT
+1         snap0                SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000
+No errors were found on the image.
+
+=== Error cases ===
+
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) QEMU_PROG: Snapshot 'snap1' does not exist in one or more devices
+
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) loadvm snap1
+Error: Snapshot 'snap1' does not exist in one or more devices
+(qemu) quit
+
+QEMU_PROG: 'incoming' and 'loadvm' options are mutually exclusive
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) loadvm snap0
+Error: Device 'disk0' is writable but does not support snapshots
+(qemu) delvm snap0
+Error: Device 'disk0' is writable but does not support snapshots
+(qemu) savevm snap1
+Error: Device 'disk0' is writable but does not support snapshots
+(qemu) quit
+
+QEMU_PROG: 'preconfig' and 'loadvm' options are mutually exclusive
+*** done
diff --git a/tests/tsan/suppressions.tsan b/tests/tsan/suppressions.tsan
index d9a002a2ef..b3ef59c27c 100644
--- a/tests/tsan/suppressions.tsan
+++ b/tests/tsan/suppressions.tsan
@@ -4,7 +4,6 @@
 
 # TSan reports a double lock on RECURSIVE mutexes.
 # Since the recursive lock is intentional, we choose to ignore it.
-mutex:aio_context_acquire
 mutex:pthread_mutex_lock
 
 # TSan reports a race between pthread_mutex_init() and
diff --git a/tests/unit/test-aio.c b/tests/unit/test-aio.c
index 337b6e4ea7..e77d86be87 100644
--- a/tests/unit/test-aio.c
+++ b/tests/unit/test-aio.c
@@ -100,76 +100,12 @@ static void event_ready_cb(EventNotifier *e)
 
 /* Tests using aio_*.  */
 
-typedef struct {
-    QemuMutex start_lock;
-    EventNotifier notifier;
-    bool thread_acquired;
-} AcquireTestData;
-
-static void *test_acquire_thread(void *opaque)
-{
-    AcquireTestData *data = opaque;
-
-    /* Wait for other thread to let us start */
-    qemu_mutex_lock(&data->start_lock);
-    qemu_mutex_unlock(&data->start_lock);
-
-    /* event_notifier_set might be called either before or after
-     * the main thread's call to poll().  The test case's outcome
-     * should be the same in either case.
-     */
-    event_notifier_set(&data->notifier);
-    aio_context_acquire(ctx);
-    aio_context_release(ctx);
-
-    data->thread_acquired = true; /* success, we got here */
-
-    return NULL;
-}
-
 static void set_event_notifier(AioContext *nctx, EventNotifier *notifier,
                                EventNotifierHandler *handler)
 {
     aio_set_event_notifier(nctx, notifier, handler, NULL, NULL);
 }
 
-static void dummy_notifier_read(EventNotifier *n)
-{
-    event_notifier_test_and_clear(n);
-}
-
-static void test_acquire(void)
-{
-    QemuThread thread;
-    AcquireTestData data;
-
-    /* Dummy event notifier ensures aio_poll() will block */
-    event_notifier_init(&data.notifier, false);
-    set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
-    g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */
-
-    qemu_mutex_init(&data.start_lock);
-    qemu_mutex_lock(&data.start_lock);
-    data.thread_acquired = false;
-
-    qemu_thread_create(&thread, "test_acquire_thread",
-                       test_acquire_thread,
-                       &data, QEMU_THREAD_JOINABLE);
-
-    /* Block in aio_poll(), let other thread kick us and acquire context */
-    aio_context_acquire(ctx);
-    qemu_mutex_unlock(&data.start_lock); /* let the thread run */
-    g_assert(aio_poll(ctx, true));
-    g_assert(!data.thread_acquired);
-    aio_context_release(ctx);
-
-    qemu_thread_join(&thread);
-    set_event_notifier(ctx, &data.notifier, NULL);
-    event_notifier_cleanup(&data.notifier);
-
-    g_assert(data.thread_acquired);
-}
-
 static void test_bh_schedule(void)
 {
     BHTestData data = { .n = 0 };
@@ -879,7 +815,7 @@ static void test_worker_thread_co_enter(void)
     qemu_thread_get_self(&this_thread);
     co = qemu_coroutine_create(co_check_current_thread, &this_thread);
 
-    qemu_thread_create(&worker_thread, "test_acquire_thread",
+    qemu_thread_create(&worker_thread, "test_aio_co_enter",
                        test_aio_co_enter,
                        co, QEMU_THREAD_JOINABLE);
 
@@ -899,7 +835,6 @@ int main(int argc, char **argv)
     while (g_main_context_iteration(NULL, false));
 
     g_test_init(&argc, &argv, NULL);
-    g_test_add_func("/aio/acquire",                 test_acquire);
     g_test_add_func("/aio/bh/schedule",             test_bh_schedule);
     g_test_add_func("/aio/bh/schedule10",           test_bh_schedule10);
     g_test_add_func("/aio/bh/cancel",               test_bh_cancel);
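
test_acquire() is removed because the API it exercised no longer exists.
Code that previously took the AioContext lock to touch state owned by
another thread now dispatches the work to that context instead; a minimal
sketch using the existing one-shot bottom-half API (the Work struct and
callback are illustrative, not part of this series):

    #include "qemu/osdep.h"
    #include "block/aio.h"

    typedef struct {
        int value;   /* illustrative per-work state */
    } Work;

    /* Runs in the home thread of the AioContext it was scheduled on. */
    static void do_work_bh(void *opaque)
    {
        Work *w = opaque;
        w->value++;
    }

    /*
     * Where callers once wrapped cross-thread access in
     * aio_context_acquire(ctx)/aio_context_release(ctx), they now hand
     * the work to the context itself:
     */
    static void schedule_work(AioContext *ctx, Work *w)
    {
        aio_bh_schedule_oneshot(ctx, do_work_bh, w);
    }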
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 704d1a3f36..17830a69c1 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -179,13 +179,7 @@ static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
 
 static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
 {
-    if (drain_type != BDRV_DRAIN_ALL) {
-        aio_context_acquire(bdrv_get_aio_context(bs));
-    }
     do_drain_begin(drain_type, bs);
-    if (drain_type != BDRV_DRAIN_ALL) {
-        aio_context_release(bdrv_get_aio_context(bs));
-    }
 }
 
 static BlockBackend * no_coroutine_fn test_setup(void)
@@ -209,13 +203,7 @@ static BlockBackend * no_coroutine_fn test_setup(void)
 
 static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
 {
-    if (drain_type != BDRV_DRAIN_ALL) {
-        aio_context_acquire(bdrv_get_aio_context(bs));
-    }
     do_drain_end(drain_type, bs);
-    if (drain_type != BDRV_DRAIN_ALL) {
-        aio_context_release(bdrv_get_aio_context(bs));
-    }
 }
 
 /*
@@ -520,12 +508,8 @@ static void test_iothread_main_thread_bh(void *opaque)
 {
     struct test_iothread_data *data = opaque;
 
-    /* Test that the AioContext is not yet locked in a random BH that is
-     * executed during drain, otherwise this would deadlock. */
-    aio_context_acquire(bdrv_get_aio_context(data->bs));
     bdrv_flush(data->bs);
     bdrv_dec_in_flight(data->bs); /* incremented by test_iothread_common() */
-    aio_context_release(bdrv_get_aio_context(data->bs));
 }
 
 /*
@@ -567,7 +551,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
     blk_set_disable_request_queuing(blk, true);
 
     blk_set_aio_context(blk, ctx_a, &error_abort);
-    aio_context_acquire(ctx_a);
 
     s->bh_indirection_ctx = ctx_b;
 
@@ -582,8 +565,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
     g_assert(acb != NULL);
     g_assert_cmpint(aio_ret, ==, -EINPROGRESS);
 
-    aio_context_release(ctx_a);
-
     data = (struct test_iothread_data) {
         .bs         = bs,
         .drain_type = drain_type,
@@ -592,10 +573,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
 
     switch (drain_thread) {
     case 0:
-        if (drain_type != BDRV_DRAIN_ALL) {
-            aio_context_acquire(ctx_a);
-        }
-
         /*
          * Increment in_flight so that do_drain_begin() waits for
          * test_iothread_main_thread_bh(). This prevents the race between
@@ -613,20 +590,10 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
         do_drain_begin(drain_type, bs);
         g_assert_cmpint(bs->in_flight, ==, 0);
 
-        if (drain_type != BDRV_DRAIN_ALL) {
-            aio_context_release(ctx_a);
-        }
         qemu_event_wait(&done_event);
-        if (drain_type != BDRV_DRAIN_ALL) {
-            aio_context_acquire(ctx_a);
-        }
 
         g_assert_cmpint(aio_ret, ==, 0);
         do_drain_end(drain_type, bs);
-
-        if (drain_type != BDRV_DRAIN_ALL) {
-            aio_context_release(ctx_a);
-        }
         break;
     case 1:
         co = qemu_coroutine_create(test_iothread_drain_co_entry, &data);
@@ -637,9 +604,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
         g_assert_not_reached();
     }
 
-    aio_context_acquire(ctx_a);
     blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
-    aio_context_release(ctx_a);
 
     bdrv_unref(bs);
     blk_unref(blk);
@@ -757,7 +722,6 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
     BlockJob *job;
     TestBlockJob *tjob;
     IOThread *iothread = NULL;
-    AioContext *ctx;
     int ret;
 
     src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
@@ -787,11 +751,11 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
     }
 
     if (use_iothread) {
+        AioContext *ctx;
+
         iothread = iothread_new();
         ctx = iothread_get_aio_context(iothread);
         blk_set_aio_context(blk_src, ctx, &error_abort);
-    } else {
-        ctx = qemu_get_aio_context();
     }
 
     target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
@@ -800,16 +764,15 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
     blk_insert_bs(blk_target, target, &error_abort);
     blk_set_allow_aio_context_change(blk_target, true);
 
-    aio_context_acquire(ctx);
     tjob = block_job_create("job0", &test_job_driver, NULL, src,
                             0, BLK_PERM_ALL,
                             0, 0, NULL, NULL, &error_abort);
     tjob->bs = src;
     job = &tjob->common;
 
-    bdrv_graph_wrlock(target);
+    bdrv_graph_wrlock();
     block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
-    bdrv_graph_wrunlock(target);
+    bdrv_graph_wrunlock();
 
     switch (result) {
     case TEST_JOB_SUCCESS:
@@ -821,7 +784,6 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
         tjob->prepare_ret = -EIO;
         break;
     }
-    aio_context_release(ctx);
 
     job_start(&job->job);
 
@@ -912,12 +874,10 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
     }
     g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
 
-    aio_context_acquire(ctx);
     if (use_iothread) {
         blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
         assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
     }
-    aio_context_release(ctx);
 
     blk_unref(blk_src);
     blk_unref(blk_target);
@@ -991,11 +951,11 @@ static void bdrv_test_top_close(BlockDriverState *bs)
 {
     BdrvChild *c, *next_c;
 
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
         bdrv_unref_child(bs, c);
     }
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 }
 
 static int coroutine_fn GRAPH_RDLOCK
@@ -1085,10 +1045,10 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
 
     null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                         &error_abort);
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
                       BDRV_CHILD_DATA, &error_abort);
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
     /* This child will be the one to pass to requests through to, and
      * it will stall until a drain occurs */
@@ -1096,21 +1056,21 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
                                     &error_abort);
     child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
     /* Takes our reference to child_bs */
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
                                         &child_of_bds,
                                         BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
                                         &error_abort);
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
     /* This child is just there to be deleted
      * (for detach_instead_of_delete == true) */
     null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                         &error_abort);
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
                       &error_abort);
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
     blk_insert_bs(blk, bs, &error_abort);
@@ -1193,14 +1153,14 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque)
 
     bdrv_dec_in_flight(data->child_b->bs);
 
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     bdrv_unref_child(data->parent_b, data->child_b);
 
     bdrv_ref(data->c);
     data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
                                       &child_of_bds, BDRV_CHILD_DATA,
                                       &error_abort);
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 }
 
 static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret)
@@ -1298,7 +1258,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
     /* Set child relationships */
     bdrv_ref(b);
     bdrv_ref(a);
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds,
                                 BDRV_CHILD_DATA, &error_abort);
     child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds,
@@ -1308,7 +1268,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
     bdrv_attach_child(parent_a, a, "PA-A",
                       by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
                       BDRV_CHILD_DATA, &error_abort);
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
     g_assert_cmpint(parent_a->refcnt, ==, 1);
     g_assert_cmpint(parent_b->refcnt, ==, 1);
@@ -1401,9 +1361,7 @@ static void test_append_to_drained(void)
     g_assert_cmpint(base_s->drain_count, ==, 1);
     g_assert_cmpint(base->in_flight, ==, 0);
 
-    aio_context_acquire(qemu_get_aio_context());
     bdrv_append(overlay, base, &error_abort);
-    aio_context_release(qemu_get_aio_context());
 
     g_assert_cmpint(base->in_flight, ==, 0);
     g_assert_cmpint(overlay->in_flight, ==, 0);
@@ -1438,16 +1396,11 @@ static void test_set_aio_context(void)
 
     bdrv_drained_begin(bs);
     bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort);
-
-    aio_context_acquire(ctx_a);
     bdrv_drained_end(bs);
 
     bdrv_drained_begin(bs);
     bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort);
-    aio_context_release(ctx_a);
-    aio_context_acquire(ctx_b);
     bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort);
-    aio_context_release(ctx_b);
     bdrv_drained_end(bs);
 
     bdrv_unref(bs);
@@ -1727,7 +1680,7 @@ static void test_drop_intermediate_poll(void)
      * Establish the chain last, so the chain links are the first
      * elements in the BDS.parents lists
      */
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     for (i = 0; i < 3; i++) {
         if (i) {
             /* Takes the reference to chain[i - 1] */
@@ -1735,7 +1688,7 @@ static void test_drop_intermediate_poll(void)
                               &chain_child_class, BDRV_CHILD_COW, &error_abort);
         }
     }
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
     job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
                            0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);
@@ -1982,10 +1935,10 @@ static void do_test_replace_child_mid_drain(int old_drain_count,
     new_child_bs->total_sectors = 1;
 
     bdrv_ref(old_child_bs);
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds,
                       BDRV_CHILD_COW, &error_abort);
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
     parent_s->setup_completed = true;
 
     for (i = 0; i < old_drain_count; i++) {
@@ -2016,9 +1969,9 @@ static void do_test_replace_child_mid_drain(int old_drain_count,
     g_assert(parent_bs->quiesce_counter == old_drain_count);
     bdrv_drained_begin(old_child_bs);
     bdrv_drained_begin(new_child_bs);
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     bdrv_replace_node(old_child_bs, new_child_bs, &error_abort);
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
     bdrv_drained_end(new_child_bs);
     bdrv_drained_end(old_child_bs);
     g_assert(parent_bs->quiesce_counter == new_drain_count);
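
The bdrv_graph_wrlock()/bdrv_graph_wrunlock() calls above lose their
BlockDriverState argument: the parameter appears to have existed only so
that node's AioContext lock could be dropped while the function polled,
which is moot now that the lock is gone. The resulting pattern, as used
throughout this file:

    bdrv_graph_wrlock();
    bdrv_attach_child(parent, child, "child", &child_of_bds,
                      BDRV_CHILD_DATA, &error_abort);
    bdrv_graph_wrunlock();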
diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c
index 074adcbb93..cafc023db4 100644
--- a/tests/unit/test-bdrv-graph-mod.c
+++ b/tests/unit/test-bdrv-graph-mod.c
@@ -137,15 +137,13 @@ static void test_update_perm_tree(void)
 
     blk_insert_bs(root, bs, &error_abort);
 
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     bdrv_attach_child(filter, bs, "child", &child_of_bds,
                       BDRV_CHILD_DATA, &error_abort);
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
-    aio_context_acquire(qemu_get_aio_context());
     ret = bdrv_append(filter, bs, NULL);
     g_assert_cmpint(ret, <, 0);
-    aio_context_release(qemu_get_aio_context());
 
     bdrv_unref(filter);
     blk_unref(root);
@@ -206,14 +204,12 @@ static void test_should_update_child(void)
 
     bdrv_set_backing_hd(target, bs, &error_abort);
 
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     g_assert(target->backing->bs == bs);
     bdrv_attach_child(filter, target, "target", &child_of_bds,
                       BDRV_CHILD_DATA, &error_abort);
-    bdrv_graph_wrunlock(NULL);
-    aio_context_acquire(qemu_get_aio_context());
+    bdrv_graph_wrunlock();
     bdrv_append(filter, bs, &error_abort);
-    aio_context_release(qemu_get_aio_context());
 
     bdrv_graph_rdlock_main_loop();
     g_assert(target->backing->bs == bs);
@@ -248,7 +244,7 @@ static void test_parallel_exclusive_write(void)
     bdrv_ref(base);
     bdrv_ref(fl1);
 
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     bdrv_attach_child(top, fl1, "backing", &child_of_bds,
                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                       &error_abort);
@@ -260,7 +256,7 @@ static void test_parallel_exclusive_write(void)
                       &error_abort);
 
     bdrv_replace_node(fl1, fl2, &error_abort);
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
     bdrv_drained_end(fl2);
     bdrv_drained_end(fl1);
@@ -367,7 +363,7 @@ static void test_parallel_perm_update(void)
      */
     bdrv_ref(base);
 
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     bdrv_attach_child(top, ws, "file", &child_of_bds, BDRV_CHILD_DATA,
                       &error_abort);
     c_fl1 = bdrv_attach_child(ws, fl1, "first", &child_of_bds,
@@ -380,7 +376,7 @@ static void test_parallel_perm_update(void)
     bdrv_attach_child(fl2, base, "backing", &child_of_bds,
                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                       &error_abort);
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
     /* Select fl1 as first child to be active */
     s->selected = c_fl1;
@@ -434,15 +430,13 @@ static void test_append_greedy_filter(void)
     BlockDriverState *base = no_perm_node("base");
     BlockDriverState *fl = exclusive_writer_node("fl1");
 
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     bdrv_attach_child(top, base, "backing", &child_of_bds,
                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                       &error_abort);
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
-    aio_context_acquire(qemu_get_aio_context());
     bdrv_append(fl, base, &error_abort);
-    aio_context_release(qemu_get_aio_context());
     bdrv_unref(fl);
     bdrv_unref(top);
 }
diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c
index 9b15d2768c..3766d5de6b 100644
--- a/tests/unit/test-block-iothread.c
+++ b/tests/unit/test-block-iothread.c
@@ -483,7 +483,6 @@ static void test_sync_op(const void *opaque)
     bdrv_graph_rdunlock_main_loop();
 
     blk_set_aio_context(blk, ctx, &error_abort);
-    aio_context_acquire(ctx);
     if (t->fn) {
         t->fn(c);
     }
@@ -491,7 +490,6 @@ static void test_sync_op(const void *opaque)
         t->blkfn(blk);
     }
     blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
-    aio_context_release(ctx);
 
     bdrv_unref(bs);
     blk_unref(blk);
@@ -576,9 +574,7 @@ static void test_attach_blockjob(void)
         aio_poll(qemu_get_aio_context(), false);
     }
 
-    aio_context_acquire(ctx);
     blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
-    aio_context_release(ctx);
 
     tjob->n = 0;
     while (tjob->n == 0) {
@@ -595,9 +591,7 @@ static void test_attach_blockjob(void)
     WITH_JOB_LOCK_GUARD() {
         job_complete_sync_locked(&tjob->common.job, &error_abort);
     }
-    aio_context_acquire(ctx);
     blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
-    aio_context_release(ctx);
 
     bdrv_unref(bs);
     blk_unref(blk);
@@ -654,9 +648,7 @@ static void test_propagate_basic(void)
 
     /* Switch the AioContext back */
     main_ctx = qemu_get_aio_context();
-    aio_context_acquire(ctx);
     blk_set_aio_context(blk, main_ctx, &error_abort);
-    aio_context_release(ctx);
     g_assert(blk_get_aio_context(blk) == main_ctx);
     g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
@@ -732,9 +724,7 @@ static void test_propagate_diamond(void)
 
     /* Switch the AioContext back */
     main_ctx = qemu_get_aio_context();
-    aio_context_acquire(ctx);
     blk_set_aio_context(blk, main_ctx, &error_abort);
-    aio_context_release(ctx);
     g_assert(blk_get_aio_context(blk) == main_ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
     g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
@@ -764,13 +754,11 @@ static void test_propagate_mirror(void)
                                   &error_abort);
 
     /* Start a mirror job */
-    aio_context_acquire(main_ctx);
     mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
                  MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
                  BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                  false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                  &error_abort);
-    aio_context_release(main_ctx);
 
     WITH_JOB_LOCK_GUARD() {
         job = job_get_locked("job0");
@@ -785,9 +773,7 @@ static void test_propagate_mirror(void)
     g_assert(job->aio_context == ctx);
 
     /* Change the AioContext of target */
-    aio_context_acquire(ctx);
     bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
-    aio_context_release(ctx);
     g_assert(bdrv_get_aio_context(src) == main_ctx);
     g_assert(bdrv_get_aio_context(target) == main_ctx);
     g_assert(bdrv_get_aio_context(filter) == main_ctx);
@@ -805,10 +791,8 @@ static void test_propagate_mirror(void)
     g_assert(bdrv_get_aio_context(filter) == main_ctx);
 
     /* ...unless we explicitly allow it */
-    aio_context_acquire(ctx);
     blk_set_allow_aio_context_change(blk, true);
     bdrv_try_change_aio_context(target, ctx, NULL, &error_abort);
-    aio_context_release(ctx);
 
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(src) == ctx);
@@ -817,10 +801,8 @@ static void test_propagate_mirror(void)
 
     job_cancel_sync_all();
 
-    aio_context_acquire(ctx);
     blk_set_aio_context(blk, main_ctx, &error_abort);
     bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
-    aio_context_release(ctx);
 
     blk_unref(blk);
     bdrv_unref(src);
@@ -836,7 +818,6 @@ static void test_attach_second_node(void)
     BlockDriverState *bs, *filter;
     QDict *options;
 
-    aio_context_acquire(main_ctx);
     blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
     bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
     blk_insert_bs(blk, bs, &error_abort);
@@ -846,15 +827,12 @@ static void test_attach_second_node(void)
     qdict_put_str(options, "file", "base");
 
     filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
-    aio_context_release(main_ctx);
 
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs) == ctx);
     g_assert(bdrv_get_aio_context(filter) == ctx);
 
-    aio_context_acquire(ctx);
     blk_set_aio_context(blk, main_ctx, &error_abort);
-    aio_context_release(ctx);
     g_assert(blk_get_aio_context(blk) == main_ctx);
     g_assert(bdrv_get_aio_context(bs) == main_ctx);
     g_assert(bdrv_get_aio_context(filter) == main_ctx);
@@ -868,11 +846,9 @@ static void test_attach_preserve_blk_ctx(void)
 {
     IOThread *iothread = iothread_new();
     AioContext *ctx = iothread_get_aio_context(iothread);
-    AioContext *main_ctx = qemu_get_aio_context();
     BlockBackend *blk;
     BlockDriverState *bs;
 
-    aio_context_acquire(main_ctx);
     blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
     bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
     bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
@@ -881,25 +857,18 @@ static void test_attach_preserve_blk_ctx(void)
     blk_insert_bs(blk, bs, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs) == ctx);
-    aio_context_release(main_ctx);
 
     /* Remove the node again */
-    aio_context_acquire(ctx);
     blk_remove_bs(blk);
-    aio_context_release(ctx);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());
 
     /* Re-attach the node */
-    aio_context_acquire(main_ctx);
     blk_insert_bs(blk, bs, &error_abort);
-    aio_context_release(main_ctx);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs) == ctx);
 
-    aio_context_acquire(ctx);
     blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
-    aio_context_release(ctx);
     bdrv_unref(bs);
     blk_unref(blk);
 }
diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c
index a130f6fefb..fe3e0d2d38 100644
--- a/tests/unit/test-blockjob.c
+++ b/tests/unit/test-blockjob.c
@@ -228,7 +228,6 @@ static void cancel_common(CancelJob *s)
     BlockJob *job = &s->common;
     BlockBackend *blk = s->blk;
     JobStatus sts = job->job.status;
-    AioContext *ctx = job->job.aio_context;
 
     job_cancel_sync(&job->job, true);
     WITH_JOB_LOCK_GUARD() {
@@ -240,9 +239,7 @@ static void cancel_common(CancelJob *s)
         job_unref_locked(&job->job);
     }
 
-    aio_context_acquire(ctx);
     destroy_blk(blk);
-    aio_context_release(ctx);
 
 }
 
@@ -391,132 +388,6 @@ static void test_cancel_concluded(void)
     cancel_common(s);
 }
 
-/* (See test_yielding_driver for the job description) */
-typedef struct YieldingJob {
-    BlockJob common;
-    bool should_complete;
-} YieldingJob;
-
-static void yielding_job_complete(Job *job, Error **errp)
-{
-    YieldingJob *s = container_of(job, YieldingJob, common.job);
-    s->should_complete = true;
-    job_enter(job);
-}
-
-static int coroutine_fn yielding_job_run(Job *job, Error **errp)
-{
-    YieldingJob *s = container_of(job, YieldingJob, common.job);
-
-    job_transition_to_ready(job);
-
-    while (!s->should_complete) {
-        job_yield(job);
-    }
-
-    return 0;
-}
-
-/*
- * This job transitions immediately to the READY state, and then
- * yields until it is to complete.
- */
-static const BlockJobDriver test_yielding_driver = {
-    .job_driver = {
-        .instance_size  = sizeof(YieldingJob),
-        .free           = block_job_free,
-        .user_resume    = block_job_user_resume,
-        .run            = yielding_job_run,
-        .complete       = yielding_job_complete,
-    },
-};
-
-/*
- * Test that job_complete_locked() works even on jobs that are in a paused
- * state (i.e., STANDBY).
- *
- * To do this, run YieldingJob in an IO thread, get it into the READY
- * state, then have a drained section.  Before ending the section,
- * acquire the context so the job will not be entered and will thus
- * remain on STANDBY.
- *
- * job_complete_locked() should still work without error.
- *
- * Note that on the QMP interface, it is impossible to lock an IO
- * thread before a drained section ends.  In practice, the
- * bdrv_drain_all_end() and the aio_context_acquire() will be
- * reversed.  However, that makes for worse reproducibility here:
- * Sometimes, the job would no longer be in STANDBY then but already
- * be started.  We cannot prevent that, because the IO thread runs
- * concurrently.  We can only prevent it by taking the lock before
- * ending the drained section, so we do that.
- *
- * (You can reverse the order of operations and most of the time the
- * test will pass, but sometimes the assert(status == STANDBY) will
- * fail.)
- */
-static void test_complete_in_standby(void)
-{
-    BlockBackend *blk;
-    IOThread *iothread;
-    AioContext *ctx;
-    Job *job;
-    BlockJob *bjob;
-
-    /* Create a test drive, move it to an IO thread */
-    blk = create_blk(NULL);
-    iothread = iothread_new();
-
-    ctx = iothread_get_aio_context(iothread);
-    blk_set_aio_context(blk, ctx, &error_abort);
-
-    /* Create our test job */
-    bjob = mk_job(blk, "job", &test_yielding_driver, true,
-                  JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
-    job = &bjob->job;
-    assert_job_status_is(job, JOB_STATUS_CREATED);
-
-    /* Wait for the job to become READY */
-    job_start(job);
-    /*
-     * Here we are waiting for the status to change, so don't bother
-     * protecting the read every time.
-     */
-    AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY);
-
-    /* Begin the drained section, pausing the job */
-    bdrv_drain_all_begin();
-    assert_job_status_is(job, JOB_STATUS_STANDBY);
-
-    /* Lock the IO thread to prevent the job from being run */
-    aio_context_acquire(ctx);
-    /* This will schedule the job to resume it */
-    bdrv_drain_all_end();
-    aio_context_release(ctx);
-
-    WITH_JOB_LOCK_GUARD() {
-        /* But the job cannot run, so it will remain on standby */
-        assert(job->status == JOB_STATUS_STANDBY);
-
-        /* Even though the job is on standby, this should work */
-        job_complete_locked(job, &error_abort);
-
-        /* The test is done now, clean up. */
-        job_finish_sync_locked(job, NULL, &error_abort);
-        assert(job->status == JOB_STATUS_PENDING);
-
-        job_finalize_locked(job, &error_abort);
-        assert(job->status == JOB_STATUS_CONCLUDED);
-
-        job_dismiss_locked(&job, &error_abort);
-    }
-
-    aio_context_acquire(ctx);
-    destroy_blk(blk);
-    aio_context_release(ctx);
-    iothread_join(iothread);
-}
-
 int main(int argc, char **argv)
 {
     qemu_init_main_loop(&error_abort);
@@ -531,13 +402,5 @@ int main(int argc, char **argv)
     g_test_add_func("/blockjob/cancel/standby", test_cancel_standby);
     g_test_add_func("/blockjob/cancel/pending", test_cancel_pending);
     g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded);
-
-    /*
-     * This test is flaky and sometimes fails in CI and otherwise:
-     * don't run unless user opts in via environment variable.
-     */
-    if (getenv("QEMU_TEST_FLAKY_TESTS")) {
-        g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby);
-    }
     return g_test_run();
 }
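
test_complete_in_standby() is dropped along with the API: it kept the job
in STANDBY precisely by holding the AioContext lock across
bdrv_drain_all_end(), a scenario that can no longer be constructed (and
the test was already flaky enough to be opt-in). Job state is now guarded
by the job mutex alone, e.g. (a sketch, mirroring the guard already used
in this file):

    WITH_JOB_LOCK_GUARD() {
        /* job->status is protected by the job lock, not an AioContext lock */
        assert(job->status == JOB_STATUS_STANDBY);
    }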
diff --git a/tests/unit/test-replication.c b/tests/unit/test-replication.c
index afff908d77..5d2003b8ce 100644
--- a/tests/unit/test-replication.c
+++ b/tests/unit/test-replication.c
@@ -199,17 +199,13 @@ static BlockBackend *start_primary(void)
 static void teardown_primary(void)
 {
     BlockBackend *blk;
-    AioContext *ctx;
 
     /* remove P_ID */
     blk = blk_by_name(P_ID);
     assert(blk);
 
-    ctx = blk_get_aio_context(blk);
-    aio_context_acquire(ctx);
     monitor_remove_blk(blk);
     blk_unref(blk);
-    aio_context_release(ctx);
 }
 
 static void test_primary_read(void)
@@ -345,27 +341,20 @@ static void teardown_secondary(void)
 {
     /* only need to destroy two BBs */
     BlockBackend *blk;
-    AioContext *ctx;
 
     /* remove S_LOCAL_DISK_ID */
     blk = blk_by_name(S_LOCAL_DISK_ID);
     assert(blk);
 
-    ctx = blk_get_aio_context(blk);
-    aio_context_acquire(ctx);
     monitor_remove_blk(blk);
     blk_unref(blk);
-    aio_context_release(ctx);
 
     /* remove S_ID */
     blk = blk_by_name(S_ID);
     assert(blk);
 
-    ctx = blk_get_aio_context(blk);
-    aio_context_acquire(ctx);
     monitor_remove_blk(blk);
     blk_unref(blk);
-    aio_context_release(ctx);
 }
 
 static void test_secondary_read(void)