Diffstat (limited to 'tests/unit')
-rw-r--r--  tests/unit/meson.build             |   8
-rw-r--r--  tests/unit/test-aio.c              |  67
-rw-r--r--  tests/unit/test-bdrv-drain.c       |  91
-rw-r--r--  tests/unit/test-bdrv-graph-mod.c   |  26
-rw-r--r--  tests/unit/test-block-iothread.c   |  31
-rw-r--r--  tests/unit/test-blockjob.c         | 137
-rw-r--r--  tests/unit/test-io-task.c          |   2
-rw-r--r--  tests/unit/test-qmp-event.c        | 108
-rw-r--r--  tests/unit/test-replication.c      |  11
-rw-r--r--  tests/unit/test-vmstate.c          |  36
10 files changed, 87 insertions, 430 deletions
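
Nearly all of the deletions below follow one pattern: the tests no longer take the AioContext lock, so aio_context_acquire()/aio_context_release() pairs around block-layer calls are dropped, and the graph write lock no longer takes a BlockDriverState argument. The sketch below condenses that recurring shape; it is assembled from calls that appear in this diff and is not a compilable excerpt on its own.

    /*
     * Before: tests bounced the AioContext lock around block-layer calls
     * and passed a BlockDriverState to the graph write lock.
     */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_graph_wrlock(NULL);
    bdrv_attach_child(bs, child_bs, "child", &child_of_bds,
                      BDRV_CHILD_DATA, &error_abort);
    bdrv_graph_wrunlock(NULL);

    /*
     * After: with the AioContext lock gone, the calls are made directly,
     * and bdrv_graph_wrlock()/bdrv_graph_wrunlock() take no argument.
     */
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    bdrv_graph_wrlock();
    bdrv_attach_child(bs, child_bs, "child", &child_of_bds,
                      BDRV_CHILD_DATA, &error_abort);
    bdrv_graph_wrunlock();

Tests whose only purpose was to exercise the removed lock (test_acquire in test-aio.c, test_complete_in_standby in test-blockjob.c) are deleted outright rather than converted.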
diff --git a/tests/unit/meson.build b/tests/unit/meson.build index a05d471090..69f9c05050 100644 --- a/tests/unit/meson.build +++ b/tests/unit/meson.build @@ -100,7 +100,7 @@ if have_block } if gnutls.found() and \ tasn1.found() and \ - targetos != 'windows' + host_os != 'windows' tests += { 'test-crypto-tlscredsx509': ['crypto-tls-x509-helpers.c', 'pkix_asn1_tab.c', tasn1, crypto, gnutls], @@ -115,7 +115,7 @@ if have_block if xts == 'private' tests += {'test-crypto-xts': [crypto, io]} endif - if targetos != 'windows' + if host_os != 'windows' tests += { 'test-image-locking': [testblock], 'test-nested-aio-poll': [testblock], @@ -150,7 +150,7 @@ if have_system # are not runnable under TSan due to a known issue. # https://github.com/google/sanitizers/issues/1116 if not get_option('tsan') - if targetos != 'windows' + if host_os != 'windows' tests += { 'test-char': ['socket-helpers.c', qom, io, chardev] } @@ -162,7 +162,7 @@ if have_system endif endif -if have_ga and targetos == 'linux' +if have_ga and host_os == 'linux' tests += {'test-qga': ['../qtest/libqmp.c']} test_deps += {'test-qga': qga} endif diff --git a/tests/unit/test-aio.c b/tests/unit/test-aio.c index 337b6e4ea7..e77d86be87 100644 --- a/tests/unit/test-aio.c +++ b/tests/unit/test-aio.c @@ -100,76 +100,12 @@ static void event_ready_cb(EventNotifier *e) /* Tests using aio_*. */ -typedef struct { - QemuMutex start_lock; - EventNotifier notifier; - bool thread_acquired; -} AcquireTestData; - -static void *test_acquire_thread(void *opaque) -{ - AcquireTestData *data = opaque; - - /* Wait for other thread to let us start */ - qemu_mutex_lock(&data->start_lock); - qemu_mutex_unlock(&data->start_lock); - - /* event_notifier_set might be called either before or after - * the main thread's call to poll(). The test case's outcome - * should be the same in either case. 
- */ - event_notifier_set(&data->notifier); - aio_context_acquire(ctx); - aio_context_release(ctx); - - data->thread_acquired = true; /* success, we got here */ - - return NULL; -} - static void set_event_notifier(AioContext *nctx, EventNotifier *notifier, EventNotifierHandler *handler) { aio_set_event_notifier(nctx, notifier, handler, NULL, NULL); } -static void dummy_notifier_read(EventNotifier *n) -{ - event_notifier_test_and_clear(n); -} - -static void test_acquire(void) -{ - QemuThread thread; - AcquireTestData data; - - /* Dummy event notifier ensures aio_poll() will block */ - event_notifier_init(&data.notifier, false); - set_event_notifier(ctx, &data.notifier, dummy_notifier_read); - g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */ - - qemu_mutex_init(&data.start_lock); - qemu_mutex_lock(&data.start_lock); - data.thread_acquired = false; - - qemu_thread_create(&thread, "test_acquire_thread", - test_acquire_thread, - &data, QEMU_THREAD_JOINABLE); - - /* Block in aio_poll(), let other thread kick us and acquire context */ - aio_context_acquire(ctx); - qemu_mutex_unlock(&data.start_lock); /* let the thread run */ - g_assert(aio_poll(ctx, true)); - g_assert(!data.thread_acquired); - aio_context_release(ctx); - - qemu_thread_join(&thread); - set_event_notifier(ctx, &data.notifier, NULL); - event_notifier_cleanup(&data.notifier); - - g_assert(data.thread_acquired); -} - static void test_bh_schedule(void) { BHTestData data = { .n = 0 }; @@ -879,7 +815,7 @@ static void test_worker_thread_co_enter(void) qemu_thread_get_self(&this_thread); co = qemu_coroutine_create(co_check_current_thread, &this_thread); - qemu_thread_create(&worker_thread, "test_acquire_thread", + qemu_thread_create(&worker_thread, "test_aio_co_enter", test_aio_co_enter, co, QEMU_THREAD_JOINABLE); @@ -899,7 +835,6 @@ int main(int argc, char **argv) while (g_main_context_iteration(NULL, false)); g_test_init(&argc, &argv, NULL); - g_test_add_func("/aio/acquire", test_acquire); g_test_add_func("/aio/bh/schedule", test_bh_schedule); g_test_add_func("/aio/bh/schedule10", test_bh_schedule10); g_test_add_func("/aio/bh/cancel", test_bh_cancel); diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c index 704d1a3f36..17830a69c1 100644 --- a/tests/unit/test-bdrv-drain.c +++ b/tests/unit/test-bdrv-drain.c @@ -179,13 +179,7 @@ static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs) static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs) { - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_acquire(bdrv_get_aio_context(bs)); - } do_drain_begin(drain_type, bs); - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_release(bdrv_get_aio_context(bs)); - } } static BlockBackend * no_coroutine_fn test_setup(void) @@ -209,13 +203,7 @@ static BlockBackend * no_coroutine_fn test_setup(void) static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs) { - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_acquire(bdrv_get_aio_context(bs)); - } do_drain_end(drain_type, bs); - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_release(bdrv_get_aio_context(bs)); - } } /* @@ -520,12 +508,8 @@ static void test_iothread_main_thread_bh(void *opaque) { struct test_iothread_data *data = opaque; - /* Test that the AioContext is not yet locked in a random BH that is - * executed during drain, otherwise this would deadlock. 
*/ - aio_context_acquire(bdrv_get_aio_context(data->bs)); bdrv_flush(data->bs); bdrv_dec_in_flight(data->bs); /* incremented by test_iothread_common() */ - aio_context_release(bdrv_get_aio_context(data->bs)); } /* @@ -567,7 +551,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) blk_set_disable_request_queuing(blk, true); blk_set_aio_context(blk, ctx_a, &error_abort); - aio_context_acquire(ctx_a); s->bh_indirection_ctx = ctx_b; @@ -582,8 +565,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) g_assert(acb != NULL); g_assert_cmpint(aio_ret, ==, -EINPROGRESS); - aio_context_release(ctx_a); - data = (struct test_iothread_data) { .bs = bs, .drain_type = drain_type, @@ -592,10 +573,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) switch (drain_thread) { case 0: - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_acquire(ctx_a); - } - /* * Increment in_flight so that do_drain_begin() waits for * test_iothread_main_thread_bh(). This prevents the race between @@ -613,20 +590,10 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) do_drain_begin(drain_type, bs); g_assert_cmpint(bs->in_flight, ==, 0); - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_release(ctx_a); - } qemu_event_wait(&done_event); - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_acquire(ctx_a); - } g_assert_cmpint(aio_ret, ==, 0); do_drain_end(drain_type, bs); - - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_release(ctx_a); - } break; case 1: co = qemu_coroutine_create(test_iothread_drain_co_entry, &data); @@ -637,9 +604,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) g_assert_not_reached(); } - aio_context_acquire(ctx_a); blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx_a); bdrv_unref(bs); blk_unref(blk); @@ -757,7 +722,6 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, BlockJob *job; TestBlockJob *tjob; IOThread *iothread = NULL; - AioContext *ctx; int ret; src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR, @@ -787,11 +751,11 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, } if (use_iothread) { + AioContext *ctx; + iothread = iothread_new(); ctx = iothread_get_aio_context(iothread); blk_set_aio_context(blk_src, ctx, &error_abort); - } else { - ctx = qemu_get_aio_context(); } target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR, @@ -800,16 +764,15 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, blk_insert_bs(blk_target, target, &error_abort); blk_set_allow_aio_context_change(blk_target, true); - aio_context_acquire(ctx); tjob = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); tjob->bs = src; job = &tjob->common; - bdrv_graph_wrlock(target); + bdrv_graph_wrlock(); block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort); - bdrv_graph_wrunlock(target); + bdrv_graph_wrunlock(); switch (result) { case TEST_JOB_SUCCESS: @@ -821,7 +784,6 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, tjob->prepare_ret = -EIO; break; } - aio_context_release(ctx); job_start(&job->job); @@ -912,12 +874,10 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, } g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 
0 : -EIO)); - aio_context_acquire(ctx); if (use_iothread) { blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort); assert(blk_get_aio_context(blk_target) == qemu_get_aio_context()); } - aio_context_release(ctx); blk_unref(blk_src); blk_unref(blk_target); @@ -991,11 +951,11 @@ static void bdrv_test_top_close(BlockDriverState *bs) { BdrvChild *c, *next_c; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) { bdrv_unref_child(bs, c); } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); } static int coroutine_fn GRAPH_RDLOCK @@ -1085,10 +1045,10 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete, null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, &error_abort); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); /* This child will be the one to pass to requests through to, and * it will stall until a drain occurs */ @@ -1096,21 +1056,21 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete, &error_abort); child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS; /* Takes our reference to child_bs */ - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child", &child_of_bds, BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); /* This child is just there to be deleted * (for detach_instead_of_delete == true) */ null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, &error_abort); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); blk_insert_bs(blk, bs, &error_abort); @@ -1193,14 +1153,14 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque) bdrv_dec_in_flight(data->child_b->bs); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(data->parent_b, data->child_b); bdrv_ref(data->c); data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); } static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret) @@ -1298,7 +1258,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb) /* Set child relationships */ bdrv_ref(b); bdrv_ref(a); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds, BDRV_CHILD_DATA, &error_abort); child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds, @@ -1308,7 +1268,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb) bdrv_attach_child(parent_a, a, "PA-A", by_parent_cb ? 
&child_of_bds : &detach_by_driver_cb_class, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); g_assert_cmpint(parent_a->refcnt, ==, 1); g_assert_cmpint(parent_b->refcnt, ==, 1); @@ -1401,9 +1361,7 @@ static void test_append_to_drained(void) g_assert_cmpint(base_s->drain_count, ==, 1); g_assert_cmpint(base->in_flight, ==, 0); - aio_context_acquire(qemu_get_aio_context()); bdrv_append(overlay, base, &error_abort); - aio_context_release(qemu_get_aio_context()); g_assert_cmpint(base->in_flight, ==, 0); g_assert_cmpint(overlay->in_flight, ==, 0); @@ -1438,16 +1396,11 @@ static void test_set_aio_context(void) bdrv_drained_begin(bs); bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort); - - aio_context_acquire(ctx_a); bdrv_drained_end(bs); bdrv_drained_begin(bs); bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort); - aio_context_release(ctx_a); - aio_context_acquire(ctx_b); bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort); - aio_context_release(ctx_b); bdrv_drained_end(bs); bdrv_unref(bs); @@ -1727,7 +1680,7 @@ static void test_drop_intermediate_poll(void) * Establish the chain last, so the chain links are the first * elements in the BDS.parents lists */ - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); for (i = 0; i < 3; i++) { if (i) { /* Takes the reference to chain[i - 1] */ @@ -1735,7 +1688,7 @@ static void test_drop_intermediate_poll(void) &chain_child_class, BDRV_CHILD_COW, &error_abort); } } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); job = block_job_create("job", &test_simple_job_driver, NULL, job_node, 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); @@ -1982,10 +1935,10 @@ static void do_test_replace_child_mid_drain(int old_drain_count, new_child_bs->total_sectors = 1; bdrv_ref(old_child_bs); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds, BDRV_CHILD_COW, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); parent_s->setup_completed = true; for (i = 0; i < old_drain_count; i++) { @@ -2016,9 +1969,9 @@ static void do_test_replace_child_mid_drain(int old_drain_count, g_assert(parent_bs->quiesce_counter == old_drain_count); bdrv_drained_begin(old_child_bs); bdrv_drained_begin(new_child_bs); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_replace_node(old_child_bs, new_child_bs, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_drained_end(new_child_bs); bdrv_drained_end(old_child_bs); g_assert(parent_bs->quiesce_counter == new_drain_count); diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c index 074adcbb93..cafc023db4 100644 --- a/tests/unit/test-bdrv-graph-mod.c +++ b/tests/unit/test-bdrv-graph-mod.c @@ -137,15 +137,13 @@ static void test_update_perm_tree(void) blk_insert_bs(root, bs, &error_abort); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(filter, bs, "child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); - aio_context_acquire(qemu_get_aio_context()); ret = bdrv_append(filter, bs, NULL); g_assert_cmpint(ret, <, 0); - aio_context_release(qemu_get_aio_context()); bdrv_unref(filter); blk_unref(root); @@ -206,14 +204,12 @@ static void test_should_update_child(void) bdrv_set_backing_hd(target, bs, &error_abort); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); g_assert(target->backing->bs == bs); bdrv_attach_child(filter, target, "target", &child_of_bds, BDRV_CHILD_DATA, 
&error_abort); - bdrv_graph_wrunlock(NULL); - aio_context_acquire(qemu_get_aio_context()); + bdrv_graph_wrunlock(); bdrv_append(filter, bs, &error_abort); - aio_context_release(qemu_get_aio_context()); bdrv_graph_rdlock_main_loop(); g_assert(target->backing->bs == bs); @@ -248,7 +244,7 @@ static void test_parallel_exclusive_write(void) bdrv_ref(base); bdrv_ref(fl1); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(top, fl1, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); @@ -260,7 +256,7 @@ static void test_parallel_exclusive_write(void) &error_abort); bdrv_replace_node(fl1, fl2, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_drained_end(fl2); bdrv_drained_end(fl1); @@ -367,7 +363,7 @@ static void test_parallel_perm_update(void) */ bdrv_ref(base); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(top, ws, "file", &child_of_bds, BDRV_CHILD_DATA, &error_abort); c_fl1 = bdrv_attach_child(ws, fl1, "first", &child_of_bds, @@ -380,7 +376,7 @@ static void test_parallel_perm_update(void) bdrv_attach_child(fl2, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); /* Select fl1 as first child to be active */ s->selected = c_fl1; @@ -434,15 +430,13 @@ static void test_append_greedy_filter(void) BlockDriverState *base = no_perm_node("base"); BlockDriverState *fl = exclusive_writer_node("fl1"); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(top, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); - aio_context_acquire(qemu_get_aio_context()); bdrv_append(fl, base, &error_abort); - aio_context_release(qemu_get_aio_context()); bdrv_unref(fl); bdrv_unref(top); } diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c index 9b15d2768c..3766d5de6b 100644 --- a/tests/unit/test-block-iothread.c +++ b/tests/unit/test-block-iothread.c @@ -483,7 +483,6 @@ static void test_sync_op(const void *opaque) bdrv_graph_rdunlock_main_loop(); blk_set_aio_context(blk, ctx, &error_abort); - aio_context_acquire(ctx); if (t->fn) { t->fn(c); } @@ -491,7 +490,6 @@ static void test_sync_op(const void *opaque) t->blkfn(blk); } blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx); bdrv_unref(bs); blk_unref(blk); @@ -576,9 +574,7 @@ static void test_attach_blockjob(void) aio_poll(qemu_get_aio_context(), false); } - aio_context_acquire(ctx); blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx); tjob->n = 0; while (tjob->n == 0) { @@ -595,9 +591,7 @@ static void test_attach_blockjob(void) WITH_JOB_LOCK_GUARD() { job_complete_sync_locked(&tjob->common.job, &error_abort); } - aio_context_acquire(ctx); blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx); bdrv_unref(bs); blk_unref(blk); @@ -654,9 +648,7 @@ static void test_propagate_basic(void) /* Switch the AioContext back */ main_ctx = qemu_get_aio_context(); - aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == main_ctx); g_assert(bdrv_get_aio_context(bs_a) == main_ctx); g_assert(bdrv_get_aio_context(bs_verify) == main_ctx); @@ -732,9 +724,7 @@ static void test_propagate_diamond(void) /* Switch the AioContext back */ main_ctx = qemu_get_aio_context(); - 
aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == main_ctx); g_assert(bdrv_get_aio_context(bs_verify) == main_ctx); g_assert(bdrv_get_aio_context(bs_a) == main_ctx); @@ -764,13 +754,11 @@ static void test_propagate_mirror(void) &error_abort); /* Start a mirror job */ - aio_context_acquire(main_ctx); mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0, MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT, false, "filter_node", MIRROR_COPY_MODE_BACKGROUND, &error_abort); - aio_context_release(main_ctx); WITH_JOB_LOCK_GUARD() { job = job_get_locked("job0"); @@ -785,9 +773,7 @@ static void test_propagate_mirror(void) g_assert(job->aio_context == ctx); /* Change the AioContext of target */ - aio_context_acquire(ctx); bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort); - aio_context_release(ctx); g_assert(bdrv_get_aio_context(src) == main_ctx); g_assert(bdrv_get_aio_context(target) == main_ctx); g_assert(bdrv_get_aio_context(filter) == main_ctx); @@ -805,10 +791,8 @@ static void test_propagate_mirror(void) g_assert(bdrv_get_aio_context(filter) == main_ctx); /* ...unless we explicitly allow it */ - aio_context_acquire(ctx); blk_set_allow_aio_context_change(blk, true); bdrv_try_change_aio_context(target, ctx, NULL, &error_abort); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(src) == ctx); @@ -817,10 +801,8 @@ static void test_propagate_mirror(void) job_cancel_sync_all(); - aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort); - aio_context_release(ctx); blk_unref(blk); bdrv_unref(src); @@ -836,7 +818,6 @@ static void test_attach_second_node(void) BlockDriverState *bs, *filter; QDict *options; - aio_context_acquire(main_ctx); blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL); bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort); blk_insert_bs(blk, bs, &error_abort); @@ -846,15 +827,12 @@ static void test_attach_second_node(void) qdict_put_str(options, "file", "base"); filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort); - aio_context_release(main_ctx); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(bs) == ctx); g_assert(bdrv_get_aio_context(filter) == ctx); - aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == main_ctx); g_assert(bdrv_get_aio_context(bs) == main_ctx); g_assert(bdrv_get_aio_context(filter) == main_ctx); @@ -868,11 +846,9 @@ static void test_attach_preserve_blk_ctx(void) { IOThread *iothread = iothread_new(); AioContext *ctx = iothread_get_aio_context(iothread); - AioContext *main_ctx = qemu_get_aio_context(); BlockBackend *blk; BlockDriverState *bs; - aio_context_acquire(main_ctx); blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL); bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort); bs->total_sectors = 65536 / BDRV_SECTOR_SIZE; @@ -881,25 +857,18 @@ static void test_attach_preserve_blk_ctx(void) blk_insert_bs(blk, bs, &error_abort); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(bs) == ctx); - aio_context_release(main_ctx); /* Remove the node again */ - aio_context_acquire(ctx); blk_remove_bs(blk); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == ctx); 
g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context()); /* Re-attach the node */ - aio_context_acquire(main_ctx); blk_insert_bs(blk, bs, &error_abort); - aio_context_release(main_ctx); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(bs) == ctx); - aio_context_acquire(ctx); blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx); bdrv_unref(bs); blk_unref(blk); } diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c index a130f6fefb..fe3e0d2d38 100644 --- a/tests/unit/test-blockjob.c +++ b/tests/unit/test-blockjob.c @@ -228,7 +228,6 @@ static void cancel_common(CancelJob *s) BlockJob *job = &s->common; BlockBackend *blk = s->blk; JobStatus sts = job->job.status; - AioContext *ctx = job->job.aio_context; job_cancel_sync(&job->job, true); WITH_JOB_LOCK_GUARD() { @@ -240,9 +239,7 @@ static void cancel_common(CancelJob *s) job_unref_locked(&job->job); } - aio_context_acquire(ctx); destroy_blk(blk); - aio_context_release(ctx); } @@ -391,132 +388,6 @@ static void test_cancel_concluded(void) cancel_common(s); } -/* (See test_yielding_driver for the job description) */ -typedef struct YieldingJob { - BlockJob common; - bool should_complete; -} YieldingJob; - -static void yielding_job_complete(Job *job, Error **errp) -{ - YieldingJob *s = container_of(job, YieldingJob, common.job); - s->should_complete = true; - job_enter(job); -} - -static int coroutine_fn yielding_job_run(Job *job, Error **errp) -{ - YieldingJob *s = container_of(job, YieldingJob, common.job); - - job_transition_to_ready(job); - - while (!s->should_complete) { - job_yield(job); - } - - return 0; -} - -/* - * This job transitions immediately to the READY state, and then - * yields until it is to complete. - */ -static const BlockJobDriver test_yielding_driver = { - .job_driver = { - .instance_size = sizeof(YieldingJob), - .free = block_job_free, - .user_resume = block_job_user_resume, - .run = yielding_job_run, - .complete = yielding_job_complete, - }, -}; - -/* - * Test that job_complete_locked() works even on jobs that are in a paused - * state (i.e., STANDBY). - * - * To do this, run YieldingJob in an IO thread, get it into the READY - * state, then have a drained section. Before ending the section, - * acquire the context so the job will not be entered and will thus - * remain on STANDBY. - * - * job_complete_locked() should still work without error. - * - * Note that on the QMP interface, it is impossible to lock an IO - * thread before a drained section ends. In practice, the - * bdrv_drain_all_end() and the aio_context_acquire() will be - * reversed. However, that makes for worse reproducibility here: - * Sometimes, the job would no longer be in STANDBY then but already - * be started. We cannot prevent that, because the IO thread runs - * concurrently. We can only prevent it by taking the lock before - * ending the drained section, so we do that. - * - * (You can reverse the order of operations and most of the time the - * test will pass, but sometimes the assert(status == STANDBY) will - * fail.) 
- */ -static void test_complete_in_standby(void) -{ - BlockBackend *blk; - IOThread *iothread; - AioContext *ctx; - Job *job; - BlockJob *bjob; - - /* Create a test drive, move it to an IO thread */ - blk = create_blk(NULL); - iothread = iothread_new(); - - ctx = iothread_get_aio_context(iothread); - blk_set_aio_context(blk, ctx, &error_abort); - - /* Create our test job */ - bjob = mk_job(blk, "job", &test_yielding_driver, true, - JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS); - job = &bjob->job; - assert_job_status_is(job, JOB_STATUS_CREATED); - - /* Wait for the job to become READY */ - job_start(job); - /* - * Here we are waiting for the status to change, so don't bother - * protecting the read every time. - */ - AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY); - - /* Begin the drained section, pausing the job */ - bdrv_drain_all_begin(); - assert_job_status_is(job, JOB_STATUS_STANDBY); - - /* Lock the IO thread to prevent the job from being run */ - aio_context_acquire(ctx); - /* This will schedule the job to resume it */ - bdrv_drain_all_end(); - aio_context_release(ctx); - - WITH_JOB_LOCK_GUARD() { - /* But the job cannot run, so it will remain on standby */ - assert(job->status == JOB_STATUS_STANDBY); - - /* Even though the job is on standby, this should work */ - job_complete_locked(job, &error_abort); - - /* The test is done now, clean up. */ - job_finish_sync_locked(job, NULL, &error_abort); - assert(job->status == JOB_STATUS_PENDING); - - job_finalize_locked(job, &error_abort); - assert(job->status == JOB_STATUS_CONCLUDED); - - job_dismiss_locked(&job, &error_abort); - } - - aio_context_acquire(ctx); - destroy_blk(blk); - aio_context_release(ctx); - iothread_join(iothread); -} - int main(int argc, char **argv) { qemu_init_main_loop(&error_abort); @@ -531,13 +402,5 @@ int main(int argc, char **argv) g_test_add_func("/blockjob/cancel/standby", test_cancel_standby); g_test_add_func("/blockjob/cancel/pending", test_cancel_pending); g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded); - - /* - * This test is flaky and sometimes fails in CI and otherwise: - * don't run unless user opts in via environment variable. 
- */ - if (getenv("QEMU_TEST_FLAKY_TESTS")) { - g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby); - } return g_test_run(); } diff --git a/tests/unit/test-io-task.c b/tests/unit/test-io-task.c index 953a50ae66..115dba8970 100644 --- a/tests/unit/test-io-task.c +++ b/tests/unit/test-io-task.c @@ -25,7 +25,7 @@ #include "qapi/error.h" #include "qemu/module.h" -#define TYPE_DUMMY "qemu:dummy" +#define TYPE_DUMMY "qemu-dummy" typedef struct DummyObject DummyObject; typedef struct DummyObjectClass DummyObjectClass; diff --git a/tests/unit/test-qmp-event.c b/tests/unit/test-qmp-event.c index 3626d2372f..08e95a382b 100644 --- a/tests/unit/test-qmp-event.c +++ b/tests/unit/test-qmp-event.c @@ -24,19 +24,15 @@ #include "test-qapi-events.h" #include "test-qapi-emit-events.h" -typedef struct TestEventData { - QDict *expect; - bool emitted; -} TestEventData; - -TestEventData *test_event_data; -static GMutex test_event_lock; +static QDict *expected_event; void test_qapi_event_emit(test_QAPIEvent event, QDict *d) { QDict *t; int64_t s, ms; + g_assert(expected_event); + /* Verify that we have timestamp, then remove it to compare other fields */ t = qdict_get_qdict(d, "timestamp"); g_assert(t); @@ -52,71 +48,38 @@ void test_qapi_event_emit(test_QAPIEvent event, QDict *d) qdict_del(d, "timestamp"); - g_assert(qobject_is_equal(QOBJECT(d), QOBJECT(test_event_data->expect))); - test_event_data->emitted = true; -} - -static void event_prepare(TestEventData *data, - const void *unused) -{ - /* Global variable test_event_data was used to pass the expectation, so - test cases can't be executed at same time. */ - g_mutex_lock(&test_event_lock); - test_event_data = data; -} - -static void event_teardown(TestEventData *data, - const void *unused) -{ - test_event_data = NULL; - g_mutex_unlock(&test_event_lock); + g_assert(qobject_is_equal(QOBJECT(d), QOBJECT(expected_event))); + qobject_unref(expected_event); + expected_event = NULL; } -static void event_test_add(const char *testpath, - void (*test_func)(TestEventData *data, - const void *user_data)) +static void test_event_a(void) { - g_test_add(testpath, TestEventData, NULL, event_prepare, test_func, - event_teardown); -} - - -/* Test cases */ - -static void test_event_a(TestEventData *data, - const void *unused) -{ - data->expect = qdict_from_jsonf_nofail("{ 'event': 'EVENT_A' }"); + expected_event = qdict_from_jsonf_nofail("{ 'event': 'EVENT_A' }"); qapi_event_send_event_a(); - g_assert(data->emitted); - qobject_unref(data->expect); + g_assert(!expected_event); } -static void test_event_b(TestEventData *data, - const void *unused) +static void test_event_b(void) { - data->expect = qdict_from_jsonf_nofail("{ 'event': 'EVENT_B' }"); + expected_event = qdict_from_jsonf_nofail("{ 'event': 'EVENT_B' }"); qapi_event_send_event_b(); - g_assert(data->emitted); - qobject_unref(data->expect); + g_assert(!expected_event); } -static void test_event_c(TestEventData *data, - const void *unused) +static void test_event_c(void) { UserDefOne b = { .integer = 2, .string = (char *)"test1" }; - data->expect = qdict_from_jsonf_nofail( + expected_event = qdict_from_jsonf_nofail( "{ 'event': 'EVENT_C', 'data': {" " 'a': 1, 'b': { 'integer': 2, 'string': 'test1' }, 'c': 'test2' } }"); qapi_event_send_event_c(true, 1, &b, "test2"); - g_assert(data->emitted); - qobject_unref(data->expect); + g_assert(!expected_event); } /* Complex type */ -static void test_event_d(TestEventData *data, - const void *unused) +static void test_event_d(void) { UserDefOne struct1 = { 
.integer = 2, .string = (char *)"test1", @@ -129,65 +92,56 @@ static void test_event_d(TestEventData *data, .enum2 = ENUM_ONE_VALUE2, }; - data->expect = qdict_from_jsonf_nofail( + expected_event = qdict_from_jsonf_nofail( "{ 'event': 'EVENT_D', 'data': {" " 'a': {" " 'struct1': { 'integer': 2, 'string': 'test1', 'enum1': 'value1' }," " 'string': 'test2', 'enum2': 'value2' }," " 'b': 'test3', 'enum3': 'value3' } }"); qapi_event_send_event_d(&a, "test3", NULL, true, ENUM_ONE_VALUE3); - g_assert(data->emitted); - qobject_unref(data->expect); + g_assert(!expected_event); } -static void test_event_deprecated(TestEventData *data, const void *unused) +static void test_event_deprecated(void) { - data->expect = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES1' }"); + expected_event = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES1' }"); memset(&compat_policy, 0, sizeof(compat_policy)); qapi_event_send_test_event_features1(); - g_assert(data->emitted); + g_assert(!expected_event); compat_policy.has_deprecated_output = true; compat_policy.deprecated_output = COMPAT_POLICY_OUTPUT_HIDE; - data->emitted = false; qapi_event_send_test_event_features1(); - g_assert(!data->emitted); - - qobject_unref(data->expect); } -static void test_event_deprecated_data(TestEventData *data, const void *unused) +static void test_event_deprecated_data(void) { memset(&compat_policy, 0, sizeof(compat_policy)); - data->expect = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES0'," + expected_event = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES0'," " 'data': { 'foo': 42 } }"); qapi_event_send_test_event_features0(42); - g_assert(data->emitted); + g_assert(!expected_event); - qobject_unref(data->expect); compat_policy.has_deprecated_output = true; compat_policy.deprecated_output = COMPAT_POLICY_OUTPUT_HIDE; - data->expect = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES0' }"); + expected_event = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES0' }"); qapi_event_send_test_event_features0(42); - g_assert(data->emitted); - - qobject_unref(data->expect); } int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); - event_test_add("/event/event_a", test_event_a); - event_test_add("/event/event_b", test_event_b); - event_test_add("/event/event_c", test_event_c); - event_test_add("/event/event_d", test_event_d); - event_test_add("/event/deprecated", test_event_deprecated); - event_test_add("/event/deprecated_data", test_event_deprecated_data); + g_test_add_func("/event/event_a", test_event_a); + g_test_add_func("/event/event_b", test_event_b); + g_test_add_func("/event/event_c", test_event_c); + g_test_add_func("/event/event_d", test_event_d); + g_test_add_func("/event/deprecated", test_event_deprecated); + g_test_add_func("/event/deprecated_data", test_event_deprecated_data); g_test_run(); return 0; diff --git a/tests/unit/test-replication.c b/tests/unit/test-replication.c index afff908d77..5d2003b8ce 100644 --- a/tests/unit/test-replication.c +++ b/tests/unit/test-replication.c @@ -199,17 +199,13 @@ static BlockBackend *start_primary(void) static void teardown_primary(void) { BlockBackend *blk; - AioContext *ctx; /* remove P_ID */ blk = blk_by_name(P_ID); assert(blk); - ctx = blk_get_aio_context(blk); - aio_context_acquire(ctx); monitor_remove_blk(blk); blk_unref(blk); - aio_context_release(ctx); } static void test_primary_read(void) @@ -345,27 +341,20 @@ static void teardown_secondary(void) { /* only need to destroy two BBs */ BlockBackend *blk; - AioContext *ctx; /* 
remove S_LOCAL_DISK_ID */ blk = blk_by_name(S_LOCAL_DISK_ID); assert(blk); - ctx = blk_get_aio_context(blk); - aio_context_acquire(ctx); monitor_remove_blk(blk); blk_unref(blk); - aio_context_release(ctx); /* remove S_ID */ blk = blk_by_name(S_ID); assert(blk); - ctx = blk_get_aio_context(blk); - aio_context_acquire(ctx); monitor_remove_blk(blk); blk_unref(blk); - aio_context_release(ctx); } static void test_secondary_read(void) diff --git a/tests/unit/test-vmstate.c b/tests/unit/test-vmstate.c index 0b7d5ecd68..c4f9faa273 100644 --- a/tests/unit/test-vmstate.c +++ b/tests/unit/test-vmstate.c @@ -197,7 +197,7 @@ static const VMStateDescription vmstate_simple_primitive = { .name = "simple/primitive", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(b_1, TestSimple), VMSTATE_BOOL(b_2, TestSimple), VMSTATE_UINT8(u8_1, TestSimple), @@ -299,7 +299,7 @@ static const VMStateDescription vmstate_simple_arr = { .name = "simple/array", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16_ARRAY(u16_1, TestSimpleArray, 3), VMSTATE_END_OF_LIST() } @@ -341,7 +341,7 @@ static const VMStateDescription vmstate_versioned = { .name = "test/versioned", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(a, TestStruct), VMSTATE_UINT32_V(b, TestStruct, 2), /* Versioned field in the middle, so * we catch bugs more easily. @@ -412,7 +412,7 @@ static const VMStateDescription vmstate_skipping = { .name = "test/skip", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(a, TestStruct), VMSTATE_UINT32(b, TestStruct), VMSTATE_UINT32_TEST(c, TestStruct, test_skip), @@ -524,7 +524,7 @@ const VMStateDescription vmsd_tst = { .name = "test/tst", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(i, TestStructTriv), VMSTATE_END_OF_LIST() } @@ -542,7 +542,7 @@ const VMStateDescription vmsd_arps = { .name = "test/arps", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(ar, TestArrayOfPtrToStuct, AR_SIZE, 0, vmsd_tst, TestStructTriv), VMSTATE_END_OF_LIST() @@ -630,7 +630,7 @@ const VMStateDescription vmsd_arpp = { .name = "test/arps", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_ARRAY_OF_POINTER(ar, TestArrayOfPtrToInt, AR_SIZE, 0, vmstate_info_int32, int32_t*), VMSTATE_END_OF_LIST() @@ -685,7 +685,7 @@ static const VMStateDescription vmstate_q_element = { .name = "test/queue-element", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(b, TestQtailqElement), VMSTATE_UINT8(u8, TestQtailqElement), VMSTATE_END_OF_LIST() @@ -696,7 +696,7 @@ static const VMStateDescription vmstate_q = { .name = "test/queue", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT16(i16, TestQtailq), VMSTATE_QTAILQ_V(q, TestQtailq, 1, vmstate_q_element, TestQtailqElement, next), @@ -821,7 +821,7 @@ typedef struct TestGTreeInterval { .name = "interval", \ .version_id = 1, \ .minimum_version_id = 1, \ - .fields = (VMStateField[]) { \ + .fields = (const VMStateField[]) { \ VMSTATE_UINT64(low, 
TestGTreeInterval), \ VMSTATE_UINT64(high, TestGTreeInterval), \ VMSTATE_END_OF_LIST() \ @@ -839,7 +839,7 @@ typedef struct TestGTreeMapping { .name = "mapping", \ .version_id = 1, \ .minimum_version_id = 1, \ - .fields = (VMStateField[]) { \ + .fields = (const VMStateField[]) { \ VMSTATE_UINT64(phys_addr, TestGTreeMapping), \ VMSTATE_UINT32(flags, TestGTreeMapping), \ VMSTATE_END_OF_LIST() \ @@ -915,7 +915,7 @@ static const VMStateDescription vmstate_domain = { .version_id = 1, .minimum_version_id = 1, .pre_load = domain_preload, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(id, TestGTreeDomain), VMSTATE_GTREE_V(mappings, TestGTreeDomain, 1, vmstate_interval_mapping, @@ -940,7 +940,7 @@ static const VMStateDescription vmstate_qlist_element = { .name = "test/queue list", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, TestQListElement), VMSTATE_END_OF_LIST() } @@ -951,7 +951,7 @@ static const VMStateDescription vmstate_iommu = { .version_id = 1, .minimum_version_id = 1, .pre_load = iommu_preload, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(id, TestGTreeIOMMU), VMSTATE_GTREE_DIRECT_KEY_V(domains, TestGTreeIOMMU, 1, &vmstate_domain, TestGTreeDomain), @@ -963,7 +963,7 @@ static const VMStateDescription vmstate_container = { .name = "test/container/qlist", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, TestQListContainer), VMSTATE_QLIST_V(list, TestQListContainer, 1, vmstate_qlist_element, TestQListElement, next), @@ -1414,7 +1414,7 @@ static int tmp_child_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_tmp_back_to_parent = { .name = "test/tmp_child_parent", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(f, TestStruct), VMSTATE_END_OF_LIST() } @@ -1424,7 +1424,7 @@ static const VMStateDescription vmstate_tmp_child = { .name = "test/tmp_child", .pre_save = tmp_child_pre_save, .post_load = tmp_child_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(diff, TmpTestStruct), VMSTATE_STRUCT_POINTER(parent, TmpTestStruct, vmstate_tmp_back_to_parent, TestStruct), @@ -1435,7 +1435,7 @@ static const VMStateDescription vmstate_tmp_child = { static const VMStateDescription vmstate_with_tmp = { .name = "test/with_tmp", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(a, TestStruct), VMSTATE_UINT64(d, TestStruct), VMSTATE_WITH_TMP(TestStruct, TmpTestStruct, vmstate_tmp_child), |
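
The test-vmstate.c hunks that close this diff (the final hunk above is truncated) are one mechanical change: every compound-literal .fields array gains a const qualifier, which lets the field tables be placed in read-only data. A minimal standalone sketch of why that is valid C follows; Field and Description are stand-ins for QEMU's VMStateField and VMStateDescription, which are assumed rather than reproduced.

    #include <stdio.h>

    /*
     * Stand-ins for QEMU's VMStateField/VMStateDescription: the point is
     * only that a compound-literal array can be const-qualified and still
     * initialize a pointer-to-const member, which is the whole content of
     * the test-vmstate.c hunks.
     */
    typedef struct Field {
        const char *name;
        int size;
    } Field;

    typedef struct Description {
        const char *name;
        const Field *fields;        /* pointer to const, as in the new code */
    } Description;

    static const Description desc = {
        .name = "example",
        .fields = (const Field[]) { /* const compound literal, as in the diff */
            { .name = "a", .size = 4 },
            { .name = "b", .size = 8 },
            { .name = NULL },       /* terminator, like VMSTATE_END_OF_LIST() */
        },
    };

    int main(void)
    {
        for (const Field *f = desc.fields; f->name; f++) {
            printf("%s: %d\n", f->name, f->size);
        }
        return 0;
    }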