| field | value | date |
|---|---|---|
| author | Richard Henderson <richard.henderson@linaro.org> | 2023-05-30 09:48:55 -0700 |
| committer | Richard Henderson <richard.henderson@linaro.org> | 2023-05-30 09:48:55 -0700 |
| commit | f89f54d52bf8fdc6de1c90367f9bdd65e40fa382 (patch) | |
| tree | d139adc41463fd57a6aa723a32b3b787ce4adf9f /util/aio-posix.c | |
| parent | 7fe6cb68117ac856e03c93d18aca09de015392b0 (diff) | |
| parent | 60f782b6b78211c125970768be726c9f380dbd61 (diff) | |
| download | focaccia-qemu-f89f54d52bf8fdc6de1c90367f9bdd65e40fa382.tar.gz focaccia-qemu-f89f54d52bf8fdc6de1c90367f9bdd65e40fa382.zip | |
Merge tag 'for-upstream' of https://repo.or.cz/qemu/kevin into staging
Block layer patches

- Fix blockdev-create with iothreads
- Remove aio_disable_external() API

# -----BEGIN PGP SIGNATURE-----
#
# iQJFBAABCAAvFiEE3D3rFZqa+V09dFb+fwmycsiPL9YFAmR2JIARHGt3b2xmQHJl
# ZGhhdC5jb20ACgkQfwmycsiPL9brtA/9HVdAdtJxW78J60TE2lTqE9XlqMOEHBZl
# 8GN72trjP2geY/9mVsv/XoFie4ecqFsYjwAWWUuXZwLgAo53jh7oFN7gBH5iGyyD
# +EukYEfjqoykX5BkoK0gbMZZUe5Y4Dr2CNXYw4bNg8kDzj2RLifGA1XhdL3HoiVt
# PHZrhwBR7ddww6gVOnyJrfGL8fMkW/ZNeKRhrTZuSP+63oDOeGTsTumD+YKJzfPs
# p5WlwkuPjcqbO+w32FeVOHVhNI4swkN5svz3fkr8NuflfA7kH6nBQ5wymObbaTLc
# Erx03lrtP1+6nw43V11UnYt6iDMg4EBUQwtzNaKFnk3rMIdjoQYxIM5FTBWL2rYD
# Dg6PhkncXQ1WNWhUaFqpTFLB52XAYsSa4/y2QAGP6nWbqAUAUknQ3exaMvWiq7Z0
# nZeyyhIWvpJIHGCArWRdqqh+zsBdsmUVuPGyZnZgL/cXoJboYiHMyMJSUWE0XxML
# NGrncwxdsBXkVGGwTdHpBT64dcu3ENRgwtraqRLQm+tp5MKNTJB/+Ug2/p1vonHT
# UOoHz//UPskn8sHIyevoHXeu2Ns0uIHzrAXr+7Ay+9UYyIH6a07F4b2BGqkfyi/i
# 8wQsDmJ/idx5C4q1+jS+GuIbpnjIx6nxXwXMqpscUXZmM4Am8OMkiKxQAa1wExGF
# paId+HHwyks=
# =yuER
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 30 May 2023 09:29:52 AM PDT
# gpg:                using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6
# gpg:                issuer "kwolf@redhat.com"
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]

* tag 'for-upstream' of https://repo.or.cz/qemu/kevin: (32 commits)
  aio: remove aio_disable_external() API
  virtio: do not set is_external=true on host notifiers
  virtio-scsi: implement BlockDevOps->drained_begin()
  virtio-blk: implement BlockDevOps->drained_begin()
  virtio: make it possible to detach host notifier from any thread
  block/fuse: do not set is_external=true on FUSE fd
  block/export: don't require AioContext lock around blk_exp_ref/unref()
  block/export: rewrite vduse-blk drain code
  hw/xen: do not set is_external=true on evtchn fds
  xen-block: implement BlockDevOps->drained_begin()
  block: drain from main loop thread in bdrv_co_yield_to_drain()
  block: add blk_in_drain() API
  hw/xen: do not use aio_set_fd_handler(is_external=true) in xen_xenstore
  block/export: stop using is_external in vhost-user-blk server
  block/export: wait for vhost-user-blk requests when draining
  util/vhost-user-server: rename refcount to in_flight counter
  virtio-scsi: stop using aio_disable_external() during unplug
  virtio-scsi: avoid race between unplug and transport event
  hw/qdev: introduce qdev_is_realized() helper
  block-backend: split blk_do_set_aio_context()
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
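The headline change in this pull request is dropping the `is_external` flag from the fd-handler API so that `aio_disable_external()` can be removed. As a rough illustration of what that means for callers (a sketch, not code from the series; `my_dev_attach`, `my_dev_read`, `fd` and `my_dev` are hypothetical placeholders), the post-series `aio_set_fd_handler()` signature used below matches the one visible in the diff further down:

```c
/* Sketch only: how a hypothetical caller of aio_set_fd_handler() changes
 * once the is_external parameter is gone.  Uses QEMU's internal headers;
 * not a standalone program. */
#include "qemu/osdep.h"
#include "block/aio.h"

static void my_dev_read(void *opaque)
{
    /* hypothetical read handler: consume events for the device */
}

static void my_dev_attach(AioContext *ctx, int fd, void *my_dev)
{
    /*
     * Before this series the call carried an is_external flag so that
     * aio_disable_external() could mask the fd during drain:
     *
     *     aio_set_fd_handler(ctx, fd, true, my_dev_read, NULL, NULL, NULL,
     *                        my_dev);
     *
     * After the series the flag is gone; devices that must pause request
     * processing during drain do so via BlockDevOps->drained_begin()/
     * drained_end() instead.
     */
    aio_set_fd_handler(ctx, fd, my_dev_read, NULL, NULL, NULL, my_dev);
}
```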
Diffstat (limited to 'util/aio-posix.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | util/aio-posix.c | 20 |
1 file changed, 5 insertions, 15 deletions
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 34bc2a64d8..7f2c99729d 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -99,7 +99,6 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
 
 void aio_set_fd_handler(AioContext *ctx,
                         int fd,
-                        bool is_external,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         AioPollFn *io_poll,
@@ -144,7 +143,6 @@ void aio_set_fd_handler(AioContext *ctx,
         new_node->io_poll = io_poll;
         new_node->io_poll_ready = io_poll_ready;
         new_node->opaque = opaque;
-        new_node->is_external = is_external;
 
         if (is_new) {
             new_node->pfd.fd = fd;
@@ -196,12 +194,11 @@ static void aio_set_fd_poll(AioContext *ctx, int fd,
 
 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *notifier,
-                            bool is_external,
                             EventNotifierHandler *io_read,
                             AioPollFn *io_poll,
                             EventNotifierHandler *io_poll_ready)
 {
-    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
+    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                        (IOHandler *)io_read, NULL, io_poll,
                        (IOHandler *)io_poll_ready, notifier);
 }
@@ -285,13 +282,11 @@ bool aio_pending(AioContext *ctx)
 
         /* TODO should this check poll ready? */
         revents = node->pfd.revents & node->pfd.events;
-        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
-            aio_node_check(ctx, node->is_external)) {
+        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
             result = true;
             break;
         }
-        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
-            aio_node_check(ctx, node->is_external)) {
+        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
             result = true;
             break;
         }
@@ -350,9 +345,7 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
         QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
     }
     if (!QLIST_IS_INSERTED(node, node_deleted) &&
-        poll_ready && revents == 0 &&
-        aio_node_check(ctx, node->is_external) &&
-        node->io_poll_ready) {
+        poll_ready && revents == 0 && node->io_poll_ready) {
         /*
          * Remove temporarily to avoid infinite loops when ->io_poll_ready()
          * calls aio_poll() before clearing the condition that made the poll
@@ -375,7 +368,6 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
 
     if (!QLIST_IS_INSERTED(node, node_deleted) &&
         (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
-        aio_node_check(ctx, node->is_external) &&
         node->io_read) {
         node->io_read(node->opaque);
 
@@ -386,7 +378,6 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
     }
     if (!QLIST_IS_INSERTED(node, node_deleted) &&
         (revents & (G_IO_OUT | G_IO_ERR)) &&
-        aio_node_check(ctx, node->is_external) &&
         node->io_write) {
         node->io_write(node->opaque);
         progress = true;
@@ -447,8 +438,7 @@ static bool run_poll_handlers_once(AioContext *ctx,
     AioHandler *tmp;
 
     QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
-        if (aio_node_check(ctx, node->is_external) &&
-            node->io_poll(node->opaque)) {
+        if (node->io_poll(node->opaque)) {
            aio_add_poll_ready_handler(ready_list, node);
 
            node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
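For context on what replaces the deleted `aio_node_check(ctx, node->is_external)` tests: per the commit list in the merge message, devices now implement `BlockDevOps->drained_begin()`/`drained_end()` and detach their own notifiers while the block backend is drained. The sketch below illustrates that pattern only; `MyDev`, the `my_dev_*` functions and the registration comment are hypothetical, and the five-argument `aio_set_event_notifier()` form is the post-series one shown in the diff above.

```c
/* Sketch only: the drain pattern this series moves devices toward,
 * instead of tagging fds is_external.  MyDev and my_dev_* are
 * hypothetical names; uses QEMU's internal headers. */
#include "qemu/osdep.h"
#include "block/aio.h"
#include "sysemu/block-backend.h"

typedef struct MyDev {
    BlockBackend *blk;        /* backend whose drain we participate in */
    AioContext *ctx;          /* AioContext the notifier is attached to */
    EventNotifier notifier;   /* hypothetical request notifier */
} MyDev;

static void my_dev_notifier_read(EventNotifier *e)
{
    /* hypothetical: dequeue and process pending requests */
}

static void my_dev_drained_begin(void *opaque)
{
    MyDev *s = opaque;

    /* Stop watching the notifier so no new requests are dispatched
     * while the backend is drained. */
    aio_set_event_notifier(s->ctx, &s->notifier, NULL, NULL, NULL);
}

static void my_dev_drained_end(void *opaque)
{
    MyDev *s = opaque;

    /* Resume request processing once drain ends. */
    aio_set_event_notifier(s->ctx, &s->notifier,
                           my_dev_notifier_read, NULL, NULL);
}

static const BlockDevOps my_dev_block_ops = {
    .drained_begin = my_dev_drained_begin,
    .drained_end   = my_dev_drained_end,
};

/* Hypothetical registration at realize time:
 *     blk_set_dev_ops(s->blk, &my_dev_block_ops, s);
 */
```

This keeps drain a per-device responsibility rather than a global AioContext switch, which appears to be the direction of the commits listed in the merge message.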