Diffstat (limited to 'util')
-rw-r--r--  util/aio-posix.c    |  8
-rw-r--r--  util/async.c        | 30
-rw-r--r--  util/qemu-error.c   |  6
-rw-r--r--  util/qemu-print.c   |  3
-rw-r--r--  util/qemu-sockets.c |  1
5 files changed, 43 insertions, 5 deletions
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 280f27bb99..30f5354b1e 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -15,6 +15,7 @@
 #include "qemu/osdep.h"
 #include "block/block.h"
+#include "qemu/main-loop.h"
 #include "qemu/rcu.h"
 #include "qemu/rcu_queue.h"
 #include "qemu/sockets.h"
@@ -558,8 +559,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
      * There cannot be two concurrent aio_poll calls for the same AioContext (or
      * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
      * We rely on this below to avoid slow locked accesses to ctx->notify_me.
+     *
+     * aio_poll() may only be called in the AioContext's thread. iohandler_ctx
+     * is special in that it runs in the main thread, but that thread's context
+     * is qemu_aio_context.
      */
-    assert(in_aio_context_home_thread(ctx));
+    assert(in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
+                                      qemu_get_aio_context() : ctx));
 
     qemu_lockcnt_inc(&ctx->list_lock);
diff --git a/util/async.c b/util/async.c
index f758354c6a..674dbefb7c 100644
--- a/util/async.c
+++ b/util/async.c
@@ -569,6 +569,36 @@ void aio_co_schedule(AioContext *ctx, Coroutine *co)
     aio_context_unref(ctx);
 }
 
+typedef struct AioCoRescheduleSelf {
+    Coroutine *co;
+    AioContext *new_ctx;
+} AioCoRescheduleSelf;
+
+static void aio_co_reschedule_self_bh(void *opaque)
+{
+    AioCoRescheduleSelf *data = opaque;
+    aio_co_schedule(data->new_ctx, data->co);
+}
+
+void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
+{
+    AioContext *old_ctx = qemu_get_current_aio_context();
+
+    if (old_ctx != new_ctx) {
+        AioCoRescheduleSelf data = {
+            .co = qemu_coroutine_self(),
+            .new_ctx = new_ctx,
+        };
+        /*
+         * We can't directly schedule the coroutine in the target context
+         * because this would be racy: The other thread could try to enter the
+         * coroutine before it has yielded in this one.
+         */
+        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
+        qemu_coroutine_yield();
+    }
+}
+
 void aio_co_wake(struct Coroutine *co)
 {
     AioContext *ctx;
diff --git a/util/qemu-error.c b/util/qemu-error.c
index 3ee41438e9..aa30f03564 100644
--- a/util/qemu-error.c
+++ b/util/qemu-error.c
@@ -171,7 +171,7 @@ static void print_loc(void)
     int i;
     const char *const *argp;
 
-    if (!cur_mon && progname) {
+    if (!monitor_cur() && progname) {
         fprintf(stderr, "%s:", progname);
         sep = " ";
     }
@@ -208,7 +208,7 @@ static void vreport(report_type type, const char *fmt, va_list ap)
     GTimeVal tv;
     gchar *timestr;
 
-    if (error_with_timestamp && !cur_mon) {
+    if (error_with_timestamp && !monitor_cur()) {
         g_get_current_time(&tv);
         timestr = g_time_val_to_iso8601(&tv);
         error_printf("%s ", timestr);
@@ -216,7 +216,7 @@ static void vreport(report_type type, const char *fmt, va_list ap)
     }
 
     /* Only prepend guest name if -msg guest-name and -name guest=... are set */
-    if (error_with_guestname && error_guest_name && !cur_mon) {
+    if (error_with_guestname && error_guest_name && !monitor_cur()) {
         error_printf("%s ", error_guest_name);
     }
diff --git a/util/qemu-print.c b/util/qemu-print.c
index e79d6b8396..69ba612f56 100644
--- a/util/qemu-print.c
+++ b/util/qemu-print.c
@@ -20,6 +20,7 @@
  */
 int qemu_vprintf(const char *fmt, va_list ap)
 {
+    Monitor *cur_mon = monitor_cur();
     if (cur_mon) {
         return monitor_vprintf(cur_mon, fmt, ap);
     }
@@ -48,7 +49,7 @@ int qemu_printf(const char *fmt, ...)
 int qemu_vfprintf(FILE *stream, const char *fmt, va_list ap)
 {
     if (!stream) {
-        return monitor_vprintf(cur_mon, fmt, ap);
+        return monitor_vprintf(monitor_cur(), fmt, ap);
     }
     return vfprintf(stream, fmt, ap);
 }
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index de4bf7616e..05e5c73f9d 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -1092,6 +1092,7 @@ fail:
 static int socket_get_fd(const char *fdstr, int num, Error **errp)
 {
+    Monitor *cur_mon = monitor_cur();
     int fd;
 
     if (num != 1) {
         error_setg_errno(errp, EINVAL, "socket_get_fd: too many connections");
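
Note on the new util/async.c helper: aio_co_reschedule_self() lets the running coroutine move itself into another AioContext by scheduling a bottom half in the old context and then yielding, so the target thread cannot enter the coroutine before it has yielded. The following is a minimal usage sketch, not code from this series: the caller do_work_in_other_context() and the contexts passed to it are hypothetical, while aio_co_reschedule_self() and qemu_get_current_aio_context() are the interfaces shown in the diff above; the header names are assumptions about where they live in the tree.

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

/* Hypothetical caller: run part of a coroutine in another AioContext. */
static void coroutine_fn do_work_in_other_context(AioContext *target_ctx)
{
    AioContext *home_ctx = qemu_get_current_aio_context();

    /* Reschedule ourselves into target_ctx; execution resumes in its thread. */
    aio_co_reschedule_self(target_ctx);

    /* ... work that must run in target_ctx goes here ... */

    /* Hop back so the caller resumes in the original context. */
    aio_co_reschedule_self(home_ctx);
}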
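
Note on the cur_mon conversions in util/qemu-error.c, util/qemu-print.c and util/qemu-sockets.c: call sites stop reading the cur_mon global directly and instead fetch the current monitor through the monitor_cur() accessor, which yields NULL when no monitor is active. A minimal sketch of the calling pattern follows; the helper report_line() is hypothetical, and monitor_printf() is assumed as the output function.

#include "qemu/osdep.h"
#include "monitor/monitor.h"

/*
 * Hypothetical helper: write to the current monitor if there is one,
 * otherwise fall back to stderr.
 */
static void report_line(const char *msg)
{
    Monitor *mon = monitor_cur();   /* NULL outside a monitor context */

    if (mon) {
        monitor_printf(mon, "%s\n", msg);
    } else {
        fprintf(stderr, "%s\n", msg);
    }
}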