Diffstat (limited to 'util')
-rw-r--r--  util/async-teardown.c    | 146
-rw-r--r--  util/iov.c               |   2
-rw-r--r--  util/meson.build         |   1
-rw-r--r--  util/vhost-user-server.c |  27
4 files changed, 21 insertions(+), 155 deletions(-)
diff --git a/util/async-teardown.c b/util/async-teardown.c
deleted file mode 100644
index 62cdeb0f20..0000000000
--- a/util/async-teardown.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Asynchronous teardown
- *
- * Copyright IBM, Corp. 2022
- *
- * Authors:
- *  Claudio Imbrenda <imbrenda@linux.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or (at your
- * option) any later version. See the COPYING file in the top-level directory.
- *
- */
-
-#include "qemu/osdep.h"
-#include <dirent.h>
-#include <sys/prctl.h>
-#include <sched.h>
-
-#include "qemu/async-teardown.h"
-
-#ifdef _SC_THREAD_STACK_MIN
-#define CLONE_STACK_SIZE sysconf(_SC_THREAD_STACK_MIN)
-#else
-#define CLONE_STACK_SIZE 16384
-#endif
-
-static pid_t the_ppid;
-
-/*
- * Close all open file descriptors.
- */
-static void close_all_open_fd(void)
-{
-    struct dirent *de;
-    int fd, dfd;
-    DIR *dir;
-
-#ifdef CONFIG_CLOSE_RANGE
-    int r = close_range(0, ~0U, 0);
-    if (!r) {
-        /* Success, no need to try other ways. */
-        return;
-    }
-#endif
-
-    dir = opendir("/proc/self/fd");
-    if (!dir) {
-        /* If /proc is not mounted, there is nothing that can be done. */
-        return;
-    }
-    /* Avoid closing the directory. */
-    dfd = dirfd(dir);
-
-    for (de = readdir(dir); de; de = readdir(dir)) {
-        fd = atoi(de->d_name);
-        if (fd != dfd) {
-            close(fd);
-        }
-    }
-    closedir(dir);
-}
-
-static void hup_handler(int signal)
-{
-    /* Check every second if this process has been reparented. */
-    while (the_ppid == getppid()) {
-        /* sleep() is safe to use in a signal handler. */
-        sleep(1);
-    }
-
-    /* At this point the parent process has terminated completely. */
-    _exit(0);
-}
-
-static int async_teardown_fn(void *arg)
-{
-    struct sigaction sa = { .sa_handler = hup_handler };
-    sigset_t hup_signal;
-    char name[16];
-
-    /* Set a meaningful name for this process. */
-    snprintf(name, 16, "cleanup/%d", the_ppid);
-    prctl(PR_SET_NAME, (unsigned long)name);
-
-    /*
-     * Close all file descriptors that might have been inherited from the
-     * main qemu process when doing clone, needed to make libvirt happy.
-     * Not using close_range for increased compatibility with older kernels.
-     */
-    close_all_open_fd();
-
-    /* Set up a handler for SIGHUP and unblock SIGHUP. */
-    sigaction(SIGHUP, &sa, NULL);
-    sigemptyset(&hup_signal);
-    sigaddset(&hup_signal, SIGHUP);
-    sigprocmask(SIG_UNBLOCK, &hup_signal, NULL);
-
-    /* Ask to receive SIGHUP when the parent dies. */
-    prctl(PR_SET_PDEATHSIG, SIGHUP);
-
-    /*
-     * Sleep forever, unless the parent process has already terminated. The
-     * only interruption can come from the SIGHUP signal, which in normal
-     * operation is received when the parent process dies.
-     */
-    if (the_ppid == getppid()) {
-        pause();
-    }
-
-    /* At this point the parent process has terminated completely. */
-    _exit(0);
-}
-
-/*
- * Allocate a new stack of a reasonable size, and return a pointer to its top.
- */
-static void *new_stack_for_clone(void)
-{
-    size_t stack_size = CLONE_STACK_SIZE;
-    char *stack_ptr;
-
-    /* Allocate a new stack and get a pointer to its top. */
-    stack_ptr = qemu_alloc_stack(&stack_size);
-#if !defined(HOST_HPPA)
-    /* The top is at the end of the area, except on HPPA. */
-    stack_ptr += stack_size;
-#endif
-
-    return stack_ptr;
-}
-
-/*
- * Block all signals, start (clone) a new process sharing the address space
- * with qemu (CLONE_VM), then restore signals.
- */
-void init_async_teardown(void)
-{
-    sigset_t all_signals, old_signals;
-
-    the_ppid = getpid();
-
-    sigfillset(&all_signals);
-    sigprocmask(SIG_BLOCK, &all_signals, &old_signals);
-    clone(async_teardown_fn, new_stack_for_clone(), CLONE_VM, NULL);
-    sigprocmask(SIG_SETMASK, &old_signals, NULL);
-}
diff --git a/util/iov.c b/util/iov.c
index 866fb577f3..7e73948f5e 100644
--- a/util/iov.c
+++ b/util/iov.c
@@ -571,7 +571,7 @@ static int sortelem_cmp_src_index(const void *a, const void *b)
  */
 void qemu_iovec_clone(QEMUIOVector *dest, const QEMUIOVector *src, void *buf)
 {
-    IOVectorSortElem sortelems[src->niov];
+    g_autofree IOVectorSortElem *sortelems = g_new(IOVectorSortElem, src->niov);
     void *last_end;
     int i;
diff --git a/util/meson.build b/util/meson.build
index a375160286..c4827fd70a 100644
--- a/util/meson.build
+++ b/util/meson.build
@@ -3,7 +3,6 @@ util_ss.add(files('thread-context.c'), numa)
 if not config_host_data.get('CONFIG_ATOMIC64')
   util_ss.add(files('atomic64.c'))
 endif
-util_ss.add(when: 'CONFIG_LINUX', if_true: files('async-teardown.c'))
 util_ss.add(when: 'CONFIG_POSIX', if_true: files('aio-posix.c'))
 util_ss.add(when: 'CONFIG_POSIX', if_true: files('fdmon-poll.c'))
 if config_host_data.get('CONFIG_EPOLL_CREATE1')
diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c
index cd17fb5326..b4b6bf30a2 100644
--- a/util/vhost-user-server.c
+++ b/util/vhost-user-server.c
@@ -127,7 +127,14 @@ vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg)
         if (rc < 0) {
             if (rc == QIO_CHANNEL_ERR_BLOCK) {
                 assert(local_err == NULL);
-                qio_channel_yield(ioc, G_IO_IN);
+                if (server->ctx) {
+                    server->in_qio_channel_yield = true;
+                    qio_channel_yield(ioc, G_IO_IN);
+                    server->in_qio_channel_yield = false;
+                } else {
+                    /* Wait until attached to an AioContext again */
+                    qemu_coroutine_yield();
+                }
                 continue;
             } else {
                 error_report_err(local_err);
@@ -278,7 +285,7 @@ set_watch(VuDev *vu_dev, int fd, int vu_evt,
         vu_fd_watch->fd = fd;
         vu_fd_watch->cb = cb;
         qemu_socket_set_nonblock(fd);
-        aio_set_fd_handler(server->ioc->ctx, fd, kick_handler,
+        aio_set_fd_handler(server->ctx, fd, kick_handler,
                            NULL, NULL, NULL, vu_fd_watch);
         vu_fd_watch->vu_dev = vu_dev;
         vu_fd_watch->pvt = pvt;
@@ -299,7 +306,7 @@ static void remove_watch(VuDev *vu_dev, int fd)
     if (!vu_fd_watch) {
         return;
     }
-    aio_set_fd_handler(server->ioc->ctx, fd, NULL, NULL, NULL, NULL, NULL);
+    aio_set_fd_handler(server->ctx, fd, NULL, NULL, NULL, NULL, NULL);
 
     QTAILQ_REMOVE(&server->vu_fd_watches, vu_fd_watch, next);
     g_free(vu_fd_watch);
@@ -344,6 +351,8 @@ static void vu_accept(QIONetListener *listener, QIOChannelSocket *sioc,
 
     /* TODO vu_message_write() spins if non-blocking! */
     qio_channel_set_blocking(server->ioc, false, NULL);
+    qio_channel_set_follow_coroutine_ctx(server->ioc, true);
+
     server->co_trip = qemu_coroutine_create(vu_client_trip, server);
 
     aio_context_acquire(server->ctx);
@@ -399,13 +408,12 @@ void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx)
         return;
     }
 
-    qio_channel_attach_aio_context(server->ioc, ctx);
-
     QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
         aio_set_fd_handler(ctx, vu_fd_watch->fd, kick_handler, NULL,
                            NULL, NULL, vu_fd_watch);
     }
 
+    assert(!server->in_qio_channel_yield);
     aio_co_schedule(ctx, server->co_trip);
 }
 
@@ -419,11 +427,16 @@ void vhost_user_server_detach_aio_context(VuServer *server)
             aio_set_fd_handler(server->ctx, vu_fd_watch->fd,
                                NULL, NULL, NULL, NULL, vu_fd_watch);
         }
-
-        qio_channel_detach_aio_context(server->ioc);
     }
 
     server->ctx = NULL;
+
+    if (server->ioc) {
+        if (server->in_qio_channel_yield) {
+            /* Stop receiving the next vhost-user message */
+            qio_channel_wake_read(server->ioc);
+        }
+    }
 }
 
 bool vhost_user_server_start(VuServer *server,
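
The vhost-user-server.c hunks above drop the explicit qio_channel_attach_aio_context()/qio_channel_detach_aio_context() calls in favour of qio_channel_set_follow_coroutine_ctx(), so the channel now follows whatever AioContext the server->co_trip coroutine runs in. The sketch below condenses the resulting park/wake protocol between the reader coroutine and the detach path. It is illustrative only, not a drop-in replacement: it reuses identifiers visible in this diff (server->ctx, server->in_qio_channel_yield, server->co_trip, qio_channel_wake_read()) and assumes the surrounding QEMU coroutine infrastructure rather than being a standalone program.

/* Reader side, cf. vu_message_read(): runs inside server->co_trip. */
if (rc == QIO_CHANNEL_ERR_BLOCK) {
    if (server->ctx) {
        /* Attached: sleep until the socket becomes readable. */
        server->in_qio_channel_yield = true;
        qio_channel_yield(ioc, G_IO_IN);
        server->in_qio_channel_yield = false;
    } else {
        /*
         * Detached: park the coroutine. It resumes only when
         * vhost_user_server_attach_aio_context() reschedules it
         * via aio_co_schedule(ctx, server->co_trip).
         */
        qemu_coroutine_yield();
    }
}

/* Detach side, cf. vhost_user_server_detach_aio_context(): */
server->ctx = NULL;
if (server->ioc && server->in_qio_channel_yield) {
    /*
     * Interrupt a pending qio_channel_yield() so the reader wakes,
     * observes server->ctx == NULL and parks itself in the
     * qemu_coroutine_yield() branch above.
     */
    qio_channel_wake_read(server->ioc);
}

The assert(!server->in_qio_channel_yield) in the attach path then encodes the invariant this protocol establishes: a coroutine being rescheduled with aio_co_schedule() is parked in qemu_coroutine_yield(), never still blocked inside qio_channel_yield() on the old context.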