Diffstat (limited to 'util')
-rw-r--r--  util/compatfd.c            16
-rw-r--r--  util/iov.c                 10
-rw-r--r--  util/osdep.c                7
-rw-r--r--  util/oslib-posix.c         16
-rw-r--r--  util/oslib-win32.c         15
-rw-r--r--  util/path.c                 4
-rw-r--r--  util/qemu-option.c          6
-rw-r--r--  util/qemu-sockets.c         6
-rw-r--r--  util/qemu-thread-posix.c  116
-rw-r--r--  util/qemu-thread-win32.c   26
10 files changed, 191 insertions, 31 deletions
diff --git a/util/compatfd.c b/util/compatfd.c
index 9cf3f2834d..430a41c855 100644
--- a/util/compatfd.c
+++ b/util/compatfd.c
@@ -15,9 +15,9 @@
 #include "qemu-common.h"
 #include "qemu/compatfd.h"
+#include "qemu/thread.h"
 
 #include <sys/syscall.h>
-#include <pthread.h>
 
 struct sigfd_compat_info
 {
@@ -28,10 +28,6 @@ struct sigfd_compat_info
 static void *sigwait_compat(void *opaque)
 {
     struct sigfd_compat_info *info = opaque;
-    sigset_t all;
-
-    sigfillset(&all);
-    pthread_sigmask(SIG_BLOCK, &all, NULL);
 
     while (1) {
         int sig;
@@ -71,9 +67,8 @@ static void *sigwait_compat(void *opaque)
 
 static int qemu_signalfd_compat(const sigset_t *mask)
 {
-    pthread_attr_t attr;
-    pthread_t tid;
     struct sigfd_compat_info *info;
+    QemuThread thread;
     int fds[2];
 
     info = malloc(sizeof(*info));
@@ -93,12 +88,7 @@ static int qemu_signalfd_compat(const sigset_t *mask)
     memcpy(&info->mask, mask, sizeof(*mask));
     info->fd = fds[1];
 
-    pthread_attr_init(&attr);
-    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
-
-    pthread_create(&tid, &attr, sigwait_compat, info);
-
-    pthread_attr_destroy(&attr);
+    qemu_thread_create(&thread, sigwait_compat, info, QEMU_THREAD_DETACHED);
 
     return fds[0];
 }
diff --git a/util/iov.c b/util/iov.c
index f705586808..bb46c04e4d 100644
--- a/util/iov.c
+++ b/util/iov.c
@@ -181,13 +181,11 @@ ssize_t iov_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt,
         assert(iov[niov].iov_len > tail);
         orig_len = iov[niov].iov_len;
         iov[niov++].iov_len = tail;
-    }
-
-    ret = do_send_recv(sockfd, iov, niov, do_send);
-
-    /* Undo the changes above before checking for errors */
-    if (tail) {
+        ret = do_send_recv(sockfd, iov, niov, do_send);
+        /* Undo the changes above before checking for errors */
         iov[niov-1].iov_len = orig_len;
+    } else {
+        ret = do_send_recv(sockfd, iov, niov, do_send);
     }
     if (offset) {
         iov[0].iov_base -= offset;
diff --git a/util/osdep.c b/util/osdep.c
index 685c8ae889..62072b4be3 100644
--- a/util/osdep.c
+++ b/util/osdep.c
@@ -207,6 +207,13 @@ int qemu_open(const char *name, int flags, ...)
     }
 #endif
 
+#ifdef O_DIRECT
+    if (ret == -1 && errno == EINVAL && (flags & O_DIRECT)) {
+        error_report("file system may not support O_DIRECT");
+        errno = EINVAL; /* in case it was clobbered */
+    }
+#endif /* O_DIRECT */
+
     return ret;
 }
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index 3dc8b1b074..e00a44c86f 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -112,9 +112,7 @@ void *qemu_anon_ram_alloc(size_t size)
     size_t offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr;
 
     if (ptr == MAP_FAILED) {
-        fprintf(stderr, "Failed to allocate %zu B: %s\n",
-                size, strerror(errno));
-        abort();
+        return NULL;
     }
 
     ptr += offset;
@@ -159,6 +157,18 @@ void qemu_set_nonblock(int fd)
     fcntl(fd, F_SETFL, f | O_NONBLOCK);
 }
 
+int socket_set_fast_reuse(int fd)
+{
+    int val = 1, ret;
+
+    ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
+                     (const char *)&val, sizeof(val));
+
+    assert(ret == 0);
+
+    return ret;
+}
+
 void qemu_set_cloexec(int fd)
 {
     int f;
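With the oslib-posix.c hunk above, qemu_anon_ram_alloc() reports failure by returning NULL instead of printing a message and aborting, so the abort-or-recover decision moves to the caller (the Windows version below gets the same treatment by dropping qemu_oom_check()). A minimal caller-side sketch of that pattern; the helper name, message, and exit policy are illustrative only, not part of this commit, and the usual QEMU headers are assumed:

    /* Illustrative caller, not code from this diff. */
    static void *alloc_guest_ram_or_die(size_t len)
    {
        void *buf = qemu_anon_ram_alloc(len);

        if (buf == NULL) {
            /* The allocator no longer aborts; the caller chooses the policy. */
            error_report("cannot allocate %zu bytes of guest RAM", len);
            exit(1);
        }
        return buf;
    }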
diff --git a/util/oslib-win32.c b/util/oslib-win32.c
index 961fbf5e3d..776ccfaaf0 100644
--- a/util/oslib-win32.c
+++ b/util/oslib-win32.c
@@ -65,10 +65,7 @@ void *qemu_anon_ram_alloc(size_t size)
     /* FIXME: this is not exactly optimal solution since VirtualAlloc
        has 64Kb granularity, but at least it guarantees us that the
        memory is page aligned. */
-    if (!size) {
-        abort();
-    }
-    ptr = qemu_oom_check(VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE));
+    ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
     trace_qemu_anon_ram_alloc(size, ptr);
     return ptr;
 }
@@ -127,6 +124,16 @@ void qemu_set_nonblock(int fd)
     qemu_fd_register(fd);
 }
 
+int socket_set_fast_reuse(int fd)
+{
+    /* Enabling the reuse of an endpoint that was used by a socket still in
+     * TIME_WAIT state is usually performed by setting SO_REUSEADDR. On Windows
+     * fast reuse is the default and SO_REUSEADDR does strange things. So we
+     * don't have to do anything here. More info can be found at:
+     * http://msdn.microsoft.com/en-us/library/windows/desktop/ms740621.aspx */
+    return 0;
+}
+
 int inet_aton(const char *cp, struct in_addr *ia)
 {
     uint32_t addr = inet_addr(cp);
diff --git a/util/path.c b/util/path.c
index f0c69627c7..623219e4c5 100644
--- a/util/path.c
+++ b/util/path.c
@@ -39,7 +39,7 @@ static int strneq(const char *s1, unsigned int n, const char *s2)
 }
 
 static struct pathelem *add_entry(struct pathelem *root, const char *name,
-                                  unsigned char type);
+                                  unsigned type);
 
 static struct pathelem *new_entry(const char *root,
                                   struct pathelem *parent,
@@ -82,7 +82,7 @@ static struct pathelem *add_dir_maybe(struct pathelem *path)
 }
 
 static struct pathelem *add_entry(struct pathelem *root, const char *name,
-                                  unsigned char type)
+                                  unsigned type)
 {
     struct pathelem **e;
diff --git a/util/qemu-option.c b/util/qemu-option.c
index e0844a966c..efcb5dcfcb 100644
--- a/util/qemu-option.c
+++ b/util/qemu-option.c
@@ -834,6 +834,12 @@ const char *qemu_opts_id(QemuOpts *opts)
     return opts->id;
 }
 
+/* The id string will be g_free()d by qemu_opts_del */
+void qemu_opts_set_id(QemuOpts *opts, char *id)
+{
+    opts->id = id;
+}
+
 void qemu_opts_del(QemuOpts *opts)
 {
     QemuOpt *opt;
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index 095716ecdb..6b97dc11f9 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -155,7 +155,7 @@ int inet_listen_opts(QemuOpts *opts, int port_offset, Error **errp)
             continue;
         }
 
-        qemu_setsockopt(slisten, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+        socket_set_fast_reuse(slisten);
 #ifdef IPV6_V6ONLY
         if (e->ai_family == PF_INET6) {
             /* listen on both ipv4 and ipv6 */
@@ -274,7 +274,7 @@ static int inet_connect_addr(struct addrinfo *addr, bool *in_progress,
         error_set_errno(errp, errno, QERR_SOCKET_CREATE_FAILED);
         return -1;
     }
-    qemu_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+    socket_set_fast_reuse(sock);
     if (connect_state != NULL) {
         qemu_set_nonblock(sock);
     }
@@ -455,7 +455,7 @@ int inet_dgram_opts(QemuOpts *opts, Error **errp)
         error_set_errno(errp, errno, QERR_SOCKET_CREATE_FAILED);
         goto err;
     }
-    qemu_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+    socket_set_fast_reuse(sock);
 
     /* bind socket */
     if (bind(sock, local->ai_addr, local->ai_addrlen) < 0) {
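The new qemu_opts_set_id() above hands ownership of the id string to the QemuOpts: as its comment says, qemu_opts_del() will g_free() it, so callers are expected to pass a heap-allocated copy rather than a literal. A small sketch of that calling convention; 'opts' is assumed to be an existing QemuOpts instance and the id value is made up for illustration:

    /* Ownership of the g_strdup()'d string moves to opts. */
    qemu_opts_set_id(opts, g_strdup("disk0"));

    /* ... use opts ... */

    qemu_opts_del(opts);    /* frees the opts and the id set above */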
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index 4de133e7b2..37dd298631 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -20,7 +20,12 @@
 #include <limits.h>
 #include <unistd.h>
 #include <sys/time.h>
+#ifdef __linux__
+#include <sys/syscall.h>
+#include <linux/futex.h>
+#endif
 #include "qemu/thread.h"
+#include "qemu/atomic.h"
 
 static void error_exit(int err, const char *msg)
 {
@@ -272,6 +277,117 @@ void qemu_sem_wait(QemuSemaphore *sem)
 #endif
 }
 
+#ifdef __linux__
+#define futex(...)              syscall(__NR_futex, __VA_ARGS__)
+
+static inline void futex_wake(QemuEvent *ev, int n)
+{
+    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
+}
+
+static inline void futex_wait(QemuEvent *ev, unsigned val)
+{
+    futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0);
+}
+#else
+static inline void futex_wake(QemuEvent *ev, int n)
+{
+    if (n == 1) {
+        pthread_cond_signal(&ev->cond);
+    } else {
+        pthread_cond_broadcast(&ev->cond);
+    }
+}
+
+static inline void futex_wait(QemuEvent *ev, unsigned val)
+{
+    pthread_mutex_lock(&ev->lock);
+    if (ev->value == val) {
+        pthread_cond_wait(&ev->cond, &ev->lock);
+    }
+    pthread_mutex_unlock(&ev->lock);
+}
+#endif
+
+/* Valid transitions:
+ * - free->set, when setting the event
+ * - busy->set, when setting the event, followed by futex_wake
+ * - set->free, when resetting the event
+ * - free->busy, when waiting
+ *
+ * set->busy does not happen (it can be observed from the outside but
+ * it really is set->free->busy).
+ *
+ * busy->free provably cannot happen; to enforce it, the set->free transition
+ * is done with an OR, which becomes a no-op if the event has concurrently
+ * transitioned to free or busy.
+ */
+
+#define EV_SET         0
+#define EV_FREE        1
+#define EV_BUSY       -1
+
+void qemu_event_init(QemuEvent *ev, bool init)
+{
+#ifndef __linux__
+    pthread_mutex_init(&ev->lock, NULL);
+    pthread_cond_init(&ev->cond, NULL);
+#endif
+
+    ev->value = (init ? EV_SET : EV_FREE);
+}
+
+void qemu_event_destroy(QemuEvent *ev)
+{
+#ifndef __linux__
+    pthread_mutex_destroy(&ev->lock);
+    pthread_cond_destroy(&ev->cond);
+#endif
+}
+
+void qemu_event_set(QemuEvent *ev)
+{
+    if (atomic_mb_read(&ev->value) != EV_SET) {
+        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
+            /* There were waiters, wake them up. */
+            futex_wake(ev, INT_MAX);
+        }
+    }
+}
+
+void qemu_event_reset(QemuEvent *ev)
+{
+    if (atomic_mb_read(&ev->value) == EV_SET) {
+        /*
+         * If there was a concurrent reset (or even reset+wait),
+         * do nothing. Otherwise change EV_SET->EV_FREE.
+         */
+        atomic_or(&ev->value, EV_FREE);
+    }
+}
+
+void qemu_event_wait(QemuEvent *ev)
+{
+    unsigned value;
+
+    value = atomic_mb_read(&ev->value);
+    if (value != EV_SET) {
+        if (value == EV_FREE) {
+            /*
+             * Leave the event reset and tell qemu_event_set that there
+             * are waiters. No need to retry, because there cannot be
+             * a concurrent busy->free transition. After the CAS, the
+             * event will be either set or busy.
+             */
+            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
+                return;
+            }
+        }
+        futex_wait(ev, EV_BUSY);
+    }
+}
+
+
 void qemu_thread_create(QemuThread *thread,
                         void *(*start_routine)(void*),
                         void *arg, int mode)
diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c
index 517878dcc1..27a5217769 100644
--- a/util/qemu-thread-win32.c
+++ b/util/qemu-thread-win32.c
@@ -227,6 +227,32 @@ void qemu_sem_wait(QemuSemaphore *sem)
     }
 }
 
+void qemu_event_init(QemuEvent *ev, bool init)
+{
+    /* Manual reset. */
+    ev->event = CreateEvent(NULL, TRUE, init, NULL);
+}
+
+void qemu_event_destroy(QemuEvent *ev)
+{
+    CloseHandle(ev->event);
+}
+
+void qemu_event_set(QemuEvent *ev)
+{
+    SetEvent(ev->event);
+}
+
+void qemu_event_reset(QemuEvent *ev)
+{
+    ResetEvent(ev->event);
+}
+
+void qemu_event_wait(QemuEvent *ev)
+{
+    WaitForSingleObject(ev->event, INFINITE);
+}
+
 struct QemuThreadData {
     /* Passed to win32_start_routine. */
     void *(*start_routine)(void *);
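Taken together, the POSIX and Win32 hunks give QemuEvent a common five-call API: init, set, reset, wait, destroy. A minimal usage sketch, not taken from this commit: a detached worker signals completion and the main thread blocks on the event. On Linux the wait goes through the futex path added above; elsewhere it falls back to the mutex/condvar pair or the Win32 manual-reset event. The worker body and main() scaffolding are illustrative only:

    #include "qemu/thread.h"

    static QemuEvent done_ev;

    static void *worker(void *arg)
    {
        /* ... do the actual work ... */
        qemu_event_set(&done_ev);          /* free/busy -> set; wakes any waiter */
        return NULL;
    }

    int main(void)
    {
        QemuThread t;

        qemu_event_init(&done_ev, false);  /* start in the "free" (reset) state */
        qemu_thread_create(&t, worker, NULL, QEMU_THREAD_DETACHED);

        qemu_event_wait(&done_ev);         /* free -> busy, then sleep until set */
        qemu_event_reset(&done_ev);        /* set -> free, so the event can be reused */

        qemu_event_destroy(&done_ev);
        return 0;
    }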