From b811203cf2fbf83e26f8e8feb2c77784259a4cbd Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 7 Mar 2013 13:41:45 +0100 Subject: threadpool: move globals into struct ThreadPool Move global variables into a struct so multiple thread pools can be supported in the future. This patch does not change thread-pool.h interfaces. There is still a global thread pool and it is not yet possible to create/destroy individual thread pools. Moving the variables into a struct first makes later patches easier to review. Signed-off-by: Stefan Hajnoczi Reviewed-by: Paolo Bonzini --- thread-pool.c | 190 +++++++++++++++++++++++++++++++++------------------------- 1 file changed, 110 insertions(+), 80 deletions(-) (limited to 'thread-pool.c') diff --git a/thread-pool.c b/thread-pool.c index e3ca64d790..a0aecd08fe 100644 --- a/thread-pool.c +++ b/thread-pool.c @@ -24,7 +24,9 @@ #include "qemu/event_notifier.h" #include "block/thread-pool.h" -static void do_spawn_thread(void); +typedef struct ThreadPool ThreadPool; + +static void do_spawn_thread(ThreadPool *pool); typedef struct ThreadPoolElement ThreadPoolElement; @@ -37,6 +39,7 @@ enum ThreadState { struct ThreadPoolElement { BlockDriverAIOCB common; + ThreadPool *pool; ThreadPoolFunc *func; void *arg; @@ -54,49 +57,56 @@ struct ThreadPoolElement { QLIST_ENTRY(ThreadPoolElement) all; }; -static EventNotifier notifier; -static QemuMutex lock; -static QemuCond check_cancel; -static QemuSemaphore sem; -static int max_threads = 64; -static QEMUBH *new_thread_bh; - -/* The following variables are protected by the global mutex. */ -static QLIST_HEAD(, ThreadPoolElement) head; - -/* The following variables are protected by lock. */ -static QTAILQ_HEAD(, ThreadPoolElement) request_list; -static int cur_threads; -static int idle_threads; -static int new_threads; /* backlog of threads we need to create */ -static int pending_threads; /* threads created but not running yet */ -static int pending_cancellations; /* whether we need a cond_broadcast */ - -static void *worker_thread(void *unused) +struct ThreadPool { + EventNotifier notifier; + QemuMutex lock; + QemuCond check_cancel; + QemuSemaphore sem; + int max_threads; + QEMUBH *new_thread_bh; + + /* The following variables are only accessed from one AioContext. */ + QLIST_HEAD(, ThreadPoolElement) head; + + /* The following variables are protected by lock. */ + QTAILQ_HEAD(, ThreadPoolElement) request_list; + int cur_threads; + int idle_threads; + int new_threads; /* backlog of threads we need to create */ + int pending_threads; /* threads created but not running yet */ + int pending_cancellations; /* whether we need a cond_broadcast */ +}; + +/* Currently there is only one thread pool instance. 
*/ +static ThreadPool global_pool; + +static void *worker_thread(void *opaque) { - qemu_mutex_lock(&lock); - pending_threads--; - do_spawn_thread(); + ThreadPool *pool = opaque; + + qemu_mutex_lock(&pool->lock); + pool->pending_threads--; + do_spawn_thread(pool); while (1) { ThreadPoolElement *req; int ret; do { - idle_threads++; - qemu_mutex_unlock(&lock); - ret = qemu_sem_timedwait(&sem, 10000); - qemu_mutex_lock(&lock); - idle_threads--; - } while (ret == -1 && !QTAILQ_EMPTY(&request_list)); + pool->idle_threads++; + qemu_mutex_unlock(&pool->lock); + ret = qemu_sem_timedwait(&pool->sem, 10000); + qemu_mutex_lock(&pool->lock); + pool->idle_threads--; + } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list)); if (ret == -1) { break; } - req = QTAILQ_FIRST(&request_list); - QTAILQ_REMOVE(&request_list, req, reqs); + req = QTAILQ_FIRST(&pool->request_list); + QTAILQ_REMOVE(&pool->request_list, req, reqs); req->state = THREAD_ACTIVE; - qemu_mutex_unlock(&lock); + qemu_mutex_unlock(&pool->lock); ret = req->func(req->arg); @@ -105,45 +115,47 @@ static void *worker_thread(void *unused) smp_wmb(); req->state = THREAD_DONE; - qemu_mutex_lock(&lock); - if (pending_cancellations) { - qemu_cond_broadcast(&check_cancel); + qemu_mutex_lock(&pool->lock); + if (pool->pending_cancellations) { + qemu_cond_broadcast(&pool->check_cancel); } - event_notifier_set(¬ifier); + event_notifier_set(&pool->notifier); } - cur_threads--; - qemu_mutex_unlock(&lock); + pool->cur_threads--; + qemu_mutex_unlock(&pool->lock); return NULL; } -static void do_spawn_thread(void) +static void do_spawn_thread(ThreadPool *pool) { QemuThread t; /* Runs with lock taken. */ - if (!new_threads) { + if (!pool->new_threads) { return; } - new_threads--; - pending_threads++; + pool->new_threads--; + pool->pending_threads++; - qemu_thread_create(&t, worker_thread, NULL, QEMU_THREAD_DETACHED); + qemu_thread_create(&t, worker_thread, pool, QEMU_THREAD_DETACHED); } static void spawn_thread_bh_fn(void *opaque) { - qemu_mutex_lock(&lock); - do_spawn_thread(); - qemu_mutex_unlock(&lock); + ThreadPool *pool = opaque; + + qemu_mutex_lock(&pool->lock); + do_spawn_thread(pool); + qemu_mutex_unlock(&pool->lock); } -static void spawn_thread(void) +static void spawn_thread(ThreadPool *pool) { - cur_threads++; - new_threads++; + pool->cur_threads++; + pool->new_threads++; /* If there are threads being created, they will spawn new workers, so * we don't spend time creating many threads in a loop holding a mutex or * starving the current vcpu. @@ -151,23 +163,25 @@ static void spawn_thread(void) * If there are no idle threads, ask the main thread to create one, so we * inherit the correct affinity instead of the vcpu affinity. 
*/ - if (!pending_threads) { - qemu_bh_schedule(new_thread_bh); + if (!pool->pending_threads) { + qemu_bh_schedule(pool->new_thread_bh); } } static void event_notifier_ready(EventNotifier *notifier) { + ThreadPool *pool = container_of(notifier, ThreadPool, notifier); ThreadPoolElement *elem, *next; event_notifier_test_and_clear(notifier); restart: - QLIST_FOREACH_SAFE(elem, &head, all, next) { + QLIST_FOREACH_SAFE(elem, &pool->head, all, next) { if (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) { continue; } if (elem->state == THREAD_DONE) { - trace_thread_pool_complete(elem, elem->common.opaque, elem->ret); + trace_thread_pool_complete(pool, elem, elem->common.opaque, + elem->ret); } if (elem->state == THREAD_DONE && elem->common.cb) { QLIST_REMOVE(elem, all); @@ -186,34 +200,36 @@ restart: static int thread_pool_active(EventNotifier *notifier) { - return !QLIST_EMPTY(&head); + ThreadPool *pool = container_of(notifier, ThreadPool, notifier); + return !QLIST_EMPTY(&pool->head); } static void thread_pool_cancel(BlockDriverAIOCB *acb) { ThreadPoolElement *elem = (ThreadPoolElement *)acb; + ThreadPool *pool = elem->pool; trace_thread_pool_cancel(elem, elem->common.opaque); - qemu_mutex_lock(&lock); + qemu_mutex_lock(&pool->lock); if (elem->state == THREAD_QUEUED && /* No thread has yet started working on elem. we can try to "steal" * the item from the worker if we can get a signal from the * semaphore. Because this is non-blocking, we can do it with * the lock taken and ensure that elem will remain THREAD_QUEUED. */ - qemu_sem_timedwait(&sem, 0) == 0) { - QTAILQ_REMOVE(&request_list, elem, reqs); + qemu_sem_timedwait(&pool->sem, 0) == 0) { + QTAILQ_REMOVE(&pool->request_list, elem, reqs); elem->state = THREAD_CANCELED; - event_notifier_set(¬ifier); + event_notifier_set(&pool->notifier); } else { - pending_cancellations++; + pool->pending_cancellations++; while (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) { - qemu_cond_wait(&check_cancel, &lock); + qemu_cond_wait(&pool->check_cancel, &pool->lock); } - pending_cancellations--; + pool->pending_cancellations--; } - qemu_mutex_unlock(&lock); + qemu_mutex_unlock(&pool->lock); } static const AIOCBInfo thread_pool_aiocb_info = { @@ -224,24 +240,26 @@ static const AIOCBInfo thread_pool_aiocb_info = { BlockDriverAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg, BlockDriverCompletionFunc *cb, void *opaque) { + ThreadPool *pool = &global_pool; ThreadPoolElement *req; req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque); req->func = func; req->arg = arg; req->state = THREAD_QUEUED; + req->pool = pool; - QLIST_INSERT_HEAD(&head, req, all); + QLIST_INSERT_HEAD(&pool->head, req, all); - trace_thread_pool_submit(req, arg); + trace_thread_pool_submit(pool, req, arg); - qemu_mutex_lock(&lock); - if (idle_threads == 0 && cur_threads < max_threads) { - spawn_thread(); + qemu_mutex_lock(&pool->lock); + if (pool->idle_threads == 0 && pool->cur_threads < pool->max_threads) { + spawn_thread(pool); } - QTAILQ_INSERT_TAIL(&request_list, req, reqs); - qemu_mutex_unlock(&lock); - qemu_sem_post(&sem); + QTAILQ_INSERT_TAIL(&pool->request_list, req, reqs); + qemu_mutex_unlock(&pool->lock); + qemu_sem_post(&pool->sem); return &req->common; } @@ -272,18 +290,30 @@ void thread_pool_submit(ThreadPoolFunc *func, void *arg) thread_pool_submit_aio(func, arg, NULL, NULL); } +static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) +{ + if (!ctx) { + ctx = qemu_get_aio_context(); + } + + memset(pool, 0, 
sizeof(*pool)); + event_notifier_init(&pool->notifier, false); + qemu_mutex_init(&pool->lock); + qemu_cond_init(&pool->check_cancel); + qemu_sem_init(&pool->sem, 0); + pool->max_threads = 64; + pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool); + + QLIST_INIT(&pool->head); + QTAILQ_INIT(&pool->request_list); + + aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready, + thread_pool_active); +} + static void thread_pool_init(void) { - QLIST_INIT(&head); - event_notifier_init(¬ifier, false); - qemu_mutex_init(&lock); - qemu_cond_init(&check_cancel); - qemu_sem_init(&sem, 0); - qemu_aio_set_event_notifier(¬ifier, event_notifier_ready, - thread_pool_active); - - QTAILQ_INIT(&request_list); - new_thread_bh = qemu_bh_new(spawn_thread_bh_fn, NULL); + thread_pool_init_one(&global_pool, NULL); } block_init(thread_pool_init) -- cgit 1.4.1 From f7311ccc630d925e7351e9440b7ad8bc6f0a51de Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 7 Mar 2013 13:41:46 +0100 Subject: threadpool: add thread_pool_new() and thread_pool_free() ThreadPool is tied to an AioContext through its event notifier, which dictates in which AioContext the work item's callback function will be invoked. In order to support multiple AioContexts we need to support multiple ThreadPool instances. This patch adds the new/free functions. The free function deserves special attention because it quiesces remaining worker threads. This requires a new condition variable and a "stopping" flag to let workers know they should terminate once idle. We never needed to do this before since the global threadpool was not explicitly destroyed until process termination. Also stash the AioContext pointer in ThreadPool so that we can call aio_set_event_notifier() in thread_pool_free(). We didn't need to hold onto AioContext previously since there was no free function. Signed-off-by: Stefan Hajnoczi Reviewed-by: Paolo Bonzini --- include/block/thread-pool.h | 5 +++++ thread-pool.c | 52 +++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 53 insertions(+), 4 deletions(-) (limited to 'thread-pool.c') diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h index 200703e35f..e1453c685d 100644 --- a/include/block/thread-pool.h +++ b/include/block/thread-pool.h @@ -26,6 +26,11 @@ typedef int ThreadPoolFunc(void *opaque); +typedef struct ThreadPool ThreadPool; + +ThreadPool *thread_pool_new(struct AioContext *ctx); +void thread_pool_free(ThreadPool *pool); + BlockDriverAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg, BlockDriverCompletionFunc *cb, void *opaque); int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg); diff --git a/thread-pool.c b/thread-pool.c index a0aecd08fe..d1e4570829 100644 --- a/thread-pool.c +++ b/thread-pool.c @@ -24,8 +24,6 @@ #include "qemu/event_notifier.h" #include "block/thread-pool.h" -typedef struct ThreadPool ThreadPool; - static void do_spawn_thread(ThreadPool *pool); typedef struct ThreadPoolElement ThreadPoolElement; @@ -59,8 +57,10 @@ struct ThreadPoolElement { struct ThreadPool { EventNotifier notifier; + AioContext *ctx; QemuMutex lock; QemuCond check_cancel; + QemuCond worker_stopped; QemuSemaphore sem; int max_threads; QEMUBH *new_thread_bh; @@ -75,6 +75,7 @@ struct ThreadPool { int new_threads; /* backlog of threads we need to create */ int pending_threads; /* threads created but not running yet */ int pending_cancellations; /* whether we need a cond_broadcast */ + bool stopping; }; /* Currently there is only one thread pool instance. 
*/ @@ -88,7 +89,7 @@ static void *worker_thread(void *opaque) pool->pending_threads--; do_spawn_thread(pool); - while (1) { + while (!pool->stopping) { ThreadPoolElement *req; int ret; @@ -99,7 +100,7 @@ static void *worker_thread(void *opaque) qemu_mutex_lock(&pool->lock); pool->idle_threads--; } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list)); - if (ret == -1) { + if (ret == -1 || pool->stopping) { break; } @@ -124,6 +125,7 @@ static void *worker_thread(void *opaque) } pool->cur_threads--; + qemu_cond_signal(&pool->worker_stopped); qemu_mutex_unlock(&pool->lock); return NULL; } @@ -298,8 +300,10 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) memset(pool, 0, sizeof(*pool)); event_notifier_init(&pool->notifier, false); + pool->ctx = ctx; qemu_mutex_init(&pool->lock); qemu_cond_init(&pool->check_cancel); + qemu_cond_init(&pool->worker_stopped); qemu_sem_init(&pool->sem, 0); pool->max_threads = 64; pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool); @@ -311,6 +315,46 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) thread_pool_active); } +ThreadPool *thread_pool_new(AioContext *ctx) +{ + ThreadPool *pool = g_new(ThreadPool, 1); + thread_pool_init_one(pool, ctx); + return pool; +} + +void thread_pool_free(ThreadPool *pool) +{ + if (!pool) { + return; + } + + assert(QLIST_EMPTY(&pool->head)); + + qemu_mutex_lock(&pool->lock); + + /* Stop new threads from spawning */ + qemu_bh_delete(pool->new_thread_bh); + pool->cur_threads -= pool->new_threads; + pool->new_threads = 0; + + /* Wait for worker threads to terminate */ + pool->stopping = true; + while (pool->cur_threads > 0) { + qemu_sem_post(&pool->sem); + qemu_cond_wait(&pool->worker_stopped, &pool->lock); + } + + qemu_mutex_unlock(&pool->lock); + + aio_set_event_notifier(pool->ctx, &pool->notifier, NULL, NULL); + qemu_sem_destroy(&pool->sem); + qemu_cond_destroy(&pool->check_cancel); + qemu_cond_destroy(&pool->worker_stopped); + qemu_mutex_destroy(&pool->lock); + event_notifier_cleanup(&pool->notifier); + g_free(pool); +} + static void thread_pool_init(void) { thread_pool_init_one(&global_pool, NULL); -- cgit 1.4.1 From c4d9d19645a484298a67e9021060bc7c2b081d0f Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 7 Mar 2013 13:41:49 +0100 Subject: threadpool: drop global thread pool Now that each AioContext has a ThreadPool and the main loop AioContext can be fetched with bdrv_get_aio_context(), we can eliminate the concept of a global thread pool from thread-pool.c. The submit functions must take a ThreadPool* argument. block/raw-posix.c and block/raw-win32.c use aio_get_thread_pool(bdrv_get_aio_context(bs)) to fetch the main loop's ThreadPool. tests/test-thread-pool.c must be updated to reflect the new thread_pool_submit() function prototypes. 
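For example, the paio_submit() change in block/raw-posix.c (condensed from
the hunks below; acb, cb, opaque and bs are the surrounding variables in that
function) moves from the implicit global pool:

    return thread_pool_submit_aio(aio_worker, acb, cb, opaque);

to an explicit per-AioContext pool:

    ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
    return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
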
Signed-off-by: Stefan Hajnoczi Reviewed-by: Paolo Bonzini --- block/raw-posix.c | 8 ++++++-- block/raw-win32.c | 4 +++- include/block/thread-pool.h | 10 ++++++---- tests/test-thread-pool.c | 44 +++++++++++++++++++++----------------------- thread-pool.c | 23 +++++++---------------- 5 files changed, 43 insertions(+), 46 deletions(-) (limited to 'thread-pool.c') diff --git a/block/raw-posix.c b/block/raw-posix.c index 4dfdf985b0..8a3cdbc1f3 100644 --- a/block/raw-posix.c +++ b/block/raw-posix.c @@ -750,6 +750,7 @@ static BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd, BlockDriverCompletionFunc *cb, void *opaque, int type) { RawPosixAIOData *acb = g_slice_new(RawPosixAIOData); + ThreadPool *pool; acb->bs = bs; acb->aio_type = type; @@ -763,7 +764,8 @@ static BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd, acb->aio_offset = sector_num * 512; trace_paio_submit(acb, opaque, sector_num, nb_sectors, type); - return thread_pool_submit_aio(aio_worker, acb, cb, opaque); + pool = aio_get_thread_pool(bdrv_get_aio_context(bs)); + return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque); } static BlockDriverAIOCB *raw_aio_submit(BlockDriverState *bs, @@ -1413,6 +1415,7 @@ static BlockDriverAIOCB *hdev_aio_ioctl(BlockDriverState *bs, { BDRVRawState *s = bs->opaque; RawPosixAIOData *acb; + ThreadPool *pool; if (fd_open(bs) < 0) return NULL; @@ -1424,7 +1427,8 @@ static BlockDriverAIOCB *hdev_aio_ioctl(BlockDriverState *bs, acb->aio_offset = 0; acb->aio_ioctl_buf = buf; acb->aio_ioctl_cmd = req; - return thread_pool_submit_aio(aio_worker, acb, cb, opaque); + pool = aio_get_thread_pool(bdrv_get_aio_context(bs)); + return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque); } #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) diff --git a/block/raw-win32.c b/block/raw-win32.c index b89ac19ffa..18e0068b26 100644 --- a/block/raw-win32.c +++ b/block/raw-win32.c @@ -144,6 +144,7 @@ static BlockDriverAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile, BlockDriverCompletionFunc *cb, void *opaque, int type) { RawWin32AIOData *acb = g_slice_new(RawWin32AIOData); + ThreadPool *pool; acb->bs = bs; acb->hfile = hfile; @@ -157,7 +158,8 @@ static BlockDriverAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile, acb->aio_offset = sector_num * 512; trace_paio_submit(acb, opaque, sector_num, nb_sectors, type); - return thread_pool_submit_aio(aio_worker, acb, cb, opaque); + pool = aio_get_thread_pool(bdrv_get_aio_context(bs)); + return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque); } int qemu_ftruncate64(int fd, int64_t length) diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h index e1453c685d..32afcdd1d6 100644 --- a/include/block/thread-pool.h +++ b/include/block/thread-pool.h @@ -31,9 +31,11 @@ typedef struct ThreadPool ThreadPool; ThreadPool *thread_pool_new(struct AioContext *ctx); void thread_pool_free(ThreadPool *pool); -BlockDriverAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg, - BlockDriverCompletionFunc *cb, void *opaque); -int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg); -void thread_pool_submit(ThreadPoolFunc *func, void *arg); +BlockDriverAIOCB *thread_pool_submit_aio(ThreadPool *pool, + ThreadPoolFunc *func, void *arg, + BlockDriverCompletionFunc *cb, void *opaque); +int coroutine_fn thread_pool_submit_co(ThreadPool *pool, + ThreadPoolFunc *func, void *arg); +void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg); #endif diff --git a/tests/test-thread-pool.c 
b/tests/test-thread-pool.c index 9998e031f2..22915aac10 100644 --- a/tests/test-thread-pool.c +++ b/tests/test-thread-pool.c @@ -4,6 +4,8 @@ #include "block/thread-pool.h" #include "block/block.h" +static AioContext *ctx; +static ThreadPool *pool; static int active; typedef struct { @@ -38,19 +40,10 @@ static void done_cb(void *opaque, int ret) active--; } -/* A non-blocking poll of the main AIO context (we cannot use aio_poll - * because we do not know the AioContext). - */ -static void qemu_aio_wait_nonblocking(void) -{ - qemu_notify_event(); - qemu_aio_wait(); -} - /* Wait until all aio and bh activity has finished */ static void qemu_aio_wait_all(void) { - while (qemu_aio_wait()) { + while (aio_poll(ctx, true)) { /* Do nothing */ } } @@ -58,7 +51,7 @@ static void qemu_aio_wait_all(void) static void test_submit(void) { WorkerTestData data = { .n = 0 }; - thread_pool_submit(worker_cb, &data); + thread_pool_submit(pool, worker_cb, &data); qemu_aio_wait_all(); g_assert_cmpint(data.n, ==, 1); } @@ -66,7 +59,8 @@ static void test_submit(void) static void test_submit_aio(void) { WorkerTestData data = { .n = 0, .ret = -EINPROGRESS }; - data.aiocb = thread_pool_submit_aio(worker_cb, &data, done_cb, &data); + data.aiocb = thread_pool_submit_aio(pool, worker_cb, &data, + done_cb, &data); /* The callbacks are not called until after the first wait. */ active = 1; @@ -84,7 +78,7 @@ static void co_test_cb(void *opaque) active = 1; data->n = 0; data->ret = -EINPROGRESS; - thread_pool_submit_co(worker_cb, data); + thread_pool_submit_co(pool, worker_cb, data); /* The test continues in test_submit_co, after qemu_coroutine_enter... */ @@ -126,12 +120,12 @@ static void test_submit_many(void) for (i = 0; i < 100; i++) { data[i].n = 0; data[i].ret = -EINPROGRESS; - thread_pool_submit_aio(worker_cb, &data[i], done_cb, &data[i]); + thread_pool_submit_aio(pool, worker_cb, &data[i], done_cb, &data[i]); } active = 100; while (active > 0) { - qemu_aio_wait(); + aio_poll(ctx, true); } for (i = 0; i < 100; i++) { g_assert_cmpint(data[i].n, ==, 1); @@ -154,7 +148,7 @@ static void test_cancel(void) for (i = 0; i < 100; i++) { data[i].n = 0; data[i].ret = -EINPROGRESS; - data[i].aiocb = thread_pool_submit_aio(long_cb, &data[i], + data[i].aiocb = thread_pool_submit_aio(pool, long_cb, &data[i], done_cb, &data[i]); } @@ -162,7 +156,8 @@ static void test_cancel(void) * run, but do not waste too much time... */ active = 100; - qemu_aio_wait_nonblocking(); + aio_notify(ctx); + aio_poll(ctx, false); /* Wait some time for the threads to start, with some sanity * testing on the behavior of the scheduler... @@ -208,11 +203,10 @@ static void test_cancel(void) int main(int argc, char **argv) { - /* These should be removed once each AioContext has its thread pool. - * The test should create its own AioContext. 
- */ - qemu_init_main_loop(); - bdrv_init(); + int ret; + + ctx = aio_context_new(); + pool = aio_get_thread_pool(ctx); g_test_init(&argc, &argv, NULL); g_test_add_func("/thread-pool/submit", test_submit); @@ -220,5 +214,9 @@ int main(int argc, char **argv) g_test_add_func("/thread-pool/submit-co", test_submit_co); g_test_add_func("/thread-pool/submit-many", test_submit_many); g_test_add_func("/thread-pool/cancel", test_cancel); - return g_test_run(); + + ret = g_test_run(); + + aio_context_unref(ctx); + return ret; } diff --git a/thread-pool.c b/thread-pool.c index d1e4570829..0ebd4c2964 100644 --- a/thread-pool.c +++ b/thread-pool.c @@ -78,9 +78,6 @@ struct ThreadPool { bool stopping; }; -/* Currently there is only one thread pool instance. */ -static ThreadPool global_pool; - static void *worker_thread(void *opaque) { ThreadPool *pool = opaque; @@ -239,10 +236,10 @@ static const AIOCBInfo thread_pool_aiocb_info = { .cancel = thread_pool_cancel, }; -BlockDriverAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg, +BlockDriverAIOCB *thread_pool_submit_aio(ThreadPool *pool, + ThreadPoolFunc *func, void *arg, BlockDriverCompletionFunc *cb, void *opaque) { - ThreadPool *pool = &global_pool; ThreadPoolElement *req; req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque); @@ -278,18 +275,19 @@ static void thread_pool_co_cb(void *opaque, int ret) qemu_coroutine_enter(co->co, NULL); } -int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg) +int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func, + void *arg) { ThreadPoolCo tpc = { .co = qemu_coroutine_self(), .ret = -EINPROGRESS }; assert(qemu_in_coroutine()); - thread_pool_submit_aio(func, arg, thread_pool_co_cb, &tpc); + thread_pool_submit_aio(pool, func, arg, thread_pool_co_cb, &tpc); qemu_coroutine_yield(); return tpc.ret; } -void thread_pool_submit(ThreadPoolFunc *func, void *arg) +void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg) { - thread_pool_submit_aio(func, arg, NULL, NULL); + thread_pool_submit_aio(pool, func, arg, NULL, NULL); } static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) @@ -354,10 +352,3 @@ void thread_pool_free(ThreadPool *pool) event_notifier_cleanup(&pool->notifier); g_free(pool); } - -static void thread_pool_init(void) -{ - thread_pool_init_one(&global_pool, NULL); -} - -block_init(thread_pool_init) -- cgit 1.4.1
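
[Editor's note] After this series the thread pool's lifecycle is fully
explicit: create a pool bound to an AioContext, submit work, poll the context
for completions, then free the pool. A minimal sketch of the resulting API,
modeled on tests/test-thread-pool.c above — worker_fn and
thread_pool_example are hypothetical names, QEMU's headers and build
environment are assumed, and error handling is omitted:

    #include "block/aio.h"
    #include "block/thread-pool.h"

    /* Hypothetical work item: runs in a pool worker thread, returns its
     * result code (delivered to the completion callback, if any). */
    static int worker_fn(void *opaque)
    {
        int *n = opaque;
        (*n)++;
        return 0;
    }

    static void thread_pool_example(void)
    {
        AioContext *ctx = aio_context_new();
        ThreadPool *pool = thread_pool_new(ctx);
        int n = 0;

        thread_pool_submit(pool, worker_fn, &n);

        /* Completion is signalled through ctx's event notifier, so poll
         * the AioContext until it reports no further progress, as the
         * test's qemu_aio_wait_all() does. */
        while (aio_poll(ctx, true)) {
            /* do nothing */
        }

        thread_pool_free(pool);     /* quiesces worker threads, then frees */
        aio_context_unref(ctx);
    }

A caller that only needs the pool belonging to an existing context — as
block/raw-posix.c and block/raw-win32.c now do — fetches it with
aio_get_thread_pool(bdrv_get_aio_context(bs)) instead of creating its own.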