From c2cba0ffe495b60c4cc58080281e99c7a6580d4b Mon Sep 17 00:00:00 2001
From: Fam Zheng
Date: Mon, 16 Mar 2015 17:03:33 +0800
Subject: exec: Atomic access to bounce buffer

There could be a race condition when two processes call
address_space_map concurrently and both want to use the bounce buffer.

Add an in_use flag in BounceBuffer to sync it.

Signed-off-by: Fam Zheng
Message-Id: <1426496617-10702-2-git-send-email-famz@redhat.com>
Signed-off-by: Paolo Bonzini
---
 exec.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'exec.c')

diff --git a/exec.c b/exec.c
index 874ecfc2c6..5a1c70085c 100644
--- a/exec.c
+++ b/exec.c
@@ -2482,6 +2482,7 @@ typedef struct {
     void *buffer;
     hwaddr addr;
     hwaddr len;
+    bool in_use;
 } BounceBuffer;
 
 static BounceBuffer bounce;
@@ -2570,7 +2571,7 @@ void *address_space_map(AddressSpace *as,
     l = len;
     mr = address_space_translate(as, addr, &xlat, &l, is_write);
     if (!memory_access_is_direct(mr, is_write)) {
-        if (bounce.buffer) {
+        if (atomic_xchg(&bounce.in_use, true)) {
            return NULL;
         }
         /* Avoid unbounded allocations */
@@ -2640,6 +2641,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
     qemu_vfree(bounce.buffer);
     bounce.buffer = NULL;
     memory_region_unref(bounce.mr);
+    atomic_mb_set(&bounce.in_use, false);
     cpu_notify_map_clients();
 }
 
--
cgit 1.4.1


From 38e047b50d2bfd1df99fbbca884c9f1db0785ff4 Mon Sep 17 00:00:00 2001
From: Fam Zheng
Date: Mon, 16 Mar 2015 17:03:35 +0800
Subject: exec: Protect map_client_list with mutex

So that accesses from multiple threads are safe.

Signed-off-by: Fam Zheng
Message-Id: <1426496617-10702-4-git-send-email-famz@redhat.com>
[Remove #if from cpu_exec_init_all. - Paolo]
Signed-off-by: Paolo Bonzini
---
 exec.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

(limited to 'exec.c')

diff --git a/exec.c b/exec.c
index 5a1c70085c..81666d3d6d 100644
--- a/exec.c
+++ b/exec.c
@@ -429,15 +429,6 @@ address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
 }
 #endif
 
-void cpu_exec_init_all(void)
-{
-#if !defined(CONFIG_USER_ONLY)
-    qemu_mutex_init(&ram_list.mutex);
-    memory_map_init();
-    io_mem_init();
-#endif
-}
-
 #if !defined(CONFIG_USER_ONLY)
 
 static int cpu_common_post_load(void *opaque, int version_id)
@@ -2493,6 +2484,7 @@ typedef struct MapClient {
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
+QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
@@ -2500,12 +2492,22 @@ void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
 {
     MapClient *client = g_malloc(sizeof(*client));
 
+    qemu_mutex_lock(&map_client_list_lock);
     client->opaque = opaque;
     client->callback = callback;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
+    qemu_mutex_unlock(&map_client_list_lock);
     return client;
 }
 
+void cpu_exec_init_all(void)
+{
+    qemu_mutex_init(&ram_list.mutex);
+    memory_map_init();
+    io_mem_init();
+    qemu_mutex_init(&map_client_list_lock);
+}
+
 static void cpu_unregister_map_client(void *_client)
 {
     MapClient *client = (MapClient *)_client;
@@ -2518,11 +2520,13 @@ static void cpu_notify_map_clients(void)
 {
     MapClient *client;
 
+    qemu_mutex_lock(&map_client_list_lock);
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
         client->callback(client->opaque);
         cpu_unregister_map_client(client);
     }
+    qemu_mutex_unlock(&map_client_list_lock);
 }
 
 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
--
cgit 1.4.1
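The atomic_xchg() in address_space_map() works as a try-lock on the single
global bounce buffer: it sets in_use to true and returns the previous value,
so exactly one thread wins the claim, and the atomic_mb_set() in
address_space_unmap() releases it with the ordering needed to publish the
buffer state first. A minimal, self-contained sketch of the same
claim/release pattern, written with C11 atomics instead of QEMU's
atomic_xchg/atomic_mb_set helpers (bounce_try_claim and bounce_release are
illustrative names, not from the patch):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
        void *buffer;
        atomic_bool in_use;              /* stands in for bounce.in_use */
    } BounceBuffer;

    static BounceBuffer bounce;

    /* Try to claim the buffer; false means another thread already owns it. */
    static bool bounce_try_claim(void)
    {
        /* atomic_exchange returns the old value, like atomic_xchg does. */
        return !atomic_exchange(&bounce.in_use, true);
    }

    static void bounce_release(void)
    {
        /* Release ordering makes prior writes to the buffer visible before
         * the flag clears, mirroring atomic_mb_set() in the patch. */
        atomic_store_explicit(&bounce.in_use, false, memory_order_release);
    }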
From 33b6c2edf6214f02b9beaea61b169506c01f90aa Mon Sep 17 00:00:00 2001
From: Fam Zheng
Date: Mon, 16 Mar 2015 17:03:36 +0800
Subject: exec: Notify cpu_register_map_client caller if the bounce buffer is available

A caller's workflow looks like this:

    if (!address_space_map()) {
        ...
        cpu_register_map_client();
    }

If the bounce buffer becomes available after address_space_map() but
before cpu_register_map_client(), the caller could miss it and would
have to wait for the next bounce buffer notification, which may never
come in the worst case.

Just notify the list in cpu_register_map_client().

Signed-off-by: Fam Zheng
Message-Id: <1426496617-10702-5-git-send-email-famz@redhat.com>
Signed-off-by: Paolo Bonzini
---
 exec.c | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

(limited to 'exec.c')

diff --git a/exec.c b/exec.c
index 81666d3d6d..2c87f1d6d6 100644
--- a/exec.c
+++ b/exec.c
@@ -2488,6 +2488,18 @@ QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
+static void cpu_unregister_map_client(void *_client);
+static void cpu_notify_map_clients_locked(void)
+{
+    MapClient *client;
+
+    while (!QLIST_EMPTY(&map_client_list)) {
+        client = QLIST_FIRST(&map_client_list);
+        client->callback(client->opaque);
+        cpu_unregister_map_client(client);
+    }
+}
+
 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
 {
     MapClient *client = g_malloc(sizeof(*client));
@@ -2496,6 +2508,9 @@ void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
     client->opaque = opaque;
     client->callback = callback;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
+    if (!atomic_read(&bounce.in_use)) {
+        cpu_notify_map_clients_locked();
+    }
     qemu_mutex_unlock(&map_client_list_lock);
     return client;
 }
@@ -2518,14 +2533,8 @@ static void cpu_unregister_map_client(void *_client)
 
 static void cpu_notify_map_clients(void)
 {
-    MapClient *client;
-
     qemu_mutex_lock(&map_client_list_lock);
-    while (!QLIST_EMPTY(&map_client_list)) {
-        client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
-    }
+    cpu_notify_map_clients_locked();
     qemu_mutex_unlock(&map_client_list_lock);
 }
 
--
cgit 1.4.1
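A hedged sketch of the caller workflow quoted in the commit message, against
the cpu_register_map_client() signature as it still stands after this patch;
MyDMAState, my_dma_start and my_dma_retry are illustrative names, not QEMU
code:

    typedef struct MyDMAState {
        AddressSpace *as;
        hwaddr addr;
        hwaddr len;
        bool is_write;
    } MyDMAState;

    static void my_dma_start(MyDMAState *s);

    static void my_dma_retry(void *opaque)
    {
        my_dma_start(opaque);            /* try address_space_map() again */
    }

    static void my_dma_start(MyDMAState *s)
    {
        void *p = address_space_map(s->as, s->addr, &s->len, s->is_write);
        if (!p) {
            /* Bounce buffer busy: ask to be called back once it is unmapped.
             * Before this patch, a release happening between the two calls
             * could be missed forever; now the registration re-checks
             * bounce.in_use and notifies immediately. */
            cpu_register_map_client(s, my_dma_retry);
            return;
        }
        /* ... run the transfer against p, then address_space_unmap() ... */
    }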
From e95205e1f9cd2c4262b7a7b1c992a94512c86d0e Mon Sep 17 00:00:00 2001
From: Fam Zheng
Date: Mon, 16 Mar 2015 17:03:37 +0800
Subject: dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel

If the DMA-owning thread cancels the I/O while the bounce-buffer-owning
thread is notifying the "cpu client list", a use-after-free happens:

    continue_after_map_failure           dma_aio_cancel
    ------------------------------------------------------------------
    aio_bh_new
                                         qemu_bh_delete
    qemu_bh_schedule (use after free)

Also, the old code doesn't run the bh in the right AioContext.

Fix both problems by passing a QEMUBH to cpu_register_map_client.

Signed-off-by: Fam Zheng
Reviewed-by: Paolo Bonzini
Message-Id: <1426496617-10702-6-git-send-email-famz@redhat.com>
[Remove unnecessary forward declaration. - Paolo]
Signed-off-by: Paolo Bonzini
---
 dma-helpers.c             | 17 ++++++++---------
 exec.c                    | 34 +++++++++++++++++++++-------------
 include/exec/cpu-common.h |  3 ++-
 3 files changed, 31 insertions(+), 23 deletions(-)

(limited to 'exec.c')

diff --git a/dma-helpers.c b/dma-helpers.c
index 6918572e18..1fddf6a11c 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -92,14 +92,6 @@ static void reschedule_dma(void *opaque)
     dma_blk_cb(dbs, 0);
 }
 
-static void continue_after_map_failure(void *opaque)
-{
-    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-
-    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
-    qemu_bh_schedule(dbs->bh);
-}
-
 static void dma_blk_unmap(DMAAIOCB *dbs)
 {
     int i;
@@ -161,7 +153,9 @@ static void dma_blk_cb(void *opaque, int ret)
 
     if (dbs->iov.size == 0) {
         trace_dma_map_wait(dbs);
-        cpu_register_map_client(dbs, continue_after_map_failure);
+        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
+                             reschedule_dma, dbs);
+        cpu_register_map_client(dbs->bh);
         return;
     }
 
@@ -183,6 +177,11 @@ static void dma_aio_cancel(BlockAIOCB *acb)
     if (dbs->acb) {
         blk_aio_cancel_async(dbs->acb);
     }
+    if (dbs->bh) {
+        cpu_unregister_map_client(dbs->bh);
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
 }
 
diff --git a/exec.c b/exec.c
index 2c87f1d6d6..065f5e8360 100644
--- a/exec.c
+++ b/exec.c
@@ -2479,8 +2479,7 @@ typedef struct {
 static BounceBuffer bounce;
 
 typedef struct MapClient {
-    void *opaque;
-    void (*callback)(void *opaque);
+    QEMUBH *bh;
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
@@ -2488,31 +2487,34 @@ QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
-static void cpu_unregister_map_client(void *_client);
+static void cpu_unregister_map_client_do(MapClient *client)
+{
+    QLIST_REMOVE(client, link);
+    g_free(client);
+}
+
 static void cpu_notify_map_clients_locked(void)
 {
     MapClient *client;
 
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
+        qemu_bh_schedule(client->bh);
+        cpu_unregister_map_client_do(client);
     }
 }
 
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+void cpu_register_map_client(QEMUBH *bh)
 {
     MapClient *client = g_malloc(sizeof(*client));
 
     qemu_mutex_lock(&map_client_list_lock);
-    client->opaque = opaque;
-    client->callback = callback;
+    client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
     if (!atomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_locked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
-    return client;
 }
 
 void cpu_exec_init_all(void)
@@ -2523,12 +2525,18 @@ void cpu_exec_init_all(void)
     qemu_mutex_init(&map_client_list_lock);
 }
 
-static void cpu_unregister_map_client(void *_client)
+void cpu_unregister_map_client(QEMUBH *bh)
 {
-    MapClient *client = (MapClient *)_client;
+    MapClient *client;
 
-    QLIST_REMOVE(client, link);
-    g_free(client);
+    qemu_mutex_lock(&map_client_list_lock);
+    QLIST_FOREACH(client, &map_client_list, link) {
+        if (client->bh == bh) {
+            cpu_unregister_map_client_do(client);
+            break;
+        }
+    }
+    qemu_mutex_unlock(&map_client_list_lock);
 }
 
 static void cpu_notify_map_clients(void)
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index fcc316271e..43428bd030 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -82,7 +82,8 @@ void *cpu_physical_memory_map(hwaddr addr,
                               int is_write);
 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                                int is_write, hwaddr access_len);
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+void cpu_register_map_client(QEMUBH *bh);
+void cpu_unregister_map_client(QEMUBH *bh);
 
 bool cpu_physical_memory_is_io(hwaddr phys_addr);
--
cgit 1.4.1
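The usage pattern the new QEMUBH-based interface expects, abstracted from the
dma-helpers.c hunks above into a hedged sketch; it extends the illustrative
MyDMAState from the previous note with QEMUBH *bh and BlockBackend *blk
fields, and my_map_failed/my_cancel are made-up names:

    static void my_map_failed(MyDMAState *s)
    {
        /* Create the BH in the block backend's AioContext so the retry runs
         * in the right thread, then hand it to the map-client list. */
        s->bh = aio_bh_new(blk_get_aio_context(s->blk), my_dma_retry, s);
        cpu_register_map_client(s->bh);
    }

    static void my_cancel(MyDMAState *s)
    {
        if (s->bh) {
            /* Unregister before deleting, so the notifier can never schedule
             * a BH that has already been freed. */
            cpu_unregister_map_client(s->bh);
            qemu_bh_delete(s->bh);
            s->bh = NULL;
        }
    }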
From 23820dbfc79d1c9dce090b4c555994f2bb6a69b3 Mon Sep 17 00:00:00 2001
From: Peter Crosthwaite
Date: Mon, 16 Mar 2015 22:35:54 -0700
Subject: exec: Respect as_translate_internal length clamp

address_space_translate_internal will clamp the *plen length argument
based on the size of the memory region being queried. The IOMMU walker
logic in address_space_translate was ignoring this by discarding the
post-call value of *plen. Fix by always using *plen as the length
argument throughout the function, removing the len local variable.
This fixes a bootloader bug when a single ELF section spans multiple
QEMU memory regions.

Signed-off-by: Peter Crosthwaite
Message-Id: <1426570554-15940-1-git-send-email-peter.crosthwaite@xilinx.com>
Signed-off-by: Paolo Bonzini
---
 exec.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'exec.c')

diff --git a/exec.c b/exec.c
index 065f5e8360..4717928cff 100644
--- a/exec.c
+++ b/exec.c
@@ -380,7 +380,6 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
     IOMMUTLBEntry iotlb;
     MemoryRegionSection *section;
     MemoryRegion *mr;
-    hwaddr len = *plen;
 
     rcu_read_lock();
     for (;;) {
@@ -395,7 +394,7 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
         iotlb = mr->iommu_ops->translate(mr, addr, is_write);
         addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                 | (addr & iotlb.addr_mask));
-        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
+        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
         if (!(iotlb.perm & (1 << is_write))) {
             mr = &io_mem_unassigned;
             break;
@@ -406,10 +405,9 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
     if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
         hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
-        len = MIN(page, len);
+        *plen = MIN(page, *plen);
     }
 
-    *plen = len;
     *xlat = addr;
     rcu_read_unlock();
     return mr;
--
cgit 1.4.1
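The clamp expression that now updates *plen directly can be checked in
isolation. A self-contained arithmetic sketch with made-up values (plain C,
not QEMU code):

    #include <inttypes.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        /* Pretend the IOMMU returned a 4 KiB page (addr_mask = 0xfff) and the
         * translated address lands 0x100 bytes into that page. */
        uint64_t addr = 0x1000100;
        uint64_t addr_mask = 0xfff;
        uint64_t plen = 0x10000;         /* the caller asked for 64 KiB */

        /* Same expression as the patch: bytes left up to the end of the page. */
        plen = MIN(plen, (addr | addr_mask) - addr + 1);

        printf("clamped length: 0x%" PRIx64 "\n", plen);  /* clamped length: 0xf00 */
        return 0;
    }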