| author | John Levon <levon@movementarian.org> | 2025-06-25 20:30:05 +0100 |
|---|---|---|
| committer | Cédric Le Goater <clg@redhat.com> | 2025-06-26 08:55:38 +0200 |
| commit | 18e899e63dd9d05e567c081023ad7fc5b2c5dc9a (patch) | |
| tree | 3ae2f6b165b49a9669ed9f8d727255d1c2af5542 /hw/vfio-user/container.c | |
| parent | 019232358124e4f4b929e40fa23253de60eec73e (diff) | |
vfio-user: implement VFIO_USER_DMA_MAP/UNMAP
When the vfio-user container gets mapping updates, share them with the
vfio-user server by sending a message; this can include the region fd,
allowing the server to directly mmap() the region as needed.

For performance, we only wait for the message responses when we're done
with a series of updates, via the listener_commit() callback.

Originally-by: John Johnson <john.g.johnson@oracle.com>
Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
Signed-off-by: John Levon <john.levon@nutanix.com>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Link: https://lore.kernel.org/qemu-devel/20250625193012.2316242-14-john.levon@nutanix.com
Signed-off-by: Cédric Le Goater <clg@redhat.com>
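For context, a sketch of the message payloads the patch fills in, inferred from the fields the new code sets. The authoritative definitions live in QEMU's hw/vfio-user/protocol.h; treat the exact types and layout below as assumptions:

```c
/*
 * Sketch of the VFIO_USER_DMA_MAP payload, inferred from the fields this
 * patch fills in (hdr, argsz, flags, offset, iova, size). The real
 * definitions are in QEMU's hw/vfio-user/protocol.h; exact widths and
 * ordering here are assumptions.
 */
#include <stdint.h>

typedef struct {
    uint16_t id;          /* message ID (assumed header layout) */
    uint16_t command;     /* e.g. VFIO_USER_DMA_MAP */
    uint32_t size;        /* total message size */
    uint32_t flags;       /* VFIO_USER_ERROR, reply bits, ... */
    uint32_t error_reply; /* errno carried back on error replies */
} VFIOUserHdr;

typedef struct {
    VFIOUserHdr hdr;
    uint32_t argsz;       /* sizeof(struct vfio_iommu_type1_dma_map) */
    uint32_t flags;       /* VFIO_DMA_MAP_FLAG_READ / _WRITE */
    uint64_t offset;      /* offset into the region fd, or 0 if no fd */
    uint64_t iova;        /* guest IOVA of the mapping */
    uint64_t size;        /* length of the mapping */
} VFIOUserDMAMap;
```

The unmap payload is analogous, mirroring struct vfio_iommu_type1_dma_unmap, with the region fd omitted.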
Diffstat (limited to 'hw/vfio-user/container.c')
| -rw-r--r-- | hw/vfio-user/container.c | 135 |
1 file changed, 133 insertions, 2 deletions
```diff
diff --git a/hw/vfio-user/container.c b/hw/vfio-user/container.c
index b4a5a840b0..3133fef177 100644
--- a/hw/vfio-user/container.c
+++ b/hw/vfio-user/container.c
@@ -12,23 +12,152 @@
 #include "hw/vfio-user/container.h"
 #include "hw/vfio-user/device.h"
+#include "hw/vfio-user/trace.h"
 #include "hw/vfio/vfio-cpr.h"
 #include "hw/vfio/vfio-device.h"
 #include "hw/vfio/vfio-listener.h"
 #include "qapi/error.h"
 
+/*
+ * When DMA space is the physical address space, the region add/del listeners
+ * will fire during memory update transactions. These depend on BQL being held,
+ * so do any resulting map/demap ops async while keeping BQL.
+ */
+static void vfio_user_listener_begin(VFIOContainerBase *bcontainer)
+{
+    VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+                                                bcontainer);
+
+    container->proxy->async_ops = true;
+}
+
+static void vfio_user_listener_commit(VFIOContainerBase *bcontainer)
+{
+    VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+                                                bcontainer);
+
+    /* wait here for any async requests sent during the transaction */
+    container->proxy->async_ops = false;
+    vfio_user_wait_reqs(container->proxy);
+}
+
 static int vfio_user_dma_unmap(const VFIOContainerBase *bcontainer,
                                hwaddr iova, ram_addr_t size,
                                IOMMUTLBEntry *iotlb, bool unmap_all)
 {
-    return -ENOTSUP;
+    VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+                                                bcontainer);
+    Error *local_err = NULL;
+    int ret = 0;
+
+    VFIOUserDMAUnmap *msgp = g_malloc(sizeof(*msgp));
+
+    vfio_user_request_msg(&msgp->hdr, VFIO_USER_DMA_UNMAP, sizeof(*msgp), 0);
+    msgp->argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
+    msgp->flags = unmap_all ? VFIO_DMA_UNMAP_FLAG_ALL : 0;
+    msgp->iova = iova;
+    msgp->size = size;
+    trace_vfio_user_dma_unmap(msgp->iova, msgp->size, msgp->flags,
+                              container->proxy->async_ops);
+
+    if (container->proxy->async_ops) {
+        if (!vfio_user_send_nowait(container->proxy, &msgp->hdr, NULL,
+                                   0, &local_err)) {
+            error_report_err(local_err);
+            ret = -EFAULT;
+        } else {
+            ret = 0;
+        }
+    } else {
+        if (!vfio_user_send_wait(container->proxy, &msgp->hdr, NULL,
+                                 0, &local_err)) {
+            error_report_err(local_err);
+            ret = -EFAULT;
+        }
+
+        if (msgp->hdr.flags & VFIO_USER_ERROR) {
+            ret = -msgp->hdr.error_reply;
+        }
+
+        g_free(msgp);
+    }
+
+    return ret;
 }
 
 static int vfio_user_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
                              ram_addr_t size, void *vaddr, bool readonly,
                              MemoryRegion *mrp)
 {
-    return -ENOTSUP;
+    VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+                                                bcontainer);
+    int fd = memory_region_get_fd(mrp);
+    Error *local_err = NULL;
+    int ret;
+
+    VFIOUserFDs *fds = NULL;
+    VFIOUserDMAMap *msgp = g_malloc0(sizeof(*msgp));
+
+    vfio_user_request_msg(&msgp->hdr, VFIO_USER_DMA_MAP, sizeof(*msgp), 0);
+    msgp->argsz = sizeof(struct vfio_iommu_type1_dma_map);
+    msgp->flags = VFIO_DMA_MAP_FLAG_READ;
+    msgp->offset = 0;
+    msgp->iova = iova;
+    msgp->size = size;
+
+    /*
+     * vaddr enters as a QEMU process address; make it either a file offset
+     * for mapped areas or leave as 0.
+     */
+    if (fd != -1) {
+        msgp->offset = qemu_ram_block_host_offset(mrp->ram_block, vaddr);
+    }
+
+    if (!readonly) {
+        msgp->flags |= VFIO_DMA_MAP_FLAG_WRITE;
+    }
+
+    trace_vfio_user_dma_map(msgp->iova, msgp->size, msgp->offset, msgp->flags,
+                            container->proxy->async_ops);
+
+    /*
+     * The async_ops case sends without blocking. They're later waited for in
+     * vfio_send_wait_reqs.
+     */
+    if (container->proxy->async_ops) {
+        /* can't use auto variable since we don't block */
+        if (fd != -1) {
+            fds = vfio_user_getfds(1);
+            fds->send_fds = 1;
+            fds->fds[0] = fd;
+        }
+
+        if (!vfio_user_send_nowait(container->proxy, &msgp->hdr, fds,
+                                   0, &local_err)) {
+            error_report_err(local_err);
+            ret = -EFAULT;
+        } else {
+            ret = 0;
+        }
+    } else {
+        VFIOUserFDs local_fds = { 1, 0, &fd };
+
+        fds = fd != -1 ? &local_fds : NULL;
+
+        if (!vfio_user_send_wait(container->proxy, &msgp->hdr, fds,
+                                 0, &local_err)) {
+            error_report_err(local_err);
+            ret = -EFAULT;
+        }
+
+        if (msgp->hdr.flags & VFIO_USER_ERROR) {
+            ret = -msgp->hdr.error_reply;
+        }
+
+        g_free(msgp);
+    }
+
+    return ret;
 }
 
 static int
@@ -218,6 +347,8 @@ static void vfio_iommu_user_class_init(ObjectClass *klass, const void *data)
     VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);
 
     vioc->setup = vfio_user_setup;
+    vioc->listener_begin = vfio_user_listener_begin,
+    vioc->listener_commit = vfio_user_listener_commit,
     vioc->dma_map = vfio_user_dma_map;
     vioc->dma_unmap = vfio_user_dma_unmap;
     vioc->attach_device = vfio_user_device_attach;
```
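As the commit message notes, shipping the region fd lets the server mmap() the client's RAM block directly. A hypothetical server-side sketch of honouring such a map request — not part of this patch, and the function name is illustrative:

```c
/*
 * Hypothetical sketch (not part of this patch): how a vfio-user server
 * could honour VFIO_USER_DMA_MAP by mmap()ing the region fd it received
 * alongside the message. The flag macros come from the Linux VFIO UAPI.
 */
#include <stdint.h>
#include <sys/mman.h>
#include <linux/vfio.h>   /* VFIO_DMA_MAP_FLAG_READ / _WRITE */

static void *server_map_client_ram(int region_fd, uint64_t fd_offset,
                                   uint64_t size, uint32_t map_flags)
{
    int prot = 0;

    if (map_flags & VFIO_DMA_MAP_FLAG_READ) {
        prot |= PROT_READ;
    }
    if (map_flags & VFIO_DMA_MAP_FLAG_WRITE) {
        prot |= PROT_WRITE;
    }

    /*
     * fd_offset is the offset the client computed with
     * qemu_ram_block_host_offset(), so the server's mapping lands on
     * the same bytes of guest RAM the client described.
     */
    return mmap(NULL, size, prot, MAP_SHARED, region_fd, (off_t)fd_offset);
}
```

This is why the client sends a file offset rather than its own vaddr: a process address is meaningless across the socket, but (fd, offset) identifies the same backing memory in any process.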