Diffstat (limited to 'hw/virtio/vhost-vdpa.c')
-rw-r--r--  hw/virtio/vhost-vdpa.c | 164
1 file changed, 85 insertions(+), 79 deletions(-)
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 819b2d811a..ddae494ca8 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -86,14 +86,14 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
  * The caller must set asid = 0 if the device does not support asid.
  * This is not an ABI break since it is set to 0 by the initializer anyway.
  */
-int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                        hwaddr size, void *vaddr, bool readonly)
 {
     struct vhost_msg_v2 msg = {};
-    int fd = v->device_fd;
+    int fd = s->device_fd;
     int ret = 0;
 
-    msg.type = v->msg_type;
+    msg.type = VHOST_IOTLB_MSG_V2;
     msg.asid = asid;
     msg.iotlb.iova = iova;
     msg.iotlb.size = size;
@@ -101,7 +101,7 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
     msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
     msg.iotlb.type = VHOST_IOTLB_UPDATE;
 
-    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
+    trace_vhost_vdpa_dma_map(s, fd, msg.type, msg.asid, msg.iotlb.iova,
                              msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
                              msg.iotlb.type);
 
@@ -118,20 +118,20 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
  * The caller must set asid = 0 if the device does not support asid.
  * This is not an ABI break since it is set to 0 by the initializer anyway.
  */
-int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                          hwaddr size)
 {
     struct vhost_msg_v2 msg = {};
-    int fd = v->device_fd;
+    int fd = s->device_fd;
     int ret = 0;
 
-    msg.type = v->msg_type;
+    msg.type = VHOST_IOTLB_MSG_V2;
     msg.asid = asid;
     msg.iotlb.iova = iova;
     msg.iotlb.size = size;
     msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
 
-    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
+    trace_vhost_vdpa_dma_unmap(s, fd, msg.type, msg.asid, msg.iotlb.iova,
                                msg.iotlb.size, msg.iotlb.type);
 
     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
@@ -143,56 +143,55 @@ int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
     return ret;
 }
 
-static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
+static void vhost_vdpa_listener_begin_batch(VhostVDPAShared *s)
 {
-    int fd = v->device_fd;
+    int fd = s->device_fd;
     struct vhost_msg_v2 msg = {
-        .type = v->msg_type,
+        .type = VHOST_IOTLB_MSG_V2,
         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
     };
 
-    trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
+    trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type);
     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
         error_report("failed to write, fd=%d, errno=%d (%s)",
                      fd, errno, strerror(errno));
     }
 }
 
-static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
+static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
 {
-    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
-        !v->iotlb_batch_begin_sent) {
-        vhost_vdpa_listener_begin_batch(v);
+    if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
+        !s->iotlb_batch_begin_sent) {
+        vhost_vdpa_listener_begin_batch(s);
     }
 
-    v->iotlb_batch_begin_sent = true;
+    s->iotlb_batch_begin_sent = true;
 }
 
 static void vhost_vdpa_listener_commit(MemoryListener *listener)
 {
-    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
-    struct vhost_dev *dev = v->dev;
+    VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
     struct vhost_msg_v2 msg = {};
-    int fd = v->device_fd;
+    int fd = s->device_fd;
 
-    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
+    if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
         return;
     }
 
-    if (!v->iotlb_batch_begin_sent) {
+    if (!s->iotlb_batch_begin_sent) {
         return;
     }
 
-    msg.type = v->msg_type;
+    msg.type = VHOST_IOTLB_MSG_V2;
     msg.iotlb.type = VHOST_IOTLB_BATCH_END;
 
-    trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
+    trace_vhost_vdpa_listener_commit(s, fd, msg.type, msg.iotlb.type);
     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
         error_report("failed to write, fd=%d, errno=%d (%s)",
                      fd, errno, strerror(errno));
     }
 
-    v->iotlb_batch_begin_sent = false;
+    s->iotlb_batch_begin_sent = false;
 }
 
 static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
@@ -200,7 +199,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
 
     hwaddr iova = iotlb->iova + iommu->iommu_offset;
-    struct vhost_vdpa *v = iommu->dev;
+    VhostVDPAShared *s = iommu->dev_shared;
     void *vaddr;
     int ret;
     Int128 llend;
@@ -213,10 +212,10 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     RCU_READ_LOCK_GUARD();
     /* check if RAM section out of device range */
     llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
-    if (int128_gt(llend, int128_make64(v->iova_range.last))) {
+    if (int128_gt(llend, int128_make64(s->iova_range.last))) {
         error_report("RAM section out of device range (max=0x%" PRIx64
                      ", end addr=0x%" PRIx64 ")",
-                     v->iova_range.last, int128_get64(llend));
+                     s->iova_range.last, int128_get64(llend));
         return;
     }
 
@@ -226,20 +225,20 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
         if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
             return;
         }
-        ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+        ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
                                  iotlb->addr_mask + 1, vaddr, read_only);
         if (ret) {
            error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ", %p) = %d (%m)",
-                        v, iova, iotlb->addr_mask + 1, vaddr, ret);
+                        s, iova, iotlb->addr_mask + 1, vaddr, ret);
         }
     } else {
-        ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+        ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
                                    iotlb->addr_mask + 1);
         if (ret) {
             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                          "0x%" HWADDR_PRIx ") = %d (%m)",
-                         v, iova, iotlb->addr_mask + 1, ret);
+                         s, iova, iotlb->addr_mask + 1, ret);
         }
     }
 }
@@ -247,7 +246,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
                                         MemoryRegionSection *section)
 {
-    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
+    VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
 
     struct vdpa_iommu *iommu;
     Int128 end;
@@ -271,7 +270,7 @@ static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
                                         iommu_idx);
     iommu->iommu_offset = section->offset_within_address_space -
                           section->offset_within_region;
-    iommu->dev = v;
+    iommu->dev_shared = s;
 
     ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
     if (ret) {
@@ -279,7 +278,7 @@ static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
         return;
     }
 
-    QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next);
+    QLIST_INSERT_HEAD(&s->iommu_list, iommu, iommu_next);
     memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
 
     return;
@@ -288,11 +287,11 @@ static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
 static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
                                         MemoryRegionSection *section)
 {
-    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
+    VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
 
     struct vdpa_iommu *iommu;
 
-    QLIST_FOREACH(iommu, &v->iommu_list, iommu_next)
+    QLIST_FOREACH(iommu, &s->iommu_list, iommu_next)
     {
         if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
             iommu->n.start == section->offset_within_region) {
@@ -308,7 +307,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                            MemoryRegionSection *section)
 {
     DMAMap mem_region = {};
-    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
+    VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
     hwaddr iova;
     Int128 llend, llsize;
     void *vaddr;
@@ -316,8 +315,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
     int page_size = qemu_target_page_size();
     int page_mask = -page_size;
 
-    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
-                                            v->iova_range.last, page_mask)) {
+    if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first,
+                                            s->iova_range.last, page_mask)) {
         return;
     }
     if (memory_region_is_iommu(section->mr)) {
@@ -327,7 +326,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
 
     if (unlikely((section->offset_within_address_space & ~page_mask) !=
                  (section->offset_within_region & ~page_mask))) {
-        trace_vhost_vdpa_listener_region_add_unaligned(v, section->mr->name,
+        trace_vhost_vdpa_listener_region_add_unaligned(s, section->mr->name,
                        section->offset_within_address_space & ~page_mask,
                        section->offset_within_region & ~page_mask);
         return;
@@ -347,18 +346,18 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                         section->offset_within_region +
                         (iova - section->offset_within_address_space);
 
-    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
+    trace_vhost_vdpa_listener_region_add(s, iova, int128_get64(llend),
                                          vaddr, section->readonly);
 
     llsize = int128_sub(llend, int128_make64(iova));
-    if (v->shadow_data) {
+    if (s->shadow_data) {
         int r;
 
         mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
        mem_region.size = int128_get64(llsize) - 1,
        mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly),
 
-        r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
+        r = vhost_iova_tree_map_alloc(s->iova_tree, &mem_region);
         if (unlikely(r != IOVA_OK)) {
             error_report("Can't allocate a mapping (%d)", r);
             goto fail;
@@ -367,8 +366,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
         iova = mem_region.iova;
     }
 
-    vhost_vdpa_iotlb_batch_begin_once(v);
-    ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+    vhost_vdpa_iotlb_batch_begin_once(s);
+    ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
                              int128_get64(llsize), vaddr, section->readonly);
     if (ret) {
         error_report("vhost vdpa map fail!");
@@ -378,8 +377,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
     return;
 
 fail_map:
-    if (v->shadow_data) {
-        vhost_iova_tree_remove(v->iova_tree, mem_region);
+    if (s->shadow_data) {
+        vhost_iova_tree_remove(s->iova_tree, mem_region);
     }
 
 fail:
@@ -396,15 +395,15 @@ fail:
 static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                            MemoryRegionSection *section)
 {
-    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
+    VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
     hwaddr iova;
     Int128 llend, llsize;
     int ret;
     int page_size = qemu_target_page_size();
     int page_mask = -page_size;
 
-    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
-                                            v->iova_range.last, page_mask)) {
+    if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first,
+                                            s->iova_range.last, page_mask)) {
         return;
     }
     if (memory_region_is_iommu(section->mr)) {
@@ -413,7 +412,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
 
     if (unlikely((section->offset_within_address_space & ~page_mask) !=
                  (section->offset_within_region & ~page_mask))) {
-        trace_vhost_vdpa_listener_region_del_unaligned(v, section->mr->name,
+        trace_vhost_vdpa_listener_region_del_unaligned(s, section->mr->name,
                        section->offset_within_address_space & ~page_mask,
                        section->offset_within_region & ~page_mask);
         return;
@@ -422,7 +421,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
 
     iova = ROUND_UP(section->offset_within_address_space, page_size);
     llend = vhost_vdpa_section_end(section, page_mask);
-    trace_vhost_vdpa_listener_region_del(v, iova,
+    trace_vhost_vdpa_listener_region_del(s, iova,
         int128_get64(int128_sub(llend, int128_one())));
 
     if (int128_ge(int128_make64(iova), llend)) {
@@ -431,7 +430,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
 
     llsize = int128_sub(llend, int128_make64(iova));
 
-    if (v->shadow_data) {
+    if (s->shadow_data) {
         const DMAMap *result;
         const void *vaddr = memory_region_get_ram_ptr(section->mr) +
             section->offset_within_region +
@@ -441,37 +440,37 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
             .size = int128_get64(llsize) - 1,
         };
 
-        result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
+        result = vhost_iova_tree_find_iova(s->iova_tree, &mem_region);
         if (!result) {
             /* The memory listener map wasn't mapped */
             return;
         }
         iova = result->iova;
-        vhost_iova_tree_remove(v->iova_tree, *result);
+        vhost_iova_tree_remove(s->iova_tree, *result);
     }
-    vhost_vdpa_iotlb_batch_begin_once(v);
+    vhost_vdpa_iotlb_batch_begin_once(s);
     /*
      * The unmap ioctl doesn't accept a full 64-bit. need to check it
     */
     if (int128_eq(llsize, int128_2_64())) {
         llsize = int128_rshift(llsize, 1);
-        ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+        ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
                                    int128_get64(llsize));
 
         if (ret) {
             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                          "0x%" HWADDR_PRIx ") = %d (%m)",
-                         v, iova, int128_get64(llsize), ret);
+                         s, iova, int128_get64(llsize), ret);
         }
         iova += int128_get64(llsize);
     }
 
-    ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+    ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
                                int128_get64(llsize));
     if (ret) {
         error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                      "0x%" HWADDR_PRIx ") = %d (%m)",
-                     v, iova, int128_get64(llsize), ret);
+                     s, iova, int128_get64(llsize), ret);
     }
 
     memory_region_unref(section->mr);
@@ -492,7 +491,7 @@ static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                              void *arg)
 {
     struct vhost_vdpa *v = dev->opaque;
-    int fd = v->device_fd;
+    int fd = v->shared->device_fd;
     int ret;
 
     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
@@ -511,6 +510,10 @@ static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
     if (ret < 0) {
         return ret;
     }
+    if ((s & status) == status) {
+        /* Don't set bits already set */
+        return 0;
+    }
 
     s |= status;
 
@@ -579,16 +582,14 @@ static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
 
 static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
 {
-    struct vhost_vdpa *v;
+    struct vhost_vdpa *v = opaque;
     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
-    trace_vhost_vdpa_init(dev, opaque);
+    trace_vhost_vdpa_init(dev, v->shared, opaque);
     int ret;
 
-    v = opaque;
     v->dev = dev;
     dev->opaque =  opaque ;
-    v->listener = vhost_vdpa_memory_listener;
-    v->msg_type = VHOST_IOTLB_MSG_V2;
+    v->shared->listener = vhost_vdpa_memory_listener;
     vhost_vdpa_init_svq(dev, v);
 
     error_propagate(&dev->migration_blocker, v->migration_blocker);
@@ -651,7 +652,7 @@ static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
     struct vhost_vdpa *v = dev->opaque;
     VirtIODevice *vdev = dev->vdev;
     VhostVDPAHostNotifier *n;
-    int fd = v->device_fd;
+    int fd = v->shared->device_fd;
     void *addr;
     char *name;
 
@@ -748,10 +749,10 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
     trace_vhost_vdpa_cleanup(dev, v);
     if (vhost_vdpa_first_dev(dev)) {
         ram_block_discard_disable(false);
+        memory_listener_unregister(&v->shared->listener);
     }
 
     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
-    memory_listener_unregister(&v->listener);
     vhost_vdpa_svq_cleanup(dev);
 
     dev->opaque = NULL;
@@ -828,6 +829,8 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
 
 static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
 {
+    struct vhost_vdpa *v = dev->opaque;
+
     uint64_t features;
     uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
         0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
@@ -849,6 +852,7 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
     }
 
     dev->backend_cap = features;
+    v->shared->backend_cap = features;
 
     return 0;
 }
@@ -1059,7 +1063,8 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
     const DMAMap needle = {
         .translated_addr = addr,
     };
-    const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle);
+    const DMAMap *result = vhost_iova_tree_find_iova(v->shared->iova_tree,
+                                                     &needle);
     hwaddr size;
     int r;
 
@@ -1069,13 +1074,14 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
     }
 
     size = ROUND_UP(result->size, qemu_real_host_page_size());
-    r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
+    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova,
+                             size);
     if (unlikely(r < 0)) {
         error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
         return;
     }
 
-    vhost_iova_tree_remove(v->iova_tree, *result);
+    vhost_iova_tree_remove(v->shared->iova_tree, *result);
 }
 
 static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
@@ -1103,19 +1109,19 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
 {
     int r;
 
-    r = vhost_iova_tree_map_alloc(v->iova_tree, needle);
+    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle);
     if (unlikely(r != IOVA_OK)) {
         error_setg(errp, "Cannot allocate iova (%d)", r);
         return false;
     }
 
-    r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
+    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova,
                            needle->size + 1,
                            (void *)(uintptr_t)needle->translated_addr,
                            needle->perm == IOMMU_RO);
     if (unlikely(r != 0)) {
         error_setg_errno(errp, -r, "Cannot map region to device");
-        vhost_iova_tree_remove(v->iova_tree, *needle);
+        vhost_iova_tree_remove(v->shared->iova_tree, *needle);
     }
 
     return r == 0;
@@ -1216,7 +1222,7 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
             goto err;
         }
 
-        vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
+        vhost_svq_start(svq, dev->vdev, vq, v->shared->iova_tree);
         ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
         if (unlikely(!ok)) {
             goto err_map;
@@ -1279,7 +1285,7 @@ static void vhost_vdpa_suspend(struct vhost_dev *dev)
 
     if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
         trace_vhost_vdpa_suspend(dev);
-        r = ioctl(v->device_fd, VHOST_VDPA_SUSPEND);
+        r = ioctl(v->shared->device_fd, VHOST_VDPA_SUSPEND);
         if (unlikely(r)) {
             error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
         } else {
@@ -1319,7 +1325,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
                          "IOMMU and try again");
             return -1;
         }
-        memory_listener_register(&v->listener, dev->vdev->dma_as);
+        memory_listener_register(&v->shared->listener, dev->vdev->dma_as);
 
         return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
     }
@@ -1338,7 +1344,7 @@ static void vhost_vdpa_reset_status(struct vhost_dev *dev)
     vhost_vdpa_reset_device(dev);
     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                VIRTIO_CONFIG_S_DRIVER);
-    memory_listener_unregister(&v->listener);
+    memory_listener_unregister(&v->shared->listener);
 }
 
 static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
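For context, the refactor above assumes a VhostVDPAShared container that now owns the memory listener, the IOTLB batching state, the IOVA tree and range, and the device fd, while each struct vhost_vdpa keeps a pointer to it. The sketch below is not the verbatim QEMU header; it only lists the members that this diff dereferences through "s->" or "v->shared->", and the surrounding declarations and field order are assumptions.

/*
 * Hedged sketch of the shared state implied by the diff; any field or type
 * not referenced in the patch above is an assumption, not the real header.
 */
typedef struct VhostVDPAShared {
    int device_fd;                        /* /dev/vhost-vdpa-N fd, written to by dma_map/unmap */
    MemoryListener listener;              /* moved here from struct vhost_vdpa */
    struct vhost_vdpa_iova_range iova_range;   /* .first/.last checked by the listener */
    VhostIOVATree *iova_tree;             /* IOVA allocations used when shadow_data is set */
    QLIST_HEAD(, vdpa_iommu) iommu_list;  /* vdpa_iommu.dev_shared points back here */
    uint64_t backend_cap;                 /* cached backend features (IOTLB batch, ...) */
    bool iotlb_batch_begin_sent;
    bool shadow_data;
} VhostVDPAShared;

struct vhost_vdpa {
    VhostVDPAShared *shared;              /* state shared by all parts of one vdpa device */
    /* ... per-device fields (dev, address_space_id, shadow vqs, ...) unchanged ... */
};

With this split, functions that act on the whole device (the memory listener callbacks, vhost_vdpa_dma_map/unmap, the batch begin/end messages) take the shared struct directly, while per-queue-pair code keeps struct vhost_vdpa and reaches the common state through v->shared.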