| author | Jonah Palmer <jonah.palmer@oracle.com> | 2025-02-17 09:49:33 -0500 |
|---|---|---|
| committer | Michael S. Tsirkin <mst@redhat.com> | 2025-02-21 07:18:42 -0500 |
| commit | 05063f55841babae7216d36105440ed8ba632938 (patch) | |
| tree | 431b5199e4061f31fc623d9526d4c4747e4163df /hw/virtio/vhost-shadow-virtqueue.c | |
| parent | 92cf61e70838c20adc82daa3170fdbb9d174b508 (diff) | |
vhost-iova-tree, svq: Implement GPA->IOVA & partial IOVA->HVA trees
Creates and supports a GPA->IOVA tree and a partial IOVA->HVA tree by splitting up guest-backed memory maps and host-only memory maps from the full IOVA->HVA tree. That is, any guest-backed memory maps are now stored in the GPA->IOVA tree and host-only memory maps stay in the IOVA->HVA tree.

Also propagates the GPAs (in_addr/out_addr) of a VirtQueueElement to vhost_svq_translate_addr() to translate GPAs to IOVAs via the GPA->IOVA tree (when descriptors are backed by guest memory). For descriptors backed by host-only memory, the existing partial SVQ IOVA->HVA tree is used.

GPAs are unique in the guest's address space, ensuring unambiguous IOVA translations. This avoids the issue where different GPAs map to the same HVA, causing the original HVA->IOVA translation to potentially return an IOVA associated with the wrong intended GPA.

Signed-off-by: Jonah Palmer <jonah.palmer@oracle.com>
Acked-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20250217144936.3589907-3-jonah.palmer@oracle.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
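The aliasing problem the message describes is easy to see in miniature. Below is a self-contained C sketch, not QEMU code: `ToyMap` and the `toy_find_by_*` helpers are hypothetical stand-ins for QEMU's `DMAMap` and the IOVATree lookups, with a linear scan replacing the tree search. Two guest-physical regions share one HVA; a lookup keyed by HVA can land on the alias belonging to the wrong GPA, while a lookup keyed by GPA cannot.

```c
/*
 * Illustrative sketch only (not QEMU code). ToyMap stands in for DMAMap;
 * the linear scans stand in for the IOVATree searches.
 */
#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    uint64_t gpa;   /* guest-physical address of the region */
    uint64_t hva;   /* host virtual address backing it */
    uint64_t iova;  /* IOVA the SVQ assigned for the device */
    uint64_t size;
} ToyMap;

/*
 * Two guest-physical regions backed by the SAME host buffer, e.g. one
 * RAM block mapped twice into the guest's physical address space.
 */
static const ToyMap maps[] = {
    { .gpa = 0x40000000, .hva = 0x7f0000000000, .iova = 0x1000, .size = 0x1000 },
    { .gpa = 0x80000000, .hva = 0x7f0000000000, .iova = 0x2000, .size = 0x1000 },
};

#define N_MAPS (sizeof(maps) / sizeof(maps[0]))

/* HVA-keyed lookup: both regions match, so the first alias wins. */
static const ToyMap *toy_find_by_hva(uint64_t hva)
{
    for (size_t i = 0; i < N_MAPS; i++) {
        if (hva >= maps[i].hva && hva < maps[i].hva + maps[i].size) {
            return &maps[i];
        }
    }
    return NULL;
}

/* GPA-keyed lookup: GPAs never alias, so exactly one region matches. */
static const ToyMap *toy_find_by_gpa(uint64_t gpa)
{
    for (size_t i = 0; i < N_MAPS; i++) {
        if (gpa >= maps[i].gpa && gpa < maps[i].gpa + maps[i].size) {
            return &maps[i];
        }
    }
    return NULL;
}

int main(void)
{
    /*
     * A descriptor at GPA 0x80000010 is backed by HVA 0x7f0000000010.
     * Keying by HVA returns the first alias (IOVA 0x1000), which belongs
     * to the *other* guest-physical region; keying by GPA returns the
     * correct map (IOVA 0x2000).
     */
    printf("by HVA: iova=0x%" PRIx64 " (may be the wrong alias)\n",
           toy_find_by_hva(0x7f0000000010)->iova);
    printf("by GPA: iova=0x%" PRIx64 " (always the right map)\n",
           toy_find_by_gpa(0x80000010)->iova);
    return 0;
}
```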
Diffstat (limited to 'hw/virtio/vhost-shadow-virtqueue.c')
| -rw-r--r-- | hw/virtio/vhost-shadow-virtqueue.c | 55 |
1 file changed, 38 insertions, 17 deletions
```diff
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 37aca8b431..30ba565f03 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -78,24 +78,39 @@ uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
  * @vaddr: Translated IOVA addresses
  * @iovec: Source qemu's VA addresses
  * @num: Length of iovec and minimum length of vaddr
+ * @gpas: Descriptors' GPAs, if backed by guest memory
  */
 static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
                                      hwaddr *addrs, const struct iovec *iovec,
-                                     size_t num)
+                                     size_t num, const hwaddr *gpas)
 {
     if (num == 0) {
         return true;
     }
 
     for (size_t i = 0; i < num; ++i) {
-        DMAMap needle = {
-            .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base,
-            .size = iovec[i].iov_len,
-        };
         Int128 needle_last, map_last;
         size_t off;
+        const DMAMap *map;
+        DMAMap needle;
+
+        /* Check if the descriptor is backed by guest memory */
+        if (gpas) {
+            /* Search the GPA->IOVA tree */
+            needle = (DMAMap) {
+                .translated_addr = gpas[i],
+                .size = iovec[i].iov_len,
+            };
+            map = vhost_iova_tree_find_gpa(svq->iova_tree, &needle);
+        } else {
+            /* Search the IOVA->HVA tree */
+            needle = (DMAMap) {
+                .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base,
+                .size = iovec[i].iov_len,
+            };
+            map = vhost_iova_tree_find_iova(svq->iova_tree, &needle);
+        }
 
-        const DMAMap *map = vhost_iova_tree_find_iova(svq->iova_tree, &needle);
         /*
          * Map cannot be NULL since iova map contains all guest space and
          * qemu already has a physical address mapped
@@ -130,6 +145,7 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
  * @sg: Cache for hwaddr
  * @iovec: The iovec from the guest
  * @num: iovec length
+ * @addr: Descriptors' GPAs, if backed by guest memory
  * @more_descs: True if more descriptors come in the chain
  * @write: True if they are writeable descriptors
  *
@@ -137,7 +153,8 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
  */
 static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
                                         const struct iovec *iovec, size_t num,
-                                        bool more_descs, bool write)
+                                        const hwaddr *addr, bool more_descs,
+                                        bool write)
 {
     uint16_t i = svq->free_head, last = svq->free_head;
     unsigned n;
@@ -149,7 +166,7 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
         return true;
     }
 
-    ok = vhost_svq_translate_addr(svq, sg, iovec, num);
+    ok = vhost_svq_translate_addr(svq, sg, iovec, num, addr);
     if (unlikely(!ok)) {
         return false;
     }
@@ -174,8 +191,9 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
 
 static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
                                 const struct iovec *out_sg, size_t out_num,
+                                const hwaddr *out_addr,
                                 const struct iovec *in_sg, size_t in_num,
-                                unsigned *head)
+                                const hwaddr *in_addr, unsigned *head)
 {
     unsigned avail_idx;
     vring_avail_t *avail = svq->vring.avail;
@@ -191,13 +209,14 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
         return false;
     }
 
-    ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0,
-                                     false);
+    ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, out_addr,
+                                     in_num > 0, false);
     if (unlikely(!ok)) {
         return false;
     }
 
-    ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true);
+    ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, in_addr, false,
+                                     true);
     if (unlikely(!ok)) {
         return false;
     }
@@ -247,8 +266,9 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
  * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full
  */
 int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
-                  size_t out_num, const struct iovec *in_sg, size_t in_num,
-                  VirtQueueElement *elem)
+                  size_t out_num, const hwaddr *out_addr,
+                  const struct iovec *in_sg, size_t in_num,
+                  const hwaddr *in_addr, VirtQueueElement *elem)
 {
     unsigned qemu_head;
     unsigned ndescs = in_num + out_num;
@@ -258,7 +278,8 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
         return -ENOSPC;
     }
 
-    ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
+    ok = vhost_svq_add_split(svq, out_sg, out_num, out_addr, in_sg, in_num,
+                             in_addr, &qemu_head);
     if (unlikely(!ok)) {
         return -EINVAL;
     }
@@ -274,8 +295,8 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
 static int vhost_svq_add_element(VhostShadowVirtqueue *svq,
                                  VirtQueueElement *elem)
 {
-    return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->in_sg,
-                         elem->in_num, elem);
+    return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->out_addr,
+                         elem->in_sg, elem->in_num, elem->in_addr, elem);
 }
 
 /**
```
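A note on the design choice visible in these hunks: the GPA arrays travel alongside the existing iovec arrays through the whole call chain (vhost_svq_add -> vhost_svq_add_split -> vhost_svq_vring_write_descs -> vhost_svq_translate_addr) rather than being recovered late from the HVAs, which is exactly what made the old translation ambiguous. vhost_svq_add_element forwards the element's own elem->out_addr/elem->in_addr, so guest-originated descriptors take the new GPA->IOVA path; a caller whose buffers live only in host memory (not shown in this diff) would presumably pass NULL for the addr arrays, steering vhost_svq_translate_addr into its existing IOVA->HVA branch.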