author    Stefan Hajnoczi <stefanha@redhat.com>  2022-09-02 13:23:32 -0400
committer Stefan Hajnoczi <stefanha@redhat.com>  2022-09-02 13:23:32 -0400
commit    fccffd53719255cafd4bc89f5b0bda1cd37924f4 (patch)
tree      1f43f28083615001d6f3b1aeafd6b0be88f60de7 /hw/virtio/vhost-shadow-virtqueue.c
parent    9fd704da6809f3e01d0283f0d6d619022d481fb9 (diff)
parent    36a894aeb64a2e02871016da1c37d4a4ca109182 (diff)
Merge tag 'net-pull-request' of https://github.com/jasowang/qemu into staging
# -----BEGIN PGP SIGNATURE-----
# Version: GnuPG v1
#
# iQEcBAABAgAGBQJjEaMLAAoJEO8Ells5jWIRoRwIAJpwefLgH/+lkd1mtWqxBhuS
# KLa0bkcS6nIGnjQzNX/XWipu/5tMbBLzbaKw0myodvoK6Yx0MFog1cWf6gLHuvWH
# Jy3ONUrF9umHYuOa9sJJtXv/aP7neNJSB3RW67BaiLCLkaetDj9lLciA/KKMvb/I
# JNFtuLVTPibZ5iVTjvifFWmJD/Yk0P8mlrH5yfrA3B2EaaWf1es0GWobGIwwLu9s
# ZSqjhMDAhfOW2E1sBh7jFRh4lJX1t1jRhyIGx2bOXevPx2hFHq6FSq+yuJ9OsZvO
# wC8mC4DD+fovypDWbv3WLslIejM0+THD8KuBQnZtKX5Mbhc+0cELpIFLUdH95TM=
# =eMUT
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 02 Sep 2022 02:30:35 EDT
# gpg:                using RSA key EF04965B398D6211
# gpg: Good signature from "Jason Wang (Jason Wang on RedHat) <jasowang@redhat.com>" [full]
# Primary key fingerprint: 215D 46F4 8246 689E C77F  3562 EF04 965B 398D 6211

* tag 'net-pull-request' of https://github.com/jasowang/qemu: (21 commits)
  net: tulip: Restrict DMA engine to memories
  net/colo.c: Fix the pointer issue reported by Coverity.
  vdpa: Delete CVQ migration blocker
  vdpa: Add virtio-net mac address via CVQ at start
  vhost_net: add NetClientState->load() callback
  vdpa: extract vhost_vdpa_net_cvq_add from vhost_vdpa_net_handle_ctrl_avail
  vdpa: Move command buffers map to start of net device
  vdpa: add net_vhost_vdpa_cvq_info NetClientInfo
  vhost_net: Add NetClientInfo stop callback
  vhost_net: Add NetClientInfo start callback
  vhost: Do not depend on !NULL VirtQueueElement on vhost_svq_flush
  vhost: Delete useless read memory barrier
  vhost: use SVQ element ndescs instead of opaque data for desc validation
  vhost: stop transfer elem ownership in vhost_handle_guest_kick
  vdpa: Use ring hwaddr at vhost_vdpa_svq_unmap_ring
  vhost: Always store new kick fd on vhost_svq_set_svq_kick_fd
  vdpa: Make SVQ vring unmapping return void
  vdpa: Remove SVQ vring from iova_tree at shutdown
  util: accept iova_tree_remove_parameter by value
  vdpa: do not save failed dma maps in SVQ iova tree
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'hw/virtio/vhost-shadow-virtqueue.c')
-rw-r--r--  hw/virtio/vhost-shadow-virtqueue.c  |  31
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index e4956728dd..e8e5bbc368 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -233,9 +233,6 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
 /**
  * Add an element to a SVQ.
  *
- * The caller must check that there is enough slots for the new element. It
- * takes ownership of the element: In case of failure not ENOSPC, it is free.
- *
  * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full
  */
 int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
@@ -252,7 +249,6 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
 
     ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
     if (unlikely(!ok)) {
-        g_free(elem);
         return -EINVAL;
     }
 
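With the two hunks above, vhost_svq_add() no longer frees the element on
failure, so ownership stays with the caller on every error path, not only
ENOSPC. A minimal caller-side sketch of the new contract; the helper name is
hypothetical, and it assumes QEMU's VirtQueueElement fields
(out_sg/out_num/in_sg/in_num):

    /* Hypothetical helper: the caller keeps ownership of elem until
     * vhost_svq_add() succeeds. */
    static int svq_add_elem(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
    {
        int r = vhost_svq_add(svq, elem->out_sg, elem->out_num,
                              elem->in_sg, elem->in_num, elem);
        if (unlikely(r != 0)) {
            /* -EINVAL or -ENOSPC: elem was not consumed, free it here */
            g_free(elem);
            return r;
        }
        /* On success the SVQ tracks elem; do not free it. */
        return 0;
    }
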
@@ -293,7 +289,7 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
         virtio_queue_set_notification(svq->vq, false);
 
         while (true) {
-            VirtQueueElement *elem;
+            g_autofree VirtQueueElement *elem;
             int r;
 
             if (svq->next_guest_avail_elem) {
@@ -324,12 +320,14 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
                      * queue the current guest descriptor and ignore kicks
                      * until some elements are used.
                      */
-                    svq->next_guest_avail_elem = elem;
+                    svq->next_guest_avail_elem = g_steal_pointer(&elem);
                 }
 
                 /* VQ is full or broken, just return and ignore kicks */
                 return;
             }
+            /* elem belongs to SVQ or external caller now */
+            elem = NULL;
         }
 
         virtio_queue_set_notification(svq->vq, true);
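
The hunk above leans on GLib scope-based cleanup: a g_autofree pointer is
passed to g_free() automatically when it goes out of scope, unless ownership
is transferred first with g_steal_pointer(), which returns the value and
NULLs the variable. A standalone sketch of the idiom (plain GLib, not QEMU
code):

    #include <glib.h>

    /* The buffer is freed automatically on every exit path unless it is
     * handed off to the caller first. */
    static char *make_or_drop(gboolean hand_off)
    {
        g_autofree char *buf = g_strdup("owned by this scope");

        if (!hand_off) {
            return NULL; /* buf is g_free()d automatically here */
        }
        /* Transfer ownership: buf becomes NULL, so no auto-free runs. */
        return g_steal_pointer(&buf);
    }
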
@@ -416,7 +414,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
         return NULL;
     }
 
-    if (unlikely(!svq->desc_state[used_elem.id].elem)) {
+    if (unlikely(!svq->desc_state[used_elem.id].ndescs)) {
         qemu_log_mask(LOG_GUEST_ERROR,
             "Device %s says index %u is used, but it was not available",
             svq->vdev->name, used_elem.id);
@@ -424,6 +422,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
     }
 
     num = svq->desc_state[used_elem.id].ndescs;
+    svq->desc_state[used_elem.id].ndescs = 0;
     last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
     svq->desc_next[last_used_chain] = svq->free_head;
     svq->free_head = used_elem.id;
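
Per the "use SVQ element ndescs instead of opaque data for desc validation"
commit above, a nonzero ndescs now marks a descriptor chain as in flight, and
zeroing it on use stops the device from getting the same id accepted twice. A
reduced sketch of the idea; the struct and names are illustrative, not the
exact QEMU definitions:

    #include <stdbool.h>

    /* Illustrative per-id state: ndescs doubles as an "in flight" flag. */
    typedef struct {
        void *opaque;    /* e.g. the VirtQueueElement */
        unsigned ndescs; /* 0 => slot free, >0 => chain length in flight */
    } DescState;

    static bool take_used(DescState *state, unsigned id)
    {
        if (state[id].ndescs == 0) {
            return false;     /* device reported a stale or bogus used id */
        }
        state[id].ndescs = 0; /* mark the slot free again */
        return true;
    }
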
@@ -500,20 +499,20 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
 size_t vhost_svq_poll(VhostShadowVirtqueue *svq)
 {
     int64_t start_us = g_get_monotonic_time();
+    uint32_t len;
+
     do {
-        uint32_t len;
-        VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
-        if (elem) {
-            return len;
+        if (vhost_svq_more_used(svq)) {
+            break;
         }
 
         if (unlikely(g_get_monotonic_time() - start_us > 10e6)) {
             return 0;
         }
-
-        /* Make sure we read new used_idx */
-        smp_rmb();
     } while (true);
+
+    vhost_svq_get_buf(svq, &len);
+    return len;
 }
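
vhost_svq_poll() now busy-waits on vhost_svq_more_used() with a 10-second
cutoff (10e6 µs compared against g_get_monotonic_time(), which counts in
microseconds). A standalone sketch of that polling shape, with a hypothetical
condition() standing in for the ring check:

    #include <glib.h>
    #include <stdbool.h>

    /* Bounded busy-wait: spin until condition() holds or ~10 s elapse on
     * the monotonic clock (measured in microseconds). */
    static bool poll_with_timeout(bool (*condition)(void *), void *opaque)
    {
        gint64 start_us = g_get_monotonic_time();

        while (!condition(opaque)) {
            if (g_get_monotonic_time() - start_us > 10 * G_USEC_PER_SEC) {
                return false; /* timed out */
            }
        }
        return true;
    }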
 
 /**
@@ -602,13 +601,13 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
         event_notifier_set_handler(svq_kick, NULL);
     }
 
+    event_notifier_init_fd(svq_kick, svq_kick_fd);
     /*
      * event_notifier_set_handler already checks for guest's notifications if
      * they arrive at the new file descriptor in the switch, so there is no
      * need to explicitly check for them.
      */
     if (poll_start) {
-        event_notifier_init_fd(svq_kick, svq_kick_fd);
         event_notifier_set(svq_kick);
         event_notifier_set_handler(svq_kick, vhost_handle_guest_kick_notifier);
     }
@@ -655,7 +654,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
  */
 void vhost_svq_stop(VhostShadowVirtqueue *svq)
 {
-    event_notifier_set_handler(&svq->svq_kick, NULL);
+    vhost_svq_set_svq_kick_fd(svq, VHOST_FILE_UNBIND);
     g_autofree VirtQueueElement *next_avail_elem = NULL;
 
     if (!svq->vq) {