author    Richard Henderson <richard.henderson@linaro.org>  2022-06-28 11:51:07 +0530
committer Richard Henderson <richard.henderson@linaro.org>  2022-06-28 11:51:07 +0530
commit    2a8835cb45371a1f05c9c5899741d66685290f28
tree      0ffc5882f2e248c3ed5d00789b20aff87b7f2205
parent    ad4c7f529a279685da84297773b4ec8080153c2d
parent    81cf38f3ff3c7db8fcd2f46df9a294fdf6f4a910
Merge tag 'for_upstream' of git://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging
virtio: fixes

fixes all over the place

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmK6NQoPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRp/sQIAJGiYliUHElJapM/4KSsXKWCFtk9B8wJuUie
# yeMKOdD6QSk9tk/HkYSCnMB7G6Fe+MtoE+sPm/6l5nOFqvqVVJw9vOKteWHSpQ0E
# 9CgbR7s7K1MoLG9J613iB2OtAfhPrWEvSOJ6mvTAxGgxhCQw6UzC88cYfBHJ/efn
# GAhVlriSfSCRANmivjY+g4h4JFWWSMTH6m9u4wKBJF8GRkNgN+C50Z+bp8aE7wRT
# KiMoaaYUDOjxzD+8nGYggg/t+UIM7jG2t8M5BMbC0NMP+ovVZeesWK6ZOzoda2tI
# ZONV0dLikLCicyOvfMH6YDqzGtchCDmS0hpfuorhlzsntm42RBM=
# =T+gr
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 28 Jun 2022 04:24:02 AM +0530
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [undefined]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of git://git.kernel.org/pub/scm/virt/kvm/mst/qemu:
  include/hw/virtio: document vhost_ack_features
  include/hw/virtio: document vhost_get_features
  contrib/vhost-user-blk: fix 32 bit build and enable
  MAINTAINERS: Collect memory device files in "Memory devices"
  libvhost-user: Fix VHOST_USER_ADD_MEM_REG reply
  libvhost-user: Fix VHOST_USER_GET_MAX_MEM_SLOTS reply
  docs/vhost-user: Fix mismerge
  virtio-iommu: Fix migration regression
  vhost: setup error eventfd and dump errors
  vhost: add method vhost_set_vring_err
  msi: fix MSI vector limit check in msi_set_mask()
  virtio-iommu: Fix the partial copy of probe request

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'hw')
-rw-r--r--  hw/pci/msi.c               4
-rw-r--r--  hw/virtio/vhost-backend.c  7
-rw-r--r--  hw/virtio/vhost-user.c     6
-rw-r--r--  hw/virtio/vhost.c         37
-rw-r--r--  hw/virtio/virtio-iommu.c  16
5 files changed, 64 insertions, 6 deletions
diff --git a/hw/pci/msi.c b/hw/pci/msi.c
index 5c471b9616..058d1d1ef1 100644
--- a/hw/pci/msi.c
+++ b/hw/pci/msi.c
@@ -322,9 +322,9 @@ void msi_set_mask(PCIDevice *dev, int vector, bool mask, Error **errp)
     bool msi64bit = flags & PCI_MSI_FLAGS_64BIT;
     uint32_t irq_state, vector_mask, pending;
 
-    if (vector > PCI_MSI_VECTORS_MAX) {
+    if (vector >= PCI_MSI_VECTORS_MAX) {
         error_setg(errp, "msi: vector %d not allocated. max vector is %d",
-                   vector, PCI_MSI_VECTORS_MAX);
+                   vector, (PCI_MSI_VECTORS_MAX - 1));
         return;
     }
 
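
This hunk is the entire MSI fix: valid vector indexes run from 0 to PCI_MSI_VECTORS_MAX - 1, so the old '>' comparison let the out-of-range value PCI_MSI_VECTORS_MAX slip through. A standalone sketch of the corrected half-open range check (the helper name is made up for illustration; 32 is QEMU's MSI vector limit):

    /* Sketch only, not QEMU code: hypothetical helper showing the
     * corrected bounds check. */
    #include <stdbool.h>

    #define PCI_MSI_VECTORS_MAX 32   /* MSI allows at most 32 vectors */

    static bool msi_vector_in_range(int vector)
    {
        /* Valid indexes are 0 .. PCI_MSI_VECTORS_MAX - 1; '>' instead of
         * '>=' would wrongly accept vector == PCI_MSI_VECTORS_MAX. */
        return vector >= 0 && vector < PCI_MSI_VECTORS_MAX;
    }
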
diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 4de8b6b3b0..8e581575c9 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -146,6 +146,12 @@ static int vhost_kernel_set_vring_call(struct vhost_dev *dev,
     return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file);
 }
 
+static int vhost_kernel_set_vring_err(struct vhost_dev *dev,
+                                      struct vhost_vring_file *file)
+{
+    return vhost_kernel_call(dev, VHOST_SET_VRING_ERR, file);
+}
+
 static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
                                                    struct vhost_vring_state *s)
 {
@@ -309,6 +315,7 @@ const VhostOps kernel_ops = {
         .vhost_get_vring_base = vhost_kernel_get_vring_base,
         .vhost_set_vring_kick = vhost_kernel_set_vring_kick,
         .vhost_set_vring_call = vhost_kernel_set_vring_call,
+        .vhost_set_vring_err = vhost_kernel_set_vring_err,
         .vhost_set_vring_busyloop_timeout =
                                 vhost_kernel_set_vring_busyloop_timeout,
         .vhost_set_features = vhost_kernel_set_features,
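
For the kernel backend the new op is a thin wrapper around the VHOST_SET_VRING_ERR ioctl, which hands the kernel an eventfd it can signal when a vring error occurs on that queue. A minimal sketch of that ioctl used outside QEMU (register_vring_err_fd() and its error handling are simplified assumptions, not the QEMU code path):

    /* Sketch only: register an error eventfd for one vring with the kernel
     * vhost backend.  vhost_fd is an already opened /dev/vhost-* descriptor. */
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/vhost.h>

    static int register_vring_err_fd(int vhost_fd, unsigned int queue_index)
    {
        int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
        if (efd < 0) {
            return -1;
        }

        struct vhost_vring_file file = { .index = queue_index, .fd = efd };
        if (ioctl(vhost_fd, VHOST_SET_VRING_ERR, &file) < 0) {
            close(efd);
            return -1;
        }

        /* The caller polls efd; a successful read means the kernel reported
         * an error on this vring. */
        return efd;
    }
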
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 4b9be26e84..75b8df21a4 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -1313,6 +1313,11 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev,
     return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
 }
 
+static int vhost_user_set_vring_err(struct vhost_dev *dev,
+                                    struct vhost_vring_file *file)
+{
+    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file);
+}
 
 static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
 {
@@ -2616,6 +2621,7 @@ const VhostOps user_ops = {
         .vhost_get_vring_base = vhost_user_get_vring_base,
         .vhost_set_vring_kick = vhost_user_set_vring_kick,
         .vhost_set_vring_call = vhost_user_set_vring_call,
+        .vhost_set_vring_err = vhost_user_set_vring_err,
         .vhost_set_features = vhost_user_set_features,
         .vhost_get_features = vhost_user_get_features,
         .vhost_set_owner = vhost_user_set_owner,
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 6c41fa13e3..0827d631c0 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1278,6 +1278,19 @@ static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
     return 0;
 }
 
+static void vhost_virtqueue_error_notifier(EventNotifier *n)
+{
+    struct vhost_virtqueue *vq = container_of(n, struct vhost_virtqueue,
+                                              error_notifier);
+    struct vhost_dev *dev = vq->dev;
+    int index = vq - dev->vqs;
+
+    if (event_notifier_test_and_clear(n) && dev->vdev) {
+        VHOST_OPS_DEBUG(-EINVAL,  "vhost vring error in virtqueue %d",
+                        dev->vq_index + index);
+    }
+}
+
 static int vhost_virtqueue_init(struct vhost_dev *dev,
                                 struct vhost_virtqueue *vq, int n)
 {
@@ -1299,7 +1312,27 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
 
     vq->dev = dev;
 
+    if (dev->vhost_ops->vhost_set_vring_err) {
+        r = event_notifier_init(&vq->error_notifier, 0);
+        if (r < 0) {
+            goto fail_call;
+        }
+
+        file.fd = event_notifier_get_fd(&vq->error_notifier);
+        r = dev->vhost_ops->vhost_set_vring_err(dev, &file);
+        if (r) {
+            VHOST_OPS_DEBUG(r, "vhost_set_vring_err failed");
+            goto fail_err;
+        }
+
+        event_notifier_set_handler(&vq->error_notifier,
+                                   vhost_virtqueue_error_notifier);
+    }
+
     return 0;
+
+fail_err:
+    event_notifier_cleanup(&vq->error_notifier);
 fail_call:
     event_notifier_cleanup(&vq->masked_notifier);
     return r;
@@ -1308,6 +1341,10 @@ fail_call:
 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
 {
     event_notifier_cleanup(&vq->masked_notifier);
+    if (vq->dev->vhost_ops->vhost_set_vring_err) {
+        event_notifier_set_handler(&vq->error_notifier, NULL);
+        event_notifier_cleanup(&vq->error_notifier);
+    }
 }
 
 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
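
The vhost.c hunks tie the pieces together: virtqueue init creates an EventNotifier (an eventfd under the hood), registers it through the backend's vhost_set_vring_err op, and installs a handler that logs which virtqueue reported the error; cleanup removes the handler and closes the notifier. Stripped of the QEMU wrappers, the underlying eventfd signal/clear pattern looks roughly like this (standalone sketch, not the EventNotifier API):

    /* Standalone eventfd sketch: the backend signals the fd, the consumer
     * reads it to "test and clear" and then reports the error. */
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
        int efd = eventfd(0, EFD_NONBLOCK);
        uint64_t one = 1, val;

        if (write(efd, &one, sizeof(one)) == sizeof(one) && /* backend signals */
            read(efd, &val, sizeof(val)) > 0) {             /* consumer clears */
            fprintf(stderr, "vring error reported (count %llu)\n",
                    (unsigned long long)val);
        }

        close(efd);
        return 0;
    }
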
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 7c122ab957..281152d338 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -675,11 +675,10 @@ static int virtio_iommu_probe(VirtIOIOMMU *s,
 
 static int virtio_iommu_iov_to_req(struct iovec *iov,
                                    unsigned int iov_cnt,
-                                   void *req, size_t req_sz)
+                                   void *req, size_t payload_sz)
 {
-    size_t sz, payload_sz = req_sz - sizeof(struct virtio_iommu_req_tail);
+    size_t sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz);
 
-    sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz);
     if (unlikely(sz != payload_sz)) {
         return VIRTIO_IOMMU_S_INVAL;
     }
@@ -692,7 +691,8 @@ static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s,                \
                                          unsigned int iov_cnt)          \
 {                                                                       \
     struct virtio_iommu_req_ ## __req req;                              \
-    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req)); \
+    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req,               \
+                    sizeof(req) - sizeof(struct virtio_iommu_req_tail));\
                                                                         \
     return ret ? ret : virtio_iommu_ ## __req(s, &req);                 \
 }
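
The probe fix moves the tail subtraction from the helper into this macro: fixed-size requests still copy only their payload, while the probe path, whose buffer size is variable, can now pass the exact byte count it needs. Hand-expanding the macro for the attach request makes the resulting call explicit (the attach identifiers exist upstream; the expansion is written out here purely for illustration):

    /* Illustrative hand expansion of the handler macro for 'attach'. */
    static int virtio_iommu_handle_attach(VirtIOIOMMU *s,
                                          struct iovec *iov,
                                          unsigned int iov_cnt)
    {
        struct virtio_iommu_req_attach req;
        /* Copy only the payload; the device-written tail at the end of the
         * request structure is never read from the guest buffer. */
        int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req,
                        sizeof(req) - sizeof(struct virtio_iommu_req_tail));

        return ret ? ret : virtio_iommu_attach(s, &req);
    }
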
@@ -1322,6 +1322,14 @@ static int iommu_post_load(void *opaque, int version_id)
     VirtIOIOMMU *s = opaque;
 
     g_tree_foreach(s->domains, reconstruct_endpoints, s);
+
+    /*
+     * Memory regions are dynamically turned on/off depending on
+     * 'config.bypass' and attached domain type if there is. After
+     * migration, we need to make sure the memory regions are
+     * still correct.
+     */
+    virtio_iommu_switch_address_space_all(s);
     return 0;
 }
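
The comment added above states the general rule: state derived from migrated fields (here, which IOMMU memory regions are enabled) must be recomputed after the load rather than migrated directly, and a post_load hook is the place to do it. A generic sketch of that pattern, with hypothetical MyDeviceState / my_device_refresh_regions names standing in for the virtio-iommu specifics:

    /* Generic QEMU vmstate sketch; MyDeviceState, the 'bypass' field and
     * my_device_refresh_regions() are hypothetical, for illustration only. */
    #include "migration/vmstate.h"

    typedef struct MyDeviceState {
        bool bypass;                      /* example migrated field */
    } MyDeviceState;

    static void my_device_refresh_regions(MyDeviceState *s)
    {
        /* Enable/disable memory regions based on s->bypass and friends. */
    }

    static int my_device_post_load(void *opaque, int version_id)
    {
        MyDeviceState *s = opaque;

        my_device_refresh_regions(s);     /* rebuild derived state after load */
        return 0;
    }

    static const VMStateDescription vmstate_my_device = {
        .name = "my-device",
        .version_id = 1,
        .minimum_version_id = 1,
        .post_load = my_device_post_load,
        .fields = (VMStateField[]) {
            VMSTATE_BOOL(bypass, MyDeviceState),
            VMSTATE_END_OF_LIST()
        },
    };
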