author    Stefan Hajnoczi <stefanha@redhat.com>  2023-10-05 09:01:01 -0400
committer Stefan Hajnoczi <stefanha@redhat.com>  2023-10-05 09:01:01 -0400
commit    2f3913f4b2ad74baeb5a6f1d36efbd9ecdf1057d (patch)
tree      2a77273973037c80a25b88dfa80bfc24baf7de52 /subprojects/libvhost-user/libvhost-user.c
parent    800af0aae1cfa456701c5fa1ef273ce47585179c (diff)
parent    ce0f3b032a960726c0dddfb4f81f223215179f26 (diff)
Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging
virtio,pci: features, cleanups

vdpa:
      shadow vq vlan support
      net migration with cvq
cxl:
     support emulating 4 HDM decoders
     serial number extended capability
virtio:
      shared dma-buf

Fixes, cleanups all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (53 commits)
  libvhost-user: handle shared_object msg
  vhost-user: add shared_object msg
  hw/display: introduce virtio-dmabuf
  util/uuid: add a hash function
  virtio: remove unused next argument from virtqueue_split_read_next_desc()
  virtio: remove unnecessary thread fence while reading next descriptor
  virtio: use shadow_avail_idx while checking number of heads
  libvhost-user.c: add assertion to vu_message_read_default
  pcie_sriov: unregister_vfs(): fix error path
  hw/i386/pc: improve physical address space bound check for 32-bit x86 systems
  amd_iommu: Fix APIC address check
  vdpa net: follow VirtIO initialization properly at cvq isolation probing
  vdpa net: stop probing if cannot set features
  vdpa net: fix error message setting virtio status
  hw/pci-bridge/cxl-upstream: Add serial number extended capability support
  hw/cxl: Support 4 HDM decoders at all levels of topology
  hw/cxl: Fix and use same calculation for HDM decoder block size everywhere
  hw/cxl: Add utility functions decoder interleave ways and target count.
  hw/cxl: Push cxl_decoder_count_enc() and cxl_decode_ig() into .c
  vdpa net: zero vhost_vdpa iova_tree pointer at cleanup
  ...

Conflicts:
  hw/core/machine.c
  Context conflict with commit 314e0a84cd5d ("hw/core: remove needless
  includes") because it removed an adjacent #include.
Diffstat (limited to 'subprojects/libvhost-user/libvhost-user.c')
-rw-r--r--  subprojects/libvhost-user/libvhost-user.c | 121
1 file changed, 121 insertions(+), 0 deletions(-)
diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index 0469a50101..051a611da3 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -161,6 +161,7 @@ vu_request_to_string(unsigned int req)
         REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
         REQ(VHOST_USER_ADD_MEM_REG),
         REQ(VHOST_USER_REM_MEM_REG),
+        REQ(VHOST_USER_GET_SHARED_OBJECT),
         REQ(VHOST_USER_MAX),
     };
 #undef REQ
@@ -322,6 +323,7 @@ vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
             fd_size = cmsg->cmsg_len - CMSG_LEN(0);
             vmsg->fd_num = fd_size / sizeof(int);
+            assert(fd_size < VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int));
             memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
             break;
         }
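
The assertion added in this hunk bounds the memcpy() into vmsg->fds, an array of VHOST_MEMORY_BASELINE_NREGIONS ints, so a peer that attaches more SCM_RIGHTS descriptors than expected cannot overflow it. As a standalone illustration (plain C, not QEMU code; recv_fds and MAX_FDS are names made up for this sketch), the same bounded-copy pattern looks like this:

#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define MAX_FDS 8   /* stands in for VHOST_MEMORY_BASELINE_NREGIONS */

/* Copy SCM_RIGHTS fds out of a received message, refusing to overrun
 * the caller's array -- the same invariant the new assert() enforces.
 * The control buffer is sized for MAX_FDS; callers pass max_fds <= MAX_FDS. */
static size_t recv_fds(int sock, int *fds, size_t max_fds)
{
    char byte;
    struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
    union {
        struct cmsghdr align;
        char buf[CMSG_SPACE(sizeof(int) * MAX_FDS)];
    } control;
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = control.buf, .msg_controllen = sizeof(control.buf),
    };
    struct cmsghdr *cmsg;

    if (recvmsg(sock, &msg, 0) <= 0) {
        return 0;
    }
    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            size_t fd_size = cmsg->cmsg_len - CMSG_LEN(0);
            assert(fd_size <= max_fds * sizeof(int)); /* bound the memcpy */
            memcpy(fds, CMSG_DATA(cmsg), fd_size);
            return fd_size / sizeof(int);
        }
    }
    return 0;
}

int main(void)
{
    int sv[2], fds[MAX_FDS], fd = STDOUT_FILENO;
    char byte = 'x';
    struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
    union {
        struct cmsghdr align;
        char buf[CMSG_SPACE(sizeof(int))];
    } control = { 0 };
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = control.buf, .msg_controllen = sizeof(control.buf),
    };
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0) {
        return 1;
    }

    /* Sender side: pass one fd (stdout) as ancillary data. */
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
    sendmsg(sv[0], &msg, 0);

    printf("received %zu fd(s)\n", recv_fds(sv[1], fds, MAX_FDS));
    return 0;
}
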
@@ -901,6 +903,24 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
 }
 
 static bool
+vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg)
+{
+    int fd_num = 0;
+    int dmabuf_fd = -1;
+    if (dev->iface->get_shared_object) {
+        dmabuf_fd = dev->iface->get_shared_object(
+            dev, &vmsg->payload.object.uuid[0]);
+    }
+    if (dmabuf_fd != -1) {
+        DPRINT("dmabuf_fd found for requested UUID\n");
+        vmsg->fds[fd_num++] = dmabuf_fd;
+    }
+    vmsg->fd_num = fd_num;
+
+    return true;
+}
+
+static bool
 vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
 {
     unsigned int i;
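
The new vu_get_shared_object() handler simply asks the device implementation, through the optional dev->iface->get_shared_object callback, for a dma-buf fd matching the requested UUID and attaches it to the reply when one is found. A hedged sketch of what a backend-side callback could look like follows; the export table and helper names are invented for illustration, and only the callback contract (UUID in, fd or -1 out) comes from this patch:

/* Illustrative backend callback; assumes the updated "libvhost-user.h"
 * from this series, which declares UUID_LEN, VuDev and the new hook. */
#include <string.h>
#include "libvhost-user.h"

#define MAX_EXPORTS 16                     /* made-up table size */

typedef struct {
    unsigned char uuid[UUID_LEN];
    int dmabuf_fd;                         /* -1 marks a free slot */
} SharedExport;

static SharedExport exports[MAX_EXPORTS];

static int
backend_get_shared_object(VuDev *dev, const unsigned char uuid[UUID_LEN])
{
    (void)dev;

    for (size_t i = 0; i < MAX_EXPORTS; i++) {
        if (exports[i].dmabuf_fd != -1 &&
            memcmp(exports[i].uuid, uuid, UUID_LEN) == 0) {
            return exports[i].dmabuf_fd;   /* fd travels back with the reply */
        }
    }
    return -1;                             /* unknown UUID: reply carries no fd */
}
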
@@ -1403,6 +1423,105 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
     return vu_process_message_reply(dev, &vmsg);
 }
 
+bool
+vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN],
+                        int *dmabuf_fd)
+{
+    bool result = false;
+    VhostUserMsg msg_reply;
+    VhostUserMsg msg = {
+        .request = VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP,
+        .size = sizeof(msg.payload.object),
+        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
+    };
+
+    memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);
+
+    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
+        return false;
+    }
+
+    pthread_mutex_lock(&dev->backend_mutex);
+    if (!vu_message_write(dev, dev->backend_fd, &msg)) {
+        goto out;
+    }
+
+    if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
+        goto out;
+    }
+
+    if (msg_reply.request != msg.request) {
+        DPRINT("Received unexpected msg type. Expected %d, received %d",
+               msg.request, msg_reply.request);
+        goto out;
+    }
+
+    if (msg_reply.fd_num != 1) {
+        DPRINT("Received unexpected number of fds. Expected 1, received %d",
+               msg_reply.fd_num);
+        goto out;
+    }
+
+    *dmabuf_fd = msg_reply.fds[0];
+    result = *dmabuf_fd > 0 && msg_reply.payload.u64 == 0;
+out:
+    pthread_mutex_unlock(&dev->backend_mutex);
+
+    return result;
+}
+
+static bool
+vu_send_message(VuDev *dev, VhostUserMsg *vmsg)
+{
+    bool result = false;
+    pthread_mutex_lock(&dev->backend_mutex);
+    if (!vu_message_write(dev, dev->backend_fd, vmsg)) {
+        goto out;
+    }
+
+    result = true;
+out:
+    pthread_mutex_unlock(&dev->backend_mutex);
+
+    return result;
+}
+
+bool
+vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN])
+{
+    VhostUserMsg msg = {
+        .request = VHOST_USER_BACKEND_SHARED_OBJECT_ADD,
+        .size = sizeof(msg.payload.object),
+        .flags = VHOST_USER_VERSION,
+    };
+
+    memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);
+
+    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
+        return false;
+    }
+
+    return vu_send_message(dev, &msg);
+}
+
+bool
+vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN])
+{
+    VhostUserMsg msg = {
+        .request = VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE,
+        .size = sizeof(msg.payload.object),
+        .flags = VHOST_USER_VERSION,
+    };
+
+    memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);
+
+    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
+        return false;
+    }
+
+    return vu_send_message(dev, &msg);
+}
+
 static bool
 vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
 {
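
The three exported calls added in this hunk all go over the backend channel (dev->backend_fd) under backend_mutex and require VHOST_USER_PROTOCOL_F_SHARED_OBJECT to have been negotiated. An illustrative way a device might use them (the wrapper names are invented; only the vu_*_shared_object() signatures come from this patch, and the updated "libvhost-user.h" from this series is assumed):

#include <stdbool.h>
#include "libvhost-user.h"

/* Exporter: advertise a dma-buf under its UUID so other devices can find it. */
static bool
export_dmabuf(VuDev *dev, unsigned char uuid[UUID_LEN])
{
    /* Fails if the protocol feature was not negotiated or the write fails. */
    return vu_add_shared_object(dev, uuid);
}

/* Importer: ask the frontend which backend exported this UUID, get its fd. */
static int
import_dmabuf(VuDev *dev, unsigned char uuid[UUID_LEN])
{
    int dmabuf_fd = -1;

    if (!vu_lookup_shared_object(dev, uuid, &dmabuf_fd)) {
        return -1;                         /* not found or request failed */
    }
    return dmabuf_fd;
}

/* Exporter: drop the entry once the underlying resource is gone. */
static void
unexport_dmabuf(VuDev *dev, unsigned char uuid[UUID_LEN])
{
    vu_rm_shared_object(dev, uuid);
}
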
@@ -1943,6 +2062,8 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
         return vu_add_mem_reg(dev, vmsg);
     case VHOST_USER_REM_MEM_REG:
         return vu_rem_mem_reg(dev, vmsg);
+    case VHOST_USER_GET_SHARED_OBJECT:
+        return vu_get_shared_object(dev, vmsg);
     default:
         vmsg_close_fds(vmsg);
         vu_panic(dev, "Unhandled request: %d", vmsg->request);
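
With VHOST_USER_GET_SHARED_OBJECT dispatched in vu_process_message() as above, a device opts in simply by filling the callback in its VuDevIface; if the callback is left NULL, vu_get_shared_object() still returns a well-formed reply carrying zero fds. A minimal wiring sketch (assumes the updated "libvhost-user.h"; a real device fills in its other callbacks too, and backend_get_shared_object is the illustrative function from the earlier sketch):

static const VuDevIface backend_iface = {
    /* ...the device's other callbacks go here... */
    .get_shared_object = backend_get_shared_object,   /* member added by this series */
};
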