| author | Maciej S. Szmigiero <maciej.szmigiero@oracle.com> | 2025-07-15 16:37:37 +0200 |
|---|---|---|
| committer | Cédric Le Goater <clg@redhat.com> | 2025-07-15 17:11:12 +0200 |
| commit | 300dcf58b72fa1635190b19f102231b0775e93cb | |
| tree | ef712b0fe23af924c43bbab81ec40948d9027aa2 /hw/vfio/migration-multifd.c | |
| parent | 6380b0a02fbdac253b8a98b300398319ab655237 | |
vfio/migration: Max in-flight VFIO device state buffers size limit
Allow capping the maximum total size of in-flight VFIO device state buffers queued at the destination; otherwise, a malicious QEMU source could theoretically cause the target QEMU to allocate unlimited amounts of memory for buffers in flight.

Since this is not expected to be a realistic threat in most VFIO live migration use cases, and the right value depends on the particular setup, disable this limit by default by setting it to UINT64_MAX.

Reviewed-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Avihai Horon <avihaih@nvidia.com>
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Link: https://lore.kernel.org/qemu-devel/4f7cad490988288f58e36b162d7a888ed7e7fd17.1752589295.git.maciej.szmigiero@oracle.com
Signed-off-by: Cédric Le Goater <clg@redhat.com>
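On the destination, the cap is read from the new `vbasedev->migration_max_queued_buffers_size` field checked in the diff below. How that field gets set is outside this file: the series exposes it as a vfio-pci device property, assumed here to be named `x-migration-max-queued-buffers-size` (the diffstat covers only migration-multifd.c, so the property wiring is not shown). Under that assumption, a destination could bound per-device queued state to 256 MiB with something like `-device vfio-pci,host=0000:01:00.0,x-migration-max-queued-buffers-size=268435456`.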
Diffstat (limited to 'hw/vfio/migration-multifd.c')
 hw/vfio/migration-multifd.c | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)
```diff
diff --git a/hw/vfio/migration-multifd.c b/hw/vfio/migration-multifd.c
index e539befaa9..d522671b8d 100644
--- a/hw/vfio/migration-multifd.c
+++ b/hw/vfio/migration-multifd.c
@@ -72,6 +72,7 @@ typedef struct VFIOMultifd {
     QemuMutex load_bufs_mutex; /* Lock order: this lock -> BQL */
     uint32_t load_buf_idx;
     uint32_t load_buf_idx_last;
+    size_t load_buf_queued_pending_buffers_size;
 } VFIOMultifd;
 
 static void vfio_state_buffer_clear(gpointer data)
@@ -128,6 +129,7 @@ static bool vfio_load_state_buffer_insert(VFIODevice *vbasedev,
     VFIOMigration *migration = vbasedev->migration;
     VFIOMultifd *multifd = migration->multifd;
     VFIOStateBuffer *lb;
+    size_t data_size = packet_total_size - sizeof(*packet);
 
     vfio_state_buffers_assert_init(&multifd->load_bufs);
     if (packet->idx >= vfio_state_buffers_size_get(&multifd->load_bufs)) {
@@ -143,8 +145,19 @@ static bool vfio_load_state_buffer_insert(VFIODevice *vbasedev,
 
     assert(packet->idx >= multifd->load_buf_idx);
 
-    lb->data = g_memdup2(&packet->data, packet_total_size - sizeof(*packet));
-    lb->len = packet_total_size - sizeof(*packet);
+    multifd->load_buf_queued_pending_buffers_size += data_size;
+    if (multifd->load_buf_queued_pending_buffers_size >
+        vbasedev->migration_max_queued_buffers_size) {
+        error_setg(errp,
+                   "%s: queuing state buffer %" PRIu32
+                   " would exceed the size max of %" PRIu64,
+                   vbasedev->name, packet->idx,
+                   vbasedev->migration_max_queued_buffers_size);
+        return false;
+    }
+
+    lb->data = g_memdup2(&packet->data, data_size);
+    lb->len = data_size;
     lb->is_present = true;
 
     return true;
@@ -328,6 +341,9 @@ static bool vfio_load_state_buffer_write(VFIODevice *vbasedev,
         assert(wr_ret <= buf_len);
         buf_len -= wr_ret;
         buf_cur += wr_ret;
+
+        assert(multifd->load_buf_queued_pending_buffers_size >= wr_ret);
+        multifd->load_buf_queued_pending_buffers_size -= wr_ret;
     }
 
     trace_vfio_load_state_device_buffer_load_end(vbasedev->name,
@@ -497,6 +513,7 @@ static VFIOMultifd *vfio_multifd_new(void)
 
     multifd->load_buf_idx = 0;
     multifd->load_buf_idx_last = UINT32_MAX;
+    multifd->load_buf_queued_pending_buffers_size = 0;
     qemu_cond_init(&multifd->load_bufs_buffer_ready_cond);
 
     multifd->load_bufs_iter_done = false;
```
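To make the accounting pattern easier to follow in isolation, here is a minimal, self-contained C sketch of what the patch does: charge each buffer against a running total before queuing it, reject the buffer when the cap would be exceeded, and credit the total back as bytes are written to the device. All names here (`BufferAccount`, `account_enqueue`, `account_written`) are hypothetical and not QEMU API.

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the patch's accounting state; in QEMU the
 * running counter lives in VFIOMultifd and the cap in VFIODevice.
 */
typedef struct {
    size_t queued_size;  /* total bytes of queued-but-unwritten buffers */
    uint64_t max_size;   /* cap; UINT64_MAX effectively disables it */
} BufferAccount;

/* Charge a buffer before queuing it; refuse if the cap would be exceeded. */
static bool account_enqueue(BufferAccount *acct, size_t data_size)
{
    acct->queued_size += data_size;
    if (acct->queued_size > acct->max_size) {
        /*
         * Mirrors the error_setg() + "return false" path in the patch.
         * As in the patch, the charge is not rolled back: a failed
         * insert fails the device load as a whole, so the counter is
         * never consulted again.
         */
        fprintf(stderr, "queuing %zu bytes would exceed the max of %llu\n",
                data_size, (unsigned long long)acct->max_size);
        return false;
    }
    return true;
}

/* Credit back bytes as they are actually written to the device. */
static void account_written(BufferAccount *acct, size_t wr_ret)
{
    assert(acct->queued_size >= wr_ret); /* same invariant the patch asserts */
    acct->queued_size -= wr_ret;
}

int main(void)
{
    BufferAccount acct = { .queued_size = 0, .max_size = 4096 };

    bool ok = account_enqueue(&acct, 3000);  /* fits under the cap */
    assert(ok);
    ok = account_enqueue(&acct, 2000);       /* 3000 + 2000 > 4096: rejected */
    assert(!ok);
    account_written(&acct, 3000);            /* drain as bytes hit the device */
    return 0;
}
```

Note the ordering: the counter is incremented before the cap check and decremented only by the amount each write actually consumed (`wr_ret`), so the total tracks bytes queued but not yet written, which is exactly the quantity the cap is meant to bound.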