From cd7ebf6bf2ca367fde49d73f662e700fd355741f Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko
Date: Fri, 25 Oct 2024 00:02:59 +0300
Subject: virtio-gpu: Use trace events for tracking number of in-flight fences
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Replace the printfs used for tracking in-flight fence inc/dec events
with trace events, for consistency with the rest of the virtio-gpu code
that uses tracing.

Suggested-by: Marc-André Lureau
Signed-off-by: Dmitry Osipenko
Message-Id: <20241024210311.118220-2-dmitry.osipenko@collabora.com>
Signed-off-by: Alex Bennée
---
 hw/display/virtio-gpu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'hw/display/virtio-gpu.c')

diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 49fd803393..3fcc434732 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -1046,7 +1046,7 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
             if (g->stats.max_inflight < g->inflight) {
                 g->stats.max_inflight = g->inflight;
             }
-            fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
+            trace_virtio_gpu_inc_inflight_fences(g->inflight);
         }
     } else {
         g_free(cmd);
@@ -1066,7 +1066,7 @@ static void virtio_gpu_process_fenceq(VirtIOGPU *g)
         g_free(cmd);
         g->inflight--;
         if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
-            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
+            trace_virtio_gpu_dec_inflight_fences(g->inflight);
         }
     }
 }
--
cgit 1.4.1

From 6a3e00d8c9822433a8989bd0529e80580bf3e54f Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko
Date: Fri, 25 Oct 2024 00:03:06 +0300
Subject: virtio-gpu: Don't require udmabuf when blobs and virgl are enabled
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

udmabuf usage is mandatory when virgl is disabled and the blobs feature
is enabled in the QEMU machine configuration. If virgl and blobs are
both enabled, udmabuf is optional. Since udmabuf isn't widely supported
by popular Linux distros today, let's relax the udmabuf requirement for
blobs=on,virgl=on. Now full-featured virtio-gpu acceleration is
available to QEMU users without udmabuf having to be present in the
system.

Reviewed-by: Antonio Caggiano
Signed-off-by: Huang Rui
Reviewed-by: Antonio Caggiano
Reviewed-by: Marc-André Lureau
Signed-off-by: Dmitry Osipenko
Message-Id: <20241024210311.118220-9-dmitry.osipenko@collabora.com>
Signed-off-by: Alex Bennée
---
 hw/display/virtio-gpu.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'hw/display/virtio-gpu.c')

diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 3fcc434732..3d9679c1ef 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -1455,6 +1455,7 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
 
     if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
         if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) &&
+            !virtio_gpu_virgl_enabled(g->parent_obj.conf) &&
             !virtio_gpu_have_udmabuf()) {
             error_setg(errp, "need rutabaga or udmabuf for blob resources");
             return;
--
cgit 1.4.1
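For reference, the two trace points used in the tracing patch above are
declared in hw/display/trace-events; the declarations are visible as context
lines in a later patch of this series:

    virtio_gpu_inc_inflight_fences(uint32_t inflight) "in-flight+ %u"
    virtio_gpu_dec_inflight_fences(uint32_t inflight) "in-flight- %u"

Like the old fprintf counters, they only fire for devices created with
stats=true; assuming QEMU's usual tracing switches, something like
-trace "virtio_gpu_*" on the command line then enables them at runtime.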
From 640f9149c3dcafbfeb495a10f6105c6f71f24d1d Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko
Date: Fri, 25 Oct 2024 00:03:08 +0300
Subject: virtio-gpu: Support suspension of command processing
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Check whether command processing has finished; if it hasn't, stop
processing further commands and retry the unfinished command on the
next queue run. This allows us to support asynchronous execution of
non-fenced commands, which is needed for unmapping host blobs safely.

Suggested-by: Akihiko Odaki
Signed-off-by: Dmitry Osipenko
Message-Id: <20241024210311.118220-11-dmitry.osipenko@collabora.com>
Signed-off-by: Alex Bennée
---
 hw/display/trace-events | 1 +
 hw/display/virtio-gpu.c | 6 ++++++
 2 files changed, 7 insertions(+)

(limited to 'hw/display/virtio-gpu.c')

diff --git a/hw/display/trace-events b/hw/display/trace-events
index e212710284..d26d663f96 100644
--- a/hw/display/trace-events
+++ b/hw/display/trace-events
@@ -55,6 +55,7 @@ virtio_gpu_fence_ctrl(uint64_t fence, uint32_t type) "fence 0x%" PRIx64 ", type
 virtio_gpu_fence_resp(uint64_t fence) "fence 0x%" PRIx64
 virtio_gpu_inc_inflight_fences(uint32_t inflight) "in-flight+ %u"
 virtio_gpu_dec_inflight_fences(uint32_t inflight) "in-flight- %u"
+virtio_gpu_cmd_suspended(uint32_t cmd) "cmd 0x%x"
 
 # qxl.c
 disable qxl_io_write_vga(int qid, const char *mode, uint32_t addr, uint32_t val) "%d %s addr=%u val=%u"

diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 3d9679c1ef..180d882f0a 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -1034,6 +1034,12 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
         /* process command */
         vgc->process_cmd(g, cmd);
 
+        /* command suspended */
+        if (!cmd->finished && !(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
+            trace_virtio_gpu_cmd_suspended(cmd->cmd_hdr.type);
+            break;
+        }
+
         QTAILQ_REMOVE(&g->cmdq, cmd, next);
         if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
             g->stats.requests++;
--
cgit 1.4.1
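The suspension contract above is worth spelling out: a vgc->process_cmd
implementation suspends a command simply by returning without completing it.
A minimal sketch, with made-up names (example_process_cmd() and
example_blob_busy() are not part of the series):

    static void example_process_cmd(VirtIOGPU *g,
                                    struct virtio_gpu_ctrl_command *cmd)
    {
        /* hypothetical wait condition, e.g. a blob still being unmapped */
        if (example_blob_busy(g, cmd)) {
            /*
             * Returning with neither cmd->finished nor cmd->error set makes
             * virtio_gpu_process_cmdq() take the new branch above: the
             * command stays at the head of g->cmdq and is retried on the
             * next queue run. This only applies to non-fenced commands;
             * fenced commands are tracked on the fence queue instead.
             */
            return;
        }

        /* ... execute the command ... */

        /* completing normally: the response marks the command finished */
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
    }

The next patch uses exactly this mechanism to keep a blob-unmapping command
queued until its host mapping has been torn down asynchronously.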
From 7c092f17cceef10258ed23006b40e19b14996471 Mon Sep 17 00:00:00 2001
From: Robert Beckett
Date: Fri, 25 Oct 2024 00:03:09 +0300
Subject: virtio-gpu: Handle resource blob commands
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Support BLOB resource creation, mapping, unmapping and set-scanout by
calling the new stable virglrenderer 1.0 interface. Only enabled when
supported by virglrenderer and when the blob config is set, e.g.

  -device virtio-vga-gl,blob=true

Signed-off-by: Antonio Caggiano
Signed-off-by: Robert Beckett # added set_scanout_blob
Signed-off-by: Xenia Ragiadakou
Signed-off-by: Huang Rui
Signed-off-by: Dmitry Osipenko
Message-Id: <20241024210311.118220-12-dmitry.osipenko@collabora.com>
Signed-off-by: Alex Bennée
---
 hw/display/virtio-gpu-gl.c     |   3 +
 hw/display/virtio-gpu-virgl.c  | 414 ++++++++++++++++++++++++++++++++++++++++-
 hw/display/virtio-gpu.c        |  18 +-
 include/hw/virtio/virtio-gpu.h |   9 +
 4 files changed, 434 insertions(+), 10 deletions(-)

(limited to 'hw/display/virtio-gpu.c')

diff --git a/hw/display/virtio-gpu-gl.c b/hw/display/virtio-gpu-gl.c
index bf87ba4232..f2555673a1 100644
--- a/hw/display/virtio-gpu-gl.c
+++ b/hw/display/virtio-gpu-gl.c
@@ -166,6 +166,9 @@ static void virtio_gpu_gl_device_unrealize(DeviceState *qdev)
     VirtIOGPUGL *gl = VIRTIO_GPU_GL(qdev);
 
     if (gl->renderer_state >= RS_INITED) {
+#if VIRGL_VERSION_MAJOR >= 1
+        qemu_bh_delete(gl->cmdq_resume_bh);
+#endif
         if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
             timer_free(gl->print_stats);
         }

diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 3ffea478e7..b2f4e215a7 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -17,6 +17,8 @@
 #include "trace.h"
 #include "hw/virtio/virtio.h"
 #include "hw/virtio/virtio-gpu.h"
+#include "hw/virtio/virtio-gpu-bswap.h"
+#include "hw/virtio/virtio-gpu-pixman.h"
 
 #include "ui/egl-helpers.h"
 
@@ -24,6 +26,7 @@
 
 struct virtio_gpu_virgl_resource {
     struct virtio_gpu_simple_resource base;
+    MemoryRegion *mr;
 };
 
 static struct virtio_gpu_virgl_resource *
@@ -47,6 +50,145 @@ virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
 }
 #endif
 
+#if VIRGL_VERSION_MAJOR >= 1
+struct virtio_gpu_virgl_hostmem_region {
+    MemoryRegion mr;
+    struct VirtIOGPU *g;
+    bool finish_unmapping;
+};
+
+static struct virtio_gpu_virgl_hostmem_region *
+to_hostmem_region(MemoryRegion *mr)
+{
+    return container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
+}
+
+static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
+{
+    VirtIOGPU *g = opaque;
+
+    virtio_gpu_process_cmdq(g);
+}
+
+static void virtio_gpu_virgl_hostmem_region_free(void *obj)
+{
+    MemoryRegion *mr = MEMORY_REGION(obj);
+    struct virtio_gpu_virgl_hostmem_region *vmr;
+    VirtIOGPUBase *b;
+    VirtIOGPUGL *gl;
+
+    vmr = to_hostmem_region(mr);
+    vmr->finish_unmapping = true;
+
+    b = VIRTIO_GPU_BASE(vmr->g);
+    b->renderer_blocked--;
+
+    /*
+     * memory_region_unref() is executed from RCU thread context, while
+     * virglrenderer works only on the main-loop thread that's holding GL
+     * context.
+ */ + gl = VIRTIO_GPU_GL(vmr->g); + qemu_bh_schedule(gl->cmdq_resume_bh); +} + +static int +virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g, + struct virtio_gpu_virgl_resource *res, + uint64_t offset) +{ + struct virtio_gpu_virgl_hostmem_region *vmr; + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); + MemoryRegion *mr; + uint64_t size; + void *data; + int ret; + + if (!virtio_gpu_hostmem_enabled(b->conf)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__); + return -EOPNOTSUPP; + } + + ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size); + if (ret) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n", + __func__, strerror(-ret)); + return ret; + } + + vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1); + vmr->g = g; + + mr = &vmr->mr; + memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data); + memory_region_add_subregion(&b->hostmem, offset, mr); + memory_region_set_enabled(mr, true); + + /* + * MR could outlive the resource if MR's reference is held outside of + * virtio-gpu. In order to prevent unmapping resource while MR is alive, + * and thus, making the data pointer invalid, we will block virtio-gpu + * command processing until MR is fully unreferenced and freed. + */ + OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free; + + res->mr = mr; + + return 0; +} + +static int +virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g, + struct virtio_gpu_virgl_resource *res, + bool *cmd_suspended) +{ + struct virtio_gpu_virgl_hostmem_region *vmr; + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); + MemoryRegion *mr = res->mr; + int ret; + + if (!mr) { + return 0; + } + + vmr = to_hostmem_region(res->mr); + + /* + * Perform async unmapping in 3 steps: + * + * 1. Begin async unmapping with memory_region_del_subregion() + * and suspend/block cmd processing. + * 2. Wait for res->mr to be freed and cmd processing resumed + * asynchronously by virtio_gpu_virgl_hostmem_region_free(). + * 3. Finish the unmapping with final virgl_renderer_resource_unmap(). 
+ */ + if (vmr->finish_unmapping) { + res->mr = NULL; + g_free(vmr); + + ret = virgl_renderer_resource_unmap(res->base.resource_id); + if (ret) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: failed to unmap virgl resource: %s\n", + __func__, strerror(-ret)); + return ret; + } + } else { + *cmd_suspended = true; + + /* render will be unblocked once MR is freed */ + b->renderer_blocked++; + + /* memory region owns self res->mr object and frees it by itself */ + memory_region_set_enabled(mr, false); + memory_region_del_subregion(&b->hostmem, mr); + object_unparent(OBJECT(mr)); + } + + return 0; +} +#endif + static void virgl_cmd_create_resource_2d(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { @@ -78,6 +220,7 @@ static void virgl_cmd_create_resource_2d(VirtIOGPU *g, res->base.height = c2d.height; res->base.format = c2d.format; res->base.resource_id = c2d.resource_id; + res->base.dmabuf_fd = -1; QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next); args.handle = c2d.resource_id; @@ -125,6 +268,7 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g, res->base.height = c3d.height; res->base.format = c3d.format; res->base.resource_id = c3d.resource_id; + res->base.dmabuf_fd = -1; QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next); args.handle = c3d.resource_id; @@ -142,7 +286,8 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g, } static void virgl_cmd_resource_unref(VirtIOGPU *g, - struct virtio_gpu_ctrl_command *cmd) + struct virtio_gpu_ctrl_command *cmd, + bool *cmd_suspended) { struct virtio_gpu_resource_unref unref; struct virtio_gpu_virgl_resource *res; @@ -160,6 +305,16 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g, return; } +#if VIRGL_VERSION_MAJOR >= 1 + if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + if (*cmd_suspended) { + return; + } +#endif + virgl_renderer_resource_detach_iov(unref.resource_id, &res_iovs, &num_iovs); @@ -509,9 +664,241 @@ static void virgl_cmd_get_capset(VirtIOGPU *g, g_free(resp); } +#if VIRGL_VERSION_MAJOR >= 1 +static void virgl_cmd_resource_create_blob(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virgl_renderer_resource_create_blob_args virgl_args = { 0 }; + g_autofree struct virtio_gpu_virgl_resource *res = NULL; + struct virtio_gpu_resource_create_blob cblob; + struct virgl_renderer_resource_info info; + int ret; + + if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) { + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + VIRTIO_GPU_FILL_CMD(cblob); + virtio_gpu_create_blob_bswap(&cblob); + trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); + + if (cblob.resource_id == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = virtio_gpu_virgl_find_resource(g, cblob.resource_id); + if (res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", + __func__, cblob.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = g_new0(struct virtio_gpu_virgl_resource, 1); + res->base.resource_id = cblob.resource_id; + res->base.blob_size = cblob.size; + res->base.dmabuf_fd = -1; + + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) { + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), + cmd, &res->base.addrs, + &res->base.iov, &res->base.iov_cnt); + if (!ret) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + } + + 
+    virgl_args.res_handle = cblob.resource_id;
+    virgl_args.ctx_id = cblob.hdr.ctx_id;
+    virgl_args.blob_mem = cblob.blob_mem;
+    virgl_args.blob_id = cblob.blob_id;
+    virgl_args.blob_flags = cblob.blob_flags;
+    virgl_args.size = cblob.size;
+    virgl_args.iovecs = res->base.iov;
+    virgl_args.num_iovs = res->base.iov_cnt;
+
+    ret = virgl_renderer_resource_create_blob(&virgl_args);
+    if (ret) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
+                      __func__, strerror(-ret));
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        virtio_gpu_cleanup_mapping(g, &res->base);
+        return;
+    }
+
+    ret = virgl_renderer_resource_get_info(cblob.resource_id, &info);
+    if (ret) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: resource does not have info %d: %s\n",
+                      __func__, cblob.resource_id, strerror(-ret));
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        virtio_gpu_cleanup_mapping(g, &res->base);
+        virgl_renderer_resource_unref(cblob.resource_id);
+        return;
+    }
+
+    res->base.dmabuf_fd = info.fd;
+
+    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
+    res = NULL;
+}
+
+static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
+                                        struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_resource_map_blob mblob;
+    struct virtio_gpu_virgl_resource *res;
+    struct virtio_gpu_resp_map_info resp;
+    int ret;
+
+    VIRTIO_GPU_FILL_CMD(mblob);
+    virtio_gpu_map_blob_bswap(&mblob);
+
+    res = virtio_gpu_virgl_find_resource(g, mblob.resource_id);
+    if (!res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+                      __func__, mblob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
+    if (ret) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+
+    memset(&resp, 0, sizeof(resp));
+    resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
+    virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
+    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
+}
+
+static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
+                                          struct virtio_gpu_ctrl_command *cmd,
+                                          bool *cmd_suspended)
+{
+    struct virtio_gpu_resource_unmap_blob ublob;
+    struct virtio_gpu_virgl_resource *res;
+    int ret;
+
+    VIRTIO_GPU_FILL_CMD(ublob);
+    virtio_gpu_unmap_blob_bswap(&ublob);
+
+    res = virtio_gpu_virgl_find_resource(g, ublob.resource_id);
+    if (!res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+                      __func__, ublob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    ret = virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended);
+    if (ret) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+}
+
+static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
+                                       struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_framebuffer fb = { 0 };
+    struct virtio_gpu_virgl_resource *res;
+    struct virtio_gpu_set_scanout_blob ss;
+    uint64_t fbend;
+
+    VIRTIO_GPU_FILL_CMD(ss);
+    virtio_gpu_scanout_blob_bswap(&ss);
+    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
+                                          ss.r.width, ss.r.height, ss.r.x,
+                                          ss.r.y);
+
+    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
+                      __func__, ss.scanout_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
+        return;
+    }
+
+    if (ss.resource_id == 0) {
+        virtio_gpu_disable_scanout(g, ss.scanout_id);
+        return;
+    }
+
+    if (ss.width < 16 ||
+        ss.height < 16 ||
+        ss.r.x + ss.r.width > ss.width ||
+        ss.r.y + ss.r.height > ss.height) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
+                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
+                      __func__, ss.scanout_id, ss.resource_id,
+                      ss.r.x, ss.r.y, ss.r.width, ss.r.height,
+                      ss.width, ss.height);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+        return;
+    }
+
+    res = virtio_gpu_virgl_find_resource(g, ss.resource_id);
+    if (!res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+                      __func__, ss.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+    if (res->base.dmabuf_fd < 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource not backed by dmabuf %d\n",
+                      __func__, ss.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+
+    fb.format = virtio_gpu_get_pixman_format(ss.format);
+    if (!fb.format) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: pixel format not supported %d\n",
+                      __func__, ss.format);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+        return;
+    }
+
+    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
+    fb.width = ss.width;
+    fb.height = ss.height;
+    fb.stride = ss.strides[0];
+    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;
+
+    fbend = fb.offset;
+    fbend += fb.stride * (ss.r.height - 1);
+    fbend += fb.bytes_pp * ss.r.width;
+    if (fbend > res->base.blob_size) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: fb end out of range\n",
+                      __func__);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+        return;
+    }
+
+    g->parent_obj.enable = 1;
+    if (virtio_gpu_update_dmabuf(g, ss.scanout_id, &res->base, &fb, &ss.r)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to update dmabuf\n",
+                      __func__);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+        return;
+    }
+
+    virtio_gpu_update_scanout(g, ss.scanout_id, &res->base, &fb, &ss.r);
+}
+#endif
+
 void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
 {
+    bool cmd_suspended = false;
+
     VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
 
     virgl_renderer_force_ctx_0();
@@ -553,7 +940,7 @@
         virgl_cmd_resource_flush(g, cmd);
         break;
     case VIRTIO_GPU_CMD_RESOURCE_UNREF:
-        virgl_cmd_resource_unref(g, cmd);
+        virgl_cmd_resource_unref(g, cmd, &cmd_suspended);
        break;
     case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
         /* TODO add security */
@@ -575,12 +962,26 @@
     case VIRTIO_GPU_CMD_GET_EDID:
         virtio_gpu_get_edid(g, cmd);
         break;
+#if VIRGL_VERSION_MAJOR >= 1
+    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
+        virgl_cmd_resource_create_blob(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
+        virgl_cmd_resource_map_blob(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
+        virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended);
+        break;
+    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
+        virgl_cmd_set_scanout_blob(g, cmd);
+        break;
+#endif
     default:
         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
         break;
     }
 
-    if (cmd->finished) {
+    if (cmd_suspended || cmd->finished) {
         return;
     }
     if (cmd->error) {
@@ -749,6 +1150,13 @@
         timer_mod(gl->print_stats,
                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
     }
+
+#if VIRGL_VERSION_MAJOR >= 1
+    gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
+                                    virtio_gpu_virgl_resume_cmdq_bh,
+                                    g);
+#endif
+
     return 0;
 }

diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 180d882f0a..b6cd9fe567 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -362,7 +362,7 @@ static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
     QTAILQ_INSERT_HEAD(&g->reslist, res, next);
 }
 
-static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
+void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
 {
     struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
     struct virtio_gpu_simple_resource *res;
 
@@ -579,11 +579,11 @@ static void virtio_unref_resource(pixman_image_t *image, void *data)
     pixman_image_unref(data);
 }
 
-static void virtio_gpu_update_scanout(VirtIOGPU *g,
-                                      uint32_t scanout_id,
-                                      struct virtio_gpu_simple_resource *res,
-                                      struct virtio_gpu_framebuffer *fb,
-                                      struct virtio_gpu_rect *r)
+void virtio_gpu_update_scanout(VirtIOGPU *g,
+                               uint32_t scanout_id,
+                               struct virtio_gpu_simple_resource *res,
+                               struct virtio_gpu_framebuffer *fb,
+                               struct virtio_gpu_rect *r)
 {
     struct virtio_gpu_simple_resource *ores;
     struct virtio_gpu_scanout *scanout;
 
@@ -1467,10 +1467,14 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
             return;
         }
 
+#ifdef VIRGL_VERSION_MAJOR
+    #if VIRGL_VERSION_MAJOR < 1
         if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
-            error_setg(errp, "blobs and virgl are not compatible (yet)");
+            error_setg(errp, "old virglrenderer, blob resources unsupported");
             return;
         }
+    #endif
+#endif
     }
 
     if (!virtio_gpu_base_device_realize(qdev,

diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index 71775243e9..f9ed81071f 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -237,6 +237,8 @@ struct VirtIOGPUGL {
 
     QEMUTimer *fence_poll;
     QEMUTimer *print_stats;
+
+    QEMUBH *cmdq_resume_bh;
 };
 
 struct VhostUserGPU {
@@ -336,6 +338,13 @@ int virtio_gpu_update_dmabuf(VirtIOGPU *g,
                              struct virtio_gpu_framebuffer *fb,
                              struct virtio_gpu_rect *r);
 
+void virtio_gpu_update_scanout(VirtIOGPU *g,
+                               uint32_t scanout_id,
+                               struct virtio_gpu_simple_resource *res,
+                               struct virtio_gpu_framebuffer *fb,
+                               struct virtio_gpu_rect *r);
+void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id);
+
 /* virtio-gpu-3d.c */
 void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd);
--
cgit 1.4.1
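Two details of the patch above are easy to miss. First, the mapping's
MemoryRegion is created with itself as its QOM owner, so the object's ->free
hook runs only once the last reference (possibly held until the RCU thread
drops it) is gone; that is what makes the three-step asynchronous unmapping
safe. Second, virtio_gpu_virgl_map_resource_blob() rejects mappings while
hostmem is disabled, so using host blob mappings also requires a hostmem
window to be configured, e.g. (an assumed invocation, mirroring the
documentation added in the next patch):

    -device virtio-vga-gl,blob=true,hostmem=4G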
From 94d0ea1c19289d76ced934711fccd2269e69bb29 Mon Sep 17 00:00:00 2001
From: Antonio Caggiano
Date: Fri, 25 Oct 2024 00:03:11 +0300
Subject: virtio-gpu: Support Venus context
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Request Venus when initializing VirGL if the venus=true flag is set for
the virtio-gpu-gl device.

Signed-off-by: Antonio Caggiano
Signed-off-by: Huang Rui
Signed-off-by: Dmitry Osipenko
Message-Id: <20241024210311.118220-14-dmitry.osipenko@collabora.com>
Signed-off-by: Alex Bennée
---
 docs/system/devices/virtio-gpu.rst | 11 +++++++++++
 hw/display/virtio-gpu-gl.c         |  2 ++
 hw/display/virtio-gpu-virgl.c      | 22 ++++++++++++++++++----
 hw/display/virtio-gpu.c            | 15 +++++++++++++++
 include/hw/virtio/virtio-gpu.h     |  3 +++
 5 files changed, 49 insertions(+), 4 deletions(-)

(limited to 'hw/display/virtio-gpu.c')

diff --git a/docs/system/devices/virtio-gpu.rst b/docs/system/devices/virtio-gpu.rst
index cb73dd7998..b7eb0fc0e7 100644
--- a/docs/system/devices/virtio-gpu.rst
+++ b/docs/system/devices/virtio-gpu.rst
@@ -71,6 +71,17 @@ representation back to OpenGL API calls.
 
 .. _Gallium3D: https://www.freedesktop.org/wiki/Software/gallium/
 .. _virglrenderer: https://gitlab.freedesktop.org/virgl/virglrenderer/
 
+Translation of Vulkan API calls is supported since the release of
+`virglrenderer`_ v1.0.0, using the `venus`_ protocol. The ``Venus`` virtio-gpu
+capability set ("capset") requires host blob support (the ``hostmem`` and
+``blob`` fields) and is enabled with the ``venus`` field. The ``hostmem``
+field specifies the size of the virtio-gpu host memory window, typically
+between 256M and 8G.
+
+.. parsed-literal::
+    -device virtio-gpu-gl,hostmem=8G,blob=true,venus=true
+
+.. _venus: https://gitlab.freedesktop.org/virgl/venus-protocol/
+
 virtio-gpu rutabaga
 -------------------

diff --git a/hw/display/virtio-gpu-gl.c b/hw/display/virtio-gpu-gl.c
index e859c0dff0..7c0e448b46 100644
--- a/hw/display/virtio-gpu-gl.c
+++ b/hw/display/virtio-gpu-gl.c
@@ -157,6 +157,8 @@ static void virtio_gpu_gl_device_realize(DeviceState *qdev, Error **errp)
 static Property virtio_gpu_gl_properties[] = {
     DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
                     VIRTIO_GPU_FLAG_STATS_ENABLED, false),
+    DEFINE_PROP_BIT("venus", VirtIOGPU, parent_obj.conf.flags,
+                    VIRTIO_GPU_FLAG_VENUS_ENABLED, false),
     DEFINE_PROP_END_OF_LIST(),
 };

diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 5a881c58a1..eedae7357f 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -1128,6 +1128,11 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
         flags |= VIRGL_RENDERER_D3D11_SHARE_TEXTURE;
     }
 #endif
+#if VIRGL_VERSION_MAJOR >= 1
+    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
+        flags |= VIRGL_RENDERER_VENUS | VIRGL_RENDERER_RENDER_SERVER;
+    }
+#endif
 
     ret = virgl_renderer_init(g, flags, &virtio_gpu_3d_cbs);
     if (ret != 0) {
@@ -1161,7 +1166,7 @@ static void virtio_gpu_virgl_add_capset(GArray *capset_ids, uint32_t capset_id)
 
 GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g)
 {
-    uint32_t capset2_max_ver, capset2_max_size;
+    uint32_t capset_max_ver, capset_max_size;
     GArray *capset_ids;
 
     capset_ids = g_array_new(false, false, sizeof(uint32_t));
@@ -1170,11 +1175,20 @@ GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g)
         virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL);
 
     virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
-                               &capset2_max_ver,
-                               &capset2_max_size);
-    if (capset2_max_ver) {
+                               &capset_max_ver,
+                               &capset_max_size);
+    if (capset_max_ver) {
         virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL2);
     }
+    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
+        virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VENUS,
+                                   &capset_max_ver,
+                                   &capset_max_size);
+        if (capset_max_size) {
+            virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VENUS);
+        }
+    }
+
     return capset_ids;
 }

diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index b6cd9fe567..c0570ef856 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -1477,6 +1477,21 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
 #endif
     }
 
+    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
+#ifdef VIRGL_VERSION_MAJOR
+    #if VIRGL_VERSION_MAJOR >= 1
+        if (!virtio_gpu_blob_enabled(g->parent_obj.conf) ||
+            !virtio_gpu_hostmem_enabled(g->parent_obj.conf)) {
+            error_setg(errp, "venus requires enabled blob and hostmem options");
+            return;
+        }
+    #else
+        error_setg(errp, "old virglrenderer, venus unsupported");
+        return;
+    #endif
+#endif
+    }
+
     if (!virtio_gpu_base_device_realize(qdev,
                                         virtio_gpu_handle_ctrl_cb,
                                         virtio_gpu_handle_cursor_cb,

diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index 07131d8eb9..553799b8cc 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -97,6 +97,7 @@ enum virtio_gpu_base_conf_flags {
     VIRTIO_GPU_FLAG_BLOB_ENABLED,
     VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED,
     VIRTIO_GPU_FLAG_RUTABAGA_ENABLED,
+    VIRTIO_GPU_FLAG_VENUS_ENABLED,
 };
 
 #define virtio_gpu_virgl_enabled(_cfg) \
@@ -115,6 +116,8 @@ enum virtio_gpu_base_conf_flags {
     (_cfg.flags & (1 << VIRTIO_GPU_FLAG_RUTABAGA_ENABLED))
 #define virtio_gpu_hostmem_enabled(_cfg) \
     (_cfg.hostmem > 0)
+#define virtio_gpu_venus_enabled(_cfg) \
+    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_VENUS_ENABLED))
 
 struct virtio_gpu_base_conf {
     uint32_t max_outputs;
--
cgit 1.4.1
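As a quick sanity check of the resulting configuration space, the
realize-time validation added by this series gives each misconfiguration a
distinct error. Assuming a QEMU build against virglrenderer >= 1.0, the
invocations below (hypothetical, with the error strings taken from the
patches) behave as follows:

    qemu-system-x86_64 -device virtio-gpu-gl,venus=true
    # fails: "venus requires enabled blob and hostmem options"

    qemu-system-x86_64 -device virtio-gpu-gl,blob=true,hostmem=8G,venus=true
    # realizes; virtio_gpu_virgl_get_capsets() additionally advertises
    # VIRTIO_GPU_CAPSET_VENUS to the guest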