From d40ddd529004709350202114fc1c2fc99127647d Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Wed, 7 Dec 2022 09:19:31 +0000
Subject: hw/xen: Add xen_overlay device for emulating shared xenheap pages

For the shared info page and for grant tables, Xen shares its own pages
from the "Xen heap" to the guest. The guest requests that a given page
from a certain address space (XENMAPSPACE_shared_info, etc.) be mapped
to a given GPA using the XENMEM_add_to_physmap hypercall.

To support that in qemu when *emulating* Xen, create a memory region
(migratable) and allow it to be mapped as an overlay when requested.

Xen theoretically allows the same page to be mapped multiple times into
the guest, but that's hard to track and reinstate over migration, so we
automatically *unmap* any previous mapping when creating a new one.

This approach has been used in production with... a non-trivial number
of guests expecting true Xen, without any problems yet being noticed.

This adds just the shared info page for now. The grant tables will be a
larger region, and will need to be overlaid one page at a time. I think
that means I need to create separate aliases for each page of the overall
grant_frames region, so that they can be mapped individually.

Signed-off-by: David Woodhouse
Reviewed-by: Paul Durrant
---
 hw/i386/kvm/xen_overlay.c | 210 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 210 insertions(+)
 create mode 100644 hw/i386/kvm/xen_overlay.c

(limited to 'hw/i386/kvm/xen_overlay.c')

diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c
new file mode 100644
index 0000000000..a2441e2b4e
--- /dev/null
+++ b/hw/i386/kvm/xen_overlay.c
@@ -0,0 +1,210 @@
+/*
+ * QEMU Xen emulation: Shared/overlay pages support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "qemu/module.h"
+#include "qemu/main-loop.h"
+#include "qapi/error.h"
+#include "qom/object.h"
+#include "exec/target_page.h"
+#include "exec/address-spaces.h"
+#include "migration/vmstate.h"
+
+#include "hw/sysbus.h"
+#include "hw/xen/xen.h"
+#include "xen_overlay.h"
+
+#include "sysemu/kvm.h"
+#include "sysemu/kvm_xen.h"
+#include <linux/kvm.h>
+
+#include "hw/xen/interface/memory.h"
+
+
+#define TYPE_XEN_OVERLAY "xen-overlay"
+OBJECT_DECLARE_SIMPLE_TYPE(XenOverlayState, XEN_OVERLAY)
+
+#define XEN_PAGE_SHIFT 12
+#define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT)
+
+struct XenOverlayState {
+    /*< private >*/
+    SysBusDevice busdev;
+    /*< public >*/
+
+    MemoryRegion shinfo_mem;
+    void *shinfo_ptr;
+    uint64_t shinfo_gpa;
+};
+
+struct XenOverlayState *xen_overlay_singleton;
+
+static void xen_overlay_do_map_page(MemoryRegion *page, uint64_t gpa)
+{
+    /*
+     * Xen allows guests to map the same page as many times as it likes
+     * into guest physical frames. We don't, because it would be hard
+     * to track and restore them all. One mapping of each page is
+     * perfectly sufficient for all known guests... and we've tested
+     * that theory on a few now in other implementations. dwmw2.
+     */
+    if (memory_region_is_mapped(page)) {
+        if (gpa == INVALID_GPA) {
+            memory_region_del_subregion(get_system_memory(), page);
+        } else {
+            /* Just move it */
+            memory_region_set_address(page, gpa);
+        }
+    } else if (gpa != INVALID_GPA) {
+        memory_region_add_subregion_overlap(get_system_memory(), gpa, page, 0);
+    }
+}
+
+/* KVM is the only existing back end for now. Let's not overengineer it yet. */
+static int xen_overlay_set_be_shinfo(uint64_t gfn)
+{
+    struct kvm_xen_hvm_attr xa = {
+        .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
+        .u.shared_info.gfn = gfn,
+    };
+
+    return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
+}
+
+
+static void xen_overlay_realize(DeviceState *dev, Error **errp)
+{
+    XenOverlayState *s = XEN_OVERLAY(dev);
+
+    if (xen_mode != XEN_EMULATE) {
+        error_setg(errp, "Xen overlay page support is for Xen emulation");
+        return;
+    }
+
+    memory_region_init_ram(&s->shinfo_mem, OBJECT(dev), "xen:shared_info",
+                           XEN_PAGE_SIZE, &error_abort);
+    memory_region_set_enabled(&s->shinfo_mem, true);
+
+    s->shinfo_ptr = memory_region_get_ram_ptr(&s->shinfo_mem);
+    s->shinfo_gpa = INVALID_GPA;
+    memset(s->shinfo_ptr, 0, XEN_PAGE_SIZE);
+}
+
+static int xen_overlay_post_load(void *opaque, int version_id)
+{
+    XenOverlayState *s = opaque;
+
+    if (s->shinfo_gpa != INVALID_GPA) {
+        xen_overlay_do_map_page(&s->shinfo_mem, s->shinfo_gpa);
+        xen_overlay_set_be_shinfo(s->shinfo_gpa >> XEN_PAGE_SHIFT);
+    }
+
+    return 0;
+}
+
+static bool xen_overlay_is_needed(void *opaque)
+{
+    return xen_mode == XEN_EMULATE;
+}
+
+static const VMStateDescription xen_overlay_vmstate = {
+    .name = "xen_overlay",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = xen_overlay_is_needed,
+    .post_load = xen_overlay_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(shinfo_gpa, XenOverlayState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void xen_overlay_reset(DeviceState *dev)
+{
+    kvm_xen_soft_reset();
+}
+
+static void xen_overlay_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = xen_overlay_reset;
+    dc->realize = xen_overlay_realize;
+    dc->vmsd = &xen_overlay_vmstate;
+}
+
+static const TypeInfo xen_overlay_info = {
+    .name = TYPE_XEN_OVERLAY,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(XenOverlayState),
+    .class_init = xen_overlay_class_init,
+};
+
+void xen_overlay_create(void)
+{
+    xen_overlay_singleton = XEN_OVERLAY(sysbus_create_simple(TYPE_XEN_OVERLAY,
+                                                             -1, NULL));
+
+    /* If xen_domid wasn't explicitly set, at least make sure it isn't zero.
+     */
+    if (xen_domid == DOMID_QEMU) {
+        xen_domid = 1;
+    }
+}
+
+static void xen_overlay_register_types(void)
+{
+    type_register_static(&xen_overlay_info);
+}
+
+type_init(xen_overlay_register_types)
+
+int xen_overlay_map_shinfo_page(uint64_t gpa)
+{
+    XenOverlayState *s = xen_overlay_singleton;
+    int ret;
+
+    if (!s) {
+        return -ENOENT;
+    }
+
+    assert(qemu_mutex_iothread_locked());
+
+    if (s->shinfo_gpa) {
+        /* If removing shinfo page, turn the kernel magic off first */
+        ret = xen_overlay_set_be_shinfo(INVALID_GFN);
+        if (ret) {
+            return ret;
+        }
+    }
+
+    xen_overlay_do_map_page(&s->shinfo_mem, gpa);
+    if (gpa != INVALID_GPA) {
+        ret = xen_overlay_set_be_shinfo(gpa >> XEN_PAGE_SHIFT);
+        if (ret) {
+            return ret;
+        }
+    }
+    s->shinfo_gpa = gpa;
+
+    return 0;
+}
+
+void *xen_overlay_get_shinfo_ptr(void)
+{
+    XenOverlayState *s = xen_overlay_singleton;
+
+    if (!s) {
+        return NULL;
+    }
+
+    return s->shinfo_ptr;
+}
--
cgit 1.4.1
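The patch above implements only the VMM side of that exchange. As context for how it gets exercised, here is a rough guest-side sketch of the XENMEM_add_to_physmap request described in the commit message. It assumes the public Xen interface headers and a HYPERVISOR_memory_op() hypercall wrapper supplied by the guest environment, and is illustrative rather than part of the series.

    /*
     * Illustrative guest-side sketch (not part of the QEMU patch above).
     * Assumes the public Xen headers and a HYPERVISOR_memory_op() wrapper
     * provided by the guest environment.
     */
    static int map_shared_info(unsigned long gpfn)
    {
        struct xen_add_to_physmap xatp = {
            .domid = DOMID_SELF,
            .space = XENMAPSPACE_shared_info,
            .idx   = 0,          /* there is only one shared info frame */
            .gpfn  = gpfn,       /* guest frame where it should appear */
        };

        /*
         * Under QEMU's Xen emulation, the hypercall handling added later
         * in this series routes this to xen_overlay_map_shinfo_page()
         * with the corresponding GPA.
         */
        return HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
    }
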
+ */ + return xen_sync_long_mode(); +} + static int xen_overlay_post_load(void *opaque, int version_id) { XenOverlayState *s = opaque; @@ -107,6 +120,9 @@ static int xen_overlay_post_load(void *opaque, int version_id) xen_overlay_do_map_page(&s->shinfo_mem, s->shinfo_gpa); xen_overlay_set_be_shinfo(s->shinfo_gpa >> XEN_PAGE_SHIFT); } + if (s->long_mode) { + xen_set_long_mode(true); + } return 0; } @@ -121,9 +137,11 @@ static const VMStateDescription xen_overlay_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = xen_overlay_is_needed, + .pre_save = xen_overlay_pre_save, .post_load = xen_overlay_post_load, .fields = (VMStateField[]) { VMSTATE_UINT64(shinfo_gpa, XenOverlayState), + VMSTATE_BOOL(long_mode, XenOverlayState), VMSTATE_END_OF_LIST() } }; @@ -208,3 +226,47 @@ void *xen_overlay_get_shinfo_ptr(void) return s->shinfo_ptr; } + +int xen_sync_long_mode(void) +{ + int ret; + struct kvm_xen_hvm_attr xa = { + .type = KVM_XEN_ATTR_TYPE_LONG_MODE, + }; + + if (!xen_overlay_singleton) { + return -ENOENT; + } + + ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_GET_ATTR, &xa); + if (!ret) { + xen_overlay_singleton->long_mode = xa.u.long_mode; + } + + return ret; +} + +int xen_set_long_mode(bool long_mode) +{ + int ret; + struct kvm_xen_hvm_attr xa = { + .type = KVM_XEN_ATTR_TYPE_LONG_MODE, + .u.long_mode = long_mode, + }; + + if (!xen_overlay_singleton) { + return -ENOENT; + } + + ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa); + if (!ret) { + xen_overlay_singleton->long_mode = xa.u.long_mode; + } + + return ret; +} + +bool xen_is_long_mode(void) +{ + return xen_overlay_singleton && xen_overlay_singleton->long_mode; +} diff --git a/hw/i386/kvm/xen_overlay.h b/hw/i386/kvm/xen_overlay.h index 00cff05bb0..5c46a0b036 100644 --- a/hw/i386/kvm/xen_overlay.h +++ b/hw/i386/kvm/xen_overlay.h @@ -17,4 +17,8 @@ void xen_overlay_create(void); int xen_overlay_map_shinfo_page(uint64_t gpa); void *xen_overlay_get_shinfo_ptr(void); +int xen_sync_long_mode(void); +int xen_set_long_mode(bool long_mode); +bool xen_is_long_mode(void); + #endif /* QEMU_XEN_OVERLAY_H */ diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c index ebea27caf6..be6d85f2cb 100644 --- a/target/i386/kvm/xen-emu.c +++ b/target/i386/kvm/xen-emu.c @@ -20,6 +20,8 @@ #include "trace.h" #include "sysemu/runstate.h" +#include "hw/i386/kvm/xen_overlay.h" + #include "hw/xen/interface/version.h" #include "hw/xen/interface/sched.h" @@ -282,6 +284,16 @@ int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit) return -1; } + /* + * The kernel latches the guest 32/64 mode when the MSR is used to fill + * the hypercall page. So if we see a hypercall in a mode that doesn't + * match our own idea of the guest mode, fetch the kernel's idea of the + * "long mode" to remain in sync. 
+ */ + if (exit->u.hcall.longmode != xen_is_long_mode()) { + xen_sync_long_mode(); + } + if (!do_kvm_xen_handle_exit(cpu, exit)) { /* * Some hypercalls will be deliberately "implemented" by returning -- cgit 1.4.1 From e33cb789afd3aedbe181b78fd75d04a5c48f7cba Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 16 Dec 2022 18:33:49 +0000 Subject: hw/xen: Support mapping grant frames Signed-off-by: David Woodhouse Reviewed-by: Paul Durrant --- hw/i386/kvm/xen_gnttab.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++- hw/i386/kvm/xen_overlay.c | 2 +- hw/i386/kvm/xen_overlay.h | 2 ++ 3 files changed, 75 insertions(+), 2 deletions(-) (limited to 'hw/i386/kvm/xen_overlay.c') diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c index ef8857e50c..72e87aea6a 100644 --- a/hw/i386/kvm/xen_gnttab.c +++ b/hw/i386/kvm/xen_gnttab.c @@ -37,13 +37,26 @@ OBJECT_DECLARE_SIMPLE_TYPE(XenGnttabState, XEN_GNTTAB) #define XEN_PAGE_SHIFT 12 #define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT) +#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t)) + struct XenGnttabState { /*< private >*/ SysBusDevice busdev; /*< public >*/ + QemuMutex gnt_lock; + uint32_t nr_frames; uint32_t max_frames; + + union { + grant_entry_v1_t *v1; + /* Theoretically, v2 support could be added here. */ + } entries; + + MemoryRegion gnt_frames; + MemoryRegion *gnt_aliases; + uint64_t *gnt_frame_gpas; }; struct XenGnttabState *xen_gnttab_singleton; @@ -51,6 +64,7 @@ struct XenGnttabState *xen_gnttab_singleton; static void xen_gnttab_realize(DeviceState *dev, Error **errp) { XenGnttabState *s = XEN_GNTTAB(dev); + int i; if (xen_mode != XEN_EMULATE) { error_setg(errp, "Xen grant table support is for Xen emulation"); @@ -58,6 +72,38 @@ static void xen_gnttab_realize(DeviceState *dev, Error **errp) } s->nr_frames = 0; s->max_frames = kvm_xen_get_gnttab_max_frames(); + memory_region_init_ram(&s->gnt_frames, OBJECT(dev), "xen:grant_table", + XEN_PAGE_SIZE * s->max_frames, &error_abort); + memory_region_set_enabled(&s->gnt_frames, true); + s->entries.v1 = memory_region_get_ram_ptr(&s->gnt_frames); + memset(s->entries.v1, 0, XEN_PAGE_SIZE * s->max_frames); + + /* Create individual page-sizes aliases for overlays */ + s->gnt_aliases = (void *)g_new0(MemoryRegion, s->max_frames); + s->gnt_frame_gpas = (void *)g_new(uint64_t, s->max_frames); + for (i = 0; i < s->max_frames; i++) { + memory_region_init_alias(&s->gnt_aliases[i], OBJECT(dev), + NULL, &s->gnt_frames, + i * XEN_PAGE_SIZE, XEN_PAGE_SIZE); + s->gnt_frame_gpas[i] = INVALID_GPA; + } + + qemu_mutex_init(&s->gnt_lock); + + xen_gnttab_singleton = s; +} + +static int xen_gnttab_post_load(void *opaque, int version_id) +{ + XenGnttabState *s = XEN_GNTTAB(opaque); + uint32_t i; + + for (i = 0; i < s->nr_frames; i++) { + if (s->gnt_frame_gpas[i] != INVALID_GPA) { + xen_overlay_do_map_page(&s->gnt_aliases[i], s->gnt_frame_gpas[i]); + } + } + return 0; } static bool xen_gnttab_is_needed(void *opaque) @@ -70,8 +116,11 @@ static const VMStateDescription xen_gnttab_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = xen_gnttab_is_needed, + .post_load = xen_gnttab_post_load, .fields = (VMStateField[]) { VMSTATE_UINT32(nr_frames, XenGnttabState), + VMSTATE_VARRAY_UINT32(gnt_frame_gpas, XenGnttabState, nr_frames, 0, + vmstate_info_uint64, uint64_t), VMSTATE_END_OF_LIST() } }; @@ -106,6 +155,28 @@ type_init(xen_gnttab_register_types) int xen_gnttab_map_page(uint64_t idx, uint64_t gfn) { - return -ENOSYS; + XenGnttabState *s = xen_gnttab_singleton; + uint64_t gpa = gfn 
<< XEN_PAGE_SHIFT; + + if (!s) { + return -ENOTSUP; + } + + if (idx >= s->max_frames) { + return -EINVAL; + } + + QEMU_IOTHREAD_LOCK_GUARD(); + QEMU_LOCK_GUARD(&s->gnt_lock); + + xen_overlay_do_map_page(&s->gnt_aliases[idx], gpa); + + s->gnt_frame_gpas[idx] = gpa; + + if (s->nr_frames <= idx) { + s->nr_frames = idx + 1; + } + + return 0; } diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c index 8685d87959..39fda1b72c 100644 --- a/hw/i386/kvm/xen_overlay.c +++ b/hw/i386/kvm/xen_overlay.c @@ -49,7 +49,7 @@ struct XenOverlayState { struct XenOverlayState *xen_overlay_singleton; -static void xen_overlay_do_map_page(MemoryRegion *page, uint64_t gpa) +void xen_overlay_do_map_page(MemoryRegion *page, uint64_t gpa) { /* * Xen allows guests to map the same page as many times as it likes diff --git a/hw/i386/kvm/xen_overlay.h b/hw/i386/kvm/xen_overlay.h index 5c46a0b036..75ecb6b359 100644 --- a/hw/i386/kvm/xen_overlay.h +++ b/hw/i386/kvm/xen_overlay.h @@ -21,4 +21,6 @@ int xen_sync_long_mode(void); int xen_set_long_mode(bool long_mode); bool xen_is_long_mode(void); +void xen_overlay_do_map_page(MemoryRegion *page, uint64_t gpa); + #endif /* QEMU_XEN_OVERLAY_H */ -- cgit 1.4.1
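The guest-side counterpart of xen_gnttab_map_page() above is useful context: grant table frames are mapped one at a time with XENMAPSPACE_grant_table, where idx selects the frame and gpfn says where it should appear, which is why the device keeps one MemoryRegion alias per frame. A rough sketch, again assuming the public Xen headers and a HYPERVISOR_memory_op() wrapper from the guest environment:

    /* Illustrative guest-side sketch (not part of the patch). */
    static int map_grant_frames(unsigned long first_gpfn, unsigned int nr)
    {
        unsigned int i;

        for (i = 0; i < nr; i++) {
            struct xen_add_to_physmap xatp = {
                .domid = DOMID_SELF,
                .space = XENMAPSPACE_grant_table,
                .idx   = i,                /* which grant table frame */
                .gpfn  = first_gpfn + i,   /* where the guest wants it */
            };
            int rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);

            if (rc) {
                return rc;
            }
        }
        return 0;
    }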