| author | Joao Martins <joao.m.martins@oracle.com> | 2018-06-29 10:54:50 -0400 |
|---|---|---|
| committer | David Woodhouse <dwmw@amazon.co.uk> | 2023-03-01 08:22:49 +0000 |
| commit | c345104cd1d17e1e801b99a216fa3654cdcbce35 (patch) | |
| tree | cd754acc9b9545e864970e05615b0fc86b49d10b /target/i386/kvm/xen-emu.c | |
| parent | d70bd6a485d54ff60a6dd708c51b5c0ba679056f (diff) | |
| download | focaccia-qemu-c345104cd1d17e1e801b99a216fa3654cdcbce35.tar.gz focaccia-qemu-c345104cd1d17e1e801b99a216fa3654cdcbce35.zip | |
i386/xen: handle VCPUOP_register_vcpu_info
Handle the hypercall to set a per-vCPU vcpu_info, and also wire up the default vcpu_info in the shared_info page for the first 32 vCPUs.

To avoid deadlock within KVM, a vCPU thread must set its *own* vcpu_info rather than having it set from the context in which the hypercall is invoked.

Add the vcpu_info (and default) GPA to vmstate_x86_cpu for migration, and restore it in kvm_arch_put_registers() appropriately.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
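For context, the sketch below isolates the pattern the second paragraph describes: rather than issuing the KVM ioctl from the hypercall context, the new GPA is handed to the target vCPU with async_run_on_cpu() so that each vCPU thread sets its *own* vcpu_info. This is a minimal, hypothetical sketch and not code from the commit; the wrapper name register_vcpu_info_deferred is invented for illustration, while qemu_get_cpu(), async_run_on_cpu(), RUN_ON_CPU_HOST_ULONG() and do_set_vcpu_info_gpa() are the QEMU helpers that appear in the diff further down.

```c
/*
 * Illustrative sketch only (not part of the commit). The wrapper name is
 * hypothetical; the helpers it calls (qemu_get_cpu, async_run_on_cpu,
 * do_set_vcpu_info_gpa) come from the patch below. It assumes QEMU-internal
 * headers and will not build outside the QEMU tree.
 */
#include "qemu/osdep.h"
#include "hw/core/cpu.h"    /* CPUState, run_on_cpu_data, async_run_on_cpu() */

static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data);

static int register_vcpu_info_deferred(int vcpu_id, uint64_t gpa)
{
    /* Map the guest's vCPU index to the corresponding QEMU CPUState. */
    CPUState *target = qemu_get_cpu(vcpu_id);

    if (!target) {
        return -ENOENT;     /* unknown vCPU id */
    }

    /*
     * Do not issue the KVM_XEN_VCPU_SET_ATTR ioctl here: the hypercall may
     * have been made by a different vCPU, and setting another vCPU's
     * vcpu_info from this thread can deadlock inside KVM. Instead, queue
     * the work so the target vCPU applies it on its own thread.
     */
    async_run_on_cpu(target, do_set_vcpu_info_gpa, RUN_ON_CPU_HOST_ULONG(gpa));
    return 0;               /* the attribute is applied asynchronously */
}
```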
Diffstat (limited to 'target/i386/kvm/xen-emu.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | target/i386/kvm/xen-emu.c | 153 |

1 file changed, 150 insertions, 3 deletions
```diff
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index e5ae0a9a38..30b4789da3 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -119,6 +119,8 @@ int kvm_xen_init(KVMState *s, uint32_t hypercall_msr)
 
 int kvm_xen_init_vcpu(CPUState *cs)
 {
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
     int err;
 
     /*
@@ -142,6 +144,9 @@ int kvm_xen_init_vcpu(CPUState *cs)
         }
     }
 
+    env->xen_vcpu_info_gpa = INVALID_GPA;
+    env->xen_vcpu_info_default_gpa = INVALID_GPA;
+
     return 0;
 }
 
@@ -187,10 +192,58 @@ static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu,
     return true;
 }
 
+static int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
+{
+    struct kvm_xen_vcpu_attr xhsi;
+
+    xhsi.type = type;
+    xhsi.u.gpa = gpa;
+
+    trace_kvm_xen_set_vcpu_attr(cs->cpu_index, type, gpa);
+
+    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
+}
+
+static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_info_default_gpa = data.host_ulong;
+
+    /* Changing the default does nothing if a vcpu_info was explicitly set. */
+    if (env->xen_vcpu_info_gpa == INVALID_GPA) {
+        kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+                              env->xen_vcpu_info_default_gpa);
+    }
+}
+
+static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_info_gpa = data.host_ulong;
+
+    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+                          env->xen_vcpu_info_gpa);
+}
+
+static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_info_gpa = INVALID_GPA;
+    env->xen_vcpu_info_default_gpa = INVALID_GPA;
+
+    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, INVALID_GPA);
+}
+
 static int xen_set_shared_info(uint64_t gfn)
 {
     uint64_t gpa = gfn << TARGET_PAGE_BITS;
-    int err;
+    int i, err;
 
     QEMU_IOTHREAD_LOCK_GUARD();
 
@@ -207,6 +260,15 @@ static int xen_set_shared_info(uint64_t gfn)
 
     trace_kvm_xen_set_shared_info(gfn);
 
+    for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) {
+        CPUState *cpu = qemu_get_cpu(i);
+        if (cpu) {
+            async_run_on_cpu(cpu, do_set_vcpu_info_default_gpa,
+                             RUN_ON_CPU_HOST_ULONG(gpa));
+        }
+        gpa += sizeof(vcpu_info_t);
+    }
+
     return err;
 }
 
@@ -364,15 +426,43 @@ static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit, X86CPU *cpu,
     }
 }
 
+static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
+                                     uint64_t arg)
+{
+    struct vcpu_register_vcpu_info rvi;
+    uint64_t gpa;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(rvi) == 16);
+    qemu_build_assert(sizeof(struct vcpu_info) == 64);
+
+    if (!target) {
+        return -ENOENT;
+    }
+
+    if (kvm_copy_from_gva(cs, arg, &rvi, sizeof(rvi))) {
+        return -EFAULT;
+    }
+
+    if (rvi.offset > TARGET_PAGE_SIZE - sizeof(struct vcpu_info)) {
+        return -EINVAL;
+    }
+
+    gpa = ((rvi.mfn << TARGET_PAGE_BITS) + rvi.offset);
+    async_run_on_cpu(target, do_set_vcpu_info_gpa, RUN_ON_CPU_HOST_ULONG(gpa));
+    return 0;
+}
+
 static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                   int cmd, int vcpu_id, uint64_t arg)
 {
+    CPUState *dest = qemu_get_cpu(vcpu_id);
+    CPUState *cs = CPU(cpu);
     int err;
 
     switch (cmd) {
     case VCPUOP_register_vcpu_info:
-        /* no vcpu info placement for now */
-        err = -ENOSYS;
+        err = vcpuop_register_vcpu_info(cs, dest, arg);
         break;
 
     default:
@@ -385,12 +475,17 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
 
 int kvm_xen_soft_reset(void)
 {
+    CPUState *cpu;
     int err;
 
     assert(qemu_mutex_iothread_locked());
 
     trace_kvm_xen_soft_reset();
 
+    CPU_FOREACH(cpu) {
+        async_run_on_cpu(cpu, do_vcpu_soft_reset, RUN_ON_CPU_NULL);
+    }
+
     err = xen_overlay_map_shinfo_page(INVALID_GFN);
     if (err) {
         return err;
@@ -539,3 +634,55 @@ int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
                             exit->u.hcall.result);
     return 0;
 }
+
+int kvm_put_xen_state(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    uint64_t gpa;
+    int ret;
+
+    gpa = env->xen_vcpu_info_gpa;
+    if (gpa == INVALID_GPA) {
+        gpa = env->xen_vcpu_info_default_gpa;
+    }
+
+    if (gpa != INVALID_GPA) {
+        ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+int kvm_get_xen_state(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    uint64_t gpa;
+
+    /*
+     * The kernel does not mark vcpu_info as dirty when it delivers interrupts
+     * to it. It's up to userspace to *assume* that any page shared thus is
+     * always considered dirty. The shared_info page is different since it's
+     * an overlay and migrated separately anyway.
+     */
+    gpa = env->xen_vcpu_info_gpa;
+    if (gpa == INVALID_GPA) {
+        gpa = env->xen_vcpu_info_default_gpa;
+    }
+    if (gpa != INVALID_GPA) {
+        MemoryRegionSection mrs = memory_region_find(get_system_memory(),
+                                                     gpa,
+                                                     sizeof(struct vcpu_info));
+        if (mrs.mr &&
+            !int128_lt(mrs.size, int128_make64(sizeof(struct vcpu_info)))) {
+            memory_region_set_dirty(mrs.mr, mrs.offset_within_region,
+                                    sizeof(struct vcpu_info));
+        }
+    }
+
+    return 0;
+}
```