Diffstat (limited to 'target/i386/kvm/kvm.c')
| -rw-r--r-- | target/i386/kvm/kvm.c | 510 |
1 file changed, 277 insertions, 233 deletions
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index d972eb4705..c676ee8b38 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -129,6 +129,7 @@ static int has_exception_payload;
 static bool has_msr_mcg_ext_ctl;
 
 static struct kvm_cpuid2 *cpuid_cache;
+static struct kvm_cpuid2 *hv_cpuid_cache;
 static struct kvm_msr_list *kvm_feature_msrs;
 
 int kvm_has_pit_state2(void)
@@ -715,8 +716,7 @@ unsigned long kvm_arch_vcpu_id(CPUState *cs)
 
 static bool hyperv_enabled(X86CPU *cpu)
 {
-    CPUState *cs = CPU(cpu);
-    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
+    return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
         ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
          cpu->hyperv_features || cpu->hyperv_passthrough);
 }
@@ -801,7 +801,8 @@ static bool tsc_is_stable_and_known(CPUX86State *env)
 static struct {
     const char *desc;
     struct {
-        uint32_t fw;
+        uint32_t func;
+        int reg;
         uint32_t bits;
     } flags[2];
     uint64_t dependencies;
@@ -809,25 +810,25 @@ static struct {
     [HYPERV_FEAT_RELAXED] = {
         .desc = "relaxed timing (hv-relaxed)",
         .flags = {
-            {.fw = FEAT_HYPERV_EAX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
              .bits = HV_HYPERCALL_AVAILABLE},
-            {.fw = FEAT_HV_RECOMM_EAX,
+            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
              .bits = HV_RELAXED_TIMING_RECOMMENDED}
         }
     },
     [HYPERV_FEAT_VAPIC] = {
         .desc = "virtual APIC (hv-vapic)",
         .flags = {
-            {.fw = FEAT_HYPERV_EAX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
              .bits = HV_HYPERCALL_AVAILABLE | HV_APIC_ACCESS_AVAILABLE},
-            {.fw = FEAT_HV_RECOMM_EAX,
+            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
              .bits = HV_APIC_ACCESS_RECOMMENDED}
         }
     },
     [HYPERV_FEAT_TIME] = {
         .desc = "clocksources (hv-time)",
         .flags = {
-            {.fw = FEAT_HYPERV_EAX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
              .bits = HV_HYPERCALL_AVAILABLE | HV_TIME_REF_COUNT_AVAILABLE |
                      HV_REFERENCE_TSC_AVAILABLE}
         }
@@ -835,42 +836,42 @@ static struct {
     [HYPERV_FEAT_CRASH] = {
         .desc = "crash MSRs (hv-crash)",
         .flags = {
-            {.fw = FEAT_HYPERV_EDX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
              .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
         }
     },
     [HYPERV_FEAT_RESET] = {
         .desc = "reset MSR (hv-reset)",
         .flags = {
-            {.fw = FEAT_HYPERV_EAX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
              .bits = HV_RESET_AVAILABLE}
         }
     },
     [HYPERV_FEAT_VPINDEX] = {
         .desc = "VP_INDEX MSR (hv-vpindex)",
         .flags = {
-            {.fw = FEAT_HYPERV_EAX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
              .bits = HV_VP_INDEX_AVAILABLE}
         }
     },
     [HYPERV_FEAT_RUNTIME] = {
         .desc = "VP_RUNTIME MSR (hv-runtime)",
         .flags = {
-            {.fw = FEAT_HYPERV_EAX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
              .bits = HV_VP_RUNTIME_AVAILABLE}
         }
     },
     [HYPERV_FEAT_SYNIC] = {
         .desc = "synthetic interrupt controller (hv-synic)",
         .flags = {
-            {.fw = FEAT_HYPERV_EAX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
              .bits = HV_SYNIC_AVAILABLE}
         }
     },
     [HYPERV_FEAT_STIMER] = {
         .desc = "synthetic timers (hv-stimer)",
         .flags = {
-            {.fw = FEAT_HYPERV_EAX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
              .bits = HV_SYNTIMERS_AVAILABLE}
         },
         .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
@@ -878,23 +879,23 @@ static struct {
     [HYPERV_FEAT_FREQUENCIES] = {
         .desc = "frequency MSRs (hv-frequencies)",
         .flags = {
-            {.fw = FEAT_HYPERV_EAX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
              .bits = HV_ACCESS_FREQUENCY_MSRS},
-            {.fw = FEAT_HYPERV_EDX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
              .bits = HV_FREQUENCY_MSRS_AVAILABLE}
         }
     },
     [HYPERV_FEAT_REENLIGHTENMENT] = {
         .desc = "reenlightenment MSRs (hv-reenlightenment)",
         .flags = {
-            {.fw = FEAT_HYPERV_EAX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
              .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
         }
     },
     [HYPERV_FEAT_TLBFLUSH] = {
         .desc = "paravirtualized TLB flush (hv-tlbflush)",
         .flags = {
-            {.fw = FEAT_HV_RECOMM_EAX,
+            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
              .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
              HV_EX_PROCESSOR_MASKS_RECOMMENDED}
         },
@@ -903,7 +904,7 @@ static struct {
     [HYPERV_FEAT_EVMCS] = {
         .desc = "enlightened VMCS (hv-evmcs)",
         .flags = {
-            {.fw = FEAT_HV_RECOMM_EAX,
+            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
              .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
         },
         .dependencies = BIT(HYPERV_FEAT_VAPIC)
@@ -911,7 +912,7 @@ static struct {
     [HYPERV_FEAT_IPI] = {
         .desc = "paravirtualized IPI (hv-ipi)",
         .flags = {
-            {.fw = FEAT_HV_RECOMM_EAX,
+            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
              .bits = HV_CLUSTER_IPI_RECOMMENDED |
              HV_EX_PROCESSOR_MASKS_RECOMMENDED}
         },
@@ -920,14 +921,15 @@ static struct {
     [HYPERV_FEAT_STIMER_DIRECT] = {
         .desc = "direct mode synthetic timers (hv-stimer-direct)",
         .flags = {
-            {.fw = FEAT_HYPERV_EDX,
+            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
              .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
         },
         .dependencies = BIT(HYPERV_FEAT_STIMER)
     },
 };
 
-static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max)
+static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max,
+                                           bool do_sys_ioctl)
 {
     struct kvm_cpuid2 *cpuid;
     int r, size;
@@ -936,7 +938,11 @@ static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max)
     cpuid = g_malloc0(size);
     cpuid->nent = max;
 
-    r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+    if (do_sys_ioctl) {
+        r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+    } else {
+        r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+    }
     if (r == 0 && cpuid->nent >= max) {
         r = -E2BIG;
     }
@@ -960,16 +966,38 @@ static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max)
 static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
 {
     struct kvm_cpuid2 *cpuid;
-    int max = 7; /* 0x40000000..0x40000005, 0x4000000A */
+    /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000080 leaves */
+    int max = 10;
+    int i;
+    bool do_sys_ioctl;
+
+    do_sys_ioctl =
+        kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0;
 
     /*
      * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
      * -E2BIG, however, it doesn't report back the right size. Keep increasing
      * it and re-trying until we succeed.
      */
-    while ((cpuid = try_get_hv_cpuid(cs, max)) == NULL) {
+    while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) {
         max++;
     }
+
+    /*
+     * KVM_GET_SUPPORTED_HV_CPUID does not set EVMCS CPUID bit before
+     * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled but we want to get the
+     * information early, just check for the capability and set the bit
+     * manually.
+     */
+    if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state,
+                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
+        for (i = 0; i < cpuid->nent; i++) {
+            if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) {
+                cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
+            }
+        }
+    }
+
     return cpuid;
 }
 
@@ -1066,56 +1094,62 @@ static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
     return cpuid;
 }
 
-static int hv_cpuid_get_fw(struct kvm_cpuid2 *cpuid, int fw, uint32_t *r)
+static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
 {
     struct kvm_cpuid_entry2 *entry;
-    uint32_t func;
-    int reg;
+    struct kvm_cpuid2 *cpuid;
 
-    switch (fw) {
-    case FEAT_HYPERV_EAX:
-        reg = R_EAX;
-        func = HV_CPUID_FEATURES;
-        break;
-    case FEAT_HYPERV_EDX:
-        reg = R_EDX;
-        func = HV_CPUID_FEATURES;
-        break;
-    case FEAT_HV_RECOMM_EAX:
-        reg = R_EAX;
-        func = HV_CPUID_ENLIGHTMENT_INFO;
-        break;
-    default:
-        return -EINVAL;
+    if (hv_cpuid_cache) {
+        cpuid = hv_cpuid_cache;
+    } else {
+        if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
+            cpuid = get_supported_hv_cpuid(cs);
+        } else {
+            cpuid = get_supported_hv_cpuid_legacy(cs);
+        }
+        hv_cpuid_cache = cpuid;
+    }
+
+    if (!cpuid) {
+        return 0;
     }
 
     entry = cpuid_find_entry(cpuid, func, 0);
     if (!entry) {
-        return -ENOENT;
+        return 0;
     }
 
-    switch (reg) {
-    case R_EAX:
-        *r = entry->eax;
-        break;
-    case R_EDX:
-        *r = entry->edx;
-        break;
-    default:
-        return -EINVAL;
+    return cpuid_entry_get_reg(entry, reg);
+}
+
+static bool hyperv_feature_supported(CPUState *cs, int feature)
+{
+    uint32_t func, bits;
+    int i, reg;
+
+    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {
+
+        func = kvm_hyperv_properties[feature].flags[i].func;
+        reg = kvm_hyperv_properties[feature].flags[i].reg;
+        bits = kvm_hyperv_properties[feature].flags[i].bits;
+
+        if (!func) {
+            continue;
+        }
+
+        if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
+            return false;
+        }
     }
 
-    return 0;
+    return true;
 }
 
-static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid,
-                                  int feature)
+static int hv_cpuid_check_and_set(CPUState *cs, int feature, Error **errp)
 {
     X86CPU *cpu = X86_CPU(cs);
-    CPUX86State *env = &cpu->env;
-    uint32_t r, fw, bits;
     uint64_t deps;
-    int i, dep_feat;
+    int dep_feat;
 
     if (!hyperv_feat_enabled(cpu, feature) && !cpu->hyperv_passthrough) {
         return 0;
@@ -1125,35 +1159,22 @@ static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid,
     while (deps) {
         dep_feat = ctz64(deps);
         if (!(hyperv_feat_enabled(cpu, dep_feat))) {
-            fprintf(stderr,
-                    "Hyper-V %s requires Hyper-V %s\n",
-                    kvm_hyperv_properties[feature].desc,
-                    kvm_hyperv_properties[dep_feat].desc);
-            return 1;
+            error_setg(errp, "Hyper-V %s requires Hyper-V %s",
+                       kvm_hyperv_properties[feature].desc,
+                       kvm_hyperv_properties[dep_feat].desc);
+            return 1;
         }
         deps &= ~(1ull << dep_feat);
     }
 
-    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {
-        fw = kvm_hyperv_properties[feature].flags[i].fw;
-        bits = kvm_hyperv_properties[feature].flags[i].bits;
-
-        if (!fw) {
-            continue;
-        }
-
-        if (hv_cpuid_get_fw(cpuid, fw, &r) || (r & bits) != bits) {
-            if (hyperv_feat_enabled(cpu, feature)) {
-                fprintf(stderr,
-                        "Hyper-V %s is not supported by kernel\n",
-                        kvm_hyperv_properties[feature].desc);
-                return 1;
-            } else {
-                return 0;
-            }
+    if (!hyperv_feature_supported(cs, feature)) {
+        if (hyperv_feat_enabled(cpu, feature)) {
+            error_setg(errp, "Hyper-V %s is not supported by kernel",
+                       kvm_hyperv_properties[feature].desc);
+            return 1;
+        } else {
+            return 0;
         }
-
-        env->features[fw] |= bits;
     }
 
     if (cpu->hyperv_passthrough) {
@@ -1163,157 +1184,156 @@ static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid,
     return 0;
 }
 
-/*
- * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent in
- * case of success, errno < 0 in case of failure and 0 when no Hyper-V
- * extentions are enabled.
- */
-static int hyperv_handle_properties(CPUState *cs,
-                                    struct kvm_cpuid_entry2 *cpuid_ent)
+static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
 {
     X86CPU *cpu = X86_CPU(cs);
-    CPUX86State *env = &cpu->env;
-    struct kvm_cpuid2 *cpuid;
-    struct kvm_cpuid_entry2 *c;
-    uint32_t cpuid_i = 0;
-    int r;
+    uint32_t r = 0;
+    int i, j;
 
-    if (!hyperv_enabled(cpu))
-        return 0;
-
-    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ||
-        cpu->hyperv_passthrough) {
-        uint16_t evmcs_version;
-
-        r = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
-                                (uintptr_t)&evmcs_version);
-
-        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) && r) {
-            fprintf(stderr, "Hyper-V %s is not supported by kernel\n",
-                    kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
-            return -ENOSYS;
-        }
-
-        if (!r) {
-            env->features[FEAT_HV_RECOMM_EAX] |=
-                HV_ENLIGHTENED_VMCS_RECOMMENDED;
-            env->features[FEAT_HV_NESTED_EAX] = evmcs_version;
-        }
-    }
-
-    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
-        cpuid = get_supported_hv_cpuid(cs);
-    } else {
-        cpuid = get_supported_hv_cpuid_legacy(cs);
-    }
-
-    if (cpu->hyperv_passthrough) {
-        memcpy(cpuid_ent, &cpuid->entries[0],
-               cpuid->nent * sizeof(cpuid->entries[0]));
-
-        c = cpuid_find_entry(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, 0);
-        if (c) {
-            cpu->hyperv_vendor_id[0] = c->ebx;
-            cpu->hyperv_vendor_id[1] = c->ecx;
-            cpu->hyperv_vendor_id[2] = c->edx;
-        }
-
-        c = cpuid_find_entry(cpuid, HV_CPUID_INTERFACE, 0);
-        if (c) {
-            cpu->hyperv_interface_id[0] = c->eax;
-            cpu->hyperv_interface_id[1] = c->ebx;
-            cpu->hyperv_interface_id[2] = c->ecx;
-            cpu->hyperv_interface_id[3] = c->edx;
+    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
+        if (!hyperv_feat_enabled(cpu, i)) {
+            continue;
         }
 
-        c = cpuid_find_entry(cpuid, HV_CPUID_VERSION, 0);
-        if (c) {
-            cpu->hyperv_version_id[0] = c->eax;
-            cpu->hyperv_version_id[1] = c->ebx;
-            cpu->hyperv_version_id[2] = c->ecx;
-            cpu->hyperv_version_id[3] = c->edx;
-        }
+        for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
+            if (kvm_hyperv_properties[i].flags[j].func != func) {
+                continue;
+            }
+            if (kvm_hyperv_properties[i].flags[j].reg != reg) {
+                continue;
+            }
 
-        c = cpuid_find_entry(cpuid, HV_CPUID_FEATURES, 0);
-        if (c) {
-            env->features[FEAT_HYPERV_EAX] = c->eax;
-            env->features[FEAT_HYPERV_EBX] = c->ebx;
-            env->features[FEAT_HYPERV_EDX] = c->edx;
+            r |= kvm_hyperv_properties[i].flags[j].bits;
         }
+    }
 
-        c = cpuid_find_entry(cpuid, HV_CPUID_IMPLEMENT_LIMITS, 0);
-        if (c) {
-            cpu->hv_max_vps = c->eax;
-            cpu->hyperv_limits[0] = c->ebx;
-            cpu->hyperv_limits[1] = c->ecx;
-            cpu->hyperv_limits[2] = c->edx;
-        }
+    return r;
+}
 
-        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
-        if (c) {
-            env->features[FEAT_HV_RECOMM_EAX] = c->eax;
+/*
+ * Expand Hyper-V CPU features. In partucular, check that all the requested
+ * features are supported by the host and the sanity of the configuration
+ * (that all the required dependencies are included). Also, this takes care
+ * of 'hv_passthrough' mode and fills the environment with all supported
+ * Hyper-V features.
+ */
+static void hyperv_expand_features(CPUState *cs, Error **errp)
+{
+    X86CPU *cpu = X86_CPU(cs);
 
-            /* hv-spinlocks may have been overriden */
-            if (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) {
-                c->ebx = cpu->hyperv_spinlock_attempts;
-            }
-        }
-        c = cpuid_find_entry(cpuid, HV_CPUID_NESTED_FEATURES, 0);
-        if (c) {
-            env->features[FEAT_HV_NESTED_EAX] = c->eax;
-        }
-    }
+    if (!hyperv_enabled(cpu))
+        return;
 
-    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
-        env->features[FEAT_HV_RECOMM_EAX] |= HV_NO_NONARCH_CORESHARING;
-    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
-        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
-        if (c) {
-            env->features[FEAT_HV_RECOMM_EAX] |=
-                c->eax & HV_NO_NONARCH_CORESHARING;
-        }
+    if (cpu->hyperv_passthrough) {
+        cpu->hyperv_vendor_id[0] =
+            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
+        cpu->hyperv_vendor_id[1] =
+            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
+        cpu->hyperv_vendor_id[2] =
+            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
+        cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
+                                       sizeof(cpu->hyperv_vendor_id) + 1);
+        memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
+               sizeof(cpu->hyperv_vendor_id));
+        cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;
+
+        cpu->hyperv_interface_id[0] =
+            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
+        cpu->hyperv_interface_id[1] =
+            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
+        cpu->hyperv_interface_id[2] =
+            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
+        cpu->hyperv_interface_id[3] =
+            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);
+
+        cpu->hyperv_version_id[0] =
+            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
+        cpu->hyperv_version_id[1] =
+            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX);
+        cpu->hyperv_version_id[2] =
+            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
+        cpu->hyperv_version_id[3] =
+            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX);
+
+        cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
+                                            R_EAX);
+        cpu->hyperv_limits[0] =
+            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
+        cpu->hyperv_limits[1] =
+            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
+        cpu->hyperv_limits[2] =
+            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);
+
+        cpu->hyperv_spinlock_attempts =
+            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);
     }
 
     /* Features */
-    r = hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VAPIC);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TIME);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_CRASH);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RESET);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VPINDEX);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RUNTIME);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_SYNIC);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_FREQUENCIES);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_REENLIGHTENMENT);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TLBFLUSH);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_EVMCS);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_IPI);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER_DIRECT);
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_RELAXED, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_VAPIC, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_TIME, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_CRASH, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_RESET, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_VPINDEX, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_RUNTIME, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_SYNIC, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_STIMER, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_FREQUENCIES, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_REENLIGHTENMENT, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_TLBFLUSH, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_EVMCS, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_IPI, errp)) {
+        return;
+    }
+    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_STIMER_DIRECT, errp)) {
+        return;
+    }
 
     /* Additional dependencies not covered by kvm_hyperv_properties[] */
     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
         !cpu->hyperv_synic_kvm_only &&
         !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
-        fprintf(stderr, "Hyper-V %s requires Hyper-V %s\n",
-                kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
-                kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
-        r |= 1;
-    }
-
-    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
-    env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
-
-    if (r) {
-        r = -ENOSYS;
-        goto free;
+        error_setg(errp, "Hyper-V %s requires Hyper-V %s",
+                   kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
+                   kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
     }
+}
 
-    if (cpu->hyperv_passthrough) {
-        /* We already copied all feature words from KVM as is */
-        r = cpuid->nent;
-        goto free;
-    }
+/*
+ * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
+ */
+static int hyperv_fill_cpuids(CPUState *cs,
+                              struct kvm_cpuid_entry2 *cpuid_ent)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    struct kvm_cpuid_entry2 *c;
+    uint32_t cpuid_i = 0;
 
     c = &cpuid_ent[cpuid_i++];
     c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
@@ -1339,15 +1359,25 @@ static int hyperv_handle_properties(CPUState *cs,
 
     c = &cpuid_ent[cpuid_i++];
     c->function = HV_CPUID_FEATURES;
-    c->eax = env->features[FEAT_HYPERV_EAX];
-    c->ebx = env->features[FEAT_HYPERV_EBX];
-    c->edx = env->features[FEAT_HYPERV_EDX];
+    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
+    c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
+    c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);
+
+    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
+    c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
 
     c = &cpuid_ent[cpuid_i++];
     c->function = HV_CPUID_ENLIGHTMENT_INFO;
-    c->eax = env->features[FEAT_HV_RECOMM_EAX];
+    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
     c->ebx = cpu->hyperv_spinlock_attempts;
 
+    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
+        c->eax |= HV_NO_NONARCH_CORESHARING;
+    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
+        c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
+                  HV_NO_NONARCH_CORESHARING;
+    }
+
     c = &cpuid_ent[cpuid_i++];
     c->function = HV_CPUID_IMPLEMENT_LIMITS;
     c->eax = cpu->hv_max_vps;
@@ -1367,14 +1397,10 @@ static int hyperv_handle_properties(CPUState *cs,
 
         c = &cpuid_ent[cpuid_i++];
         c->function = HV_CPUID_NESTED_FEATURES;
-        c->eax = env->features[FEAT_HV_NESTED_EAX];
+        c->eax = cpu->hyperv_nested[0];
     }
 
-    r = cpuid_i;
-
-free:
-    g_free(cpuid);
-    return r;
+    return cpuid_i;
 }
 
 static Error *hv_passthrough_mig_blocker;
@@ -1458,6 +1484,21 @@ static int hyperv_init_vcpu(X86CPU *cpu)
         }
     }
 
+    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
+        uint16_t evmcs_version;
+
+        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
+                                  (uintptr_t)&evmcs_version);
+
+        if (ret < 0) {
+            fprintf(stderr, "Hyper-V %s is not supported by kernel\n",
+                    kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
+            return ret;
+        }
+
+        cpu->hyperv_nested[0] = evmcs_version;
+    }
+
     return 0;
 }
 
@@ -1516,11 +1557,19 @@ int kvm_arch_init_vcpu(CPUState *cs)
     env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY;
 
     /* Paravirtualization CPUIDs */
-    r = hyperv_handle_properties(cs, cpuid_data.entries);
-    if (r < 0) {
-        return r;
-    } else if (r > 0) {
-        cpuid_i = r;
+    hyperv_expand_features(cs, &local_err);
+    if (local_err) {
+        error_report_err(local_err);
+        return -ENOSYS;
+    }
+
+    if (hyperv_enabled(cpu)) {
+        r = hyperv_init_vcpu(cpu);
+        if (r) {
+            return r;
+        }
+
+        cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries);
         kvm_base = KVM_CPUID_SIGNATURE_NEXT;
         has_msr_hv_hypercall = true;
     }
@@ -1869,11 +1918,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
 
     kvm_init_msrs(cpu);
 
-    r = hyperv_init_vcpu(cpu);
-    if (r) {
-        goto fail;
-    }
-
     return 0;
 
 fail: