Diffstat (limited to 'target/i386')
-rw-r--r--  target/i386/arch_dump.c              1
-rw-r--r--  target/i386/arch_memory_mapping.c    1
-rw-r--r--  target/i386/cpu.c                   30
-rw-r--r--  target/i386/cpu.h                   12
-rw-r--r--  target/i386/hax-mem.c                6
-rw-r--r--  target/i386/hyperv-proto.h         260
-rw-r--r--  target/i386/hyperv.c                 6
-rw-r--r--  target/i386/kvm.c                  195
-rw-r--r--  target/i386/machine.c               15
-rw-r--r--  target/i386/monitor.c                8
-rw-r--r--  target/i386/ops_sse.h               47
-rw-r--r--  target/i386/svm_helper.c             1
-rw-r--r--  target/i386/translate.c              3
13 files changed, 433 insertions, 152 deletions
diff --git a/target/i386/arch_dump.c b/target/i386/arch_dump.c index e682904052..35b55fc200 100644 --- a/target/i386/arch_dump.c +++ b/target/i386/arch_dump.c @@ -13,7 +13,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/cpu-all.h" #include "sysemu/dump.h" #include "elf.h" #include "sysemu/memory_mapping.h" diff --git a/target/i386/arch_memory_mapping.c b/target/i386/arch_memory_mapping.c index 647cff2829..271cb5e41b 100644 --- a/target/i386/arch_memory_mapping.c +++ b/target/i386/arch_memory_mapping.c @@ -13,7 +13,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/cpu-all.h" #include "sysemu/memory_mapping.h" /* PAE Paging or IA-32e Paging */ diff --git a/target/i386/cpu.c b/target/i386/cpu.c index d0000e40ae..0aa28fc775 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -1763,12 +1763,12 @@ static void report_unavailable_features(FeatureWord w, uint32_t mask) if ((1UL << i) & mask) { const char *reg = get_register_name_32(f->cpuid_reg); assert(reg); - fprintf(stderr, "warning: %s doesn't support requested feature: " - "CPUID.%02XH:%s%s%s [bit %d]\n", - kvm_enabled() ? "host" : "TCG", - f->cpuid_eax, reg, - f->feat_names[i] ? "." : "", - f->feat_names[i] ? f->feat_names[i] : "", i); + warn_report("%s doesn't support requested feature: " + "CPUID.%02XH:%s%s%s [bit %d]", + kvm_enabled() ? "host" : "TCG", + f->cpuid_eax, reg, + f->feat_names[i] ? "." : "", + f->feat_names[i] ? f->feat_names[i] : "", i); } } } @@ -3931,12 +3931,12 @@ static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) CPUX86State *env = &cpu->env; GuestPanicInformation *panic_info = NULL; - if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) { + if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { panic_info = g_malloc0(sizeof(GuestPanicInformation)); panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; - assert(HV_X64_MSR_CRASH_PARAMS >= 5); + assert(HV_CRASH_PARAMS >= 5); panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; @@ -4142,6 +4142,20 @@ static Property x86_cpu_properties[] = { false), DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), + + /* + * From "Requirements for Implementing the Microsoft + * Hypervisor Interface": + * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs + * + * "Starting with Windows Server 2012 and Windows 8, if + * CPUID.40000005.EAX contains a value of -1, Windows assumes that + * the hypervisor imposes no specific limit to the number of VPs. + * In this case, Windows Server 2012 guest VMs may use more than + * 64 VPs, up to the maximum supported number of processors applicable + * to the specific Windows version being used." 
+ */ + DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1), DEFINE_PROP_END_OF_LIST() }; diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 4035a11613..b086b1528b 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -22,7 +22,7 @@ #include "qemu-common.h" #include "cpu-qom.h" -#include "standard-headers/asm-x86/hyperv.h" +#include "hyperv-proto.h" #ifdef TARGET_X86_64 #define TARGET_LONG_BITS 64 @@ -1095,15 +1095,15 @@ typedef struct CPUX86State { uint64_t msr_hv_guest_os_id; uint64_t msr_hv_vapic; uint64_t msr_hv_tsc; - uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS]; + uint64_t msr_hv_crash_params[HV_CRASH_PARAMS]; uint64_t msr_hv_runtime; uint64_t msr_hv_synic_control; uint64_t msr_hv_synic_version; uint64_t msr_hv_synic_evt_page; uint64_t msr_hv_synic_msg_page; - uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT]; - uint64_t msr_hv_stimer_config[HV_SYNIC_STIMER_COUNT]; - uint64_t msr_hv_stimer_count[HV_SYNIC_STIMER_COUNT]; + uint64_t msr_hv_synic_sint[HV_SINT_COUNT]; + uint64_t msr_hv_stimer_config[HV_STIMER_COUNT]; + uint64_t msr_hv_stimer_count[HV_STIMER_COUNT]; /* exception/interrupt handling */ int error_code; @@ -1282,6 +1282,8 @@ struct X86CPU { int32_t socket_id; int32_t core_id; int32_t thread_id; + + int32_t hv_max_vps; }; static inline X86CPU *x86_env_get_cpu(CPUX86State *env) diff --git a/target/i386/hax-mem.c b/target/i386/hax-mem.c index af090343f3..27a0d214f2 100644 --- a/target/i386/hax-mem.c +++ b/target/i386/hax-mem.c @@ -12,6 +12,7 @@ #include "cpu.h" #include "exec/address-spaces.h" #include "exec/exec-all.h" +#include "qemu/error-report.h" #include "target/i386/hax-i386.h" #include "qemu/queue.h" @@ -178,9 +179,8 @@ static void hax_process_section(MemoryRegionSection *section, uint8_t flags) if (!memory_region_is_ram(mr)) { if (memory_region_is_romd(mr)) { /* HAXM kernel module does not support ROMD yet */ - fprintf(stderr, "%s: Warning: Ignoring ROMD region 0x%016" PRIx64 - "->0x%016" PRIx64 "\n", __func__, start_pa, - start_pa + size); + warn_report("Ignoring ROMD region 0x%016" PRIx64 "->0x%016" PRIx64, + start_pa, start_pa + size); } return; } diff --git a/target/i386/hyperv-proto.h b/target/i386/hyperv-proto.h new file mode 100644 index 0000000000..cb4d7f2b7a --- /dev/null +++ b/target/i386/hyperv-proto.h @@ -0,0 +1,260 @@ +/* + * Definitions for Hyper-V guest/hypervisor interaction + * + * Copyright (C) 2017 Parallels International GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef TARGET_I386_HYPERV_PROTO_H +#define TARGET_I386_HYPERV_PROTO_H + +#include "qemu/bitmap.h" + +#define HV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000 +#define HV_CPUID_INTERFACE 0x40000001 +#define HV_CPUID_VERSION 0x40000002 +#define HV_CPUID_FEATURES 0x40000003 +#define HV_CPUID_ENLIGHTMENT_INFO 0x40000004 +#define HV_CPUID_IMPLEMENT_LIMITS 0x40000005 +#define HV_CPUID_MIN 0x40000005 +#define HV_CPUID_MAX 0x4000ffff +#define HV_HYPERVISOR_PRESENT_BIT 0x80000000 + +/* + * HV_CPUID_FEATURES.EAX bits + */ +#define HV_VP_RUNTIME_AVAILABLE (1u << 0) +#define HV_TIME_REF_COUNT_AVAILABLE (1u << 1) +#define HV_SYNIC_AVAILABLE (1u << 2) +#define HV_SYNTIMERS_AVAILABLE (1u << 3) +#define HV_APIC_ACCESS_AVAILABLE (1u << 4) +#define HV_HYPERCALL_AVAILABLE (1u << 5) +#define HV_VP_INDEX_AVAILABLE (1u << 6) +#define HV_RESET_AVAILABLE (1u << 7) +#define HV_REFERENCE_TSC_AVAILABLE (1u << 9) +#define HV_ACCESS_FREQUENCY_MSRS (1u << 11) + + +/* + * HV_CPUID_FEATURES.EDX bits + */ +#define HV_MWAIT_AVAILABLE (1u << 0) +#define HV_GUEST_DEBUGGING_AVAILABLE (1u << 1) +#define HV_PERF_MONITOR_AVAILABLE (1u << 2) +#define HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1u << 3) +#define HV_HYPERCALL_PARAMS_XMM_AVAILABLE (1u << 4) +#define HV_GUEST_IDLE_STATE_AVAILABLE (1u << 5) +#define HV_FREQUENCY_MSRS_AVAILABLE (1u << 8) +#define HV_GUEST_CRASH_MSR_AVAILABLE (1u << 10) + +/* + * HV_CPUID_ENLIGHTMENT_INFO.EAX bits + */ +#define HV_AS_SWITCH_RECOMMENDED (1u << 0) +#define HV_LOCAL_TLB_FLUSH_RECOMMENDED (1u << 1) +#define HV_REMOTE_TLB_FLUSH_RECOMMENDED (1u << 2) +#define HV_APIC_ACCESS_RECOMMENDED (1u << 3) +#define HV_SYSTEM_RESET_RECOMMENDED (1u << 4) +#define HV_RELAXED_TIMING_RECOMMENDED (1u << 5) + +/* + * Basic virtualized MSRs + */ +#define HV_X64_MSR_GUEST_OS_ID 0x40000000 +#define HV_X64_MSR_HYPERCALL 0x40000001 +#define HV_X64_MSR_VP_INDEX 0x40000002 +#define HV_X64_MSR_RESET 0x40000003 +#define HV_X64_MSR_VP_RUNTIME 0x40000010 +#define HV_X64_MSR_TIME_REF_COUNT 0x40000020 +#define HV_X64_MSR_REFERENCE_TSC 0x40000021 +#define HV_X64_MSR_TSC_FREQUENCY 0x40000022 +#define HV_X64_MSR_APIC_FREQUENCY 0x40000023 + +/* + * Virtual APIC MSRs + */ +#define HV_X64_MSR_EOI 0x40000070 +#define HV_X64_MSR_ICR 0x40000071 +#define HV_X64_MSR_TPR 0x40000072 +#define HV_X64_MSR_APIC_ASSIST_PAGE 0x40000073 + +/* + * Synthetic interrupt controller MSRs + */ +#define HV_X64_MSR_SCONTROL 0x40000080 +#define HV_X64_MSR_SVERSION 0x40000081 +#define HV_X64_MSR_SIEFP 0x40000082 +#define HV_X64_MSR_SIMP 0x40000083 +#define HV_X64_MSR_EOM 0x40000084 +#define HV_X64_MSR_SINT0 0x40000090 +#define HV_X64_MSR_SINT1 0x40000091 +#define HV_X64_MSR_SINT2 0x40000092 +#define HV_X64_MSR_SINT3 0x40000093 +#define HV_X64_MSR_SINT4 0x40000094 +#define HV_X64_MSR_SINT5 0x40000095 +#define HV_X64_MSR_SINT6 0x40000096 +#define HV_X64_MSR_SINT7 0x40000097 +#define HV_X64_MSR_SINT8 0x40000098 +#define HV_X64_MSR_SINT9 0x40000099 +#define HV_X64_MSR_SINT10 0x4000009A +#define HV_X64_MSR_SINT11 0x4000009B +#define HV_X64_MSR_SINT12 0x4000009C +#define HV_X64_MSR_SINT13 0x4000009D +#define HV_X64_MSR_SINT14 0x4000009E +#define HV_X64_MSR_SINT15 0x4000009F + +/* + * Synthetic timer MSRs + */ +#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0 +#define HV_X64_MSR_STIMER0_COUNT 0x400000B1 +#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2 +#define HV_X64_MSR_STIMER1_COUNT 0x400000B3 +#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4 +#define HV_X64_MSR_STIMER2_COUNT 0x400000B5 +#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6 +#define HV_X64_MSR_STIMER3_COUNT 
0x400000B7 + +/* + * Guest crash notification MSRs + */ +#define HV_X64_MSR_CRASH_P0 0x40000100 +#define HV_X64_MSR_CRASH_P1 0x40000101 +#define HV_X64_MSR_CRASH_P2 0x40000102 +#define HV_X64_MSR_CRASH_P3 0x40000103 +#define HV_X64_MSR_CRASH_P4 0x40000104 +#define HV_CRASH_PARAMS (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 + 1) +#define HV_X64_MSR_CRASH_CTL 0x40000105 +#define HV_CRASH_CTL_NOTIFY (1ull << 63) + +/* + * Hypercall status code + */ +#define HV_STATUS_SUCCESS 0 +#define HV_STATUS_INVALID_HYPERCALL_CODE 2 +#define HV_STATUS_INVALID_HYPERCALL_INPUT 3 +#define HV_STATUS_INVALID_ALIGNMENT 4 +#define HV_STATUS_INVALID_PARAMETER 5 +#define HV_STATUS_INSUFFICIENT_MEMORY 11 +#define HV_STATUS_INVALID_CONNECTION_ID 18 +#define HV_STATUS_INSUFFICIENT_BUFFERS 19 + +/* + * Hypercall numbers + */ +#define HV_POST_MESSAGE 0x005c +#define HV_SIGNAL_EVENT 0x005d +#define HV_HYPERCALL_FAST (1u << 16) + +/* + * Hypercall MSR bits + */ +#define HV_HYPERCALL_ENABLE (1u << 0) + +/* + * Synthetic interrupt controller definitions + */ +#define HV_SYNIC_VERSION 1 +#define HV_SINT_COUNT 16 +#define HV_SYNIC_ENABLE (1u << 0) +#define HV_SIMP_ENABLE (1u << 0) +#define HV_SIEFP_ENABLE (1u << 0) +#define HV_SINT_MASKED (1u << 16) +#define HV_SINT_AUTO_EOI (1u << 17) +#define HV_SINT_VECTOR_MASK 0xff + +#define HV_STIMER_COUNT 4 + +/* + * Message size + */ +#define HV_MESSAGE_PAYLOAD_SIZE 240 + +/* + * Message types + */ +#define HV_MESSAGE_NONE 0x00000000 +#define HV_MESSAGE_VMBUS 0x00000001 +#define HV_MESSAGE_UNMAPPED_GPA 0x80000000 +#define HV_MESSAGE_GPA_INTERCEPT 0x80000001 +#define HV_MESSAGE_TIMER_EXPIRED 0x80000010 +#define HV_MESSAGE_INVALID_VP_REGISTER_VALUE 0x80000020 +#define HV_MESSAGE_UNRECOVERABLE_EXCEPTION 0x80000021 +#define HV_MESSAGE_UNSUPPORTED_FEATURE 0x80000022 +#define HV_MESSAGE_EVENTLOG_BUFFERCOMPLETE 0x80000040 +#define HV_MESSAGE_X64_IOPORT_INTERCEPT 0x80010000 +#define HV_MESSAGE_X64_MSR_INTERCEPT 0x80010001 +#define HV_MESSAGE_X64_CPUID_INTERCEPT 0x80010002 +#define HV_MESSAGE_X64_EXCEPTION_INTERCEPT 0x80010003 +#define HV_MESSAGE_X64_APIC_EOI 0x80010004 +#define HV_MESSAGE_X64_LEGACY_FP_ERROR 0x80010005 + +/* + * Message flags + */ +#define HV_MESSAGE_FLAG_PENDING 0x1 + +/* + * Event flags number per SINT + */ +#define HV_EVENT_FLAGS_COUNT (256 * 8) + +/* + * Connection id valid bits + */ +#define HV_CONNECTION_ID_MASK 0x00ffffff + +/* + * Input structure for POST_MESSAGE hypercall + */ +struct hyperv_post_message_input { + uint32_t connection_id; + uint32_t _reserved; + uint32_t message_type; + uint32_t payload_size; + uint8_t payload[HV_MESSAGE_PAYLOAD_SIZE]; +}; + +/* + * Input structure for SIGNAL_EVENT hypercall + */ +struct hyperv_signal_event_input { + uint32_t connection_id; + uint16_t flag_number; + uint16_t _reserved_zero; +}; + +/* + * SynIC message structures + */ +struct hyperv_message_header { + uint32_t message_type; + uint8_t payload_size; + uint8_t message_flags; /* HV_MESSAGE_FLAG_XX */ + uint8_t _reserved[2]; + uint64_t sender; +}; + +struct hyperv_message { + struct hyperv_message_header header; + uint8_t payload[HV_MESSAGE_PAYLOAD_SIZE]; +}; + +struct hyperv_message_page { + struct hyperv_message slot[HV_SINT_COUNT]; +}; + +/* + * SynIC event flags structures + */ +struct hyperv_event_flags { + DECLARE_BITMAP(flags, HV_EVENT_FLAGS_COUNT); +}; + +struct hyperv_event_flags_page { + struct hyperv_event_flags slot[HV_SINT_COUNT]; +}; + +#endif diff --git a/target/i386/hyperv.c b/target/i386/hyperv.c index 8545574568..a050c9d2d1 100644 --- 
a/target/i386/hyperv.c +++ b/target/i386/hyperv.c @@ -14,7 +14,7 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" #include "hyperv.h" -#include "standard-headers/asm-x86/hyperv.h" +#include "hyperv-proto.h" int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) { @@ -50,8 +50,8 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) code = exit->u.hcall.input & 0xffff; switch (code) { - case HVCALL_POST_MESSAGE: - case HVCALL_SIGNAL_EVENT: + case HV_POST_MESSAGE: + case HV_SIGNAL_EVENT: default: exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE; return 0; diff --git a/target/i386/kvm.c b/target/i386/kvm.c index 6db7783edc..b1e32e95d3 100644 --- a/target/i386/kvm.c +++ b/target/i386/kvm.c @@ -27,6 +27,7 @@ #include "sysemu/kvm_int.h" #include "kvm_i386.h" #include "hyperv.h" +#include "hyperv-proto.h" #include "exec/gdbstub.h" #include "qemu/host-utils.h" @@ -40,7 +41,6 @@ #include "hw/i386/x86-iommu.h" #include "exec/ioport.h" -#include "standard-headers/asm-x86/hyperv.h" #include "hw/pci/pci.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" @@ -89,6 +89,7 @@ static bool has_msr_hv_vpindex; static bool has_msr_hv_runtime; static bool has_msr_hv_synic; static bool has_msr_hv_stimer; +static bool has_msr_hv_frequencies; static bool has_msr_xss; static bool has_msr_architectural_pmu; @@ -611,6 +612,15 @@ static int kvm_arch_set_tsc_khz(CPUState *cs) return 0; } +static bool tsc_is_stable_and_known(CPUX86State *env) +{ + if (!env->tsc_khz) { + return false; + } + return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) + || env->user_tsc_khz; +} + static int hyperv_handle_properties(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); @@ -622,29 +632,34 @@ static int hyperv_handle_properties(CPUState *cs) } if (cpu->hyperv_relaxed_timing) { - env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE; + env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE; } if (cpu->hyperv_vapic) { - env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE; - env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_APIC_ACCESS_AVAILABLE; + env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE; + env->features[FEAT_HYPERV_EAX] |= HV_APIC_ACCESS_AVAILABLE; } if (cpu->hyperv_time) { - env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE; - env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE; - env->features[FEAT_HYPERV_EAX] |= 0x200; + env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE; + env->features[FEAT_HYPERV_EAX] |= HV_TIME_REF_COUNT_AVAILABLE; + env->features[FEAT_HYPERV_EAX] |= HV_REFERENCE_TSC_AVAILABLE; + + if (has_msr_hv_frequencies && tsc_is_stable_and_known(env)) { + env->features[FEAT_HYPERV_EAX] |= HV_ACCESS_FREQUENCY_MSRS; + env->features[FEAT_HYPERV_EDX] |= HV_FREQUENCY_MSRS_AVAILABLE; + } } if (cpu->hyperv_crash && has_msr_hv_crash) { - env->features[FEAT_HYPERV_EDX] |= HV_X64_GUEST_CRASH_MSR_AVAILABLE; + env->features[FEAT_HYPERV_EDX] |= HV_GUEST_CRASH_MSR_AVAILABLE; } - env->features[FEAT_HYPERV_EDX] |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE; + env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE; if (cpu->hyperv_reset && has_msr_hv_reset) { - env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_RESET_AVAILABLE; + env->features[FEAT_HYPERV_EAX] |= HV_RESET_AVAILABLE; } if (cpu->hyperv_vpindex && has_msr_hv_vpindex) { - env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_INDEX_AVAILABLE; + env->features[FEAT_HYPERV_EAX] |= HV_VP_INDEX_AVAILABLE; } if (cpu->hyperv_runtime && has_msr_hv_runtime) { - 
env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_RUNTIME_AVAILABLE; + env->features[FEAT_HYPERV_EAX] |= HV_VP_RUNTIME_AVAILABLE; } if (cpu->hyperv_synic) { int sint; @@ -655,10 +670,10 @@ static int hyperv_handle_properties(CPUState *cs) return -ENOSYS; } - env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNIC_AVAILABLE; - env->msr_hv_synic_version = HV_SYNIC_VERSION_1; + env->features[FEAT_HYPERV_EAX] |= HV_SYNIC_AVAILABLE; + env->msr_hv_synic_version = HV_SYNIC_VERSION; for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) { - env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED; + env->msr_hv_synic_sint[sint] = HV_SINT_MASKED; } } if (cpu->hyperv_stimer) { @@ -666,7 +681,7 @@ static int hyperv_handle_properties(CPUState *cs) fprintf(stderr, "Hyper-V timers aren't supported by kernel\n"); return -ENOSYS; } - env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNTIMER_AVAILABLE; + env->features[FEAT_HYPERV_EAX] |= HV_SYNTIMERS_AVAILABLE; } return 0; } @@ -695,10 +710,29 @@ int kvm_arch_init_vcpu(CPUState *cs) cpuid_i = 0; + r = kvm_arch_set_tsc_khz(cs); + if (r < 0) { + goto fail; + } + + /* vcpu's TSC frequency is either specified by user, or following + * the value used by KVM if the former is not present. In the + * latter case, we query it from KVM and record in env->tsc_khz, + * so that vcpu's TSC frequency can be migrated later via this field. + */ + if (!env->tsc_khz) { + r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ? + kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : + -ENOTSUP; + if (r > 0) { + env->tsc_khz = r; + } + } + /* Paravirtualization CPUIDs */ if (hyperv_enabled(cpu)) { c = &cpuid_data.entries[cpuid_i++]; - c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS; + c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS; if (!cpu->hyperv_vendor_id) { memcpy(signature, "Microsoft Hv", 12); } else { @@ -711,13 +745,13 @@ int kvm_arch_init_vcpu(CPUState *cs) memset(signature, 0, 12); memcpy(signature, cpu->hyperv_vendor_id, len); } - c->eax = HYPERV_CPUID_MIN; + c->eax = HV_CPUID_MIN; c->ebx = signature[0]; c->ecx = signature[1]; c->edx = signature[2]; c = &cpuid_data.entries[cpuid_i++]; - c->function = HYPERV_CPUID_INTERFACE; + c->function = HV_CPUID_INTERFACE; memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12); c->eax = signature[0]; c->ebx = 0; @@ -725,12 +759,12 @@ int kvm_arch_init_vcpu(CPUState *cs) c->edx = 0; c = &cpuid_data.entries[cpuid_i++]; - c->function = HYPERV_CPUID_VERSION; + c->function = HV_CPUID_VERSION; c->eax = 0x00001bbc; c->ebx = 0x00060001; c = &cpuid_data.entries[cpuid_i++]; - c->function = HYPERV_CPUID_FEATURES; + c->function = HV_CPUID_FEATURES; r = hyperv_handle_properties(cs); if (r) { return r; @@ -740,18 +774,19 @@ int kvm_arch_init_vcpu(CPUState *cs) c->edx = env->features[FEAT_HYPERV_EDX]; c = &cpuid_data.entries[cpuid_i++]; - c->function = HYPERV_CPUID_ENLIGHTMENT_INFO; + c->function = HV_CPUID_ENLIGHTMENT_INFO; if (cpu->hyperv_relaxed_timing) { - c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; + c->eax |= HV_RELAXED_TIMING_RECOMMENDED; } if (cpu->hyperv_vapic) { - c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; + c->eax |= HV_APIC_ACCESS_RECOMMENDED; } c->ebx = cpu->hyperv_spinlock_attempts; c = &cpuid_data.entries[cpuid_i++]; - c->function = HYPERV_CPUID_IMPLEMENT_LIMITS; - c->eax = 0x40; + c->function = HV_CPUID_IMPLEMENT_LIMITS; + + c->eax = cpu->hv_max_vps; c->ebx = 0x40; kvm_base = KVM_CPUID_SIGNATURE_NEXT; @@ -961,34 +996,13 @@ int kvm_arch_init_vcpu(CPUState *cs) } } - r = kvm_arch_set_tsc_khz(cs); - if (r < 0) { - goto fail; - } - - /* vcpu's TSC 
frequency is either specified by user, or following - * the value used by KVM if the former is not present. In the - * latter case, we query it from KVM and record in env->tsc_khz, - * so that vcpu's TSC frequency can be migrated later via this field. - */ - if (!env->tsc_khz) { - r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ? - kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : - -ENOTSUP; - if (r > 0) { - env->tsc_khz = r; - } - } - if (cpu->vmware_cpuid_freq /* Guests depend on 0x40000000 to detect this feature, so only expose * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */ && cpu->expose_kvm && kvm_base == KVM_CPUID_SIGNATURE /* TSC clock must be stable and known for this feature. */ - && ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) - || env->user_tsc_khz != 0) - && env->tsc_khz != 0) { + && tsc_is_stable_and_known(env)) { c = &cpuid_data.entries[cpuid_i++]; c->function = KVM_CPUID_SIGNATURE | 0x10; @@ -1081,65 +1095,55 @@ static int kvm_get_supported_msrs(KVMState *s) int i; for (i = 0; i < kvm_msr_list->nmsrs; i++) { - if (kvm_msr_list->indices[i] == MSR_STAR) { + switch (kvm_msr_list->indices[i]) { + case MSR_STAR: has_msr_star = true; - continue; - } - if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) { + break; + case MSR_VM_HSAVE_PA: has_msr_hsave_pa = true; - continue; - } - if (kvm_msr_list->indices[i] == MSR_TSC_AUX) { + break; + case MSR_TSC_AUX: has_msr_tsc_aux = true; - continue; - } - if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) { + break; + case MSR_TSC_ADJUST: has_msr_tsc_adjust = true; - continue; - } - if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) { + break; + case MSR_IA32_TSCDEADLINE: has_msr_tsc_deadline = true; - continue; - } - if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) { + break; + case MSR_IA32_SMBASE: has_msr_smbase = true; - continue; - } - if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) { + break; + case MSR_IA32_MISC_ENABLE: has_msr_misc_enable = true; - continue; - } - if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) { + break; + case MSR_IA32_BNDCFGS: has_msr_bndcfgs = true; - continue; - } - if (kvm_msr_list->indices[i] == MSR_IA32_XSS) { + break; + case MSR_IA32_XSS: has_msr_xss = true; - continue; - } - if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) { + break;; + case HV_X64_MSR_CRASH_CTL: has_msr_hv_crash = true; - continue; - } - if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) { + break; + case HV_X64_MSR_RESET: has_msr_hv_reset = true; - continue; - } - if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) { + break; + case HV_X64_MSR_VP_INDEX: has_msr_hv_vpindex = true; - continue; - } - if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) { + break; + case HV_X64_MSR_VP_RUNTIME: has_msr_hv_runtime = true; - continue; - } - if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) { + break; + case HV_X64_MSR_SCONTROL: has_msr_hv_synic = true; - continue; - } - if (kvm_msr_list->indices[i] == HV_X64_MSR_STIMER0_CONFIG) { + break; + case HV_X64_MSR_STIMER0_CONFIG: has_msr_hv_stimer = true; - continue; + break; + case HV_X64_MSR_TSC_FREQUENCY: + has_msr_hv_frequencies = true; + break; } } } @@ -1690,12 +1694,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level) if (has_msr_hv_crash) { int j; - for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) + for (j = 0; j < HV_CRASH_PARAMS; j++) kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, env->msr_hv_crash_params[j]); - kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, - HV_X64_MSR_CRASH_CTL_NOTIFY); + kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, 
HV_CRASH_CTL_NOTIFY); } if (has_msr_hv_runtime) { kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime); @@ -2059,7 +2062,7 @@ static int kvm_get_msrs(X86CPU *cpu) if (has_msr_hv_crash) { int j; - for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) { + for (j = 0; j < HV_CRASH_PARAMS; j++) { kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0); } } diff --git a/target/i386/machine.c b/target/i386/machine.c index eab33725a3..29cc58eda9 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -587,7 +587,7 @@ static bool hyperv_crash_enable_needed(void *opaque) CPUX86State *env = &cpu->env; int i; - for (i = 0; i < HV_X64_MSR_CRASH_PARAMS; i++) { + for (i = 0; i < HV_CRASH_PARAMS; i++) { if (env->msr_hv_crash_params[i]) { return true; } @@ -601,8 +601,7 @@ static const VMStateDescription vmstate_msr_hyperv_crash = { .minimum_version_id = 1, .needed = hyperv_crash_enable_needed, .fields = (VMStateField[]) { - VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, - X86CPU, HV_X64_MSR_CRASH_PARAMS), + VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS), VMSTATE_END_OF_LIST() } }; @@ -660,8 +659,7 @@ static const VMStateDescription vmstate_msr_hyperv_synic = { VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU), VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU), VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU), - VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, - HV_SYNIC_SINT_COUNT), + VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT), VMSTATE_END_OF_LIST() } }; @@ -686,10 +684,9 @@ static const VMStateDescription vmstate_msr_hyperv_stimer = { .minimum_version_id = 1, .needed = hyperv_stimer_enable_needed, .fields = (VMStateField[]) { - VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, - X86CPU, HV_SYNIC_STIMER_COUNT), - VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, - X86CPU, HV_SYNIC_STIMER_COUNT), + VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU, + HV_STIMER_COUNT), + VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT), VMSTATE_END_OF_LIST() } }; diff --git a/target/i386/monitor.c b/target/i386/monitor.c index fe7d57b6aa..75e155ffb1 100644 --- a/target/i386/monitor.c +++ b/target/i386/monitor.c @@ -447,7 +447,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env) start = -1; for (l0 = 0; l0 < 512; l0++) { cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8); - pml4e = le64_to_cpu(pml5e); + pml5e = le64_to_cpu(pml5e); end = l0 << 48; if (!(pml5e & PG_PRESENT_MASK)) { prot = 0; @@ -480,7 +480,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env) if (pdpe & PG_PSE_MASK) { prot = pdpe & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK); - prot &= pml4e; + prot &= pml5e & pml4e; mem_print(mon, &start, &last_prot, end, prot); continue; } @@ -499,7 +499,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env) if (pde & PG_PSE_MASK) { prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK); - prot &= pml4e & pdpe; + prot &= pml5e & pml4e & pdpe; mem_print(mon, &start, &last_prot, end, prot); continue; } @@ -513,7 +513,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env) if (pte & PG_PRESENT_MASK) { prot = pte & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK); - prot &= pml4e & pdpe & pde; + prot &= pml5e & pml4e & pdpe & pde; } else { prot = 0; } diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h index 16509d0a74..ed05989768 100644 --- a/target/i386/ops_sse.h +++ b/target/i386/ops_sse.h @@ -1617,18 +1617,18 @@ void glue(helper_ptest, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) #define 
SSE_HELPER_F(name, elem, num, F) \ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ { \ - d->elem(0) = F(0); \ - d->elem(1) = F(1); \ if (num > 2) { \ - d->elem(2) = F(2); \ - d->elem(3) = F(3); \ if (num > 4) { \ - d->elem(4) = F(4); \ - d->elem(5) = F(5); \ - d->elem(6) = F(6); \ d->elem(7) = F(7); \ + d->elem(6) = F(6); \ + d->elem(5) = F(5); \ + d->elem(4) = F(4); \ } \ + d->elem(3) = F(3); \ + d->elem(2) = F(2); \ } \ + d->elem(1) = F(1); \ + d->elem(0) = F(0); \ } SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B) @@ -1655,14 +1655,17 @@ SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ) void glue(helper_packusdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { - d->W(0) = satuw((int32_t) d->L(0)); - d->W(1) = satuw((int32_t) d->L(1)); - d->W(2) = satuw((int32_t) d->L(2)); - d->W(3) = satuw((int32_t) d->L(3)); - d->W(4) = satuw((int32_t) s->L(0)); - d->W(5) = satuw((int32_t) s->L(1)); - d->W(6) = satuw((int32_t) s->L(2)); - d->W(7) = satuw((int32_t) s->L(3)); + Reg r; + + r.W(0) = satuw((int32_t) d->L(0)); + r.W(1) = satuw((int32_t) d->L(1)); + r.W(2) = satuw((int32_t) d->L(2)); + r.W(3) = satuw((int32_t) d->L(3)); + r.W(4) = satuw((int32_t) s->L(0)); + r.W(5) = satuw((int32_t) s->L(1)); + r.W(6) = satuw((int32_t) s->L(2)); + r.W(7) = satuw((int32_t) s->L(3)); + *d = r; } #define FMINSB(d, s) MIN((int8_t)d, (int8_t)s) @@ -1707,10 +1710,10 @@ void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) idx = 7; } - d->Q(1) = 0; - d->L(1) = 0; - d->W(1) = idx; d->W(0) = s->W(idx); + d->W(1) = idx; + d->L(1) = 0; + d->Q(1) = 0; } void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, @@ -2037,10 +2040,14 @@ static inline unsigned pcmpxstrx(CPUX86State *env, Reg *d, Reg *s, } break; case 3: - for (j = valids; j >= 0; j--) { + if (validd == -1) { + res = (2 << upper) - 1; + break; + } + for (j = valids - validd; j >= 0; j--) { res <<= 1; v = 1; - for (i = MIN(valids - j, validd); i >= 0; i--) { + for (i = validd; i >= 0; i--) { v &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i)); } res |= v; diff --git a/target/i386/svm_helper.c b/target/i386/svm_helper.c index 59e8b5091c..f479239875 100644 --- a/target/i386/svm_helper.c +++ b/target/i386/svm_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/cpu-all.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" diff --git a/target/i386/translate.c b/target/i386/translate.c index de0c989763..a8986f4c1a 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -4076,10 +4076,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask)) goto illegal_op; + s->rip_offset = 1; + if (sse_fn_eppi == SSE_SPECIAL) { ot = mo_64_32(s->dflag); rm = (modrm & 7) | REX_B(s); - s->rip_offset = 1; if (mod != 3) gen_lea_modrm(env, s, modrm); reg = ((modrm >> 3) & 7) | rex_r; |
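
Note on the new x-hv-max-vps property: it feeds CPUID leaf 0x40000005 (HV_CPUID_IMPLEMENT_LIMITS), whose EAX reports the maximum number of virtual processors; per the TLFS text quoted in cpu.c above, a value of -1 means the hypervisor imposes no specific limit. As a rough guest-side illustration only (not part of this patch; the helper name and return convention are hypothetical), a guest could read the leaf like this:

    #include <stdint.h>
    #include <cpuid.h>              /* GCC/Clang __cpuid() macro */

    #define HV_CPUID_IMPLEMENT_LIMITS 0x40000005

    /*
     * Hypothetical guest-side helper: read the Hyper-V "implementation
     * limits" leaf and return the advertised VP limit, treating the
     * all-ones value (-1) as "no specific limit", as described in the
     * TLFS excerpt quoted in cpu.c.  Assumes the caller has already
     * verified that a Hyper-V-compatible hypervisor interface is
     * present (CPUID.1:ECX bit 31 and the 0x40000000 vendor leaf).
     */
    static uint32_t hv_max_vps_from_cpuid(void)
    {
        uint32_t eax, ebx, ecx, edx;

        __cpuid(HV_CPUID_IMPLEMENT_LIMITS, eax, ebx, ecx, edx);
        return eax;                 /* 0xffffffff => no specific limit */
    }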