Diffstat (limited to 'target/i386/kvm.c')
| -rw-r--r-- | target/i386/kvm.c | 135 |
1 file changed, 64 insertions, 71 deletions
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 6f69e2fcfd..ad4b159b28 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -91,9 +91,11 @@ static bool has_msr_hv_synic;
 static bool has_msr_hv_stimer;
 static bool has_msr_hv_frequencies;
 static bool has_msr_xss;
+static bool has_msr_spec_ctrl;
 
-static bool has_msr_architectural_pmu;
-static uint32_t num_architectural_pmu_counters;
+static uint32_t has_architectural_pmu_version;
+static uint32_t num_architectural_pmu_gp_counters;
+static uint32_t num_architectural_pmu_fixed_counters;
 
 static int has_xsave;
 static int has_xcrs;
@@ -872,19 +874,28 @@ int kvm_arch_init_vcpu(CPUState *cs)
     }
 
     if (limit >= 0x0a) {
-        uint32_t ver;
+        uint32_t eax, edx;
 
-        cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
-        if ((ver & 0xff) > 0) {
-            has_msr_architectural_pmu = true;
-            num_architectural_pmu_counters = (ver & 0xff00) >> 8;
+        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
+
+        has_architectural_pmu_version = eax & 0xff;
+        if (has_architectural_pmu_version > 0) {
+            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
 
             /* Shouldn't be more than 32, since that's the number of bits
              * available in EBX to tell us _which_ counters are available.
              * Play it safe.
              */
-            if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
-                num_architectural_pmu_counters = MAX_GP_COUNTERS;
+            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
+                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
+            }
+
+            if (has_architectural_pmu_version > 1) {
+                num_architectural_pmu_fixed_counters = edx & 0x1f;
+
+                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
+                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
+                }
             }
         }
     }
@@ -1143,6 +1154,9 @@ static int kvm_get_supported_msrs(KVMState *s)
                 case HV_X64_MSR_TSC_FREQUENCY:
                     has_msr_hv_frequencies = true;
                     break;
+                case MSR_IA32_SPEC_CTRL:
+                    has_msr_spec_ctrl = true;
+                    break;
                 }
             }
         }
@@ -1625,6 +1639,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
     if (has_msr_xss) {
         kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
     }
+    if (has_msr_spec_ctrl) {
+        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
+    }
 #ifdef TARGET_X86_64
     if (lm_capable_kernel) {
         kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
@@ -1633,6 +1650,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
         kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
     }
 #endif
+
     /*
      * The following MSRs have side effects on the guest or are too heavy
      * for normal writeback. Limit them to reset or full state updates.
@@ -1650,32 +1668,36 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
         if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
             kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
         }
-        if (has_msr_architectural_pmu) {
-            /* Stop the counter. */
-            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
-            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+        if (has_architectural_pmu_version > 0) {
+            if (has_architectural_pmu_version > 1) {
+                /* Stop the counter. */
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+            }
 
             /* Set the counter values. */
-            for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+            for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
                                   env->msr_fixed_counters[i]);
             }
-            for (i = 0; i < num_architectural_pmu_counters; i++) {
+            for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
                 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
                                   env->msr_gp_counters[i]);
                 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
                                   env->msr_gp_evtsel[i]);
             }
-            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
-                              env->msr_global_status);
-            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
-                              env->msr_global_ovf_ctrl);
-
-            /* Now start the PMU. */
-            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
-                              env->msr_fixed_ctr_ctrl);
-            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
-                              env->msr_global_ctrl);
+            if (has_architectural_pmu_version > 1) {
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
+                                  env->msr_global_status);
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+                                  env->msr_global_ovf_ctrl);
+
+                /* Now start the PMU. */
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
+                                  env->msr_fixed_ctr_ctrl);
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
+                                  env->msr_global_ctrl);
+            }
         }
         /*
          * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
@@ -1877,7 +1899,6 @@ static int kvm_get_sregs(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
     struct kvm_sregs sregs;
-    uint32_t hflags;
     int bit, i, ret;
 
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
@@ -1919,44 +1940,7 @@ static int kvm_get_sregs(X86CPU *cpu)
     env->efer = sregs.efer;
 
     /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
-
-#define HFLAG_COPY_MASK \
-    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
-       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
-       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
-       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
-
-    hflags = env->hflags & HFLAG_COPY_MASK;
-    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
-    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
-    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
-                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
-    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
-
-    if (env->cr[4] & CR4_OSFXSR_MASK) {
-        hflags |= HF_OSFXSR_MASK;
-    }
-
-    if (env->efer & MSR_EFER_LMA) {
-        hflags |= HF_LMA_MASK;
-    }
-
-    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
-        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
-    } else {
-        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
-                    (DESC_B_SHIFT - HF_CS32_SHIFT);
-        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
-                    (DESC_B_SHIFT - HF_SS32_SHIFT);
-        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
-            !(hflags & HF_CS32_MASK)) {
-            hflags |= HF_ADDSEG_MASK;
-        } else {
-            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
-                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
-        }
-    }
-    env->hflags = hflags;
+    x86_update_hflags(env);
 
     return 0;
 }
@@ -2004,6 +1988,9 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_xss) {
         kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
     }
+    if (has_msr_spec_ctrl) {
+        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
+    }
 
 
     if (!env->tsc_valid) {
@@ -2030,15 +2017,17 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
         kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
     }
-    if (has_msr_architectural_pmu) {
-        kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
-        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
-        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
-        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
-        for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+    if (has_architectural_pmu_version > 0) {
+        if (has_architectural_pmu_version > 1) {
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
+        }
+        for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
             kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
         }
-        for (i = 0; i < num_architectural_pmu_counters; i++) {
+        for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
             kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
             kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
         }
@@ -2349,6 +2338,9 @@ static int kvm_get_msrs(X86CPU *cpu)
                 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
             }
             break;
+        case MSR_IA32_SPEC_CTRL:
+            env->spec_ctrl = msrs[i].data;
+            break;
         }
     }
 
@@ -3492,6 +3484,7 @@ int kvm_arch_release_virq_post(int virq)
         if (entry->virq == virq) {
             trace_kvm_x86_remove_msi_route(virq);
             QLIST_REMOVE(entry, list);
+            g_free(entry);
             break;
         }
     }
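For reference, the CPUID decoding that the kvm_arch_init_vcpu() hunk relies on can be exercised outside QEMU. The sketch below is not part of the patch; it is a minimal standalone program, assuming a GCC or Clang x86 toolchain (for <cpuid.h> and __get_cpuid()), that reads the same fields the patch stores: the architectural PMU version from EAX[7:0], the general-purpose counter count from EAX[15:8], and, when the version is greater than 1, the fixed counter count from EDX[4:0] of CPUID leaf 0x0A.

/* Minimal standalone sketch (not part of the patch): decode the
 * architectural PMU fields from CPUID leaf 0x0A the same way the
 * kvm_arch_init_vcpu() hunk above does.  Assumes a GCC/Clang x86
 * toolchain providing <cpuid.h> and __get_cpuid(). */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    unsigned int version, gp_counters, fixed_counters = 0;

    if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx)) {
        fprintf(stderr, "CPUID leaf 0x0a not available\n");
        return 1;
    }

    version     = eax & 0xff;          /* EAX[7:0]:  PMU version ID     */
    gp_counters = (eax & 0xff00) >> 8; /* EAX[15:8]: GP counter count   */
    if (version > 1) {
        fixed_counters = edx & 0x1f;   /* EDX[4:0]:  fixed counter count */
    }

    printf("PMU version %u, %u GP counters, %u fixed counters\n",
           version, gp_counters, fixed_counters);
    return 0;
}

Clamping against MAX_GP_COUNTERS and MAX_FIXED_COUNTERS, as the patch does, matters because env->msr_gp_counters and env->msr_fixed_counters are fixed-size arrays; the CPUID-reported counts only bound the loops in kvm_put_msrs() and kvm_get_msrs().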