Diffstat (limited to 'target')
-rw-r--r--   target/i386/cpu.c                     |   4
-rw-r--r--   target/i386/kvm/kvm.c                 | 225
-rw-r--r--   target/i386/kvm/kvm_i386.h            |   2
-rw-r--r--   target/i386/ops_sse.h                 | 128
-rw-r--r--   target/i386/tcg/decode-new.c.inc      | 234
-rw-r--r--   target/i386/tcg/decode-new.h          |  36
-rw-r--r--   target/i386/tcg/emit.c.inc            |  62
-rw-r--r--   target/i386/tcg/ops_sse_header.h.inc  |  14
-rw-r--r--   target/riscv/kvm/kvm-cpu.c            |   2
9 files changed, 420 insertions, 287 deletions
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index bdca901dfa..fc8484cb5e 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -714,7 +714,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
           CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
           CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
           CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_RDSEED | \
-          CPUID_7_0_EBX_KERNEL_FEATURES)
+          CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_KERNEL_FEATURES)
           /* missing:
           CPUID_7_0_EBX_HLE
           CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM */
@@ -7377,7 +7377,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
             return;
         }
 
-        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
+        if (env->features[FEAT_1_EDX] & (CPUID_PSE36 | CPUID_PAE)) {
             cpu->phys_bits = 36;
         } else {
             cpu->phys_bits = 32;
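Note: the second cpu.c hunk affects guests whose CPU model lacks CPUID leaf 0x80000008. PAE paging can generate 36-bit physical addresses just like PSE-36, so either feature now widens the fallback phys_bits. A minimal sketch of the resulting rule (the CPUID_* values below restate the architectural CPUID[1].EDX bit positions for illustration):

    #include <stdint.h>

    /* Architectural CPUID[1].EDX bits, restated for this sketch. */
    #define CPUID_PAE   (1u << 6)
    #define CPUID_PSE36 (1u << 17)

    /* Fallback when CPUID leaf 0x80000008 is unavailable: PAE and
     * PSE-36 page tables can both produce 36-bit physical addresses. */
    static unsigned phys_bits_fallback(uint32_t feat_1_edx)
    {
        return (feat_1_edx & (CPUID_PSE36 | CPUID_PAE)) ? 36 : 32;
    }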
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index e7c054cc16..770e81d56e 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -91,6 +91,15 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
     KVM_CAP_INFO(SET_TSS_ADDR),
     KVM_CAP_INFO(EXT_CPUID),
     KVM_CAP_INFO(MP_STATE),
+    KVM_CAP_INFO(SIGNAL_MSI),
+    KVM_CAP_INFO(IRQ_ROUTING),
+    KVM_CAP_INFO(DEBUGREGS),
+    KVM_CAP_INFO(XSAVE),
+    KVM_CAP_INFO(VCPU_EVENTS),
+    KVM_CAP_INFO(X86_ROBUST_SINGLESTEP),
+    KVM_CAP_INFO(MCE),
+    KVM_CAP_INFO(ADJUST_CLOCK),
+    KVM_CAP_INFO(SET_IDENTITY_MAP_ADDR),
     KVM_CAP_LAST_INFO
 };
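Note: adding these entries to kvm_arch_required_capabilities turns former runtime fallbacks into hard requirements; QEMU now refuses to start on kernels that lack any of them, which is what permits all the deletions below. A rough sketch of how such a table is typically consumed at init time (the helper names here are illustrative, not QEMU's actual generic code):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    struct cap_info {
        int cap;            /* a KVM_CAP_* constant */
        const char *name;
    };

    /* Walk a terminated capability table; a missing capability aborts
     * initialization instead of enabling a legacy code path. */
    static int check_required_caps(int kvm_fd, const struct cap_info *caps)
    {
        for (; caps->name; caps++) {
            if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, caps->cap) <= 0) {
                fprintf(stderr, "kvm: missing capability %s\n", caps->name);
                return -ENOSYS;
            }
        }
        return 0;
    }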
@@ -134,10 +143,8 @@ static uint32_t has_architectural_pmu_version;
 static uint32_t num_architectural_pmu_gp_counters;
 static uint32_t num_architectural_pmu_fixed_counters;
 
-static int has_xsave;
 static int has_xsave2;
 static int has_xcrs;
-static int has_pit_state2;
 static int has_sregs2;
 static int has_exception_payload;
 static int has_triple_fault_event;
@@ -154,11 +161,6 @@ static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];
 static RateLimit bus_lock_ratelimit_ctrl;
 static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);
 
-bool kvm_has_pit_state2(void)
-{
-    return !!has_pit_state2;
-}
-
 bool kvm_has_smm(void)
 {
     return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
@@ -171,11 +173,6 @@ bool kvm_has_adjust_clock_stable(void)
     return (ret & KVM_CLOCK_TSC_STABLE);
 }
 
-bool kvm_has_adjust_clock(void)
-{
-    return kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
-}
-
 bool kvm_has_exception_payload(void)
 {
     return has_exception_payload;
@@ -577,14 +574,8 @@ uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
 static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                      int *max_banks)
 {
-    int r;
-
-    r = kvm_check_extension(s, KVM_CAP_MCE);
-    if (r > 0) {
-        *max_banks = r;
-        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
-    }
-    return -ENOSYS;
+    *max_banks = kvm_check_extension(s, KVM_CAP_MCE);
+    return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
 }
 
 static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
@@ -687,15 +678,6 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
     emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
 }
 
-static void kvm_reset_exception(CPUX86State *env)
-{
-    env->exception_nr = -1;
-    env->exception_pending = 0;
-    env->exception_injected = 0;
-    env->exception_has_payload = false;
-    env->exception_payload = 0;
-}
-
 static void kvm_queue_exception(CPUX86State *env,
                                 int32_t exception_nr,
                                 uint8_t exception_has_payload,
@@ -728,38 +710,6 @@ static void kvm_queue_exception(CPUX86State *env,
     }
 }
 
-static int kvm_inject_mce_oldstyle(X86CPU *cpu)
-{
-    CPUX86State *env = &cpu->env;
-
-    if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
-        unsigned int bank, bank_num = env->mcg_cap & 0xff;
-        struct kvm_x86_mce mce;
-
-        kvm_reset_exception(env);
-
-        /*
-         * There must be at least one bank in use if an MCE is pending.
-         * Find it and use its values for the event injection.
-         */
-        for (bank = 0; bank < bank_num; bank++) {
-            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
-                break;
-            }
-        }
-        assert(bank < bank_num);
-
-        mce.bank = bank;
-        mce.status = env->mce_banks[bank * 4 + 1];
-        mce.mcg_status = env->mcg_status;
-        mce.addr = env->mce_banks[bank * 4 + 2];
-        mce.misc = env->mce_banks[bank * 4 + 3];
-
-        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
-    }
-    return 0;
-}
-
 static void cpu_update_state(void *opaque, bool running, RunState state)
 {
     CPUX86State *env = opaque;
@@ -1711,10 +1661,8 @@ static void kvm_init_xsave(CPUX86State *env)
 {
     if (has_xsave2) {
         env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
-    } else if (has_xsave) {
-        env->xsave_buf_len = sizeof(struct kvm_xsave);
     } else {
-        return;
+        env->xsave_buf_len = sizeof(struct kvm_xsave);
     }
 
     env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
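Note: with KVM_CAP_XSAVE mandatory, kvm_init_xsave always allocates a buffer, either the size reported by KVM_CAP_XSAVE2 rounded up to a page, or the fixed 4 KiB struct kvm_xsave. The sizing rule in isolation (has_xsave2 is the value returned by the capability check; struct kvm_xsave is 4096 bytes by definition):

    #include <stddef.h>

    #define ALIGN_UP(n, a) (((n) + (a) - 1) / (a) * (a))

    static size_t xsave_buf_len(int has_xsave2)
    {
        /* KVM_CAP_XSAVE2 returns the buffer size needed by
         * KVM_GET_XSAVE2, or 0 when only the legacy 4 KiB
         * struct kvm_xsave is available. */
        return has_xsave2 ? ALIGN_UP((size_t)has_xsave2, 4096) : 4096;
    }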
@@ -2154,8 +2102,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
 
     if (((env->cpuid_version >> 8)&0xF) >= 6
         && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
-           (CPUID_MCE | CPUID_MCA)
-        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
+           (CPUID_MCE | CPUID_MCA)) {
         uint64_t mcg_cap, unsupported_caps;
         int banks;
         int ret;
@@ -2589,14 +2536,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
         return ret;
     }
 
-    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
-        error_report("kvm: KVM_CAP_IRQ_ROUTING not supported by KVM");
-        return -ENOTSUP;
-    }
-
-    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
     has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
-    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
     has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0;
 
     hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
@@ -2654,20 +2594,13 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
      * In order to use vm86 mode, an EPT identity map and a TSS are needed.
      * Since these must be part of guest physical memory, we need to allocate
      * them, both by setting their start addresses in the kernel and by
-     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
-     *
-     * Older KVM versions may not support setting the identity map base. In
-     * that case we need to stick with the default, i.e. a 256K maximum BIOS
-     * size.
+     * creating a corresponding e820 entry. We need 4 pages before the BIOS,
+     * so this value allows up to 16M BIOSes.
      */
-    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
-        /* Allows up to 16M BIOSes. */
-        identity_base = 0xfeffc000;
-
-        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
-        if (ret < 0) {
-            return ret;
-        }
+    identity_base = 0xfeffc000;
+    ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
+    if (ret < 0) {
+        return ret;
     }
 
     /* Set TSS base one page after EPT identity map. */
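Note: the identity-map hunk hard-codes a layout the old code only used when KVM_CAP_SET_IDENTITY_MAP_ADDR was present. The four reserved pages sit directly below the 16 MiB window that a BIOS image mapped at the top of 4 GiB can occupy (addresses taken from the diff):

    #include <stdint.h>

    /* 0xfeffc000  EPT identity map, 1 page
     * 0xfeffd000  TSS, 3 pages (one page after the identity map)
     * 0xff000000  lowest address of a 16 MiB BIOS ending at 4 GiB */
    static const uint64_t identity_base = 0xfeffc000;
    static const uint64_t tss_base      = 0xfeffc000 + 0x1000;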
@@ -2879,40 +2812,11 @@ static int kvm_getput_regs(X86CPU *cpu, int set)
     return ret;
 }
 
-static int kvm_put_fpu(X86CPU *cpu)
-{
-    CPUX86State *env = &cpu->env;
-    struct kvm_fpu fpu;
-    int i;
-
-    memset(&fpu, 0, sizeof fpu);
-    fpu.fsw = env->fpus & ~(7 << 11);
-    fpu.fsw |= (env->fpstt & 7) << 11;
-    fpu.fcw = env->fpuc;
-    fpu.last_opcode = env->fpop;
-    fpu.last_ip = env->fpip;
-    fpu.last_dp = env->fpdp;
-    for (i = 0; i < 8; ++i) {
-        fpu.ftwx |= (!env->fptags[i]) << i;
-    }
-    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
-    for (i = 0; i < CPU_NB_REGS; i++) {
-        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
-        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
-    }
-    fpu.mxcsr = env->mxcsr;
-
-    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
-}
-
 static int kvm_put_xsave(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
     void *xsave = env->xsave_buf;
 
-    if (!has_xsave) {
-        return kvm_put_fpu(cpu);
-    }
     x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);
 
     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
@@ -3657,46 +3561,12 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
 }
 
-static int kvm_get_fpu(X86CPU *cpu)
-{
-    CPUX86State *env = &cpu->env;
-    struct kvm_fpu fpu;
-    int i, ret;
-
-    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
-    if (ret < 0) {
-        return ret;
-    }
-
-    env->fpstt = (fpu.fsw >> 11) & 7;
-    env->fpus = fpu.fsw;
-    env->fpuc = fpu.fcw;
-    env->fpop = fpu.last_opcode;
-    env->fpip = fpu.last_ip;
-    env->fpdp = fpu.last_dp;
-    for (i = 0; i < 8; ++i) {
-        env->fptags[i] = !((fpu.ftwx >> i) & 1);
-    }
-    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
-    for (i = 0; i < CPU_NB_REGS; i++) {
-        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
-        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
-    }
-    env->mxcsr = fpu.mxcsr;
-
-    return 0;
-}
-
 static int kvm_get_xsave(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
     void *xsave = env->xsave_buf;
     int type, ret;
 
-    if (!has_xsave) {
-        return kvm_get_fpu(cpu);
-    }
-
     type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE;
     ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave);
     if (ret < 0) {
@@ -4427,10 +4297,6 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
     CPUX86State *env = &cpu->env;
     struct kvm_vcpu_events events = {};
 
-    if (!kvm_has_vcpu_events()) {
-        return 0;
-    }
-
     events.flags = 0;
 
     if (has_exception_payload) {
@@ -4498,10 +4364,6 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
     struct kvm_vcpu_events events;
     int ret;
 
-    if (!kvm_has_vcpu_events()) {
-        return 0;
-    }
-
     memset(&events, 0, sizeof(events));
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
     if (ret < 0) {
@@ -4567,47 +4429,12 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
     return 0;
 }
 
-static int kvm_guest_debug_workarounds(X86CPU *cpu)
-{
-    CPUState *cs = CPU(cpu);
-    CPUX86State *env = &cpu->env;
-    int ret = 0;
-    unsigned long reinject_trap = 0;
-
-    if (!kvm_has_vcpu_events()) {
-        if (env->exception_nr == EXCP01_DB) {
-            reinject_trap = KVM_GUESTDBG_INJECT_DB;
-        } else if (env->exception_injected == EXCP03_INT3) {
-            reinject_trap = KVM_GUESTDBG_INJECT_BP;
-        }
-        kvm_reset_exception(env);
-    }
-
-    /*
-     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
-     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
-     * by updating the debug state once again if single-stepping is on.
-     * Another reason to call kvm_update_guest_debug here is a pending debug
-     * trap raise by the guest. On kernels without SET_VCPU_EVENTS we have to
-     * reinject them via SET_GUEST_DEBUG.
-     */
-    if (reinject_trap ||
-        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
-        ret = kvm_update_guest_debug(cs, reinject_trap);
-    }
-    return ret;
-}
-
 static int kvm_put_debugregs(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
     struct kvm_debugregs dbgregs;
     int i;
 
-    if (!kvm_has_debugregs()) {
-        return 0;
-    }
-
     memset(&dbgregs, 0, sizeof(dbgregs));
     for (i = 0; i < 4; i++) {
         dbgregs.db[i] = env->dr[i];
@@ -4625,10 +4452,6 @@ static int kvm_get_debugregs(X86CPU *cpu)
     struct kvm_debugregs dbgregs;
     int i, ret;
 
-    if (!kvm_has_debugregs()) {
-        return 0;
-    }
-
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
     if (ret < 0) {
         return ret;
@@ -4778,11 +4601,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
     if (ret < 0) {
         return ret;
     }
-    /* must be before kvm_put_msrs */
-    ret = kvm_inject_mce_oldstyle(x86_cpu);
-    if (ret < 0) {
-        return ret;
-    }
     ret = kvm_put_msrs(x86_cpu, level);
     if (ret < 0) {
         return ret;
@@ -4806,11 +4624,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
     if (ret < 0) {
         return ret;
     }
-    /* must be last */
-    ret = kvm_guest_debug_workarounds(x86_cpu);
-    if (ret < 0) {
-        return ret;
-    }
 
     return 0;
 }
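Note: a consequence of deleting kvm_put_fpu/kvm_get_fpu is that FPU and SSE state now always travels through the XSAVE ioctls; the only remaining decision is fixed-size versus variable-size buffer, as in this condensed form of kvm_get_xsave above:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* KVM_GET_XSAVE2 (when offered) fills a larger, variable-size
     * buffer; otherwise the fixed 4 KiB KVM_GET_XSAVE is used. */
    static int get_xsave(int vcpu_fd, void *buf, int has_xsave2)
    {
        return ioctl(vcpu_fd, has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE, buf);
    }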
diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h
index 55d4e68c34..30fedcffea 100644
--- a/target/i386/kvm/kvm_i386.h
+++ b/target/i386/kvm/kvm_i386.h
@@ -33,7 +33,6 @@
 bool kvm_has_smm(void);
 bool kvm_enable_x2apic(void);
 bool kvm_hv_vpindex_settable(void);
-bool kvm_has_pit_state2(void);
 
 bool kvm_enable_sgx_provisioning(KVMState *s);
 bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp);
@@ -50,7 +49,6 @@ void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask);
 
 #ifdef CONFIG_KVM
 
-bool kvm_has_adjust_clock(void);
 bool kvm_has_adjust_clock_stable(void);
 bool kvm_has_exception_payload(void);
 void kvm_synchronize_all_tsc(void);
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
index 33908c0691..6a465a35fd 100644
--- a/target/i386/ops_sse.h
+++ b/target/i386/ops_sse.h
@@ -2527,6 +2527,134 @@ SSE_HELPER_FMAP(helper_fma4ps, ZMM_S, 2 << SHIFT, float32_muladd)
 SSE_HELPER_FMAP(helper_fma4pd, ZMM_D, 1 << SHIFT, float64_muladd)
 #endif
 
+#if SHIFT == 1
+#define SSE_HELPER_SHA1RNDS4(name, F, K)                        \
+    void name(Reg *d, Reg *a, Reg *b)                           \
+    {                                                           \
+        uint32_t A, B, C, D, E, t, i;                           \
+                                                                \
+        A = a->L(3);                                            \
+        B = a->L(2);                                            \
+        C = a->L(1);                                            \
+        D = a->L(0);                                            \
+        E = 0;                                                  \
+                                                                \
+        for (i = 0; i <= 3; i++) {                              \
+            t = F(B, C, D) + rol32(A, 5) + b->L(3 - i) + E + K; \
+            E = D;                                              \
+            D = C;                                              \
+            C = rol32(B, 30);                                   \
+            B = A;                                              \
+            A = t;                                              \
+        }                                                       \
+                                                                \
+        d->L(3) = A;                                            \
+        d->L(2) = B;                                            \
+        d->L(1) = C;                                            \
+        d->L(0) = D;                                            \
+    }
+
+#define SHA1_F0(b, c, d) (((b) & (c)) ^ (~(b) & (d)))
+#define SHA1_F1(b, c, d) ((b) ^ (c) ^ (d))
+#define SHA1_F2(b, c, d) (((b) & (c)) ^ ((b) & (d)) ^ ((c) & (d)))
+
+SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f0, SHA1_F0, 0x5A827999)
+SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f1, SHA1_F1, 0x6ED9EBA1)
+SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f2, SHA1_F2, 0x8F1BBCDC)
+SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f3, SHA1_F1, 0xCA62C1D6)
+
+void helper_sha1nexte(Reg *d, Reg *a, Reg *b)
+{
+    d->L(3) = b->L(3) + rol32(a->L(3), 30);
+    d->L(2) = b->L(2);
+    d->L(1) = b->L(1);
+    d->L(0) = b->L(0);
+}
+
+void helper_sha1msg1(Reg *d, Reg *a, Reg *b)
+{
+    /* These could be overwritten by the first two assignments, save them.  */
+    uint32_t b3 = b->L(3);
+    uint32_t b2 = b->L(2);
+
+    d->L(3) = a->L(3) ^ a->L(1);
+    d->L(2) = a->L(2) ^ a->L(0);
+    d->L(1) = a->L(1) ^ b3;
+    d->L(0) = a->L(0) ^ b2;
+}
+
+void helper_sha1msg2(Reg *d, Reg *a, Reg *b)
+{
+    d->L(3) = rol32(a->L(3) ^ b->L(2), 1);
+    d->L(2) = rol32(a->L(2) ^ b->L(1), 1);
+    d->L(1) = rol32(a->L(1) ^ b->L(0), 1);
+    d->L(0) = rol32(a->L(0) ^ d->L(3), 1);
+}
+
+#define SHA256_CH(e, f, g)  (((e) & (f)) ^ (~(e) & (g)))
+#define SHA256_MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
+
+#define SHA256_RNDS0(w) (ror32((w), 2) ^ ror32((w), 13) ^ ror32((w), 22))
+#define SHA256_RNDS1(w) (ror32((w), 6) ^ ror32((w), 11) ^ ror32((w), 25))
+#define SHA256_MSGS0(w) (ror32((w), 7) ^ ror32((w), 18) ^ ((w) >> 3))
+#define SHA256_MSGS1(w) (ror32((w), 17) ^ ror32((w), 19) ^ ((w) >> 10))
+
+void helper_sha256rnds2(Reg *d, Reg *a, Reg *b, uint32_t wk0, uint32_t wk1)
+{
+    uint32_t t, AA, EE;
+
+    uint32_t A = b->L(3);
+    uint32_t B = b->L(2);
+    uint32_t C = a->L(3);
+    uint32_t D = a->L(2);
+    uint32_t E = b->L(1);
+    uint32_t F = b->L(0);
+    uint32_t G = a->L(1);
+    uint32_t H = a->L(0);
+
+    /* Even round */
+    t = SHA256_CH(E, F, G) + SHA256_RNDS1(E) + wk0 + H;
+    AA = t + SHA256_MAJ(A, B, C) + SHA256_RNDS0(A);
+    EE = t + D;
+
+    /* These will be B and F at the end of the odd round */
+    d->L(2) = AA;
+    d->L(0) = EE;
+
+    D = C, C = B, B = A, A = AA;
+    H = G, G = F, F = E, E = EE;
+
+    /* Odd round */
+    t = SHA256_CH(E, F, G) + SHA256_RNDS1(E) + wk1 + H;
+    AA = t + SHA256_MAJ(A, B, C) + SHA256_RNDS0(A);
+    EE = t + D;
+
+    d->L(3) = AA;
+    d->L(1) = EE;
+}
+
+void helper_sha256msg1(Reg *d, Reg *a, Reg *b)
+{
+    /* b->L(0) could be overwritten by the first assignment, save it.  */
+    uint32_t b0 = b->L(0);
+
+    d->L(0) = a->L(0) + SHA256_MSGS0(a->L(1));
+    d->L(1) = a->L(1) + SHA256_MSGS0(a->L(2));
+    d->L(2) = a->L(2) + SHA256_MSGS0(a->L(3));
+    d->L(3) = a->L(3) + SHA256_MSGS0(b0);
+}
+
+void helper_sha256msg2(Reg *d, Reg *a, Reg *b)
+{
+    /* Earlier assignments cannot overwrite any of the two operands. */
+    d->L(0) = a->L(0) + SHA256_MSGS1(b->L(2));
+    d->L(1) = a->L(1) + SHA256_MSGS1(b->L(3));
+    /* Yes, this reuses the previously computed values. */
+    d->L(2) = a->L(2) + SHA256_MSGS1(d->L(0));
+    d->L(3) = a->L(3) + SHA256_MSGS1(d->L(1));
+}
+#endif
+
 #undef SSE_HELPER_S
 
 #undef LANE_WIDTH
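Note: helper_sha1rnds4_* follow the SDM definition of SHA1RNDS4: four SHA-1 rounds with E taken as 0, because the source operand is expected to already hold W values with E folded in (that is SHA1NEXTE's job). Bits 1:0 of the immediate select the FIPS 180-4 round function and constant; rounds 60-79 reuse the parity function of rounds 20-39, which is why helper_sha1rnds4_f3 above is generated from SHA1_F1:

    #include <stdint.h>

    /* Round function selected by bits 1:0 of the SHA1RNDS4 immediate. */
    static uint32_t sha1_f(unsigned imm2, uint32_t b, uint32_t c, uint32_t d)
    {
        switch (imm2 & 3) {
        case 0:  return (b & c) ^ (~b & d);           /* K = 0x5A827999 */
        case 1:  return b ^ c ^ d;                    /* K = 0x6ED9EBA1 */
        case 2:  return (b & c) ^ (b & d) ^ (c & d);  /* K = 0x8F1BBCDC */
        default: return b ^ c ^ d;                    /* K = 0xCA62C1D6 */
        }
    }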
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index 7d76f15275..2bdbb1bba0 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -23,7 +23,11 @@
  * The decoder is mostly based on tables copied from the Intel SDM.  As
  * a result, most operand load and writeback is done entirely in common
  * table-driven code using the same operand type (X86_TYPE_*) and
- * size (X86_SIZE_*) codes used in the manual.
+ * size (X86_SIZE_*) codes used in the manual.  There are a few differences
+ * though.
+ *
+ * Vector operands
+ * ---------------
  *
  * The main difference is that the V, U and W types are extended to
  * cover MMX as well; if an instruction is like
@@ -43,6 +47,50 @@
  * There are a couple cases in which instructions (e.g. MOVD) write the
  * whole XMM or MM register but are established incorrectly in the manual
  * as "d" or "q". These have to be fixed for the decoder to work correctly.
+ *
+ * VEX exception classes
+ * ---------------------
+ *
+ * Speaking about imprecisions in the manual, the decoder treats all
+ * exception-class 4 instructions as having an optional VEX prefix, and
+ * all exception-class 6 instructions as having a mandatory VEX prefix.
+ * This is true except for a dozen instructions; these are in exception
+ * class 4 but do not ignore the VEX.W bit (which does not even exist
+ * without a VEX prefix).  These instructions are mostly listed in Intel's
+ * table 2-16, but with a few exceptions.
+ *
+ * The AMD manual has more precise subclasses for exceptions, and unlike Intel
+ * they list the VEX.W requirements in the exception classes as well (except
+ * when they don't).  AMD describes class 6 as "AVX Mixed Memory Argument"
+ * without defining what a mixed memory argument is, but still use 4 as the
+ * primary exception class... except when they don't.
+ *
+ * The summary is:
+ *                     Intel     AMD         VEX.W         note
+ * -------------------------------------------------------------------
+ * vpblendd            4         4J          0
+ * vpblendvb           4         4E-X        0             (*)
+ * vpbroadcastq        6         6D          0             (+)
+ * vpermd/vpermps      4         4H          0             (§)
+ * vpermq/vpermpd      4         4H-1        1             (§)
+ * vpermilpd/vpermilps 4         6E          0             (^)
+ * vpmaskmovd          6         4K          significant   (^)
+ * vpsllv              4         4K          significant
+ * vpsrav              4         4J          0
+ * vpsrlv              4         4K          significant
+ * vtestps/vtestpd     4         4G          0
+ *
+ *    (*) AMD lists VPBLENDVB as related to SSE4.1 PBLENDVB, which may
+ *        explain why it is considered exception class 4.  However,
+ *        Intel says that VEX-only instructions should be in class 6...
+ *
+ *    (+) Not found in Intel's table 2-16
+ *
+ *    (§) 4H and 4H-1 do not mention VEX.W requirements, which are
+ *        however present in the description of the instruction
+ *
+ *    (^) these are the two cases in which Intel and AMD disagree on the
+ *        primary exception class
  */
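Note: the VEX.W bit discussed above only exists in the three-byte (0xC4) form of the VEX prefix; the two-byte (0xC5) form has no W bit and decodes as W=0. A sketch of where a decoder finds it, assuming insn points at the prefix byte:

    #include <stdbool.h>
    #include <stdint.h>

    /* VEX.W is bit 7 of the third byte of a 0xC4 prefix;
     * the 0xC5 two-byte form behaves as W=0. */
    static bool vex_w(const uint8_t *insn)
    {
        return insn[0] == 0xc4 && ((insn[2] >> 7) & 1);
    }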
@@ -90,8 +138,6 @@
     X86_OP_ENTRY3(op, None, None, None, None, None, None, ## __VA_ARGS__)
 
 #define cpuid(feat) .cpuid = X86_FEAT_##feat,
-#define i64 .special = X86_SPECIAL_i64,
-#define o64 .special = X86_SPECIAL_o64,
 #define xchg .special = X86_SPECIAL_Locked,
 #define mmx .special = X86_SPECIAL_MMX,
 #define zext0 .special = X86_SPECIAL_ZExtOp0,
@@ -114,6 +160,9 @@
 #define vex12 .vex_class = 12,
 #define vex13 .vex_class = 13,
 
+#define chk(a) .check = X86_CHECK_##a,
+#define svm(a) .intercept = SVM_EXIT_##a,
+
 #define avx2_256 .vex_special = X86_VEX_AVX2_256,
 
 #define P_00          1
@@ -161,8 +210,8 @@ static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry,
     };
 
     static const X86OpEntry group15_mem[8] = {
-        [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5),
-        [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5),
+        [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5 chk(VEX128)),
+        [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5 chk(VEX128)),
     };
 
     uint8_t modrm = get_modrm(s, env);
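Note: the new chk() and svm() decorators reuse the token-pasting trick the table already relies on; each trailing macro expands to a designated initializer, so decorators can be chained in any order. A compilable miniature of the idea (struct and values reduced for illustration):

    /* "vex6 chk(W0)" expands to ".vex_class = 6, .check = X86_CHECK_W0," */
    typedef struct {
        unsigned vex_class;
        unsigned check;
    } Entry;

    #define X86_CHECK_W0 128
    #define vex6   .vex_class = 6,
    #define chk(a) .check = X86_CHECK_##a,

    static const Entry demo = { vex6 chk(W0) };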
@@ -337,11 +386,11 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
     [0x07] = X86_OP_ENTRY3(PHSUBSW,    V,x, H,x, W,x,  vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
 
     [0x10] = X86_OP_ENTRY2(PBLENDVB,   V,x,      W,x,  vex4 cpuid(SSE41) avx2_256 p_66),
-    [0x13] = X86_OP_ENTRY2(VCVTPH2PS,  V,x,      W,xh, vex11 cpuid(F16C) p_66),
+    [0x13] = X86_OP_ENTRY2(VCVTPH2PS,  V,x,      W,xh, vex11 chk(W0) cpuid(F16C) p_66),
     [0x14] = X86_OP_ENTRY2(BLENDVPS,   V,x,      W,x,  vex4 cpuid(SSE41) p_66),
     [0x15] = X86_OP_ENTRY2(BLENDVPD,   V,x,      W,x,  vex4 cpuid(SSE41) p_66),
     /* Listed incorrectly as type 4 */
-    [0x16] = X86_OP_ENTRY3(VPERMD,     V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66),
+    [0x16] = X86_OP_ENTRY3(VPERMD,     V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66), /* vpermps */
     [0x17] = X86_OP_ENTRY3(VPTEST,     None,None, V,x, W,x, vex4 cpuid(SSE41) p_66),
     /*
@@ -362,14 +411,14 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
     [0x33] = X86_OP_ENTRY3(VPMOVZXWD,  V,x, None,None, W,q, vex5 cpuid(SSE41) avx_movx avx2_256 p_66),
     [0x34] = X86_OP_ENTRY3(VPMOVZXWQ,  V,x, None,None, W,d, vex5 cpuid(SSE41) avx_movx avx2_256 p_66),
     [0x35] = X86_OP_ENTRY3(VPMOVZXDQ,  V,x, None,None, W,q, vex5 cpuid(SSE41) avx_movx avx2_256 p_66),
-    [0x36] = X86_OP_ENTRY3(VPERMD,     V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66),
+    [0x36] = X86_OP_ENTRY3(VPERMD,     V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66),
     [0x37] = X86_OP_ENTRY3(PCMPGTQ,    V,x, H,x, W,x, vex4 cpuid(SSE42) avx2_256 p_66),
 
     [0x40] = X86_OP_ENTRY3(PMULLD,     V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
     [0x41] = X86_OP_ENTRY3(VPHMINPOSUW, V,dq, None,None, W,dq, vex4 cpuid(SSE41) p_66),
     /* Listed incorrectly as type 4 */
     [0x45] = X86_OP_ENTRY3(VPSRLV,     V,x, H,x, W,x, vex6 cpuid(AVX2) p_66),
-    [0x46] = X86_OP_ENTRY3(VPSRAV,     V,x, H,x, W,x, vex6 cpuid(AVX2) p_66),
+    [0x46] = X86_OP_ENTRY3(VPSRAV,     V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX2) p_66),
     [0x47] = X86_OP_ENTRY3(VPSLLV,     V,x, H,x, W,x, vex6 cpuid(AVX2) p_66),
 
     [0x90] = X86_OP_ENTRY3(VPGATHERD,  V,x, H,x, M,d, vex12 cpuid(AVX2) p_66), /* vpgatherdd/q */
@@ -391,14 +440,15 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
     [0x09] = X86_OP_ENTRY3(PSIGNW,    V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
     [0x0a] = X86_OP_ENTRY3(PSIGND,    V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
     [0x0b] = X86_OP_ENTRY3(PMULHRSW,  V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
-    [0x0c] = X86_OP_ENTRY3(VPERMILPS, V,x, H,x, W,x, vex4 cpuid(AVX) p_00_66),
-    [0x0d] = X86_OP_ENTRY3(VPERMILPD, V,x, H,x, W,x, vex4 cpuid(AVX) p_66),
-    [0x0e] = X86_OP_ENTRY3(VTESTPS,   None,None, V,x, W,x, vex4 cpuid(AVX) p_66),
-    [0x0f] = X86_OP_ENTRY3(VTESTPD,   None,None, V,x, W,x, vex4 cpuid(AVX) p_66),
-
-    [0x18] = X86_OP_ENTRY3(VPBROADCASTD,   V,x,  None,None, W,d,  vex6 cpuid(AVX) p_66), /* vbroadcastss */
-    [0x19] = X86_OP_ENTRY3(VPBROADCASTQ,   V,qq, None,None, W,q,  vex6 cpuid(AVX) p_66), /* vbroadcastsd */
-    [0x1a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq, vex6 cpuid(AVX) p_66),
+    /* Listed incorrectly as type 4 */
+    [0x0c] = X86_OP_ENTRY3(VPERMILPS, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_00_66),
+    [0x0d] = X86_OP_ENTRY3(VPERMILPD, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66),
+    [0x0e] = X86_OP_ENTRY3(VTESTPS,   None,None, V,x, W,x, vex6 chk(W0) cpuid(AVX) p_66),
+    [0x0f] = X86_OP_ENTRY3(VTESTPD,   None,None, V,x, W,x, vex6 chk(W0) cpuid(AVX) p_66),
+
+    [0x18] = X86_OP_ENTRY3(VPBROADCASTD,   V,x,  None,None, W,d,  vex6 chk(W0) cpuid(AVX) p_66), /* vbroadcastss */
+    [0x19] = X86_OP_ENTRY3(VPBROADCASTQ,   V,qq, None,None, W,q,  vex6 chk(W0) cpuid(AVX) p_66), /* vbroadcastsd */
+    [0x1a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq, vex6 chk(W0) cpuid(AVX) p_66),
 
     [0x1c] = X86_OP_ENTRY3(PABSB,     V,x, None,None, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
     [0x1d] = X86_OP_ENTRY3(PABSW,     V,x, None,None, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
     [0x1e] = X86_OP_ENTRY3(PABSD,     V,x, None,None, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
@@ -407,11 +457,11 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
     [0x29] = X86_OP_ENTRY3(PCMPEQQ,    V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
     [0x2a] = X86_OP_ENTRY3(MOVDQ,      V,x, None,None, WM,x, vex1 cpuid(SSE41) avx2_256 p_66), /* movntdqa */
     [0x2b] = X86_OP_ENTRY3(VPACKUSDW,  V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
-    [0x2c] = X86_OP_ENTRY3(VMASKMOVPS, V,x, H,x, WM,x, vex6 cpuid(AVX) p_66),
-    [0x2d] = X86_OP_ENTRY3(VMASKMOVPD, V,x, H,x, WM,x, vex6 cpuid(AVX) p_66),
+    [0x2c] = X86_OP_ENTRY3(VMASKMOVPS, V,x, H,x, WM,x, vex6 chk(W0) cpuid(AVX) p_66),
+    [0x2d] = X86_OP_ENTRY3(VMASKMOVPD, V,x, H,x, WM,x, vex6 chk(W0) cpuid(AVX) p_66),
     /* Incorrectly listed as Mx,Hx,Vx in the manual */
-    [0x2e] = X86_OP_ENTRY3(VMASKMOVPS_st, M,x, V,x, H,x, vex6 cpuid(AVX) p_66),
-    [0x2f] = X86_OP_ENTRY3(VMASKMOVPD_st, M,x, V,x, H,x, vex6 cpuid(AVX) p_66),
+    [0x2e] = X86_OP_ENTRY3(VMASKMOVPS_st, M,x, V,x, H,x, vex6 chk(W0) cpuid(AVX) p_66),
+    [0x2f] = X86_OP_ENTRY3(VMASKMOVPD_st, M,x, V,x, H,x, vex6 chk(W0) cpuid(AVX) p_66),
 
     [0x38] = X86_OP_ENTRY3(PMINSB,     V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
     [0x39] = X86_OP_ENTRY3(PMINSD,     V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
@@ -422,12 +472,13 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
     [0x3e] = X86_OP_ENTRY3(PMAXUW,     V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
     [0x3f] = X86_OP_ENTRY3(PMAXUD,     V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
 
-    [0x58] = X86_OP_ENTRY3(VPBROADCASTD,   V,x,  None,None, W,d,  vex6 cpuid(AVX2) p_66),
-    [0x59] = X86_OP_ENTRY3(VPBROADCASTQ,   V,x,  None,None, W,q,  vex6 cpuid(AVX2) p_66),
-    [0x5a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq, vex6 cpuid(AVX2) p_66),
+    /* VPBROADCASTQ not listed as W0 in table 2-16 */
+    [0x58] = X86_OP_ENTRY3(VPBROADCASTD,   V,x,  None,None, W,d,  vex6 chk(W0) cpuid(AVX2) p_66),
+    [0x59] = X86_OP_ENTRY3(VPBROADCASTQ,   V,x,  None,None, W,q,  vex6 chk(W0) cpuid(AVX2) p_66),
+    [0x5a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq, vex6 chk(W0) cpuid(AVX2) p_66),
 
-    [0x78] = X86_OP_ENTRY3(VPBROADCASTB,   V,x,  None,None, W,b,  vex6 cpuid(AVX2) p_66),
-    [0x79] = X86_OP_ENTRY3(VPBROADCASTW,   V,x,  None,None, W,w,  vex6 cpuid(AVX2) p_66),
+    [0x78] = X86_OP_ENTRY3(VPBROADCASTB,   V,x,  None,None, W,b,  vex6 chk(W0) cpuid(AVX2) p_66),
+    [0x79] = X86_OP_ENTRY3(VPBROADCASTW,   V,x,  None,None, W,w,  vex6 chk(W0) cpuid(AVX2) p_66),
 
     [0x8c] = X86_OP_ENTRY3(VPMASKMOV,    V,x,  H,x, WM,x, vex6 cpuid(AVX2) p_66),
     [0x8e] = X86_OP_ENTRY3(VPMASKMOV_st, M,x,  V,x, H,x,  vex6 cpuid(AVX2) p_66),
@@ -460,6 +511,13 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
     [0xbe] = X86_OP_ENTRY3(VFNMSUB231Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66),
     [0xbf] = X86_OP_ENTRY3(VFNMSUB231Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66),
 
+    [0xc8] = X86_OP_ENTRY2(SHA1NEXTE,   V,dq, W,dq, cpuid(SHA_NI)),
+    [0xc9] = X86_OP_ENTRY2(SHA1MSG1,    V,dq, W,dq, cpuid(SHA_NI)),
+    [0xca] = X86_OP_ENTRY2(SHA1MSG2,    V,dq, W,dq, cpuid(SHA_NI)),
+    [0xcb] = X86_OP_ENTRY2(SHA256RNDS2, V,dq, W,dq, cpuid(SHA_NI)),
+    [0xcc] = X86_OP_ENTRY2(SHA256MSG1,  V,dq, W,dq, cpuid(SHA_NI)),
+    [0xcd] = X86_OP_ENTRY2(SHA256MSG2,  V,dq, W,dq, cpuid(SHA_NI)),
+
     [0xdb] = X86_OP_ENTRY3(VAESIMC,     V,dq, None,None, W,dq, vex4 cpuid(AES) p_66),
     [0xdc] = X86_OP_ENTRY3(VAESENC,     V,x,  H,x,       W,x,  vex4 cpuid(AES) p_66),
     [0xdd] = X86_OP_ENTRY3(VAESENCLAST, V,x,  H,x,       W,x,  vex4 cpuid(AES) p_66),
@@ -554,18 +612,18 @@ static const X86OpEntry opcodes_0F3A[256] = {
      * Also the "qq" instructions are sometimes omitted by Table 2-17, but are VEX256
      * only.
      */
-    [0x00] = X86_OP_ENTRY3(VPERMQ,      V,qq, W,qq, I,b,  vex6 cpuid(AVX2) p_66),
-    [0x01] = X86_OP_ENTRY3(VPERMQ,      V,qq, W,qq, I,b,  vex6 cpuid(AVX2) p_66), /* VPERMPD */
-    [0x02] = X86_OP_ENTRY4(VBLENDPS,    V,x,  H,x,  W,x,  vex6 cpuid(AVX2) p_66), /* VPBLENDD */
-    [0x04] = X86_OP_ENTRY3(VPERMILPS_i, V,x,  W,x,  I,b,  vex6 cpuid(AVX) p_66),
-    [0x05] = X86_OP_ENTRY3(VPERMILPD_i, V,x,  W,x,  I,b,  vex6 cpuid(AVX) p_66),
-    [0x06] = X86_OP_ENTRY4(VPERM2x128,  V,qq, H,qq, W,qq, vex6 cpuid(AVX) p_66),
+    [0x00] = X86_OP_ENTRY3(VPERMQ,      V,qq, W,qq, I,b,  vex6 chk(W1) cpuid(AVX2) p_66),
+    [0x01] = X86_OP_ENTRY3(VPERMQ,      V,qq, W,qq, I,b,  vex6 chk(W1) cpuid(AVX2) p_66), /* VPERMPD */
+    [0x02] = X86_OP_ENTRY4(VBLENDPS,    V,x,  H,x,  W,x,  vex6 chk(W0) cpuid(AVX2) p_66), /* VPBLENDD */
+    [0x04] = X86_OP_ENTRY3(VPERMILPS_i, V,x,  W,x,  I,b,  vex6 chk(W0) cpuid(AVX) p_66),
+    [0x05] = X86_OP_ENTRY3(VPERMILPD_i, V,x,  W,x,  I,b,  vex6 chk(W0) cpuid(AVX) p_66),
+    [0x06] = X86_OP_ENTRY4(VPERM2x128,  V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX) p_66),
 
     [0x14] = X86_OP_ENTRY3(PEXTRB,     E,b,  V,dq, I,b,  vex5 cpuid(SSE41) zext0 p_66),
     [0x15] = X86_OP_ENTRY3(PEXTRW,     E,w,  V,dq, I,b,  vex5 cpuid(SSE41) zext0 p_66),
     [0x16] = X86_OP_ENTRY3(PEXTR,      E,y,  V,dq, I,b,  vex5 cpuid(SSE41) p_66),
     [0x17] = X86_OP_ENTRY3(VEXTRACTPS, E,d,  V,dq, I,b,  vex5 cpuid(SSE41) p_66),
-    [0x1d] = X86_OP_ENTRY3(VCVTPS2PH,  W,xh, V,x,  I,b,  vex11 cpuid(F16C) p_66),
+    [0x1d] = X86_OP_ENTRY3(VCVTPS2PH,  W,xh, V,x,  I,b,  vex11 chk(W0) cpuid(F16C) p_66),
 
     [0x20] = X86_OP_ENTRY4(PINSRB,     V,dq, H,dq, E,b,  vex5 cpuid(SSE41) zext2 p_66),
     [0x21] = X86_OP_GROUP0(VINSERTPS),
@@ -575,7 +633,7 @@ static const X86OpEntry opcodes_0F3A[256] = {
     [0x41] = X86_OP_ENTRY4(VDDPD,      V,dq, H,dq, W,dq, vex2 cpuid(SSE41) p_66),
     [0x42] = X86_OP_ENTRY4(VMPSADBW,   V,x,  H,x,  W,x,  vex2 cpuid(SSE41) avx2_256 p_66),
     [0x44] = X86_OP_ENTRY4(PCLMULQDQ,  V,dq, H,dq, W,dq, vex4 cpuid(PCLMULQDQ) p_66),
-    [0x46] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66),
+    [0x46] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66),
 
     [0x60] = X86_OP_ENTRY4(PCMPESTRM,  None,None, V,dq, W,dq, vex4_unal cpuid(SSE42) p_66),
     [0x61] = X86_OP_ENTRY4(PCMPESTRI,  None,None, V,dq, W,dq, vex4_unal cpuid(SSE42) p_66),
@@ -598,16 +656,18 @@ static const X86OpEntry opcodes_0F3A[256] = {
     [0x0e] = X86_OP_ENTRY4(VPBLENDW,   V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
     [0x0f] = X86_OP_ENTRY4(PALIGNR,    V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
 
-    [0x18] = X86_OP_ENTRY4(VINSERTx128,  V,qq, H,qq, W,qq, vex6 cpuid(AVX) p_66),
-    [0x19] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b,  vex6 cpuid(AVX) p_66),
+    [0x18] = X86_OP_ENTRY4(VINSERTx128,  V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX) p_66),
+    [0x19] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b,  vex6 chk(W0) cpuid(AVX) p_66),
 
-    [0x38] = X86_OP_ENTRY4(VINSERTx128,  V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66),
-    [0x39] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b,  vex6 cpuid(AVX2) p_66),
+    [0x38] = X86_OP_ENTRY4(VINSERTx128,  V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66),
+    [0x39] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b,  vex6 chk(W0) cpuid(AVX2) p_66),
 
     /* Listed incorrectly as type 4 */
-    [0x4a] = X86_OP_ENTRY4(VBLENDVPS,  V,x, H,x, W,x, vex6 cpuid(AVX) p_66),
-    [0x4b] = X86_OP_ENTRY4(VBLENDVPD,  V,x, H,x, W,x, vex6 cpuid(AVX) p_66),
-    [0x4c] = X86_OP_ENTRY4(VPBLENDVB,  V,x, H,x, W,x, vex6 cpuid(AVX) p_66 avx2_256),
+    [0x4a] = X86_OP_ENTRY4(VBLENDVPS,  V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66),
+    [0x4b] = X86_OP_ENTRY4(VBLENDVPD,  V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66),
+    [0x4c] = X86_OP_ENTRY4(VPBLENDVB,  V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66 avx2_256),
+
+    [0xcc] = X86_OP_ENTRY3(SHA1RNDS4,  V,dq, W,dq, I,b, cpuid(SHA_NI)),
 
     [0xdf] = X86_OP_ENTRY3(VAESKEYGEN, V,dq, W,dq, I,b, vex4 cpuid(AES) p_66),
+ */ + if (decode.e.check & X86_CHECK_cpl0) { + if (CPL(s) != 0) { + goto gp_fault; + } + } + if (decode.e.intercept && unlikely(GUEST(s))) { + gen_helper_svm_check_intercept(tcg_env, + tcg_constant_i32(decode.e.intercept)); + } + if (decode.e.check) { + if ((decode.e.check & X86_CHECK_vm86_iopl) && VM86(s)) { + if (IOPL(s) < 3) { + goto gp_fault; + } + } else if (decode.e.check & X86_CHECK_cpl_iopl) { + if (IOPL(s) < CPL(s)) { + goto gp_fault; + } + } + } + if (decode.e.special == X86_SPECIAL_MMX && !(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA))) { gen_helper_enter_mmx(tcg_env); @@ -1846,6 +1957,9 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) gen_writeback(s, &decode, 0, s->T0); } return; + gp_fault: + gen_exception_gpf(s); + return; illegal_op: gen_illegal_opcode(s); return; diff --git a/target/i386/tcg/decode-new.h b/target/i386/tcg/decode-new.h index a542ec1681..e6c904a319 100644 --- a/target/i386/tcg/decode-new.h +++ b/target/i386/tcg/decode-new.h @@ -108,6 +108,7 @@ typedef enum X86CPUIDFeature { X86_FEAT_FMA, X86_FEAT_MOVBE, X86_FEAT_PCLMULQDQ, + X86_FEAT_SHA_NI, X86_FEAT_SSE, X86_FEAT_SSE2, X86_FEAT_SSE3, @@ -130,15 +131,36 @@ typedef enum X86OpUnit { X86_OP_MMX, /* address in either s->ptrX or s->A0 depending on has_ea */ } X86OpUnit; +typedef enum X86InsnCheck { + /* Illegal or exclusive to 64-bit mode */ + X86_CHECK_i64 = 1, + X86_CHECK_o64 = 2, + + /* Fault outside protected mode */ + X86_CHECK_prot = 4, + + /* Privileged instruction checks */ + X86_CHECK_cpl0 = 8, + X86_CHECK_vm86_iopl = 16, + X86_CHECK_cpl_iopl = 32, + X86_CHECK_iopl = X86_CHECK_cpl_iopl | X86_CHECK_vm86_iopl, + + /* Fault if VEX.L=1 */ + X86_CHECK_VEX128 = 64, + + /* Fault if VEX.W=1 */ + X86_CHECK_W0 = 128, + + /* Fault if VEX.W=0 */ + X86_CHECK_W1 = 256, +} X86InsnCheck; + typedef enum X86InsnSpecial { X86_SPECIAL_None, /* Always locked if it has a memory operand (XCHG) */ X86_SPECIAL_Locked, - /* Fault outside protected mode */ - X86_SPECIAL_ProtMode, - /* * Register operand 0/2 is zero extended to 32 bits. Rd/Mb or Rd/Mw * in the manual. @@ -157,10 +179,6 @@ typedef enum X86InsnSpecial { * become P/P/Q/N, and size "x" becomes "q". 
diff --git a/target/i386/tcg/decode-new.h b/target/i386/tcg/decode-new.h
index a542ec1681..e6c904a319 100644
--- a/target/i386/tcg/decode-new.h
+++ b/target/i386/tcg/decode-new.h
@@ -108,6 +108,7 @@ typedef enum X86CPUIDFeature {
     X86_FEAT_FMA,
     X86_FEAT_MOVBE,
     X86_FEAT_PCLMULQDQ,
+    X86_FEAT_SHA_NI,
     X86_FEAT_SSE,
     X86_FEAT_SSE2,
     X86_FEAT_SSE3,
@@ -130,15 +131,36 @@ typedef enum X86OpUnit {
     X86_OP_MMX,     /* address in either s->ptrX or s->A0 depending on has_ea */
 } X86OpUnit;
 
+typedef enum X86InsnCheck {
+    /* Illegal or exclusive to 64-bit mode */
+    X86_CHECK_i64 = 1,
+    X86_CHECK_o64 = 2,
+
+    /* Fault outside protected mode */
+    X86_CHECK_prot = 4,
+
+    /* Privileged instruction checks */
+    X86_CHECK_cpl0 = 8,
+    X86_CHECK_vm86_iopl = 16,
+    X86_CHECK_cpl_iopl = 32,
+    X86_CHECK_iopl = X86_CHECK_cpl_iopl | X86_CHECK_vm86_iopl,
+
+    /* Fault if VEX.L=1 */
+    X86_CHECK_VEX128 = 64,
+
+    /* Fault if VEX.W=1 */
+    X86_CHECK_W0 = 128,
+
+    /* Fault if VEX.W=0 */
+    X86_CHECK_W1 = 256,
+} X86InsnCheck;
+
 typedef enum X86InsnSpecial {
     X86_SPECIAL_None,
 
     /* Always locked if it has a memory operand (XCHG) */
     X86_SPECIAL_Locked,
 
-    /* Fault outside protected mode */
-    X86_SPECIAL_ProtMode,
-
     /*
      * Register operand 0/2 is zero extended to 32 bits.  Rd/Mb or Rd/Mw
      * in the manual.
@@ -157,10 +179,6 @@ typedef enum X86InsnSpecial {
      * become P/P/Q/N, and size "x" becomes "q".
      */
     X86_SPECIAL_MMX,
-
-    /* Illegal or exclusive to 64-bit mode */
-    X86_SPECIAL_i64,
-    X86_SPECIAL_o64,
 } X86InsnSpecial;
 
 /*
@@ -223,7 +241,9 @@ struct X86OpEntry {
     X86CPUIDFeature cpuid:8;
     unsigned     vex_class:8;
     X86VEXSpecial vex_special:8;
-    uint16_t     valid_prefix:16;
+    unsigned     valid_prefix:16;
+    unsigned     check:16;
+    unsigned     intercept:8;
     bool         is_decode:1;
 };
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index 88793ba988..82da5488d4 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -1236,10 +1236,6 @@ static void gen_INSERTQ_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *dec
 
 static void gen_LDMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
 {
-    if (s->vex_l) {
-        gen_illegal_opcode(s);
-        return;
-    }
     tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T1);
     gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
 }
@@ -1800,6 +1796,60 @@ static void gen_SARX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
     tcg_gen_sar_tl(s->T0, s->T0, s->T1);
 }
 
+static void gen_SHA1NEXTE(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+    gen_helper_sha1nexte(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA1MSG1(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+    gen_helper_sha1msg1(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA1MSG2(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+    gen_helper_sha1msg2(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA1RNDS4(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+    switch(decode->immediate & 3) {
+    case 0:
+        gen_helper_sha1rnds4_f0(OP_PTR0, OP_PTR0, OP_PTR1);
+        break;
+    case 1:
+        gen_helper_sha1rnds4_f1(OP_PTR0, OP_PTR0, OP_PTR1);
+        break;
+    case 2:
+        gen_helper_sha1rnds4_f2(OP_PTR0, OP_PTR0, OP_PTR1);
+        break;
+    case 3:
+        gen_helper_sha1rnds4_f3(OP_PTR0, OP_PTR0, OP_PTR1);
+        break;
+    }
+}
+
+static void gen_SHA256MSG1(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+    gen_helper_sha256msg1(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA256MSG2(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+    gen_helper_sha256msg2(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA256RNDS2(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+    TCGv_i32 wk0 = tcg_temp_new_i32();
+    TCGv_i32 wk1 = tcg_temp_new_i32();
+
+    tcg_gen_ld_i32(wk0, tcg_env, ZMM_OFFSET(0) + offsetof(ZMMReg, ZMM_L(0)));
+    tcg_gen_ld_i32(wk1, tcg_env, ZMM_OFFSET(0) + offsetof(ZMMReg, ZMM_L(1)));
+
+    gen_helper_sha256rnds2(OP_PTR0, OP_PTR1, OP_PTR2, wk0, wk1);
+}
+
 static void gen_SHLX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
 {
     MemOp ot = decode->op[0].ot;
@@ -1832,10 +1882,6 @@ static void gen_VAESKEYGEN(DisasContext *s, CPUX86State *env, X86DecodedInsn *de
 
 static void gen_STMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
 {
-    if (s->vex_l) {
-        gen_illegal_opcode(s);
-        return;
-    }
     gen_helper_update_mxcsr(tcg_env);
     tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
 }
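Note: gen_SHA256RNDS2 handles the instruction's implicit third source operand. XMM0 carries the two W+K values consumed by the even and odd round, which is why the two tcg_gen_ld_i32 calls above read dwords 0 and 1 of register 0. The operand flow in plain C, assuming the helper semantics from ops_sse.h:

    #include <stdint.h>

    struct xmm { uint32_t l[4]; };

    /* SHA256RNDS2 dst, src uses XMM0 implicitly: its low two dwords
     * are W[i]+K[i] for the even round and W[i+1]+K[i+1] for the
     * odd round performed by the helper. */
    static void sha256rnds2_wk(const struct xmm *xmm0,
                               uint32_t *wk0, uint32_t *wk1)
    {
        *wk0 = xmm0->l[0];
        *wk1 = xmm0->l[1];
    }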
diff --git a/target/i386/tcg/ops_sse_header.h.inc b/target/i386/tcg/ops_sse_header.h.inc
index 8a7b2f4e2f..d92c6faf6d 100644
--- a/target/i386/tcg/ops_sse_header.h.inc
+++ b/target/i386/tcg/ops_sse_header.h.inc
@@ -399,6 +399,20 @@ DEF_HELPER_3(vpermq_ymm, void, Reg, Reg, i32)
 #endif
 #endif
 
+/* SHA helpers */
+#if SHIFT == 1
+DEF_HELPER_3(sha1rnds4_f0, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1rnds4_f1, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1rnds4_f2, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1rnds4_f3, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1nexte, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1msg1, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1msg2, void, Reg, Reg, Reg)
+DEF_HELPER_5(sha256rnds2, void, Reg, Reg, Reg, i32, i32)
+DEF_HELPER_3(sha256msg1, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha256msg2, void, Reg, Reg, Reg)
+#endif
+
 #undef SHIFT
 #undef Reg
 #undef SUFFIX
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index 090d617627..26e68c7ab4 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -1420,7 +1420,7 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
         exit(1);
     }
 
-    kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
+    kvm_msi_via_irqfd_allowed = true;
 }
 
 static void kvm_cpu_instance_init(CPUState *cs)