Diffstat (limited to 'target/i386/cpu.h')
-rw-r--r--   target/i386/cpu.h   78
1 file changed, 65 insertions(+), 13 deletions(-)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 74886d1580..59959b8b7a 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -24,6 +24,7 @@
 #include "cpu-qom.h"
 #include "kvm/hyperv-proto.h"
 #include "exec/cpu-defs.h"
+#include "exec/memop.h"
 #include "hw/i386/topology.h"
 #include "qapi/qapi-types-common.h"
 #include "qemu/cpu-float.h"
@@ -634,6 +635,8 @@ typedef enum FeatureWord {
     FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
     FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
     FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */
+    FEAT_8000_0021_EBX, /* CPUID[8000_0021].EBX */
+    FEAT_8000_0022_EAX, /* CPUID[8000_0022].EAX */
     FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
     FEAT_KVM,           /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
     FEAT_KVM_HINTS,     /* CPUID[4000_0001].EDX */
@@ -662,6 +665,7 @@ typedef enum FeatureWord {
     FEAT_XSAVE_XSS_HI,  /* CPUID[EAX=0xd,ECX=1].EDX */
     FEAT_7_1_EDX,       /* CPUID[EAX=7,ECX=1].EDX */
     FEAT_7_2_EDX,       /* CPUID[EAX=7,ECX=2].EDX */
+    FEAT_24_0_EBX,      /* CPUID[EAX=0x24,ECX=0].EBX */
     FEATURE_WORDS,
 } FeatureWord;
 
@@ -972,6 +976,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
 #define CPUID_7_1_EDX_AMX_COMPLEX       (1U << 8)
 /* PREFETCHIT0/1 Instructions */
 #define CPUID_7_1_EDX_PREFETCHITI       (1U << 14)
+/* Support for Advanced Vector Extensions 10 */
+#define CPUID_7_1_EDX_AVX10             (1U << 19)
 /* Flexible return and event delivery (FRED) */
 #define CPUID_7_1_EAX_FRED              (1U << 17)
 /* Load into IA32_KERNEL_GS_BASE (LKGS) */
@@ -988,6 +994,17 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
 /* Packets which contain IP payload have LIP values */
 #define CPUID_14_0_ECX_LIP              (1U << 31)
 
+/* AVX10 128-bit vector support is present */
+#define CPUID_24_0_EBX_AVX10_128        (1U << 16)
+/* AVX10 256-bit vector support is present */
+#define CPUID_24_0_EBX_AVX10_256        (1U << 17)
+/* AVX10 512-bit vector support is present */
+#define CPUID_24_0_EBX_AVX10_512        (1U << 18)
+/* AVX10 vector length support mask */
+#define CPUID_24_0_EBX_AVX10_VL_MASK    (CPUID_24_0_EBX_AVX10_128 | \
+                                         CPUID_24_0_EBX_AVX10_256 | \
+                                         CPUID_24_0_EBX_AVX10_512)
+
 /* RAS Features */
 #define CPUID_8000_0007_EBX_OVERFLOW_RECOV    (1U << 0)
 #define CPUID_8000_0007_EBX_SUCCOR      (1U << 1)
@@ -1014,13 +1031,32 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
 #define CPUID_8000_0008_EBX_AMD_PSFD    (1U << 28)
 
 /* Processor ignores nested data breakpoints */
-#define CPUID_8000_0021_EAX_No_NESTED_DATA_BP    (1U << 0)
+#define CPUID_8000_0021_EAX_NO_NESTED_DATA_BP           (1U << 0)
 /* LFENCE is always serializing */
 #define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING    (1U << 2)
 /* Null Selector Clears Base */
-#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE    (1U << 6)
+#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE           (1U << 6)
 /* Automatic IBRS */
-#define CPUID_8000_0021_EAX_AUTO_IBRS   (1U << 8)
+#define CPUID_8000_0021_EAX_AUTO_IBRS                   (1U << 8)
+/* Enhanced Return Address Predictor Security */
+#define CPUID_8000_0021_EAX_ERAPS                       (1U << 24)
+/* Selective Branch Predictor Barrier */
+#define CPUID_8000_0021_EAX_SBPB                        (1U << 27)
+/* IBPB includes branch type prediction flushing */
+#define CPUID_8000_0021_EAX_IBPB_BRTYPE                 (1U << 28)
+/* Not vulnerable to Speculative Return Stack Overflow */
+#define CPUID_8000_0021_EAX_SRSO_NO                     (1U << 29)
+/* Not vulnerable to SRSO at the user-kernel boundary */
+#define CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO         (1U << 30)
+
+/*
+ * Return Address Predictor size. RapSize x 8 is the minimum number of
+ * CALL instructions software needs to execute to flush the RAP.
+ */
+#define CPUID_8000_0021_EBX_RAPSIZE                     (8U << 16)
+
+/* Performance Monitoring Version 2 */
+#define CPUID_8000_0022_EAX_PERFMON_V2                  (1U << 0)
 
 #define CPUID_XSAVE_XSAVEOPT   (1U << 0)
 #define CPUID_XSAVE_XSAVEC     (1U << 1)
@@ -1278,14 +1314,14 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
  * are only needed for conditional branches.
  */
 typedef enum {
-    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
-    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */
-    CC_OP_ADCX,    /* CC_DST = C, CC_SRC = rest. */
-    CC_OP_ADOX,    /* CC_SRC2 = O, CC_SRC = rest. */
-    CC_OP_ADCOX,   /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
-    CC_OP_CLR,     /* Z and P set, all other flags clear. */
-
-    CC_OP_MULB,    /* modify all flags, C, O = (CC_SRC != 0) */
+    CC_OP_EFLAGS = 0,  /* all cc are explicitly computed, CC_SRC = flags */
+    CC_OP_ADCX = 1,    /* CC_DST = C, CC_SRC = rest. */
+    CC_OP_ADOX = 2,    /* CC_SRC2 = O, CC_SRC = rest. */
+    CC_OP_ADCOX = 3,   /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
+
+    /* Low 2 bits = MemOp constant for the size */
+#define CC_OP_FIRST_BWLQ CC_OP_MULB
+    CC_OP_MULB = 4,    /* modify all flags, C, O = (CC_SRC != 0) */
     CC_OP_MULW,
     CC_OP_MULL,
     CC_OP_MULQ,
@@ -1355,10 +1391,24 @@ typedef enum {
     CC_OP_POPCNTL__,
     CC_OP_POPCNTQ__,
     CC_OP_POPCNT = sizeof(target_ulong) == 8 ? CC_OP_POPCNTQ__ : CC_OP_POPCNTL__,
+#define CC_OP_LAST_BWLQ CC_OP_POPCNTQ__
 
-    CC_OP_NB,
+    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
 } CCOp;
-QEMU_BUILD_BUG_ON(CC_OP_NB >= 128);
+
+/* See X86DecodedInsn.cc_op, using int8_t. */
+QEMU_BUILD_BUG_ON(CC_OP_DYNAMIC > INT8_MAX);
+
+static inline MemOp cc_op_size(CCOp op)
+{
+    MemOp size = op & 3;
+
+    QEMU_BUILD_BUG_ON(CC_OP_FIRST_BWLQ & 3);
+    assert(op >= CC_OP_FIRST_BWLQ && op <= CC_OP_LAST_BWLQ);
+    assert(size <= MO_TL);
+
+    return size;
+}
 
 typedef struct SegmentCache {
     uint32_t selector;
@@ -1918,6 +1968,8 @@ typedef struct CPUArchState {
     uint32_t cpuid_vendor3;
     uint32_t cpuid_version;
     FeatureWordArray features;
+    /* AVX10 version */
+    uint8_t avx10_version;
     /* Features that were explicitly enabled/disabled */
     FeatureWordArray user_features;
     uint32_t cpuid_model[12];
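
The new FEAT_24_0_EBX feature word, the CPUID_7_1_EDX_AVX10 bit, and the CPUID_24_0_EBX_AVX10_* vector-length bits mirror the CPUID layout a guest sees: leaf 7 subleaf 1 EDX bit 19 enumerates AVX10, and leaf 0x24 subleaf 0 EBX reports the AVX10 version in bits 7:0 (which is what the new avx10_version field models) and the supported vector lengths in bits 18:16. A standalone sketch, not QEMU code, of how a guest could probe the same bits; __get_cpuid_count() is the GCC/Clang <cpuid.h> helper and the bit positions are taken from the defines in the diff:

/* Standalone sketch, not QEMU code: probe the AVX10 bits described above. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* CPUID.(EAX=7,ECX=1):EDX[19] advertises AVX10 (CPUID_7_1_EDX_AVX10). */
    if (!__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx) ||
        !(edx & (1U << 19))) {
        printf("AVX10 not enumerated\n");
        return 0;
    }

    /* CPUID.(EAX=0x24,ECX=0):EBX - version in bits 7:0, lengths in 18:16. */
    if (!__get_cpuid_count(0x24, 0, &eax, &ebx, &ecx, &edx)) {
        return 0;
    }
    printf("AVX10 version %u, 128-bit:%d 256-bit:%d 512-bit:%d\n",
           ebx & 0xff,
           !!(ebx & (1U << 16)),   /* CPUID_24_0_EBX_AVX10_128 */
           !!(ebx & (1U << 17)),   /* CPUID_24_0_EBX_AVX10_256 */
           !!(ebx & (1U << 18)));  /* CPUID_24_0_EBX_AVX10_512 */
    return 0;
}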
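The CPUID_8000_0021_* additions can be read from a guest in the same way. In the sketch below (again not QEMU code) the EAX bit numbers come straight from the defines above; treating EBX bits 23:16 as the RapSize field is an assumption based on the (8U << 16) encoding of CPUID_8000_0021_EBX_RAPSIZE and the comment that RapSize x 8 CALLs flush the Return Address Predictor:

/* Standalone sketch, not QEMU code: read CPUID leaf 0x80000021. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(0x80000021, &eax, &ebx, &ecx, &edx)) {
        printf("CPUID leaf 0x80000021 not available\n");
        return 0;
    }

    printf("ERAPS:%d SBPB:%d IBPB_BRTYPE:%d SRSO_NO:%d SRSO_USER_KERNEL_NO:%d\n",
           !!(eax & (1U << 24)), !!(eax & (1U << 27)), !!(eax & (1U << 28)),
           !!(eax & (1U << 29)), !!(eax & (1U << 30)));

    if (eax & (1U << 24)) {                      /* ERAPS present */
        /* Assumption: RapSize lives in EBX[23:16], matching (8U << 16). */
        unsigned int rap_size = (ebx >> 16) & 0xff;
        printf("RapSize %u -> at least %u CALLs to flush the RAP\n",
               rap_size, rap_size * 8);
    }
    return 0;
}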
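The CCOp reordering is what makes the new cc_op_size() helper work: every B/W/L/Q group now starts at a multiple of four, so the low two bits of an enumerator equal the MemOp size constant (MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3) and "op & 3" recovers the operand size without a lookup table. A minimal standalone illustration of the same trick, with toy names rather than the real enum:

/* Standalone illustration, not QEMU code: low 2 bits encode the size. */
#include <assert.h>
#include <stdio.h>

typedef enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3 } MemOp;

typedef enum {
    OP_EFLAGS = 0,
    OP_ADCX   = 1,
    OP_ADOX   = 2,
    OP_ADCOX  = 3,

    OP_MULB = 4,   /* group base is a multiple of 4: low bits 00 == MO_8 */
    OP_MULW,       /* 5: low bits 01 == MO_16 */
    OP_MULL,       /* 6: low bits 10 == MO_32 */
    OP_MULQ,       /* 7: low bits 11 == MO_64 */

    OP_ADDB = 8,   /* next group starts on the next multiple of 4 */
    OP_ADDW,
    OP_ADDL,
    OP_ADDQ,
} ToyCCOp;

static MemOp toy_cc_op_size(ToyCCOp op)
{
    assert(op >= OP_MULB && op <= OP_ADDQ);  /* only sized groups encode a size */
    return (MemOp)(op & 3);
}

int main(void)
{
    printf("OP_MULW -> size %d (MO_16 == %d)\n", toy_cc_op_size(OP_MULW), MO_16);
    printf("OP_ADDQ -> size %d (MO_64 == %d)\n", toy_cc_op_size(OP_ADDQ), MO_64);
    return 0;
}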