Diffstat (limited to 'target'): 31 files changed, 952 insertions(+), 252 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c index d1395f9b73..c83c901a86 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -8305,6 +8305,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa, &txattrs, &s2prot, &s2size, fi, NULL); if (ret) { + assert(fi->type != ARMFault_None); fi->s2addr = addr; fi->stage2 = true; fi->s1ptw = true; @@ -8328,7 +8329,9 @@ static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; MemTxAttrs attrs = {}; + MemTxResult result = MEMTX_OK; AddressSpace *as; + uint32_t data; attrs.secure = is_secure; as = arm_addressspace(cs, attrs); @@ -8337,10 +8340,16 @@ static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, return 0; } if (regime_translation_big_endian(env, mmu_idx)) { - return address_space_ldl_be(as, addr, attrs, NULL); + data = address_space_ldl_be(as, addr, attrs, &result); } else { - return address_space_ldl_le(as, addr, attrs, NULL); + data = address_space_ldl_le(as, addr, attrs, &result); } + if (result == MEMTX_OK) { + return data; + } + fi->type = ARMFault_SyncExternalOnWalk; + fi->ea = arm_extabort_type(result); + return 0; } static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, @@ -8349,7 +8358,9 @@ static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; MemTxAttrs attrs = {}; + MemTxResult result = MEMTX_OK; AddressSpace *as; + uint32_t data; attrs.secure = is_secure; as = arm_addressspace(cs, attrs); @@ -8358,10 +8369,16 @@ static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, return 0; } if (regime_translation_big_endian(env, mmu_idx)) { - return address_space_ldq_be(as, addr, attrs, NULL); + data = address_space_ldq_be(as, addr, attrs, &result); } else { - return address_space_ldq_le(as, addr, attrs, NULL); + data = address_space_ldq_le(as, addr, attrs, &result); + } + if (result == MEMTX_OK) { + return data; } + fi->type = ARMFault_SyncExternalOnWalk; + fi->ea = arm_extabort_type(result); + return 0; } static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, @@ -8390,6 +8407,9 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, } desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), mmu_idx, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } type = (desc & 3); domain = (desc >> 5) & 0x0f; if (regime_el(env, mmu_idx) == 1) { @@ -8426,6 +8446,9 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, } desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), mmu_idx, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } switch (desc & 3) { case 0: /* Page translation fault. 
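The arm_ldl_ptw()/arm_ldq_ptw() hunks above stop discarding the bus response: instead of passing NULL as the MemTxResult out-parameter to address_space_ldl_*/ldq_*, they capture the result and turn a failed descriptor load into ARMFault_SyncExternalOnWalk. Below is a minimal standalone sketch of that pattern; the MemTxResult values mirror QEMU's memattrs.h, but bus_ldl() and load_descriptor() are stand-ins invented for illustration, not the real API:

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Stub of QEMU's MemTxResult: MEMTX_OK is 0, errors are bit flags. */
typedef uint32_t MemTxResult;
#define MEMTX_OK           0
#define MEMTX_ERROR        (1U << 0)  /* device signalled an error */
#define MEMTX_DECODE_ERROR (1U << 1)  /* nothing mapped at that address */

/* Hypothetical bus load standing in for address_space_ldl_le(). */
static uint32_t bus_ldl(uint64_t addr, MemTxResult *result)
{
    if (addr >= 0x1000) {            /* pretend only 4KiB is populated */
        *result = MEMTX_DECODE_ERROR;
        return 0;
    }
    *result = MEMTX_OK;
    return 0xdeadbeef;               /* dummy descriptor value */
}

/* The pattern from the patch: capture the result and, on failure,
 * record an external abort on the page-table walk. */
static uint32_t load_descriptor(uint64_t addr, bool *fault, bool *ea)
{
    MemTxResult result = MEMTX_OK;
    uint32_t data = bus_ldl(addr, &result);

    if (result == MEMTX_OK) {
        *fault = false;
        return data;
    }
    *fault = true;                        /* ARMFault_SyncExternalOnWalk */
    *ea = (result != MEMTX_DECODE_ERROR); /* cf. arm_extabort_type() */
    return 0;
}

int main(void)
{
    bool fault, ea;
    uint32_t d = load_descriptor(0x2000, &fault, &ea);
    printf("desc=0x%x fault=%d ea=%d\n", (unsigned)d, fault, ea);
    return 0;
}
```

Returning 0 on failure is safe only because the caller is now obliged to check the fault record before using the value, which is exactly what the next set of hunks adds.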
*/ fi->type = ARMFault_Translation; @@ -8508,6 +8531,9 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, } desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), mmu_idx, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } type = (desc & 3); if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { /* Section translation fault, or attempt to use the encoding @@ -8559,6 +8585,9 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), mmu_idx, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); switch (desc & 3) { case 0: /* Page translation fault. */ @@ -8964,7 +8993,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, descaddr &= ~7ULL; nstable = extract32(tableattrs, 4, 1); descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); - if (fi->s1ptw) { + if (fi->type != ARMFault_None) { goto do_fault; } @@ -9272,6 +9301,13 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, case 6: *prot |= PAGE_READ | PAGE_EXEC; break; + case 7: + /* for v7M, same as 6; for R profile a reserved value */ + if (arm_feature(env, ARM_FEATURE_M)) { + *prot |= PAGE_READ | PAGE_EXEC; + break; + } + /* fall through */ default: qemu_log_mask(LOG_GUEST_ERROR, "DRACR[%d]: Bad value for AP bits: 0x%" @@ -9290,6 +9326,13 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, case 6: *prot |= PAGE_READ | PAGE_EXEC; break; + case 7: + /* for v7M, same as 6; for R profile a reserved value */ + if (arm_feature(env, ARM_FEATURE_M)) { + *prot |= PAGE_READ | PAGE_EXEC; + break; + } + /* fall through */ default: qemu_log_mask(LOG_GUEST_ERROR, "DRACR[%d]: Bad value for AP bits: 0x%" diff --git a/target/arm/internals.h b/target/arm/internals.h index 876854d876..89f5d2fe12 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -687,6 +687,16 @@ static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi) return fsc; } +static inline bool arm_extabort_type(MemTxResult result) +{ + /* The EA bit in syndromes and fault status registers is an + * IMPDEF classification of external aborts. ARM implementations + * usually use this to indicate AXI bus Decode error (0) or + * Slave error (1); in QEMU we follow that. + */ + return result != MEMTX_DECODE_ERROR; +} + /* Do a page table walk and add page to TLB if possible */ bool arm_tlb_fill(CPUState *cpu, vaddr address, MMUAccessType access_type, int mmu_idx, diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c index b36206343d..712c5c55b6 100644 --- a/target/arm/op_helper.c +++ b/target/arm/op_helper.c @@ -220,12 +220,7 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, /* now we have a real cpu fault */ cpu_restore_state(cs, retaddr); - /* The EA bit in syndromes and fault status registers is an - * IMPDEF classification of external aborts. ARM implementations - * usually use this to indicate AXI bus Decode error (0) or - * Slave error (1); in QEMU we follow that. 
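On the caller side, get_phys_addr_v5/v6 gain explicit checks after each arm_ldl_ptw() call, and get_phys_addr_lpae widens its old fi->s1ptw test to fi->type != ARMFault_None, so any fault recorded by the loader (including the new external-abort-on-walk type) aborts the walk. A condensed sketch of that control flow, with QEMU's structures reduced to a small assumed FaultInfo type:

```c
#include <stdbool.h>
#include <stdio.h>

typedef enum {
    FAULT_NONE = 0,
    FAULT_TRANSLATION,
    FAULT_SYNC_EXTERNAL_ON_WALK,
} FaultType;

typedef struct {
    FaultType type;
    int level;
} FaultInfo;

/* Stand-in for arm_ldl_ptw(): loads a descriptor or records a fault. */
static unsigned ld_descriptor(unsigned table, FaultInfo *fi)
{
    if (table == 0xbad) {
        fi->type = FAULT_SYNC_EXTERNAL_ON_WALK;
        return 0;
    }
    return 0x2;                      /* a valid "section" descriptor, say */
}

static bool walk(unsigned table, FaultInfo *fi)
{
    unsigned desc = ld_descriptor(table, fi);
    if (fi->type != FAULT_NONE) {    /* the new, broader check */
        goto do_fault;
    }
    if ((desc & 3) == 0) {           /* invalid descriptor */
        fi->type = FAULT_TRANSLATION;
        goto do_fault;
    }
    return false;                    /* success: no fault */

do_fault:
    fi->level = 1;
    return true;
}

int main(void)
{
    FaultInfo fi = { 0 };
    bool faulted = walk(0xbad, &fi);
    printf("faulted=%d type=%d\n", faulted, fi.type);
    return 0;
}
```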
- */ - fi.ea = (response != MEMTX_DECODE_ERROR); + fi.ea = arm_extabort_type(response); fi.type = ARMFault_SyncExternal; deliver_fault(cpu, addr, access_type, mmu_idx, &fi); } diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index ba94f7d045..70c1e08a36 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -400,7 +400,7 @@ static void unallocated_encoding(DisasContext *s) "at pc=%016" PRIx64 "\n", \ __FILE__, __LINE__, insn, s->pc - 4); \ unallocated_encoding(s); \ - } while (0); + } while (0) static void init_tmp_a64_array(DisasContext *s) { @@ -4985,6 +4985,38 @@ static void disas_fp_3src(DisasContext *s, uint32_t insn) } } +/* The imm8 encodes the sign bit, enough bits to represent an exponent in + * the range 01....1xx to 10....0xx, and the most significant 4 bits of + * the mantissa; see VFPExpandImm() in the v8 ARM ARM. + */ +static uint64_t vfp_expand_imm(int size, uint8_t imm8) +{ + uint64_t imm; + + switch (size) { + case MO_64: + imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | + (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) | + extract32(imm8, 0, 6); + imm <<= 48; + break; + case MO_32: + imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | + (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) | + (extract32(imm8, 0, 6) << 3); + imm <<= 16; + break; + case MO_16: + imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | + (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) | + (extract32(imm8, 0, 6) << 6); + break; + default: + g_assert_not_reached(); + } + return imm; +} + /* Floating point immediate * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0 * +---+---+---+-----------+------+---+------------+-------+------+------+ @@ -5008,22 +5040,7 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn) return; } - /* The imm8 encodes the sign bit, enough bits to represent - * an exponent in the range 01....1xx to 10....0xx, - * and the most significant 4 bits of the mantissa; see - * VFPExpandImm() in the v8 ARM ARM. - */ - if (is_double) { - imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | - (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) | - extract32(imm8, 0, 6); - imm <<= 48; - } else { - imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | - (extract32(imm8, 6, 1) ? 
0x3e00 : 0x4000) | - (extract32(imm8, 0, 6) << 3); - imm <<= 16; - } + imm = vfp_expand_imm(MO_32 + is_double, imm8); tcg_res = tcg_const_i64(imm); write_fp_dreg(s, rd, tcg_res); diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 3818d72831..a49d2221ad 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -459,7 +459,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, + NULL, NULL, "spec-ctrl", NULL, NULL, NULL, NULL, NULL, }, .cpuid_eax = 7, @@ -483,6 +483,22 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .tcg_features = TCG_APM_FEATURES, .unmigratable_flags = CPUID_APM_INVTSC, }, + [FEAT_8000_0008_EBX] = { + .feat_names = { + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + "ibpb", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid_eax = 0x80000008, + .cpuid_reg = R_EBX, + .tcg_features = 0, + .unmigratable_flags = 0, + }, [FEAT_XSAVE] = { .feat_names = { "xsaveopt", "xsavec", "xgetbv1", "xsaves", @@ -754,7 +770,7 @@ struct X86CPUDefinition { int model; int stepping; FeatureWordArray features; - char model_id[48]; + const char *model_id; }; static X86CPUDefinition builtin_x86_defs[] = { @@ -923,6 +939,7 @@ static X86CPUDefinition builtin_x86_defs[] = { .features[FEAT_1_EDX] = I486_FEATURES, .xlevel = 0, + .model_id = "", }, { .name = "pentium", @@ -934,6 +951,7 @@ static X86CPUDefinition builtin_x86_defs[] = { .features[FEAT_1_EDX] = PENTIUM_FEATURES, .xlevel = 0, + .model_id = "", }, { .name = "pentium2", @@ -945,6 +963,7 @@ static X86CPUDefinition builtin_x86_defs[] = { .features[FEAT_1_EDX] = PENTIUM2_FEATURES, .xlevel = 0, + .model_id = "", }, { .name = "pentium3", @@ -956,6 +975,7 @@ static X86CPUDefinition builtin_x86_defs[] = { .features[FEAT_1_EDX] = PENTIUM3_FEATURES, .xlevel = 0, + .model_id = "", }, { .name = "athlon", @@ -1066,6 +1086,31 @@ static X86CPUDefinition builtin_x86_defs[] = { .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", }, { + .name = "Nehalem-IBRS", + .level = 11, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 26, + .stepping = 3, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM, + .xlevel = 0x80000008, + .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)", + }, + { .name = "Westmere", .level = 11, .vendor = CPUID_VENDOR_INTEL, @@ -1092,6 +1137,34 @@ static X86CPUDefinition builtin_x86_defs[] = { .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", }, { + .name = "Westmere-IBRS", + .level = 11, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 44, + .stepping = 1, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + 
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .xlevel = 0x80000008, + .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)", + }, + { .name = "SandyBridge", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, @@ -1123,6 +1196,39 @@ static X86CPUDefinition builtin_x86_defs[] = { .model_id = "Intel Xeon E312xx (Sandy Bridge)", }, { + .name = "SandyBridge-IBRS", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 42, + .stepping = 1, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | + CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | + CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .xlevel = 0x80000008, + .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)", + }, + { .name = "IvyBridge", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, @@ -1157,6 +1263,42 @@ static X86CPUDefinition builtin_x86_defs[] = { .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", }, { + .name = "IvyBridge-IBRS", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 58, + .stepping = 9, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | + CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | + CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_ERMS, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .xlevel = 0x80000008, + .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)", + }, + { .name = "Haswell-noTSX", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, @@ -1191,7 +1333,46 @@ static X86CPUDefinition builtin_x86_defs[] = { CPUID_6_EAX_ARAT, .xlevel = 0x80000008, .model_id = "Intel Core Processor (Haswell, no 
TSX)", - }, { + }, + { + .name = "Haswell-noTSX-IBRS", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 60, + .stepping = 1, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .xlevel = 0x80000008, + .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)", + }, + { .name = "Haswell", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, @@ -1229,6 +1410,45 @@ static X86CPUDefinition builtin_x86_defs[] = { .model_id = "Intel Core Processor (Haswell)", }, { + .name = "Haswell-IBRS", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 60, + .stepping = 4, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .xlevel = 0x80000008, + .model_id = "Intel Core Processor (Haswell, IBRS)", + }, + { .name = "Broadwell-noTSX", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, @@ -1267,6 +1487,46 @@ static X86CPUDefinition builtin_x86_defs[] = { .model_id = "Intel Core Processor (Broadwell, no TSX)", }, { + .name = "Broadwell-noTSX-IBRS", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 61, + .stepping = 2, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | 
CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .xlevel = 0x80000008, + .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)", + }, + { .name = "Broadwell", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, @@ -1305,6 +1565,46 @@ static X86CPUDefinition builtin_x86_defs[] = { .model_id = "Intel Core Processor (Broadwell)", }, { + .name = "Broadwell-IBRS", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 61, + .stepping = 2, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .xlevel = 0x80000008, + .model_id = "Intel Core Processor (Broadwell, IBRS)", + }, + { .name = "Skylake-Client", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, @@ -1350,6 +1650,53 @@ static X86CPUDefinition builtin_x86_defs[] = { .model_id = "Intel Core Processor (Skylake)", }, { + .name = "Skylake-Client-IBRS", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 94, + .stepping = 3, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + 
.features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX, + /* Missing: XSAVES (not supported by some Linux versions, + * including v4.1 to v4.12). + * KVM doesn't yet expose any XSAVES state save component, + * and the only one defined in Skylake (processor tracing) + * probably will block migration anyway. + */ + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .xlevel = 0x80000008, + .model_id = "Intel Core Processor (Skylake, IBRS)", + }, + { .name = "Skylake-Server", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, @@ -1382,7 +1729,7 @@ static X86CPUDefinition builtin_x86_defs[] = { CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | - CPUID_7_0_EBX_AVX512VL, + CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, /* Missing: XSAVES (not supported by some Linux versions, * including v4.1 to v4.12). 
* KVM doesn't yet expose any XSAVES state save component, @@ -1398,6 +1745,56 @@ static X86CPUDefinition builtin_x86_defs[] = { .model_id = "Intel Xeon Processor (Skylake)", }, { + .name = "Skylake-Server-IBRS", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 85, + .stepping = 4, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | + CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | + CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | + CPUID_7_0_EBX_AVX512VL, + /* Missing: XSAVES (not supported by some Linux versions, + * including v4.1 to v4.12). + * KVM doesn't yet expose any XSAVES state save component, + * and the only one defined in Skylake (processor tracing) + * probably will block migration anyway. 
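The XSAVES caveat repeated in these model definitions refers to CPUID leaf 0xD, sub-leaf 1, EAX, whose low bits are the ones feat_names maps to "xsaveopt", "xsavec", "xgetbv1", and "xsaves". A host-side probe of those bits, as a sketch (uses the GCC/Clang cpuid.h helper, so x86 hosts only):

```c
#include <stdio.h>
#include <cpuid.h>   /* GCC/Clang builtin helper; x86 only */

int main(void)
{
    unsigned eax, ebx, ecx, edx;

    /* CPUID leaf 0xD, sub-leaf 1: XSAVE extended features. */
    if (!__get_cpuid_count(0xd, 1, &eax, &ebx, &ecx, &edx)) {
        puts("CPUID leaf 0xD not supported");
        return 1;
    }
    printf("xsaveopt=%u xsavec=%u xgetbv1=%u xsaves=%u\n",
           eax & 1, (eax >> 1) & 1, (eax >> 2) & 1, (eax >> 3) & 1);
    return 0;
}
```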
+ */ + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .xlevel = 0x80000008, + .model_id = "Intel Xeon Processor (Skylake, IBRS)", + }, + { .name = "Opteron_G1", .level = 5, .vendor = CPUID_VENDOR_AMD, @@ -1571,6 +1968,52 @@ static X86CPUDefinition builtin_x86_defs[] = { .xlevel = 0x8000000A, .model_id = "AMD EPYC Processor", }, + { + .name = "EPYC-IBPB", + .level = 0xd, + .vendor = CPUID_VENDOR_AMD, + .family = 23, + .model = 1, + .stepping = 2, + .features[FEAT_1_EDX] = + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | + CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | + CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | + CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | + CPUID_VME | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | + CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | + CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | + CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | + CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | + CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_IBPB, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | + CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | + CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | + CPUID_7_0_EBX_SHA_NI, + /* Missing: XSAVES (not supported by some Linux versions, + * including v4.1 to v4.12). + * KVM doesn't yet expose any XSAVES state save component. + */ + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .xlevel = 0x8000000A, + .model_id = "AMD EPYC Processor (with IBPB)", + }, }; typedef struct PropValue { @@ -2736,6 +3179,9 @@ static void x86_register_cpudef_type(X86CPUDefinition *def) * they shouldn't be set on the CPU model table. 
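All of the -IBRS and -IBPB variants added above boil down to two CPUID bits defined later in this series: CPUID.(EAX=7,ECX=0):EDX bit 26 ("spec-ctrl", which also enumerates MSR 0x48 IA32_SPEC_CTRL) on the Intel models, and CPUID.80000008h:EBX bit 12 ("ibpb") on EPYC. A guest running on one of these models could verify what it was given with a probe along these lines (sketch; GCC/Clang cpuid.h, x86 only):

```c
#include <stdio.h>
#include <cpuid.h>   /* GCC/Clang builtin helper; x86 only */

int main(void)
{
    unsigned eax, ebx, ecx, edx;

    /* Intel enumeration: CPUID.(EAX=7,ECX=0):EDX bit 26 ("spec-ctrl"). */
    if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        printf("spec-ctrl (IBRS via MSR IA32_SPEC_CTRL): %u\n",
               (edx >> 26) & 1);
    }

    /* AMD enumeration: CPUID.80000008h:EBX bit 12 ("ibpb"). */
    if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
        printf("ibpb: %u\n", (ebx >> 12) & 1);
    }
    return 0;
}
```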
*/ assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); + /* catch mistakes instead of silently truncating model_id when too long */ + assert(def->model_id && strlen(def->model_id) <= 48); + type_register(&ti); g_free(typename); @@ -3123,7 +3569,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } else { *eax = cpu->phys_bits; } - *ebx = 0; + *ebx = env->features[FEAT_8000_0008_EBX]; *ecx = 0; *edx = 0; if (cs->nr_cores * cs->nr_threads > 1) { @@ -3586,6 +4032,7 @@ static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); + x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_SVM); x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); @@ -4147,6 +4594,48 @@ static void x86_disas_set_info(CPUState *cs, disassemble_info *info) info->cap_insn_split = 8; } +void x86_update_hflags(CPUX86State *env) +{ + uint32_t hflags; +#define HFLAG_COPY_MASK \ + ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ + HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ + HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ + HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) + + hflags = env->hflags & HFLAG_COPY_MASK; + hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; + hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); + hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & + (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); + hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); + + if (env->cr[4] & CR4_OSFXSR_MASK) { + hflags |= HF_OSFXSR_MASK; + } + + if (env->efer & MSR_EFER_LMA) { + hflags |= HF_LMA_MASK; + } + + if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { + hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; + } else { + hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> + (DESC_B_SHIFT - HF_CS32_SHIFT); + hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> + (DESC_B_SHIFT - HF_SS32_SHIFT); + if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || + !(hflags & HF_CS32_MASK)) { + hflags |= HF_ADDSEG_MASK; + } else { + hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | + env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; + } + } + env->hflags = hflags; +} + static Property x86_cpu_properties[] = { #ifdef CONFIG_USER_ONLY /* apic_id = 0 by default for *-user, see commit 9886e834 */ diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 62c4742703..30cc5628d2 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -353,6 +353,7 @@ typedef enum X86Seg { #define MSR_IA32_APICBASE_BASE (0xfffffU<<12) #define MSR_IA32_FEATURE_CONTROL 0x0000003a #define MSR_TSC_ADJUST 0x0000003b +#define MSR_IA32_SPEC_CTRL 0x48 #define MSR_IA32_TSCDEADLINE 0x6e0 #define FEATURE_CONTROL_LOCKED (1<<0) @@ -471,6 +472,7 @@ typedef enum FeatureWord { FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */ FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ + FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */ FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ FEAT_HYPERV_EAX, /* CPUID[4000_0003].EAX */ @@ -666,6 +668,9 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS]; #define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) /* AVX512 Neural Network Instructions */ #define CPUID_7_0_EDX_AVX512_4FMAPS (1U 
<< 3) /* AVX512 Multiply Accumulation Single Precision */ +#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) /* Speculation Control */ + +#define CPUID_8000_0008_EBX_IBPB (1U << 12) /* Indirect Branch Prediction Barrier */ #define CPUID_XSAVE_XSAVEOPT (1U << 0) #define CPUID_XSAVE_XSAVEC (1U << 1) @@ -1125,6 +1130,8 @@ typedef struct CPUX86State { uint32_t pkru; + uint64_t spec_ctrl; + /* End of state preserved by INIT (dummy marker). */ struct {} end_init_save; @@ -1778,4 +1785,6 @@ bool cpu_is_bsp(X86CPU *cpu); void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf); void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf); +void x86_update_hflags(CPUX86State* env); + #endif /* I386_CPU_H */ diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c index 3ce6950296..934ec4afd1 100644 --- a/target/i386/hax-all.c +++ b/target/i386/hax-all.c @@ -782,56 +782,6 @@ static int hax_set_segments(CPUArchState *env, struct vcpu_state_t *sregs) return 0; } -/* - * After get the state from the kernel module, some - * qemu emulator state need be updated also - */ -static int hax_setup_qemu_emulator(CPUArchState *env) -{ - -#define HFLAG_COPY_MASK (~( \ - HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ - HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ - HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ - HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) - - uint32_t hflags; - - hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; - hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); - hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & - (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); - hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); - hflags |= (env->cr[4] & CR4_OSFXSR_MASK) << - (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT); - - if (env->efer & MSR_EFER_LMA) { - hflags |= HF_LMA_MASK; - } - - if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { - hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; - } else { - hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> - (DESC_B_SHIFT - HF_CS32_SHIFT); - hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> - (DESC_B_SHIFT - HF_SS32_SHIFT); - if (!(env->cr[0] & CR0_PE_MASK) || - (env->eflags & VM_MASK) || !(hflags & HF_CS32_MASK)) { - hflags |= HF_ADDSEG_MASK; - } else { - hflags |= ((env->segs[R_DS].base | - env->segs[R_ES].base | - env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; - } - } - - hflags &= ~HF_SMM_MASK; - - env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags; - return 0; -} - static int hax_sync_vcpu_register(CPUArchState *env, int set) { struct vcpu_state_t regs; @@ -887,9 +837,6 @@ static int hax_sync_vcpu_register(CPUArchState *env, int set) return -1; } } - if (!set) { - hax_setup_qemu_emulator(env); - } return 0; } @@ -1070,6 +1017,7 @@ static int hax_arch_get_registers(CPUArchState *env) return ret; } + x86_update_hflags(env); return 0; } diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c index 71c0515073..7803e09a28 100644 --- a/target/i386/hvf/x86hvf.c +++ b/target/i386/hvf/x86hvf.c @@ -297,7 +297,6 @@ int hvf_get_registers(CPUState *cpu_state) X86CPU *x86cpu = X86_CPU(cpu_state); CPUX86State *env = &x86cpu->env; - env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX); env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX); env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX); @@ -333,6 +332,7 @@ int hvf_get_registers(CPUState *cpu_state) env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6); env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7); + 
x86_update_hflags(env); return 0; } diff --git a/target/i386/kvm.c b/target/i386/kvm.c index 6f69e2fcfd..ad4b159b28 100644 --- a/target/i386/kvm.c +++ b/target/i386/kvm.c @@ -91,9 +91,11 @@ static bool has_msr_hv_synic; static bool has_msr_hv_stimer; static bool has_msr_hv_frequencies; static bool has_msr_xss; +static bool has_msr_spec_ctrl; -static bool has_msr_architectural_pmu; -static uint32_t num_architectural_pmu_counters; +static uint32_t has_architectural_pmu_version; +static uint32_t num_architectural_pmu_gp_counters; +static uint32_t num_architectural_pmu_fixed_counters; static int has_xsave; static int has_xcrs; @@ -872,19 +874,28 @@ int kvm_arch_init_vcpu(CPUState *cs) } if (limit >= 0x0a) { - uint32_t ver; + uint32_t eax, edx; - cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused); - if ((ver & 0xff) > 0) { - has_msr_architectural_pmu = true; - num_architectural_pmu_counters = (ver & 0xff00) >> 8; + cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx); + + has_architectural_pmu_version = eax & 0xff; + if (has_architectural_pmu_version > 0) { + num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8; /* Shouldn't be more than 32, since that's the number of bits * available in EBX to tell us _which_ counters are available. * Play it safe. */ - if (num_architectural_pmu_counters > MAX_GP_COUNTERS) { - num_architectural_pmu_counters = MAX_GP_COUNTERS; + if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) { + num_architectural_pmu_gp_counters = MAX_GP_COUNTERS; + } + + if (has_architectural_pmu_version > 1) { + num_architectural_pmu_fixed_counters = edx & 0x1f; + + if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) { + num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS; + } } } } @@ -1143,6 +1154,9 @@ static int kvm_get_supported_msrs(KVMState *s) case HV_X64_MSR_TSC_FREQUENCY: has_msr_hv_frequencies = true; break; + case MSR_IA32_SPEC_CTRL: + has_msr_spec_ctrl = true; + break; } } } @@ -1625,6 +1639,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level) if (has_msr_xss) { kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss); } + if (has_msr_spec_ctrl) { + kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl); + } #ifdef TARGET_X86_64 if (lm_capable_kernel) { kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar); @@ -1633,6 +1650,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level) kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar); } #endif + /* * The following MSRs have side effects on the guest or are too heavy * for normal writeback. Limit them to reset or full state updates. @@ -1650,32 +1668,36 @@ static int kvm_put_msrs(X86CPU *cpu, int level) if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr); } - if (has_msr_architectural_pmu) { - /* Stop the counter. */ - kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); - kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); + if (has_architectural_pmu_version > 0) { + if (has_architectural_pmu_version > 1) { + /* Stop the counter. */ + kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); + kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); + } /* Set the counter values. 
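The kvm.c hunk above replaces the single has_msr_architectural_pmu flag with a parsed PMU version, because version 1 of the architectural PMU has general-purpose counters but none of the fixed counters or global-control MSRs that QEMU was unconditionally saving and restoring. The field layout it decodes from CPUID leaf 0xA — version in EAX[7:0], GP-counter count in EAX[15:8], fixed-counter count in EDX[4:0] — can be reproduced standalone (sketch; the MAX_* caps mirror QEMU's clamps but the exact values here are assumptions):

```c
#include <stdio.h>
#include <cpuid.h>   /* GCC/Clang builtin helper; x86 only */

#define MAX_GP_COUNTERS    18  /* assumed cap, mirroring QEMU's clamp */
#define MAX_FIXED_COUNTERS  3

int main(void)
{
    unsigned eax, ebx, ecx, edx;
    unsigned version, gp = 0, fixed = 0;

    if (!__get_cpuid_count(0xa, 0, &eax, &ebx, &ecx, &edx)) {
        return 1;
    }
    version = eax & 0xff;              /* architectural PMU version */
    if (version > 0) {
        gp = (eax >> 8) & 0xff;        /* general-purpose counters */
        if (gp > MAX_GP_COUNTERS) {
            gp = MAX_GP_COUNTERS;      /* don't trust CPUID blindly */
        }
        if (version > 1) {
            fixed = edx & 0x1f;        /* fixed-function counters */
            if (fixed > MAX_FIXED_COUNTERS) {
                fixed = MAX_FIXED_COUNTERS;
            }
        }
    }
    printf("pmu v%u: %u gp + %u fixed counters\n", version, gp, fixed);
    return 0;
}
```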
*/ - for (i = 0; i < MAX_FIXED_COUNTERS; i++) { + for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, env->msr_fixed_counters[i]); } - for (i = 0; i < num_architectural_pmu_counters; i++) { + for (i = 0; i < num_architectural_pmu_gp_counters; i++) { kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, env->msr_gp_counters[i]); kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, env->msr_gp_evtsel[i]); } - kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, - env->msr_global_status); - kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, - env->msr_global_ovf_ctrl); - - /* Now start the PMU. */ - kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, - env->msr_fixed_ctr_ctrl); - kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, - env->msr_global_ctrl); + if (has_architectural_pmu_version > 1) { + kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, + env->msr_global_status); + kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, + env->msr_global_ovf_ctrl); + + /* Now start the PMU. */ + kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, + env->msr_fixed_ctr_ctrl); + kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, + env->msr_global_ctrl); + } } /* * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add, @@ -1877,7 +1899,6 @@ static int kvm_get_sregs(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_sregs sregs; - uint32_t hflags; int bit, i, ret; ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); @@ -1919,44 +1940,7 @@ static int kvm_get_sregs(X86CPU *cpu) env->efer = sregs.efer; /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ - -#define HFLAG_COPY_MASK \ - ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ - HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ - HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ - HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) - - hflags = env->hflags & HFLAG_COPY_MASK; - hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; - hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); - hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & - (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); - hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); - - if (env->cr[4] & CR4_OSFXSR_MASK) { - hflags |= HF_OSFXSR_MASK; - } - - if (env->efer & MSR_EFER_LMA) { - hflags |= HF_LMA_MASK; - } - - if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { - hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; - } else { - hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> - (DESC_B_SHIFT - HF_CS32_SHIFT); - hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> - (DESC_B_SHIFT - HF_SS32_SHIFT); - if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || - !(hflags & HF_CS32_MASK)) { - hflags |= HF_ADDSEG_MASK; - } else { - hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | - env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; - } - } - env->hflags = hflags; + x86_update_hflags(env); return 0; } @@ -2004,6 +1988,9 @@ static int kvm_get_msrs(X86CPU *cpu) if (has_msr_xss) { kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0); } + if (has_msr_spec_ctrl) { + kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0); + } if (!env->tsc_valid) { @@ -2030,15 +2017,17 @@ static int kvm_get_msrs(X86CPU *cpu) if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0); } - if (has_msr_architectural_pmu) { - kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); - kvm_msr_entry_add(cpu, 
MSR_CORE_PERF_GLOBAL_CTRL, 0); - kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0); - kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0); - for (i = 0; i < MAX_FIXED_COUNTERS; i++) { + if (has_architectural_pmu_version > 0) { + if (has_architectural_pmu_version > 1) { + kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); + kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); + kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0); + kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0); + } + for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0); } - for (i = 0; i < num_architectural_pmu_counters; i++) { + for (i = 0; i < num_architectural_pmu_gp_counters; i++) { kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0); kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0); } @@ -2349,6 +2338,9 @@ static int kvm_get_msrs(X86CPU *cpu) env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data; } break; + case MSR_IA32_SPEC_CTRL: + env->spec_ctrl = msrs[i].data; + break; } } @@ -3492,6 +3484,7 @@ int kvm_arch_release_virq_post(int virq) if (entry->virq == virq) { trace_kvm_x86_remove_msi_route(virq); QLIST_REMOVE(entry, list); + g_free(entry); break; } } diff --git a/target/i386/machine.c b/target/i386/machine.c index df5ec359eb..361c05aedf 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -818,6 +818,25 @@ static const VMStateDescription vmstate_mcg_ext_ctl = { } }; +static bool spec_ctrl_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return env->spec_ctrl != 0; +} + +static const VMStateDescription vmstate_spec_ctrl = { + .name = "cpu/spec_ctrl", + .version_id = 1, + .minimum_version_id = 1, + .needed = spec_ctrl_needed, + .fields = (VMStateField[]){ + VMSTATE_UINT64(env.spec_ctrl, X86CPU), + VMSTATE_END_OF_LIST() + } +}; + VMStateDescription vmstate_x86_cpu = { .name = "cpu", .version_id = 12, @@ -936,6 +955,7 @@ VMStateDescription vmstate_x86_cpu = { #ifdef TARGET_X86_64 &vmstate_pkru, #endif + &vmstate_spec_ctrl, &vmstate_mcg_ext_ctl, NULL } diff --git a/target/mips/msa_helper.c b/target/mips/msa_helper.c index f167a42655..8fb7a369ca 100644 --- a/target/mips/msa_helper.c +++ b/target/mips/msa_helper.c @@ -682,13 +682,13 @@ static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2) do { \ e = SIGNED_EVEN(a, df); \ o = SIGNED_ODD(a, df); \ - } while (0); + } while (0) #define UNSIGNED_EXTRACT(e, o, a, df) \ do { \ e = UNSIGNED_EVEN(a, df); \ o = UNSIGNED_ODD(a, df); \ - } while (0); + } while (0) static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2) { @@ -1120,9 +1120,11 @@ void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd, #define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE) #define MSA_LOOP(DF) \ + do { \ for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \ - MSA_DO_ ## DF \ - } + MSA_DO_ ## DF; \ + } \ + } while (0) #define MSA_FN_DF(FUNC) \ void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \ @@ -1135,17 +1137,17 @@ void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \ uint32_t i; \ switch (df) { \ case DF_BYTE: \ - MSA_LOOP_B \ + MSA_LOOP_B; \ break; \ case DF_HALF: \ - MSA_LOOP_H \ + MSA_LOOP_H; \ break; \ case DF_WORD: \ - MSA_LOOP_W \ + MSA_LOOP_W; \ break; \ case DF_DOUBLE: \ - MSA_LOOP_D \ - break; \ + MSA_LOOP_D; \ + break; \ default: \ assert(0); \ } \ @@ -1168,7 +1170,7 @@ void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \ do { \ R##DF(pwx, i) = 
pwt->DF[2*i]; \ L##DF(pwx, i) = pws->DF[2*i]; \ - } while (0); + } while (0) MSA_FN_DF(pckev_df) #undef MSA_DO @@ -1176,7 +1178,7 @@ MSA_FN_DF(pckev_df) do { \ R##DF(pwx, i) = pwt->DF[2*i+1]; \ L##DF(pwx, i) = pws->DF[2*i+1]; \ - } while (0); + } while (0) MSA_FN_DF(pckod_df) #undef MSA_DO @@ -1184,7 +1186,7 @@ MSA_FN_DF(pckod_df) do { \ pwx->DF[2*i] = L##DF(pwt, i); \ pwx->DF[2*i+1] = L##DF(pws, i); \ - } while (0); + } while (0) MSA_FN_DF(ilvl_df) #undef MSA_DO @@ -1192,7 +1194,7 @@ MSA_FN_DF(ilvl_df) do { \ pwx->DF[2*i] = R##DF(pwt, i); \ pwx->DF[2*i+1] = R##DF(pws, i); \ - } while (0); + } while (0) MSA_FN_DF(ilvr_df) #undef MSA_DO @@ -1200,7 +1202,7 @@ MSA_FN_DF(ilvr_df) do { \ pwx->DF[2*i] = pwt->DF[2*i]; \ pwx->DF[2*i+1] = pws->DF[2*i]; \ - } while (0); + } while (0) MSA_FN_DF(ilvev_df) #undef MSA_DO @@ -1208,7 +1210,7 @@ MSA_FN_DF(ilvev_df) do { \ pwx->DF[2*i] = pwt->DF[2*i+1]; \ pwx->DF[2*i+1] = pws->DF[2*i+1]; \ - } while (0); + } while (0) MSA_FN_DF(ilvod_df) #undef MSA_DO #undef MSA_LOOP_COND @@ -1222,7 +1224,7 @@ MSA_FN_DF(ilvod_df) uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n); \ pwx->DF[i] = \ (pwd->DF[i] & 0xc0) ? 0 : k < n ? pwt->DF[k] : pws->DF[k - n]; \ - } while (0); + } while (0) MSA_FN_DF(vshf_df) #undef MSA_DO #undef MSA_LOOP_COND diff --git a/target/ppc/compat.c b/target/ppc/compat.c index ad8f93c064..807c906f68 100644 --- a/target/ppc/compat.c +++ b/target/ppc/compat.c @@ -32,7 +32,16 @@ typedef struct { uint32_t pvr; uint64_t pcr; uint64_t pcr_level; - int max_threads; + + /* + * Maximum allowed virtual threads per virtual core + * + * This is to stop older guests getting confused by seeing more + * threads than they think the cpu can support. Usually it's + * equal to the number of threads supported on bare metal + * hardware, but not always (see POWER9). + */ + int max_vthreads; } CompatInfo; static const CompatInfo compat_table[] = { @@ -45,35 +54,42 @@ static const CompatInfo compat_table[] = { .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05 | PCR_TM_DIS | PCR_VSX_DIS, .pcr_level = PCR_COMPAT_2_05, - .max_threads = 2, + .max_vthreads = 2, }, { /* POWER7, ISA2.06 */ .name = "power7", .pvr = CPU_POWERPC_LOGICAL_2_06, .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_TM_DIS, .pcr_level = PCR_COMPAT_2_06, - .max_threads = 4, + .max_vthreads = 4, }, { .name = "power7+", .pvr = CPU_POWERPC_LOGICAL_2_06_PLUS, .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_TM_DIS, .pcr_level = PCR_COMPAT_2_06, - .max_threads = 4, + .max_vthreads = 4, }, { /* POWER8, ISA2.07 */ .name = "power8", .pvr = CPU_POWERPC_LOGICAL_2_07, .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07, .pcr_level = PCR_COMPAT_2_07, - .max_threads = 8, + .max_vthreads = 8, }, { /* POWER9, ISA3.00 */ .name = "power9", .pvr = CPU_POWERPC_LOGICAL_3_00, .pcr = PCR_COMPAT_3_00, .pcr_level = PCR_COMPAT_3_00, - .max_threads = 4, + /* + * POWER9 hardware only supports 4 threads / core, but this + * limit is for guests. We need to support 8 vthreads/vcore + * on POWER9 for POWER8 compatibility guests, and it's very + * confusing if half of the threads disappear from the guest + * if it announces it's POWER9 aware at CAS time. 
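The msa_helper.c hunks above, like the earlier translate-a64.c one, drop the semicolon after `} while (0)` inside macro bodies. The reason: with the semicolon baked into the macro, each expansion produces two statements — the do/while and an empty one — and the empty statement detaches any following `else` from its `if`, which is a compile error. A minimal reproduction of the pitfall:

```c
#include <stdio.h>

/* Broken: the trailing ';' makes BAD(x); expand to two statements,
 * so "if (...) BAD(x); else ..." fails to compile (stray else). */
#define BAD(x)  do { printf("%d\n", (x)); } while (0);

/* Fixed: no trailing ';' — the caller supplies it, and the whole
 * expansion is a single statement. */
#define GOOD(x) do { printf("%d\n", (x)); } while (0)

int main(int argc, char **argv)
{
    (void)argv;
    if (argc > 1)
        GOOD(argc);     /* safe inside an unbraced if/else */
    else
        GOOD(0);

    /* Replacing GOOD with BAD in the if/else above would not compile. */
    return 0;
}
```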
+ */ + .max_vthreads = 8, }, }; @@ -185,14 +201,14 @@ void ppc_set_compat_all(uint32_t compat_pvr, Error **errp) } } -int ppc_compat_max_threads(PowerPCCPU *cpu) +int ppc_compat_max_vthreads(PowerPCCPU *cpu) { const CompatInfo *compat = compat_by_pvr(cpu->compat_pvr); int n_threads = CPU(cpu)->nr_threads; if (cpu->compat_pvr) { g_assert(compat); - n_threads = MIN(n_threads, compat->max_threads); + n_threads = MIN(n_threads, compat->max_vthreads); } return n_threads; diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index a5e49f23e9..603a38cae8 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -140,9 +140,6 @@ enum { POWERPC_EXCP_HYPPRIV = 41, /* Embedded hypervisor priv instruction */ /* Vectors 42 to 63 are reserved */ /* Exceptions defined in the PowerPC server specification */ - /* Server doorbell variants */ -#define POWERPC_EXCP_SDOOR POWERPC_EXCP_GDOORI -#define POWERPC_EXCP_SDOOR_HV POWERPC_EXCP_DOORI POWERPC_EXCP_RESET = 64, /* System reset exception */ POWERPC_EXCP_DSEG = 65, /* Data segment exception */ POWERPC_EXCP_ISEG = 66, /* Instruction segment exception */ @@ -189,8 +186,11 @@ enum { POWERPC_EXCP_HV_EMU = 96, /* HV emulation assistance */ POWERPC_EXCP_HV_MAINT = 97, /* HMI */ POWERPC_EXCP_HV_FU = 98, /* Hypervisor Facility unavailable */ + /* Server doorbell variants */ + POWERPC_EXCP_SDOOR = 99, + POWERPC_EXCP_SDOOR_HV = 100, /* EOL */ - POWERPC_EXCP_NB = 99, + POWERPC_EXCP_NB = 101, /* QEMU exceptions: used internally during code translation */ POWERPC_EXCP_STOP = 0x200, /* stop translation */ POWERPC_EXCP_BRANCH = 0x201, /* branch instruction */ @@ -930,7 +930,7 @@ enum { #define BOOKE206_MAX_TLBN 4 /*****************************************************************************/ -/* Embedded.Processor Control */ +/* Server and Embedded Processor Control */ #define DBELL_TYPE_SHIFT 27 #define DBELL_TYPE_MASK (0x1f << DBELL_TYPE_SHIFT) @@ -940,11 +940,15 @@ enum { #define DBELL_TYPE_G_DBELL_CRIT (0x03 << DBELL_TYPE_SHIFT) #define DBELL_TYPE_G_DBELL_MC (0x04 << DBELL_TYPE_SHIFT) -#define DBELL_BRDCAST (1 << 26) +#define DBELL_TYPE_DBELL_SERVER (0x05 << DBELL_TYPE_SHIFT) + +#define DBELL_BRDCAST PPC_BIT(37) #define DBELL_LPIDTAG_SHIFT 14 #define DBELL_LPIDTAG_MASK (0xfff << DBELL_LPIDTAG_SHIFT) #define DBELL_PIRTAG_MASK 0x3fff +#define DBELL_PROCIDTAG_MASK PPC_BITMASK(44, 63) + /*****************************************************************************/ /* Segment page size information, used by recent hash MMUs * The format of this structure mirrors kvm_ppc_smmu_info @@ -1395,7 +1399,7 @@ void ppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr, Error **errp); #if !defined(CONFIG_USER_ONLY) void ppc_set_compat_all(uint32_t compat_pvr, Error **errp); #endif -int ppc_compat_max_threads(PowerPCCPU *cpu); +int ppc_compat_max_vthreads(PowerPCCPU *cpu); void ppc_compat_add_property(Object *obj, const char *name, uint32_t *compat_pvr, const char *basedesc, Error **errp); @@ -2012,6 +2016,7 @@ void ppc_compat_add_property(Object *obj, const char *name, #define HID0_DOZE (1 << 23) /* pre-2.06 */ #define HID0_NAP (1 << 22) /* pre-2.06 */ #define HID0_HILE PPC_BIT(19) /* POWER8 */ +#define HID0_POWER9_HILE PPC_BIT(4) /*****************************************************************************/ /* PowerPC Instructions types definitions */ diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index 37d2410726..c092fbead0 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -417,6 +417,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int 
excp_model, int excp) case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */ case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */ + case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */ case POWERPC_EXCP_HV_EMU: srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; @@ -654,7 +655,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) } } else if (excp_model == POWERPC_EXCP_POWER8) { if (new_msr & MSR_HVB) { - if (env->spr[SPR_HID0] & HID0_HILE) { + if (env->spr[SPR_HID0] & (HID0_HILE | HID0_POWER9_HILE)) { new_msr |= (target_ulong)1 << MSR_LE; } } else if (env->spr[SPR_LPCR] & LPCR_ILE) { @@ -846,6 +847,11 @@ static void ppc_hw_interrupt(CPUPPCState *env) powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI); return; } + if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL); + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV); + return; + } if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM); @@ -1145,4 +1151,50 @@ void helper_msgsnd(target_ulong rb) } qemu_mutex_unlock_iothread(); } + +/* Server Processor Control */ +static int book3s_dbell2irq(target_ulong rb) +{ + int msg = rb & DBELL_TYPE_MASK; + + /* A Directed Hypervisor Doorbell message is sent only if the + * message type is 5. All other types are reserved and the + * instruction is a no-op */ + return msg == DBELL_TYPE_DBELL_SERVER ? PPC_INTERRUPT_HDOORBELL : -1; +} + +void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) +{ + int irq = book3s_dbell2irq(rb); + + if (irq < 0) { + return; + } + + env->pending_interrupts &= ~(1 << irq); +} + +void helper_book3s_msgsnd(target_ulong rb) +{ + int irq = book3s_dbell2irq(rb); + int pir = rb & DBELL_PROCIDTAG_MASK; + CPUState *cs; + + if (irq < 0) { + return; + } + + qemu_mutex_lock_iothread(); + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *cenv = &cpu->env; + + /* TODO: broadcast message to all threads of the same processor */ + if (cenv->spr_cb[SPR_PIR].default_value == pir) { + cenv->pending_interrupts |= 1 << irq; + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } + } + qemu_mutex_unlock_iothread(); +} #endif diff --git a/target/ppc/helper.h b/target/ppc/helper.h index bb6a94a8b3..5b739179b8 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -679,6 +679,8 @@ DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl) DEF_HELPER_FLAGS_1(602_mfrom, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_1(msgsnd, void, tl) DEF_HELPER_2(msgclr, void, env, tl) +DEF_HELPER_1(book3s_msgsnd, void, tl) +DEF_HELPER_2(book3s_msgclr, void, env, tl) #endif DEF_HELPER_4(dlmzb, tl, env, tl, tl, i32) diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index 4664a3ce9d..914be687e7 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -2011,16 +2011,6 @@ uint64_t kvmppc_get_clockfreq(void) return kvmppc_read_int_cpu_dt("clock-frequency"); } -uint32_t kvmppc_get_vmx(void) -{ - return kvmppc_read_int_cpu_dt("ibm,vmx"); -} - -uint32_t kvmppc_get_dfp(void) -{ - return kvmppc_read_int_cpu_dt("ibm,dfp"); -} - static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo) { PowerPCCPU *cpu = ppc_env_get_cpu(env); @@ -2404,23 +2394,18 @@ static void alter_insns(uint64_t *word, uint64_t flags, bool on) static void kvmppc_host_cpu_class_init(ObjectClass *oc, void 
*data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - uint32_t vmx = kvmppc_get_vmx(); - uint32_t dfp = kvmppc_get_dfp(); uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size"); uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size"); /* Now fix up the class with information we can query from the host */ pcc->pvr = mfpvr(); - if (vmx != -1) { - /* Only override when we know what the host supports */ - alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0); - alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1); - } - if (dfp != -1) { - /* Only override when we know what the host supports */ - alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp); - } + alter_insns(&pcc->insns_flags, PPC_ALTIVEC, + qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC); + alter_insns(&pcc->insns_flags2, PPC2_VSX, + qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX); + alter_insns(&pcc->insns_flags2, PPC2_DFP, + qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_DFP); if (dcache_size != -1) { pcc->l1_dcache_size = dcache_size; @@ -2667,21 +2652,24 @@ void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n) hdr = (struct kvm_get_htab_header *)buf; while ((i < n) && ((char *)hdr < (buf + rc))) { - int invalid = hdr->n_invalid; + int invalid = hdr->n_invalid, valid = hdr->n_valid; if (hdr->index != (ptex + i)) { hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32 " != (%"HWADDR_PRIu" + %d", hdr->index, ptex, i); } - memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * hdr->n_valid); - i += hdr->n_valid; + if (n - i < valid) { + valid = n - i; + } + memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid); + i += valid; if ((n - i) < invalid) { invalid = n - i; } memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64); - i += hdr->n_invalid; + i += invalid; hdr = (struct kvm_get_htab_header *) ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid); diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h index d6be38ecaf..ecb55493cc 100644 --- a/target/ppc/kvm_ppc.h +++ b/target/ppc/kvm_ppc.h @@ -15,8 +15,6 @@ uint32_t kvmppc_get_tbfreq(void); uint64_t kvmppc_get_clockfreq(void); -uint32_t kvmppc_get_vmx(void); -uint32_t kvmppc_get_dfp(void); bool kvmppc_get_host_model(char **buf); bool kvmppc_get_host_serial(char **buf); int kvmppc_get_hasidle(CPUPPCState *env); diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c index 2a1f9902c9..298c15e961 100644 --- a/target/ppc/mmu_helper.c +++ b/target/ppc/mmu_helper.c @@ -2570,6 +2570,17 @@ void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid) tlb_flush(CPU(cpu)); } +static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb) +{ + PowerPCCPU *cpu = ppc_env_get_cpu(env); + + if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) { + tlb_flush_page(CPU(cpu), tlb->mas2 & MAS2_EPN_MASK); + } else { + tlb_flush(CPU(cpu)); + } +} + void helper_booke206_tlbwe(CPUPPCState *env) { PowerPCCPU *cpu = ppc_env_get_cpu(env); @@ -2628,6 +2639,21 @@ void helper_booke206_tlbwe(CPUPPCState *env) if (msr_gs) { cpu_abort(CPU(cpu), "missing HV implementation\n"); } + + if (tlb->mas1 & MAS1_VALID) { + /* Invalidate the page in QEMU TLB if it was a valid entry. + * + * In "PowerPC e500 Core Family Reference Manual, Rev. 1", + * Section "12.4.2 TLB Write Entry (tlbwe) Instruction": + * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf) + * + * "Note that when an L2 TLB entry is written, it may be displacing an + * already valid entry in the same L2 TLB location (a victim). 
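The kvmppc_read_hptes() hunk above clamps hdr->n_valid to the space remaining in the caller's buffer before the memcpy(), just as the existing code already clamped n_invalid before the memset(), instead of trusting counts read back from the kernel fd. That defensive pattern, reduced to a self-contained sketch with invented names:

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ENTRY_SIZE 16
#define N_DEST      4

/* Header as an external producer (here: a stub) reports it. */
struct chunk_header {
    uint16_t n_valid;    /* entries that follow the header */
    uint16_t n_invalid;  /* entries to zero-fill after them */
};

static void consume(uint8_t *dest, size_t n,
                    const struct chunk_header *hdr, const uint8_t *payload)
{
    size_t i = 0;
    size_t valid = hdr->n_valid, invalid = hdr->n_invalid;

    /* Never let the producer's counts run past the destination. */
    if (n - i < valid) {
        valid = n - i;
    }
    memcpy(dest + i * ENTRY_SIZE, payload, ENTRY_SIZE * valid);
    i += valid;

    if (n - i < invalid) {
        invalid = n - i;
    }
    memset(dest + i * ENTRY_SIZE, 0, ENTRY_SIZE * invalid);
}

int main(void)
{
    uint8_t dest[N_DEST * ENTRY_SIZE];
    uint8_t payload[8 * ENTRY_SIZE] = { 1 };
    struct chunk_header hdr = { .n_valid = 8, .n_invalid = 8 }; /* lies */

    consume(dest, N_DEST, &hdr, payload);  /* stays inside dest[] */
    printf("dest[0]=%u\n", dest[0]);
    return 0;
}
```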
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index 2a1f9902c9..298c15e961 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -2570,6 +2570,17 @@ void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
     tlb_flush(CPU(cpu));
 }
 
+static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
+{
+    PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
+        tlb_flush_page(CPU(cpu), tlb->mas2 & MAS2_EPN_MASK);
+    } else {
+        tlb_flush(CPU(cpu));
+    }
+}
+
 void helper_booke206_tlbwe(CPUPPCState *env)
 {
     PowerPCCPU *cpu = ppc_env_get_cpu(env);
@@ -2628,6 +2639,21 @@ void helper_booke206_tlbwe(CPUPPCState *env)
     if (msr_gs) {
         cpu_abort(CPU(cpu), "missing HV implementation\n");
     }
+
+    if (tlb->mas1 & MAS1_VALID) {
+        /* Invalidate the page in QEMU TLB if it was a valid entry.
+         *
+         * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
+         * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
+         * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
+         *
+         * "Note that when an L2 TLB entry is written, it may be displacing an
+         * already valid entry in the same L2 TLB location (a victim). If a
+         * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
+         * TLB entry is automatically invalidated." */
+        flush_page(env, tlb);
+    }
+
     tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
                   env->spr[SPR_BOOKE_MAS3];
     tlb->mas1 = env->spr[SPR_BOOKE_MAS1];
@@ -2663,11 +2689,7 @@ void helper_booke206_tlbwe(CPUPPCState *env)
         tlb->mas1 &= ~MAS1_IPROT;
     }
 
-    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
-        tlb_flush_page(CPU(cpu), tlb->mas2 & MAS2_EPN_MASK);
-    } else {
-        tlb_flush(CPU(cpu));
-    }
+    flush_page(env, tlb);
 }
 
 static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 0ef21cce33..4132f67bb1 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -605,27 +605,22 @@ static opc_handler_t invalid_handler = {
 static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
 {
     TCGv t0 = tcg_temp_new();
-    TCGv_i32 t1 = tcg_temp_new_i32();
-
-    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
-
-    tcg_gen_setcond_tl((s ? TCG_COND_LT: TCG_COND_LTU), t0, arg0, arg1);
-    tcg_gen_trunc_tl_i32(t1, t0);
-    tcg_gen_shli_i32(t1, t1, CRF_LT_BIT);
-    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);
+    TCGv t1 = tcg_temp_new();
+    TCGv_i32 t = tcg_temp_new_i32();
 
-    tcg_gen_setcond_tl((s ? TCG_COND_GT: TCG_COND_GTU), t0, arg0, arg1);
-    tcg_gen_trunc_tl_i32(t1, t0);
-    tcg_gen_shli_i32(t1, t1, CRF_GT_BIT);
-    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);
+    tcg_gen_movi_tl(t0, CRF_EQ);
+    tcg_gen_movi_tl(t1, CRF_LT);
+    tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU), t0, arg0, arg1, t1, t0);
+    tcg_gen_movi_tl(t1, CRF_GT);
+    tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU), t0, arg0, arg1, t1, t0);
 
-    tcg_gen_setcond_tl(TCG_COND_EQ, t0, arg0, arg1);
-    tcg_gen_trunc_tl_i32(t1, t0);
-    tcg_gen_shli_i32(t1, t1, CRF_EQ_BIT);
-    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);
+    tcg_gen_trunc_tl_i32(t, t0);
+    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
+    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);
 
     tcg_temp_free(t0);
-    tcg_temp_free_i32(t1);
+    tcg_temp_free(t1);
+    tcg_temp_free_i32(t);
 }
 
 static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
@@ -6174,8 +6169,13 @@ static void gen_msgclr(DisasContext *ctx)
 #if defined(CONFIG_USER_ONLY)
     GEN_PRIV;
 #else
-    CHK_SV;
-    gen_helper_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+    CHK_HV;
+    /* 64-bit server processors compliant with arch 2.x */
+    if (ctx->insns_flags & PPC_SEGMENT_64B) {
+        gen_helper_book3s_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+    } else {
+        gen_helper_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+    }
 #endif /* defined(CONFIG_USER_ONLY) */
 }
 
@@ -6184,11 +6184,25 @@ static void gen_msgsnd(DisasContext *ctx)
 #if defined(CONFIG_USER_ONLY)
     GEN_PRIV;
 #else
-    CHK_SV;
-    gen_helper_msgsnd(cpu_gpr[rB(ctx->opcode)]);
+    CHK_HV;
+    /* 64-bit server processors compliant with arch 2.x */
+    if (ctx->insns_flags & PPC_SEGMENT_64B) {
+        gen_helper_book3s_msgsnd(cpu_gpr[rB(ctx->opcode)]);
+    } else {
+        gen_helper_msgsnd(cpu_gpr[rB(ctx->opcode)]);
+    }
 #endif /* defined(CONFIG_USER_ONLY) */
 }
 
+static void gen_msgsync(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+    GEN_PRIV;
+#else
+    CHK_HV;
+#endif /* defined(CONFIG_USER_ONLY) */
+    /* interpreted as no-op */
+}
+
 #if defined(TARGET_PPC64)
 static void gen_maddld(DisasContext *ctx)
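The rewritten gen_op_cmp() selects one of the three mutually exclusive CR bits with two movcond ops and then ORs in XER.SO, instead of materializing three separate setcond/shift/or chains. In plain C, the value it leaves in the CR field reduces to the following (a model of the semantics using the standard PowerPC CR nibble encoding LT=8, GT=4, EQ=2, SO=1, not the TCG code itself):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t cr_compare(int64_t a, int64_t b, uint32_t so)
    {
        uint32_t crf = 2;              /* CRF_EQ */

        if (a < b) {
            crf = 8;                   /* CRF_LT */
        } else if (a > b) {
            crf = 4;                   /* CRF_GT */
        }
        return crf | (so & 1);         /* fold in XER.SO last */
    }

    int main(void)
    {
        assert(cr_compare(1, 2, 0) == 8);   /* LT */
        assert(cr_compare(2, 1, 1) == 5);   /* GT | SO */
        assert(cr_compare(3, 3, 0) == 2);   /* EQ */
        return 0;
    }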
@@ -6669,6 +6683,8 @@ GEN_HANDLER2_E(msgsnd, "msgsnd", 0x1F, 0x0E, 0x06, 0x03ff0001,
                PPC_NONE, PPC2_PRCNTL),
 GEN_HANDLER2_E(msgclr, "msgclr", 0x1F, 0x0E, 0x07, 0x03ff0001,
                PPC_NONE, PPC2_PRCNTL),
+GEN_HANDLER2_E(msgsync, "msgsync", 0x1F, 0x16, 0x1B, 0x00000000,
+               PPC_NONE, PPC2_PRCNTL),
 GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
 GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
 GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c
index 70ff15a51a..55c99c97e3 100644
--- a/target/ppc/translate_init.c
+++ b/target/ppc/translate_init.c
@@ -8866,7 +8866,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
                        PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
                        PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
                        PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
-                       PPC2_TM | PPC2_PM_ISA206 | PPC2_ISA300;
+                       PPC2_TM | PPC2_PM_ISA206 | PPC2_ISA300 | PPC2_PRCNTL;
     pcc->msr_mask = (1ull << MSR_SF) |
                     (1ull << MSR_TM) |
                     (1ull << MSR_VR) |
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index ae3cee91a2..d2e6b9f5c7 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -89,6 +89,7 @@ static void s390_cpu_reset(CPUState *s)
     CPUS390XState *env = &cpu->env;
 
     env->pfault_token = -1UL;
+    env->bpbc = false;
     scc->parent_reset(s);
     cpu->env.sigp_order = 0;
     s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index 1a8b6b9ae9..a1123ad621 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -93,6 +93,7 @@ struct CPUS390XState {
 
     uint32_t fpc;          /* floating-point control register */
     uint32_t cc_op;
+    bool bpbc;             /* branch prediction blocking */
 
     float_status fpu_status; /* passed to softfloat lib */
 
@@ -759,6 +760,8 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
     s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false)
 #define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len) \
     s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
+#define s390_cpu_virt_mem_check_read(cpu, laddr, ar, len) \
+    s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, false)
 #define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \
     s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
 void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra);
diff --git a/target/s390x/cpu_features.c b/target/s390x/cpu_features.c
index 31a4676f05..85d10b5710 100644
--- a/target/s390x/cpu_features.c
+++ b/target/s390x/cpu_features.c
@@ -89,6 +89,8 @@ static const S390FeatDef s390_features[] = {
     FEAT_INIT("msa4-base", S390_FEAT_TYPE_STFL, 77, "Message-security-assist-extension-4 facility (excluding subfunctions)"),
     FEAT_INIT("edat2", S390_FEAT_TYPE_STFL, 78, "Enhanced-DAT facility 2"),
     FEAT_INIT("dfppc", S390_FEAT_TYPE_STFL, 80, "Decimal-floating-point packed-conversion facility"),
+    FEAT_INIT("ppa15", S390_FEAT_TYPE_STFL, 81, "PPA15 is installed"),
+    FEAT_INIT("bpb", S390_FEAT_TYPE_STFL, 82, "Branch prediction blocking"),
     FEAT_INIT("vx", S390_FEAT_TYPE_STFL, 129, "Vector facility"),
     FEAT_INIT("iep", S390_FEAT_TYPE_STFL, 130, "Instruction-execution-protection facility"),
     FEAT_INIT("sea_esop2", S390_FEAT_TYPE_STFL, 131, "Side-effect-access facility and Enhanced-suppression-on-protection facility 2"),
diff --git a/target/s390x/cpu_features_def.h b/target/s390x/cpu_features_def.h
index 4b6d4e9cc0..4d930871b4 100644
--- a/target/s390x/cpu_features_def.h
+++ b/target/s390x/cpu_features_def.h
@@ -80,6 +80,8 @@ typedef enum {
     S390_FEAT_MSA_EXT_4,
     S390_FEAT_EDAT_2,
     S390_FEAT_DFP_PACKED_CONVERSION,
+    S390_FEAT_PPA15,
+    S390_FEAT_BPB,
     S390_FEAT_VECTOR,
     S390_FEAT_INSTRUCTION_EXEC_PROT,
     S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2,
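The two FEAT_INIT lines above register STFL facility numbers 81 (ppa15) and 82 (bpb). Facility bits are numbered from the most-significant end of the facility list, so bit N lives in doubleword N / 64 at bit position 63 - (N % 64). A generic sketch of that bit math follows (an illustration of the convention, not QEMU's own feature-test code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Test STFL(E) facility bit 'nr' in a facility list stored as
     * big-endian doublewords, numbered from the MSB downward. */
    static bool test_facility(const uint64_t *fac_list, int nr)
    {
        return fac_list[nr / 64] & (1ULL << (63 - (nr % 64)));
    }

    int main(void)
    {
        uint64_t fac[4] = { 0 };

        fac[82 / 64] |= 1ULL << (63 - (82 % 64));      /* pretend bpb is set */
        printf("ppa15: %d\n", test_facility(fac, 81)); /* 0 */
        printf("bpb:   %d\n", test_facility(fac, 82)); /* 1 */
        return 0;
    }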
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index b24f6ada5b..0570f597ec 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -352,6 +352,8 @@ static uint16_t base_GEN14_GA1[] = {
  * support these features yet.
  */
 static uint16_t full_GEN7_GA1[] = {
+    S390_FEAT_PPA15,
+    S390_FEAT_BPB,
     S390_FEAT_SIE_F2,
     S390_FEAT_SIE_SKEY,
     S390_FEAT_SIE_GPERE,
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
index 2f17b62d3d..59a1d9869b 100644
--- a/target/s390x/helper.h
+++ b/target/s390x/helper.h
@@ -137,7 +137,7 @@ DEF_HELPER_FLAGS_4(lctlg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
 DEF_HELPER_FLAGS_4(stctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
 DEF_HELPER_FLAGS_4(stctg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
 DEF_HELPER_FLAGS_2(testblock, TCG_CALL_NO_WG, i32, env, i64)
-DEF_HELPER_FLAGS_2(tprot, TCG_CALL_NO_RWG, i32, i64, i64)
+DEF_HELPER_FLAGS_3(tprot, TCG_CALL_NO_WG, i32, env, i64, i64)
 DEF_HELPER_FLAGS_2(iske, TCG_CALL_NO_RWG_SE, i64, env, i64)
 DEF_HELPER_FLAGS_3(sske, TCG_CALL_NO_RWG, void, env, i64, i64)
 DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_NO_RWG, i32, env, i64)
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index 9b8b59f2a2..8736001156 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -58,7 +58,7 @@
     if (DEBUG_KVM) { \
         fprintf(stderr, fmt, ## __VA_ARGS__); \
     } \
-} while (0);
+} while (0)
 
 #define kvm_vm_check_mem_attr(s, attr) \
     kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)
@@ -490,6 +490,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
     }
 
+    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
+        cs->kvm_run->s.regs.bpbc = env->bpbc;
+        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC;
+    }
+
     /* Finally the prefix */
     if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
         cs->kvm_run->s.regs.prefix = env->psa;
@@ -600,6 +605,10 @@ int kvm_arch_get_registers(CPUState *cs)
         memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32);
     }
 
+    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
+        env->bpbc = cs->kvm_run->s.regs.bpbc;
+    }
+
     /* pfault parameters */
     if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
         env->pfault_token = cs->kvm_run->s.regs.pft;
@@ -2278,6 +2287,11 @@ void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
         clear_bit(S390_FEAT_CMM_NT, model->features);
     }
 
+    /* bpb needs kernel support for migration, VSIE and reset */
+    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) {
+        clear_bit(S390_FEAT_BPB, model->features);
+    }
+
     /* We emulate a zPCI bus and AEN, therefore we don't need HW support */
     if (pci_available) {
         set_bit(S390_FEAT_ZPCI, model->features);
diff --git a/target/s390x/machine.c b/target/s390x/machine.c
index b78f326d3a..84b4928755 100644
--- a/target/s390x/machine.c
+++ b/target/s390x/machine.c
@@ -194,6 +194,22 @@ const VMStateDescription vmstate_gscb = {
     }
 };
 
+static bool bpbc_needed(void *opaque)
+{
+    return s390_has_feat(S390_FEAT_BPB);
+}
+
+const VMStateDescription vmstate_bpbc = {
+    .name = "cpu/bpbc",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = bpbc_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_BOOL(env.bpbc, S390CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 const VMStateDescription vmstate_s390_cpu = {
     .name = "cpu",
     .post_load = cpu_post_load,
@@ -228,6 +244,7 @@ const VMStateDescription vmstate_s390_cpu = {
         &vmstate_riccb,
         &vmstate_exval,
         &vmstate_gscb,
+        &vmstate_bpbc,
         NULL
     },
 };
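A small cleanup rides along in the s390x kvm.c hunk above: the debug macro loses the semicolon after while (0). The do { ... } while (0) idiom only behaves as a single statement when the caller supplies the terminating semicolon; with one baked into the macro, the caller's own ';' becomes a second, empty statement and a following else no longer parses. A reduced illustration (hypothetical macro name; '## __VA_ARGS__' is the same GNU extension the original uses):

    #include <stdio.h>

    /* Correct form: no trailing semicolon inside the macro. */
    #define DPRINTF(fmt, ...) \
        do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)

    void demo(int flag)
    {
        if (flag)
            DPRINTF("flag set\n");    /* expands to exactly one statement */
        else
            DPRINTF("flag clear\n");  /* so this else still binds */
    }

Had the macro kept "while (0);", the if-branch would expand to two statements and the else above would be a syntax error.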
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index 2625d843b3..c957febc6d 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -1717,10 +1717,44 @@ uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
     return 0;
 }
 
-uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
+uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2)
 {
-    /* XXX implement */
-    return 0;
+    S390CPU *cpu = s390_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+
+    /*
+     * TODO: we currently don't handle all access protection types
+     * (including access-list and key-controlled) as well as AR mode.
+     */
+    if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) {
+        /* Fetching permitted; storing permitted */
+        return 0;
+    }
+
+    if (env->int_pgm_code == PGM_PROTECTION) {
+        /* retry if reading is possible */
+        cs->exception_index = 0;
+        if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) {
+            /* Fetching permitted; storing not permitted */
+            return 1;
+        }
+    }
+
+    switch (env->int_pgm_code) {
+    case PGM_PROTECTION:
+        /* Fetching not permitted; storing not permitted */
+        cs->exception_index = 0;
+        return 2;
+    case PGM_ADDRESSING:
+    case PGM_TRANS_SPEC:
+        /* exceptions forwarded to the guest */
+        s390_cpu_virt_mem_handle_exc(cpu, GETPC());
+        return 0;
+    }
+
+    /* Translation not available */
+    cs->exception_index = 0;
+    return 3;
 }
 
 /* insert storage key extended */
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
index ac55886792..df0b41606d 100644
--- a/target/s390x/translate.c
+++ b/target/s390x/translate.c
@@ -4532,7 +4532,7 @@ static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
 
 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
 {
-    gen_helper_tprot(cc_op, o->addr1, o->in2);
+    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
     set_cc_static(s);
     return NO_EXIT;
 }
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 3f439203ac..671d934ff4 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -50,7 +50,7 @@
 /* is_jmp field values */
 #define DISAS_UPDATE  DISAS_TARGET_0 /* cpu state was modified dynamically */
 
-typedef struct DisasContext {
+struct DisasContext {
     const XtensaConfig *config;
     TranslationBlock *tb;
     uint32_t pc;
@@ -78,7 +78,7 @@ typedef struct DisasContext {
     uint32_t *raw_arg;
     xtensa_insnbuf insnbuf;
     xtensa_insnbuf slotbuf;
-} DisasContext;
+};
 
 static TCGv_i32 cpu_pc;
 static TCGv_i32 cpu_R[16];
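The new tprot implementation above maps the write/read probe results onto the architected condition codes. Reduced to a pure function, the decision logic looks like the sketch below (the cc mapping only, leaving out the exception forwarding and exception_index bookkeeping):

    #include <stdint.h>
    #include <stdio.h>

    /* cc0: fetch and store permitted; cc1: fetch only;
     * cc2: neither; cc3: translation not available. */
    static uint32_t tprot_cc(int translated, int fetch_ok, int store_ok)
    {
        if (!translated) {
            return 3;
        }
        if (store_ok) {
            return 0;
        }
        return fetch_ok ? 1 : 2;
    }

    int main(void)
    {
        printf("%u\n", tprot_cc(1, 1, 1)); /* 0 */
        printf("%u\n", tprot_cc(1, 1, 0)); /* 1 */
        printf("%u\n", tprot_cc(1, 0, 0)); /* 2 */
        printf("%u\n", tprot_cc(0, 0, 0)); /* 3 */
        return 0;
    }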