32 files changed, 540 insertions, 93 deletions
diff --git a/backends/hostmem.c b/backends/hostmem.c index 987f6f591e..81a72ce40b 100644 --- a/backends/hostmem.c +++ b/backends/hostmem.c @@ -20,6 +20,7 @@ #include "qom/object_interfaces.h" #include "qemu/mmap-alloc.h" #include "qemu/madvise.h" +#include "hw/qdev-core.h" #ifdef CONFIG_NUMA #include <numaif.h> @@ -237,7 +238,7 @@ static void host_memory_backend_set_prealloc(Object *obj, bool value, uint64_t sz = memory_region_size(&backend->mr); if (!qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads, - backend->prealloc_context, errp)) { + backend->prealloc_context, false, errp)) { return; } backend->prealloc = true; @@ -323,6 +324,7 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp) HostMemoryBackendClass *bc = MEMORY_BACKEND_GET_CLASS(uc); void *ptr; uint64_t sz; + bool async = !phase_check(PHASE_LATE_BACKENDS_CREATED); if (!bc->alloc) { return; @@ -402,7 +404,8 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp) if (backend->prealloc && !qemu_prealloc_mem(memory_region_get_fd(&backend->mr), ptr, sz, backend->prealloc_threads, - backend->prealloc_context, errp)) { + backend->prealloc_context, + async, errp)) { return; } } diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index d4492b9460..c7b95e6068 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -183,6 +183,15 @@ Nios II CPU (since 8.2) The Nios II architecture is orphan. The ``nios2`` guest CPU support is deprecated and will be removed in a future version of QEMU. +``power5+`` and ``power7+`` CPU names (since 9.0) +''''''''''''''''''''''''''''''''''''''''''''''''' + +The character "+" in device (and thus also CPU) names is not allowed +in the QEMU object model anymore. ``power5+``, ``power5+_v2.1``, +``power7+`` and ``power7+_v2.1`` are currently still supported via +an alias, but for consistency these will get removed in a future +release, too. Use ``power5p_v2.1`` and ``power7p_v2.1`` instead. + System emulator machines ------------------------ diff --git a/hw/hyperv/hv-balloon.c b/hw/hyperv/hv-balloon.c index 0238365712..ade283335a 100644 --- a/hw/hyperv/hv-balloon.c +++ b/hw/hyperv/hv-balloon.c @@ -1477,22 +1477,7 @@ static void hv_balloon_ensure_mr(HvBalloon *balloon) balloon->mr = g_new0(MemoryRegion, 1); memory_region_init(balloon->mr, OBJECT(balloon), TYPE_HV_BALLOON, memory_region_size(hostmem_mr)); - - /* - * The VM can indicate an alignment up to 32 GiB. Memory device core can - * usually only handle/guarantee 1 GiB alignment. The user will have to - * specify a larger maxmem eventually. - * - * The memory device core will warn the user in case maxmem might have to be - * increased and will fail plugging the device if there is not sufficient - * space after alignment. - * - * TODO: we could do the alignment ourselves in a slightly bigger region. - * But this feels better, although the warning might be annoying. Maybe - * we can optimize that in the future (e.g., with such a device on the - * cmdline place/size the device memory region differently. - */ - balloon->mr->align = MAX(32 * GiB, memory_region_get_alignment(hostmem_mr)); + balloon->mr->align = memory_region_get_alignment(hostmem_mr); } static void hv_balloon_free_mr(HvBalloon *balloon) @@ -1654,6 +1639,25 @@ static MemoryRegion *hv_balloon_md_get_memory_region(MemoryDeviceState *md, return balloon->mr; } +static uint64_t hv_balloon_md_get_min_alignment(const MemoryDeviceState *md) +{ + /* + * The VM can indicate an alignment up to 32 GiB. 
Memory device core can + * usually only handle/guarantee 1 GiB alignment. The user will have to + * specify a larger maxmem eventually. + * + * The memory device core will warn the user in case maxmem might have to be + * increased and will fail plugging the device if there is not sufficient + * space after alignment. + * + * TODO: we could do the alignment ourselves in a slightly bigger region. + * But this feels better, although the warning might be annoying. Maybe + * we can optimize that in the future (e.g., with such a device on the + * cmdline place/size the device memory region differently. + */ + return 32 * GiB; +} + static void hv_balloon_md_fill_device_info(const MemoryDeviceState *md, MemoryDeviceInfo *info) { @@ -1766,5 +1770,6 @@ static void hv_balloon_class_init(ObjectClass *klass, void *data) mdc->get_memory_region = hv_balloon_md_get_memory_region; mdc->decide_memslots = hv_balloon_decide_memslots; mdc->get_memslots = hv_balloon_get_memslots; + mdc->get_min_alignment = hv_balloon_md_get_min_alignment; mdc->fill_device_info = hv_balloon_md_fill_device_info; } diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c index a1b1af26bc..e098585cda 100644 --- a/hw/mem/memory-device.c +++ b/hw/mem/memory-device.c @@ -374,6 +374,20 @@ void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms, goto out; } + /* + * We always want the memory region size to be multiples of the memory + * region alignment: for example, DIMMs with 1G+1byte size don't make + * any sense. Note that we don't check that the size is multiples + * of any additional alignment requirements the memory device might + * have when it comes to the address in physical address space. + */ + if (!QEMU_IS_ALIGNED(memory_region_size(mr), + memory_region_get_alignment(mr))) { + error_setg(errp, "backend memory size must be multiple of 0x%" + PRIx64, memory_region_get_alignment(mr)); + return; + } + if (legacy_align) { align = *legacy_align; } else { diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 5aa1ed474a..0c0fb3f1b0 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -389,9 +389,9 @@ static const TypeInfo spapr_cpu_core_type_infos[] = { DEFINE_SPAPR_CPU_CORE_TYPE("970_v2.2"), DEFINE_SPAPR_CPU_CORE_TYPE("970mp_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("970mp_v1.1"), - DEFINE_SPAPR_CPU_CORE_TYPE("power5+_v2.1"), + DEFINE_SPAPR_CPU_CORE_TYPE("power5p_v2.1"), DEFINE_SPAPR_CPU_CORE_TYPE("power7_v2.3"), - DEFINE_SPAPR_CPU_CORE_TYPE("power7+_v2.1"), + DEFINE_SPAPR_CPU_CORE_TYPE("power7p_v2.1"), DEFINE_SPAPR_CPU_CORE_TYPE("power8_v2.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power8e_v2.1"), DEFINE_SPAPR_CPU_CORE_TYPE("power8nvl_v1.0"), diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c index 34e3b89287..d607a5f9fb 100644 --- a/hw/scsi/lsi53c895a.c +++ b/hw/scsi/lsi53c895a.c @@ -1159,6 +1159,7 @@ again: lsi_script_scsi_interrupt(s, LSI_SIST0_UDC, 0); lsi_disconnect(s); trace_lsi_execute_script_stop(); + reentrancy_level--; return; } insn = read_dword(s, s->dsp); diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index 99ab989852..ffd119ebac 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -605,7 +605,7 @@ static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa, int fd = memory_region_get_fd(&vmem->memdev->mr); Error *local_err = NULL; - if (!qemu_prealloc_mem(fd, area, size, 1, NULL, &local_err)) { + if (!qemu_prealloc_mem(fd, area, size, 1, NULL, false, &local_err)) { static bool warned; /* @@ -1248,7 +1248,7 @@ static int 
virtio_mem_prealloc_range_cb(VirtIOMEM *vmem, void *arg, int fd = memory_region_get_fd(&vmem->memdev->mr); Error *local_err = NULL; - if (!qemu_prealloc_mem(fd, area, size, 1, NULL, &local_err)) { + if (!qemu_prealloc_mem(fd, area, size, 1, NULL, false, &local_err)) { error_report_err(local_err); return -ENOMEM; } diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index d47536eadb..9228e96c87 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -1084,6 +1084,11 @@ typedef enum MachineInitPhase { PHASE_ACCEL_CREATED, /* + * Late backend objects have been created and initialized. + */ + PHASE_LATE_BACKENDS_CREATED, + + /* * machine_class->init has been called, thus creating any embedded * devices and validating machine properties. Devices created at * this time are considered to be cold-plugged. diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h index c9692cc314..7d359dabc4 100644 --- a/include/qemu/osdep.h +++ b/include/qemu/osdep.h @@ -680,6 +680,8 @@ typedef struct ThreadContext ThreadContext; * @area: start address of the are to preallocate * @sz: the size of the area to preallocate * @max_threads: maximum number of threads to use + * @tc: prealloc context threads pointer, NULL if not in use + * @async: request asynchronous preallocation, requires @tc * @errp: returns an error if this function fails * * Preallocate memory (populate/prefault page tables writable) for the virtual @@ -687,10 +689,24 @@ typedef struct ThreadContext ThreadContext; * each page in the area was faulted in writable at least once, for example, * after allocating file blocks for mapped files. * + * When setting @async, allocation might be performed asynchronously. + * qemu_finish_async_prealloc_mem() must be called to finish any asynchronous + * preallocation. + * * Return: true on success, else false setting @errp with error. */ bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, - ThreadContext *tc, Error **errp); + ThreadContext *tc, bool async, Error **errp); + +/** + * qemu_finish_async_prealloc_mem: + * @errp: returns an error if this function fails + * + * Finish all outstanding asynchronous memory preallocation. + * + * Return: true on success, else false setting @errp with error. 
+ */ +bool qemu_finish_async_prealloc_mem(Error **errp); /** * qemu_get_pid_name: diff --git a/meson.build b/meson.build index b5d6dc94a8..e5d6f2d057 100644 --- a/meson.build +++ b/meson.build @@ -2384,6 +2384,22 @@ else endif config_host_data.set('CONFIG_ASAN_IFACE_FIBER', have_asan_fiber) +have_inotify_init = cc.has_header_symbol('sys/inotify.h', 'inotify_init') +have_inotify_init1 = cc.has_header_symbol('sys/inotify.h', 'inotify_init1') +inotify = not_found +if (have_inotify_init or have_inotify_init1) and host_os == 'freebsd' + # libinotify-kqueue + inotify = cc.find_library('inotify') + if have_inotify_init + have_inotify_init = inotify.found() + endif + if have_inotify_init1 + have_inotify_init1 = inotify.found() + endif +endif +config_host_data.set('CONFIG_INOTIFY', have_inotify_init) +config_host_data.set('CONFIG_INOTIFY1', have_inotify_init1) + # has_header_symbol config_host_data.set('CONFIG_BLKZONED', cc.has_header_symbol('linux/blkzoned.h', 'BLKOPENZONE')) @@ -2400,10 +2416,6 @@ config_host_data.set('CONFIG_FIEMAP', config_host_data.set('CONFIG_GETRANDOM', cc.has_function('getrandom') and cc.has_header_symbol('sys/random.h', 'GRND_NONBLOCK')) -config_host_data.set('CONFIG_INOTIFY', - cc.has_header_symbol('sys/inotify.h', 'inotify_init')) -config_host_data.set('CONFIG_INOTIFY1', - cc.has_header_symbol('sys/inotify.h', 'inotify_init1')) config_host_data.set('CONFIG_PRCTL_PR_SET_TIMERSLACK', cc.has_header_symbol('sys/prctl.h', 'PR_SET_TIMERSLACK')) config_host_data.set('CONFIG_RTNETLINK', @@ -4407,6 +4419,9 @@ summary_info += {'libudev': libudev} summary_info += {'FUSE lseek': fuse_lseek.found()} summary_info += {'selinux': selinux} summary_info += {'libdw': libdw} +if host_os == 'freebsd' + summary_info += {'libinotify-kqueue': inotify} +endif summary(summary_info, bool_yn: true, section: 'Dependencies') if host_arch == 'unknown' diff --git a/qom/object.c b/qom/object.c index 654e1afaf2..2c4c64d2b6 100644 --- a/qom/object.c +++ b/qom/object.c @@ -160,10 +160,6 @@ static bool type_name_is_valid(const char *name) /* Allow some legacy names with '+' in it for compatibility reasons */ if (name[plen] == '+') { - if (plen == 6 && g_str_has_prefix(name, "power")) { - /* Allow "power5+" and "power7+" CPU names*/ - return true; - } if (plen >= 17 && g_str_has_prefix(name, "Sun-UltraSparc-I")) { /* Allow "Sun-UltraSparc-IV+" and "Sun-UltraSparc-IIIi+" */ return true; diff --git a/system/vl.c b/system/vl.c index bb959cbc44..2a0bd08ff1 100644 --- a/system/vl.c +++ b/system/vl.c @@ -2013,6 +2013,14 @@ static void qemu_create_late_backends(void) object_option_foreach_add(object_create_late); + /* + * Wait for any outstanding memory prealloc from created memory + * backends to complete. + */ + if (!qemu_finish_async_prealloc_mem(&error_fatal)) { + exit(1); + } + if (tpm_init() < 0) { exit(1); } @@ -3699,6 +3707,7 @@ void qemu_init(int argc, char **argv) * over memory-backend-file objects). 
*/ qemu_create_late_backends(); + phase_advance(PHASE_LATE_BACKENDS_CREATED); /* * Note: creates a QOM object, must run only after global and diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c index 7dbb47de64..36e465b390 100644 --- a/target/ppc/cpu-models.c +++ b/target/ppc/cpu-models.c @@ -716,11 +716,11 @@ "PowerPC 970MP v1.0") POWERPC_DEF("970mp_v1.1", CPU_POWERPC_970MP_v11, 970, "PowerPC 970MP v1.1") - POWERPC_DEF("power5+_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P, + POWERPC_DEF("power5p_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P, "POWER5+ v2.1") POWERPC_DEF("power7_v2.3", CPU_POWERPC_POWER7_v23, POWER7, "POWER7 v2.3") - POWERPC_DEF("power7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7, + POWERPC_DEF("power7p_v2.1", CPU_POWERPC_POWER7P_v21, POWER7, "POWER7+ v2.1") POWERPC_DEF("power8e_v2.1", CPU_POWERPC_POWER8E_v21, POWER8, "POWER8E v2.1") @@ -902,10 +902,12 @@ PowerPCCPUAlias ppc_cpu_aliases[] = { { "970", "970_v2.2" }, { "970fx", "970fx_v3.1" }, { "970mp", "970mp_v1.1" }, - { "power5+", "power5+_v2.1" }, + { "power5+", "power5p_v2.1" }, + { "power5+_v2.1", "power5p_v2.1" }, { "power5gs", "power5+_v2.1" }, { "power7", "power7_v2.3" }, - { "power7+", "power7+_v2.1" }, + { "power7+", "power7p_v2.1" }, + { "power7+_v2.1", "power7p_v2.1" }, { "power8e", "power8e_v2.1" }, { "power8", "power8_v2.0" }, { "power8nvl", "power8nvl_v1.0" }, diff --git a/target/s390x/cpu-dump.c b/target/s390x/cpu-dump.c index ffa9e94d84..69cc9f7746 100644 --- a/target/s390x/cpu-dump.c +++ b/target/s390x/cpu-dump.c @@ -27,8 +27,7 @@ void s390_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); int i; qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64, diff --git a/target/s390x/gdbstub.c b/target/s390x/gdbstub.c index 6fbfd41bc8..f02fa316e5 100644 --- a/target/s390x/gdbstub.c +++ b/target/s390x/gdbstub.c @@ -30,8 +30,7 @@ int s390_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); switch (n) { case S390_PSWM_REGNUM: @@ -46,8 +45,7 @@ int s390_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int s390_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); target_ulong tmpl = ldtul_p(mem_buf); switch (n) { diff --git a/target/s390x/helper.c b/target/s390x/helper.c index d76c06381b..00d5d403f3 100644 --- a/target/s390x/helper.c +++ b/target/s390x/helper.c @@ -139,8 +139,7 @@ void do_restart_interrupt(CPUS390XState *env) void s390_cpu_recompute_watchpoints(CPUState *cs) { const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS; - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); /* We are called when the watchpoints have changed. First remove them all. 
*/ diff --git a/target/s390x/helper.h b/target/s390x/helper.h index 05102578fc..cc1c20e9e3 100644 --- a/target/s390x/helper.h +++ b/target/s390x/helper.h @@ -88,7 +88,10 @@ DEF_HELPER_FLAGS_3(tcxb, TCG_CALL_NO_RWG_SE, i32, env, i128, i64) DEF_HELPER_FLAGS_2(sqeb, TCG_CALL_NO_WG, i64, env, i64) DEF_HELPER_FLAGS_2(sqdb, TCG_CALL_NO_WG, i64, env, i64) DEF_HELPER_FLAGS_2(sqxb, TCG_CALL_NO_WG, i128, env, i128) +DEF_HELPER_3(cvb, void, env, i32, i64) +DEF_HELPER_FLAGS_2(cvbg, TCG_CALL_NO_WG, i64, env, i128) DEF_HELPER_FLAGS_1(cvd, TCG_CALL_NO_RWG_SE, i64, s32) +DEF_HELPER_FLAGS_1(cvdg, TCG_CALL_NO_RWG_SE, i128, s64) DEF_HELPER_FLAGS_4(pack, TCG_CALL_NO_WG, void, env, i32, i64, i64) DEF_HELPER_FLAGS_4(pka, TCG_CALL_NO_WG, void, env, i64, i64, i32) DEF_HELPER_FLAGS_4(pku, TCG_CALL_NO_WG, void, env, i64, i64, i32) diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c index 888d6c1a1c..4ce809c5d4 100644 --- a/target/s390x/kvm/kvm.c +++ b/target/s390x/kvm/kvm.c @@ -474,8 +474,7 @@ static int can_sync_regs(CPUState *cs, int regs) int kvm_arch_put_registers(CPUState *cs, int level) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); struct kvm_fpu fpu = {}; int r; int i; @@ -601,8 +600,7 @@ int kvm_arch_put_registers(CPUState *cs, int level) int kvm_arch_get_registers(CPUState *cs) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); struct kvm_fpu fpu; int i, r; diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c index b875bf14e5..f1c33f7967 100644 --- a/target/s390x/tcg/excp_helper.c +++ b/target/s390x/tcg/excp_helper.c @@ -90,10 +90,7 @@ void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc) static G_NORETURN void do_unaligned_access(CPUState *cs, uintptr_t retaddr) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; - - tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr); + tcg_s390_program_interrupt(cpu_env(cs), PGM_SPECIFICATION, retaddr); } #if defined(CONFIG_USER_ONLY) @@ -146,8 +143,7 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); target_ulong vaddr, raddr; uint64_t asc, tec; int prot, excp; @@ -600,8 +596,7 @@ bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request) void s390x_cpu_debug_excp_handler(CPUState *cs) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); CPUWatchpoint *wp_hit = cs->watchpoint_hit; if (wp_hit && wp_hit->flags & BP_CPU) { diff --git a/target/s390x/tcg/insn-data.h.inc b/target/s390x/tcg/insn-data.h.inc index 2f07f39d9c..e7d61cdec2 100644 --- a/target/s390x/tcg/insn-data.h.inc +++ b/target/s390x/tcg/insn-data.h.inc @@ -293,9 +293,14 @@ D(0xec73, CLFIT, RIE_a, GIE, r1_32u, i2_16u, 0, 0, ct, 0, 1) D(0xec71, CLGIT, RIE_a, GIE, r1_o, i2_16u, 0, 0, ct, 0, 1) +/* CONVERT TO BINARY */ + C(0x4f00, CVB, RX_a, Z, la2, 0, 0, 0, cvb, 0) + C(0xe306, CVBY, RXY_a, LD, la2, 0, 0, 0, cvb, 0) + C(0xe30e, CVBG, RXY_a, Z, la2, 0, r1, 0, cvbg, 0) /* CONVERT TO DECIMAL */ C(0x4e00, CVD, RX_a, Z, r1_o, a2, 0, 0, cvd, 0) C(0xe326, CVDY, RXY_a, LD, r1_o, a2, 0, 0, cvd, 0) + C(0xe32e, CVDG, RXY_a, Z, r1_o, a2, 0, 0, cvdg, 0) /* CONVERT TO FIXED */ F(0xb398, CFEBR, RRF_e, Z, 0, e2, new, r1_32, cfeb, 0, IF_BFP) F(0xb399, CFDBR, RRF_e, Z, 0, f2, new, r1_32, cfdb, 0, IF_BFP) diff --git 
a/target/s390x/tcg/int_helper.c b/target/s390x/tcg/int_helper.c index eb8e6dd1b5..2af970f2c8 100644 --- a/target/s390x/tcg/int_helper.c +++ b/target/s390x/tcg/int_helper.c @@ -25,6 +25,7 @@ #include "exec/exec-all.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" /* #define DEBUG_HELPER */ #ifdef DEBUG_HELPER @@ -98,6 +99,81 @@ Int128 HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t b) tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); } +void HELPER(cvb)(CPUS390XState *env, uint32_t r1, uint64_t dec) +{ + int64_t pow10 = 1, bin = 0; + int digit, sign; + + sign = dec & 0xf; + if (sign < 0xa) { + tcg_s390_data_exception(env, 0, GETPC()); + } + dec >>= 4; + + while (dec) { + digit = dec & 0xf; + if (digit > 0x9) { + tcg_s390_data_exception(env, 0, GETPC()); + } + dec >>= 4; + bin += digit * pow10; + pow10 *= 10; + } + + if (sign == 0xb || sign == 0xd) { + bin = -bin; + } + + /* R1 is updated even on fixed-point-divide exception. */ + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (uint32_t)bin; + if (bin != (int32_t)bin) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } +} + +uint64_t HELPER(cvbg)(CPUS390XState *env, Int128 dec) +{ + uint64_t dec64[] = {int128_getlo(dec), int128_gethi(dec)}; + int64_t bin = 0, pow10, tmp; + int digit, i, sign; + + sign = dec64[0] & 0xf; + if (sign < 0xa) { + tcg_s390_data_exception(env, 0, GETPC()); + } + dec64[0] >>= 4; + pow10 = (sign == 0xb || sign == 0xd) ? -1 : 1; + + for (i = 1; i < 20; i++) { + digit = dec64[i >> 4] & 0xf; + if (digit > 0x9) { + tcg_s390_data_exception(env, 0, GETPC()); + } + dec64[i >> 4] >>= 4; + /* + * Prepend the next digit and check for overflow. The multiplication + * cannot overflow, since, conveniently, the int64_t limits are + * approximately +-9.2E+18. If bin is zero, the addition cannot + * overflow. Otherwise bin is known to have the same sign as the rhs + * addend, in which case overflow happens if and only if the result + * has a different sign. + */ + tmp = bin + pow10 * digit; + if (bin && ((tmp ^ bin) < 0)) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } + bin = tmp; + pow10 *= 10; + } + + g_assert(!dec64[0]); + if (dec64[1]) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } + + return bin; +} + uint64_t HELPER(cvd)(int32_t reg) { /* positive 0 */ @@ -118,6 +194,27 @@ uint64_t HELPER(cvd)(int32_t reg) return dec; } +Int128 HELPER(cvdg)(int64_t reg) +{ + /* positive 0 */ + Int128 dec = int128_make64(0x0c); + Int128 bin = int128_makes64(reg); + Int128 base = int128_make64(10); + int shift; + + if (!int128_nonneg(bin)) { + bin = int128_neg(bin); + dec = int128_make64(0x0d); + } + + for (shift = 4; (shift < 128) && int128_nz(bin); shift += 4) { + dec = int128_or(dec, int128_lshift(int128_remu(bin, base), shift)); + bin = int128_divu(bin, base); + } + + return dec; +} + uint64_t HELPER(popcnt)(uint64_t val) { /* Note that we don't fold past bytes. 
*/ diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c index 89b5268fd4..8764846ce8 100644 --- a/target/s390x/tcg/misc_helper.c +++ b/target/s390x/tcg/misc_helper.c @@ -214,9 +214,7 @@ void HELPER(sckc)(CPUS390XState *env, uint64_t ckc) void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque) { - S390CPU *cpu = S390_CPU(cs); - - update_ckc_timer(&cpu->env); + update_ckc_timer(cpu_env(cs)); } /* Set Clock */ diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index 3d6a9f44a6..0d0c672c95 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -2197,6 +2197,22 @@ static DisasJumpType op_csp(DisasContext *s, DisasOps *o) } #endif +static DisasJumpType op_cvb(DisasContext *s, DisasOps *o) +{ + TCGv_i64 t = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TEUQ); + gen_helper_cvb(tcg_env, tcg_constant_i32(get_field(s, r1)), t); + return DISAS_NEXT; +} + +static DisasJumpType op_cvbg(DisasContext *s, DisasOps *o) +{ + TCGv_i128 t = tcg_temp_new_i128(); + tcg_gen_qemu_ld_i128(t, o->addr1, get_mem_index(s), MO_TE | MO_128); + gen_helper_cvbg(o->out, tcg_env, t); + return DISAS_NEXT; +} + static DisasJumpType op_cvd(DisasContext *s, DisasOps *o) { TCGv_i64 t1 = tcg_temp_new_i64(); @@ -2207,6 +2223,14 @@ static DisasJumpType op_cvd(DisasContext *s, DisasOps *o) return DISAS_NEXT; } +static DisasJumpType op_cvdg(DisasContext *s, DisasOps *o) +{ + TCGv_i128 t = tcg_temp_new_i128(); + gen_helper_cvdg(t, o->in1); + tcg_gen_qemu_st_i128(t, o->in2, get_mem_index(s), MO_TE | MO_128); + return DISAS_NEXT; +} + static DisasJumpType op_ct(DisasContext *s, DisasOps *o) { int m3 = get_field(s, m3); @@ -6532,8 +6556,7 @@ void s390x_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); int cc_op = data[1]; env->psw.addr = data[0]; diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target index 30994dcf9c..e2aba2ec27 100644 --- a/tests/tcg/s390x/Makefile.target +++ b/tests/tcg/s390x/Makefile.target @@ -45,6 +45,8 @@ TESTS+=clc TESTS+=laalg TESTS+=add-logical-with-carry TESTS+=lae +TESTS+=cvd +TESTS+=cvb cdsg: CFLAGS+=-pthread cdsg: LDFLAGS+=-pthread diff --git a/tests/tcg/s390x/cvb.c b/tests/tcg/s390x/cvb.c new file mode 100644 index 0000000000..e1735f6b81 --- /dev/null +++ b/tests/tcg/s390x/cvb.c @@ -0,0 +1,102 @@ +/* + * Test the CONVERT TO BINARY instruction. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include <assert.h> +#include <signal.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +static int signum; + +static void signal_handler(int n) +{ + signum = n; +} + +#define FAIL 0x1234567887654321 +#define OK32(x) (0x1234567800000000 | (uint32_t)(x)) + +static int64_t cvb(uint64_t x) +{ + int64_t ret = FAIL; + + signum = -1; + asm("cvb %[ret],%[x]" : [ret] "+r" (ret) : [x] "R" (x)); + + return ret; +} + +static int64_t cvby(uint64_t x) +{ + int64_t ret = FAIL; + + signum = -1; + asm("cvby %[ret],%[x]" : [ret] "+r" (ret) : [x] "T" (x)); + + return ret; +} + +static int64_t cvbg(__uint128_t x) +{ + int64_t ret = FAIL; + + signum = -1; + asm("cvbg %[ret],%[x]" : [ret] "+r" (ret) : [x] "T" (x)); + + return ret; +} + +int main(void) +{ + __uint128_t m = (((__uint128_t)0x9223372036854775) << 16) | 0x8070; + struct sigaction act; + int err; + + memset(&act, 0, sizeof(act)); + act.sa_handler = signal_handler; + err = sigaction(SIGFPE, &act, NULL); + assert(err == 0); + err = sigaction(SIGILL, &act, NULL); + assert(err == 0); + + assert(cvb(0xc) == OK32(0) && signum == -1); + assert(cvb(0x1c) == OK32(1) && signum == -1); + assert(cvb(0x25594c) == OK32(25594) && signum == -1); + assert(cvb(0x1d) == OK32(-1) && signum == -1); + assert(cvb(0x2147483647c) == OK32(0x7fffffff) && signum == -1); + assert(cvb(0x2147483648d) == OK32(-0x80000000) && signum == -1); + assert(cvb(0x7) == FAIL && signum == SIGILL); + assert(cvb(0x2147483648c) == OK32(0x80000000) && signum == SIGFPE); + assert(cvb(0x3000000000c) == OK32(0xb2d05e00) && signum == SIGFPE); + assert(cvb(0x2147483649d) == OK32(0x7fffffff) && signum == SIGFPE); + assert(cvb(0x3000000000d) == OK32(0x4d2fa200) && signum == SIGFPE); + + assert(cvby(0xc) == OK32(0)); + assert(cvby(0x1c) == OK32(1)); + assert(cvby(0x25594c) == OK32(25594)); + assert(cvby(0x1d) == OK32(-1)); + assert(cvby(0x2147483647c) == OK32(0x7fffffff)); + assert(cvby(0x2147483648d) == OK32(-0x80000000)); + assert(cvby(0x7) == FAIL && signum == SIGILL); + assert(cvby(0x2147483648c) == OK32(0x80000000) && signum == SIGFPE); + assert(cvby(0x3000000000c) == OK32(0xb2d05e00) && signum == SIGFPE); + assert(cvby(0x2147483649d) == OK32(0x7fffffff) && signum == SIGFPE); + assert(cvby(0x3000000000d) == OK32(0x4d2fa200) && signum == SIGFPE); + + assert(cvbg(0xc) == 0); + assert(cvbg(0x1c) == 1); + assert(cvbg(0x25594c) == 25594); + assert(cvbg(0x1d) == -1); + assert(cvbg(m + 0xc) == 0x7fffffffffffffff); + assert(cvbg(m + 0x1d) == -0x8000000000000000); + assert(cvbg(0x7) == FAIL && signum == SIGILL); + assert(cvbg(m + 0x1c) == FAIL && signum == SIGFPE); + assert(cvbg(m + 0x2d) == FAIL && signum == SIGFPE); + assert(cvbg(((__uint128_t)1 << 80) + 0xc) == FAIL && signum == SIGFPE); + assert(cvbg(((__uint128_t)1 << 80) + 0xd) == FAIL && signum == SIGFPE); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/s390x/cvd.c b/tests/tcg/s390x/cvd.c new file mode 100644 index 0000000000..d776688985 --- /dev/null +++ b/tests/tcg/s390x/cvd.c @@ -0,0 +1,63 @@ +/* + * Test the CONVERT TO DECIMAL instruction. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include <assert.h> +#include <stdint.h> +#include <stdlib.h> + +static uint64_t cvd(int32_t x) +{ + uint64_t ret; + + asm("cvd %[x],%[ret]" : [ret] "=R" (ret) : [x] "r" (x)); + + return ret; +} + +static uint64_t cvdy(int32_t x) +{ + uint64_t ret; + + asm("cvdy %[x],%[ret]" : [ret] "=T" (ret) : [x] "r" (x)); + + return ret; +} + +static __uint128_t cvdg(int64_t x) +{ + __uint128_t ret; + + asm("cvdg %[x],%[ret]" : [ret] "=T" (ret) : [x] "r" (x)); + + return ret; +} + +int main(void) +{ + __uint128_t m = (((__uint128_t)0x9223372036854775) << 16) | 0x8070; + + assert(cvd(0) == 0xc); + assert(cvd(1) == 0x1c); + assert(cvd(25594) == 0x25594c); + assert(cvd(-1) == 0x1d); + assert(cvd(0x7fffffff) == 0x2147483647c); + assert(cvd(-0x80000000) == 0x2147483648d); + + assert(cvdy(0) == 0xc); + assert(cvdy(1) == 0x1c); + assert(cvdy(25594) == 0x25594c); + assert(cvdy(-1) == 0x1d); + assert(cvdy(0x7fffffff) == 0x2147483647c); + assert(cvdy(-0x80000000) == 0x2147483648d); + + assert(cvdg(0) == 0xc); + assert(cvdg(1) == 0x1c); + assert(cvdg(25594) == 0x25594c); + assert(cvdg(-1) == 0x1d); + assert(cvdg(0x7fffffffffffffff) == (m + 0xc)); + assert(cvdg(-0x8000000000000000) == (m + 0x1d)); + + return EXIT_SUCCESS; +} diff --git a/tests/unit/test-util-filemonitor.c b/tests/unit/test-util-filemonitor.c index a22de27595..02e67fc96a 100644 --- a/tests/unit/test-util-filemonitor.c +++ b/tests/unit/test-util-filemonitor.c @@ -360,6 +360,14 @@ test_file_monitor_events(void) { .type = QFILE_MONITOR_TEST_OP_EVENT, .filesrc = "one.txt", .watchid = &watch4, .eventid = QFILE_MONITOR_EVENT_DELETED }, +#ifdef __FreeBSD__ + { .type = QFILE_MONITOR_TEST_OP_EVENT, + .filesrc = "two.txt", .watchid = &watch0, + .eventid = QFILE_MONITOR_EVENT_DELETED }, + { .type = QFILE_MONITOR_TEST_OP_EVENT, + .filesrc = "two.txt", .watchid = &watch2, + .eventid = QFILE_MONITOR_EVENT_DELETED }, +#endif { .type = QFILE_MONITOR_TEST_OP_EVENT, .filesrc = "two.txt", .watchid = &watch0, .eventid = QFILE_MONITOR_EVENT_CREATED }, diff --git a/tests/vm/basevm.py b/tests/vm/basevm.py index 61725b8325..c0d62c0803 100644 --- a/tests/vm/basevm.py +++ b/tests/vm/basevm.py @@ -423,6 +423,8 @@ class BaseVM(object): def console_sshd_config(self, prompt): self.console_wait(prompt) self.console_send("echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config\n") + self.console_wait(prompt) + self.console_send("echo 'UseDNS no' >> /etc/ssh/sshd_config\n") for var in self.envvars: self.console_wait(prompt) self.console_send("echo 'AcceptEnv %s' >> /etc/ssh/sshd_config\n" % var) diff --git a/tests/vm/freebsd b/tests/vm/freebsd index b581bd17fb..1247f40a38 100755 --- a/tests/vm/freebsd +++ b/tests/vm/freebsd @@ -108,6 +108,7 @@ class FreeBSDVM(basevm.BaseVM): prompt = "root@freebsd:~ #" self.console_ssh_init(prompt, "root", self._config["root_pass"]) self.console_sshd_config(prompt) + self.console_wait_send(prompt, "service sshd reload\n") # setup virtio-blk #1 (tarfile) self.console_wait(prompt) diff --git a/util/meson.build b/util/meson.build index af3bf5692d..0ef9886be0 100644 --- a/util/meson.build +++ b/util/meson.build @@ -104,7 +104,11 @@ if have_block util_ss.add(files('throttle.c')) util_ss.add(files('timed-average.c')) if config_host_data.get('CONFIG_INOTIFY1') - util_ss.add(files('filemonitor-inotify.c')) + freebsd_dep = [] + if host_os == 'freebsd' + freebsd_dep = inotify + endif + util_ss.add(files('filemonitor-inotify.c'), freebsd_dep) else util_ss.add(files('filemonitor-stub.c')) endif diff --git 
a/util/oslib-posix.c b/util/oslib-posix.c index 7c297003b9..3c379f96c2 100644 --- a/util/oslib-posix.c +++ b/util/oslib-posix.c @@ -42,6 +42,7 @@ #include "qemu/cutils.h" #include "qemu/units.h" #include "qemu/thread-context.h" +#include "qemu/main-loop.h" #ifdef CONFIG_LINUX #include <sys/syscall.h> @@ -63,11 +64,15 @@ struct MemsetThread; +static QLIST_HEAD(, MemsetContext) memset_contexts = + QLIST_HEAD_INITIALIZER(memset_contexts); + typedef struct MemsetContext { bool all_threads_created; bool any_thread_failed; struct MemsetThread *threads; int num_threads; + QLIST_ENTRY(MemsetContext) next; } MemsetContext; struct MemsetThread { @@ -412,19 +417,44 @@ static inline int get_memset_num_threads(size_t hpagesize, size_t numpages, return ret; } +static int wait_and_free_mem_prealloc_context(MemsetContext *context) +{ + int i, ret = 0, tmp; + + for (i = 0; i < context->num_threads; i++) { + tmp = (uintptr_t)qemu_thread_join(&context->threads[i].pgthread); + + if (tmp) { + ret = tmp; + } + } + g_free(context->threads); + g_free(context); + return ret; +} + static int touch_all_pages(char *area, size_t hpagesize, size_t numpages, - int max_threads, ThreadContext *tc, + int max_threads, ThreadContext *tc, bool async, bool use_madv_populate_write) { static gsize initialized = 0; - MemsetContext context = { - .num_threads = get_memset_num_threads(hpagesize, numpages, max_threads), - }; + MemsetContext *context = g_malloc0(sizeof(MemsetContext)); size_t numpages_per_thread, leftover; void *(*touch_fn)(void *); - int ret = 0, i = 0; + int ret, i = 0; char *addr = area; + /* + * Asynchronous preallocation is only allowed when using MADV_POPULATE_WRITE + * and prealloc context for thread placement. + */ + if (!use_madv_populate_write || !tc) { + async = false; + } + + context->num_threads = + get_memset_num_threads(hpagesize, numpages, max_threads); + if (g_once_init_enter(&initialized)) { qemu_mutex_init(&page_mutex); qemu_cond_init(&page_cond); @@ -432,8 +462,11 @@ static int touch_all_pages(char *area, size_t hpagesize, size_t numpages, } if (use_madv_populate_write) { - /* Avoid creating a single thread for MADV_POPULATE_WRITE */ - if (context.num_threads == 1) { + /* + * Avoid creating a single thread for MADV_POPULATE_WRITE when + * preallocating synchronously. 
+ */ + if (context->num_threads == 1 && !async) { if (qemu_madvise(area, hpagesize * numpages, QEMU_MADV_POPULATE_WRITE)) { return -errno; @@ -445,50 +478,86 @@ static int touch_all_pages(char *area, size_t hpagesize, size_t numpages, touch_fn = do_touch_pages; } - context.threads = g_new0(MemsetThread, context.num_threads); - numpages_per_thread = numpages / context.num_threads; - leftover = numpages % context.num_threads; - for (i = 0; i < context.num_threads; i++) { - context.threads[i].addr = addr; - context.threads[i].numpages = numpages_per_thread + (i < leftover); - context.threads[i].hpagesize = hpagesize; - context.threads[i].context = &context; + context->threads = g_new0(MemsetThread, context->num_threads); + numpages_per_thread = numpages / context->num_threads; + leftover = numpages % context->num_threads; + for (i = 0; i < context->num_threads; i++) { + context->threads[i].addr = addr; + context->threads[i].numpages = numpages_per_thread + (i < leftover); + context->threads[i].hpagesize = hpagesize; + context->threads[i].context = context; if (tc) { - thread_context_create_thread(tc, &context.threads[i].pgthread, + thread_context_create_thread(tc, &context->threads[i].pgthread, "touch_pages", - touch_fn, &context.threads[i], + touch_fn, &context->threads[i], QEMU_THREAD_JOINABLE); } else { - qemu_thread_create(&context.threads[i].pgthread, "touch_pages", - touch_fn, &context.threads[i], + qemu_thread_create(&context->threads[i].pgthread, "touch_pages", + touch_fn, &context->threads[i], QEMU_THREAD_JOINABLE); } - addr += context.threads[i].numpages * hpagesize; + addr += context->threads[i].numpages * hpagesize; + } + + if (async) { + /* + * async requests currently require the BQL. Add it to the list and kick + * preallocation off during qemu_finish_async_prealloc_mem(). + */ + assert(bql_locked()); + QLIST_INSERT_HEAD(&memset_contexts, context, next); + return 0; } if (!use_madv_populate_write) { - sigbus_memset_context = &context; + sigbus_memset_context = context; } qemu_mutex_lock(&page_mutex); - context.all_threads_created = true; + context->all_threads_created = true; qemu_cond_broadcast(&page_cond); qemu_mutex_unlock(&page_mutex); - for (i = 0; i < context.num_threads; i++) { - int tmp = (uintptr_t)qemu_thread_join(&context.threads[i].pgthread); + ret = wait_and_free_mem_prealloc_context(context); + if (!use_madv_populate_write) { + sigbus_memset_context = NULL; + } + return ret; +} + +bool qemu_finish_async_prealloc_mem(Error **errp) +{ + int ret = 0, tmp; + MemsetContext *context, *next_context; + + /* Waiting for preallocation requires the BQL. 
*/ + assert(bql_locked()); + if (QLIST_EMPTY(&memset_contexts)) { + return true; + } + + qemu_mutex_lock(&page_mutex); + QLIST_FOREACH(context, &memset_contexts, next) { + context->all_threads_created = true; + } + qemu_cond_broadcast(&page_cond); + qemu_mutex_unlock(&page_mutex); + + QLIST_FOREACH_SAFE(context, &memset_contexts, next, next_context) { + QLIST_REMOVE(context, next); + tmp = wait_and_free_mem_prealloc_context(context); if (tmp) { ret = tmp; } } - if (!use_madv_populate_write) { - sigbus_memset_context = NULL; + if (ret) { + error_setg_errno(errp, -ret, + "qemu_prealloc_mem: preallocating memory failed"); + return false; } - g_free(context.threads); - - return ret; + return true; } static bool madv_populate_write_possible(char *area, size_t pagesize) @@ -498,7 +567,7 @@ static bool madv_populate_write_possible(char *area, size_t pagesize) } bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, - ThreadContext *tc, Error **errp) + ThreadContext *tc, bool async, Error **errp) { static gsize initialized; int ret; @@ -540,7 +609,7 @@ bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, } /* touch pages simultaneously */ - ret = touch_all_pages(area, hpagesize, numpages, max_threads, tc, + ret = touch_all_pages(area, hpagesize, numpages, max_threads, tc, async, use_madv_populate_write); if (ret) { error_setg_errno(errp, -ret, diff --git a/util/oslib-win32.c b/util/oslib-win32.c index c4a5f05a49..b623830d62 100644 --- a/util/oslib-win32.c +++ b/util/oslib-win32.c @@ -265,7 +265,7 @@ int getpagesize(void) } bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, - ThreadContext *tc, Error **errp) + ThreadContext *tc, bool async, Error **errp) { int i; size_t pagesize = qemu_real_host_page_size(); @@ -278,6 +278,12 @@ bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, return true; } +bool qemu_finish_async_prealloc_mem(Error **errp) +{ + /* async prealloc not supported, there is nothing to finish */ + return true; +} + char *qemu_get_pid_name(pid_t pid) { /* XXX Implement me */ |