109 files changed, 2568 insertions, 1079 deletions
diff --git a/.gitmodules b/.gitmodules index ca323b4d87..5b0c212622 100644 --- a/.gitmodules +++ b/.gitmodules @@ -34,3 +34,6 @@ [submodule "roms/skiboot"] path = roms/skiboot url = git://git.qemu.org/skiboot.git +[submodule "roms/QemuMacDrivers"] + path = roms/QemuMacDrivers + url = git://git.qemu.org/QemuMacDrivers.git diff --git a/Makefile b/Makefile index 31d41a7eae..c830d7a46c 100644 --- a/Makefile +++ b/Makefile @@ -552,7 +552,8 @@ multiboot.bin linuxboot.bin linuxboot_dma.bin kvmvapic.bin \ s390-ccw.img \ spapr-rtas.bin slof.bin skiboot.lid \ palcode-clipper \ -u-boot.e500 +u-boot.e500 \ +qemu_vga.ndrv else BLOBS= endif diff --git a/configure b/configure index 39e727ef7e..3df85fd77a 100755 --- a/configure +++ b/configure @@ -6101,12 +6101,14 @@ case "$target_name" in ppc64) TARGET_BASE_ARCH=ppc TARGET_ABI_DIR=ppc + mttcg=yes gdb_xml_files="power64-core.xml power-fpu.xml power-altivec.xml power-spe.xml power-vsx.xml" ;; ppc64le) TARGET_ARCH=ppc64 TARGET_BASE_ARCH=ppc TARGET_ABI_DIR=ppc + mttcg=yes gdb_xml_files="power64-core.xml power-fpu.xml power-altivec.xml power-spe.xml power-vsx.xml" ;; ppc64abi32) diff --git a/cpus.c b/cpus.c index 740b8dc3f8..516e5cbac1 100644 --- a/cpus.c +++ b/cpus.c @@ -50,6 +50,7 @@ #include "qapi-event.h" #include "hw/nmi.h" #include "sysemu/replay.h" +#include "hw/boards.h" #ifdef CONFIG_LINUX @@ -1483,6 +1484,12 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) /* Ignore everything else? */ break; } + } else if (cpu->unplug) { + qemu_tcg_destroy_vcpu(cpu); + cpu->created = false; + qemu_cond_signal(&qemu_cpu_cond); + qemu_mutex_unlock_iothread(); + return NULL; } atomic_mb_set(&cpu->exit_request, 0); @@ -1859,6 +1866,8 @@ void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg) CpuInfoList *qmp_query_cpus(Error **errp) { + MachineState *ms = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(ms); CpuInfoList *head = NULL, *cur_item = NULL; CPUState *cpu; @@ -1909,6 +1918,13 @@ CpuInfoList *qmp_query_cpus(Error **errp) #else info->value->arch = CPU_INFO_ARCH_OTHER; #endif + info->value->has_props = !!mc->cpu_index_to_instance_props; + if (info->value->has_props) { + CpuInstanceProperties *props; + props = g_malloc0(sizeof(*props)); + *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index); + info->value->props = props; + } /* XXX: waiting for the qapi to support GSList */ if (!cur_item) { diff --git a/cputlb.c b/cputlb.c index f5d056cc08..743776ae19 100644 --- a/cputlb.c +++ b/cputlb.c @@ -930,7 +930,13 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, tlb_addr = tlbe->addr_write; } - /* Notice an IO access, or a notdirty page. */ + /* Check notdirty */ + if (unlikely(tlb_addr & TLB_NOTDIRTY)) { + tlb_set_dirty(ENV_GET_CPU(env), addr); + tlb_addr = tlb_addr & ~TLB_NOTDIRTY; + } + + /* Notice an IO access */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { /* There's really nothing that can be done to support this apart from stop-the-world. */ diff --git a/docs/qdev-device-use.txt b/docs/qdev-device-use.txt index b059405e0e..4274fe9f25 100644 --- a/docs/qdev-device-use.txt +++ b/docs/qdev-device-use.txt @@ -182,15 +182,13 @@ The appropriate DEVNAME depends on the machine type. For type "pc": This lets you control I/O ports and IRQs. -* -usbdevice serial:vendorid=VID,productid=PRID becomes - -device usb-serial,vendorid=VID,productid=PRID +* -usbdevice serial::chardev becomes -device usb-serial,chardev=dev. * -usbdevice braille doesn't support LEGACY-CHARDEV syntax. It always uses "braille". 
With -device, this useful default is gone, so you have to use something like - -device usb-braille,chardev=braille,vendorid=VID,productid=PRID - -chardev braille,id=braille + -device usb-braille,chardev=braille -chardev braille,id=braille * -virtioconsole becomes -device virtio-serial-pci,class=C,vectors=V,ioeventfd=IOEVENTFD,max_ports=N diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c index c6f2032dec..be496c817c 100644 --- a/hw/acpi/aml-build.c +++ b/hw/acpi/aml-build.c @@ -24,6 +24,7 @@ #include "hw/acpi/aml-build.h" #include "qemu/bswap.h" #include "qemu/bitops.h" +#include "sysemu/numa.h" static GArray *build_alloc_array(void) { @@ -1609,3 +1610,28 @@ void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base, numamem->base_addr = cpu_to_le64(base); numamem->range_length = cpu_to_le64(len); } + +/* + * ACPI spec 5.2.17 System Locality Distance Information Table + * (Revision 2.0 or later) + */ +void build_slit(GArray *table_data, BIOSLinker *linker) +{ + int slit_start, i, j; + slit_start = table_data->len; + + acpi_data_push(table_data, sizeof(AcpiTableHeader)); + + build_append_int_noprefix(table_data, nb_numa_nodes, 8); + for (i = 0; i < nb_numa_nodes; i++) { + for (j = 0; j < nb_numa_nodes; j++) { + assert(numa_info[i].distance[j]); + build_append_int_noprefix(table_data, numa_info[i].distance[j], 1); + } + } + + build_header(linker, table_data, + (void *)(table_data->data + slit_start), + "SLIT", + table_data->len - slit_start, 1, NULL, NULL); +} diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c index 8c719d3f9d..a233fe17cf 100644 --- a/hw/acpi/cpu.c +++ b/hw/acpi/cpu.c @@ -503,7 +503,6 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, /* build Processor object for each processor */ for (i = 0; i < arch_ids->len; i++) { - int j; Aml *dev; Aml *uid = aml_int(i); GArray *madt_buf = g_array_new(0, 1, 1); @@ -557,9 +556,9 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, * as a result _PXM is required for all CPUs which might * be hot-plugged. For simplicity, add it for all CPUs. */ - j = numa_get_node_for_cpu(i); - if (j < nb_numa_nodes) { - aml_append(dev, aml_name_decl("_PXM", aml_int(j))); + if (arch_ids->cpus[i].props.has_node_id) { + aml_append(dev, aml_name_decl("_PXM", + aml_int(arch_ids->cpus[i].props.node_id))); } aml_append(cpus_dev, dev); diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index 0835e59bb2..ce7499c9ca 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -486,30 +486,25 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) AcpiSystemResourceAffinityTable *srat; AcpiSratProcessorGiccAffinity *core; AcpiSratMemoryAffinity *numamem; - int i, j, srat_start; + int i, srat_start; uint64_t mem_base; - uint32_t *cpu_node = g_malloc0(vms->smp_cpus * sizeof(uint32_t)); - - for (i = 0; i < vms->smp_cpus; i++) { - j = numa_get_node_for_cpu(i); - if (j < nb_numa_nodes) { - cpu_node[i] = j; - } - } + MachineClass *mc = MACHINE_GET_CLASS(vms); + const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(MACHINE(vms)); srat_start = table_data->len; srat = acpi_data_push(table_data, sizeof(*srat)); srat->reserved1 = cpu_to_le32(1); - for (i = 0; i < vms->smp_cpus; ++i) { + for (i = 0; i < cpu_list->len; ++i) { + int node_id = cpu_list->cpus[i].props.has_node_id ? 
+ cpu_list->cpus[i].props.node_id : 0; core = acpi_data_push(table_data, sizeof(*core)); core->type = ACPI_SRAT_PROCESSOR_GICC; core->length = sizeof(*core); - core->proximity = cpu_to_le32(cpu_node[i]); + core->proximity = cpu_to_le32(node_id); core->acpi_processor_uid = cpu_to_le32(i); core->flags = cpu_to_le32(1); } - g_free(cpu_node); mem_base = vms->memmap[VIRT_MEM].base; for (i = 0; i < nb_numa_nodes; ++i) { diff --git a/hw/arm/virt.c b/hw/arm/virt.c index 5f62a0321e..c7c8159dfd 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -338,7 +338,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) { int cpu; int addr_cells = 1; - unsigned int i; + const MachineState *ms = MACHINE(vms); /* * From Documentation/devicetree/bindings/arm/cpus.txt @@ -369,6 +369,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) for (cpu = vms->smp_cpus - 1; cpu >= 0; cpu--) { char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu); ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu)); + CPUState *cs = CPU(armcpu); qemu_fdt_add_subnode(vms->fdt, nodename); qemu_fdt_setprop_string(vms->fdt, nodename, "device_type", "cpu"); @@ -389,9 +390,9 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) armcpu->mp_affinity); } - i = numa_get_node_for_cpu(cpu); - if (i < nb_numa_nodes) { - qemu_fdt_setprop_cell(vms->fdt, nodename, "numa-node-id", i); + if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) { + qemu_fdt_setprop_cell(vms->fdt, nodename, "numa-node-id", + ms->possible_cpus->cpus[cs->cpu_index].props.node_id); } g_free(nodename); @@ -1194,10 +1195,35 @@ void virt_machine_done(Notifier *notifier, void *data) virt_build_smbios(vms); } +static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx) +{ + uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER; + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + + if (!vmc->disallow_affinity_adjustment) { + /* Adjust MPIDR like 64-bit KVM hosts, which incorporate the + * GIC's target-list limitations. 32-bit KVM hosts currently + * always create clusters of 4 CPUs, but that is expected to + * change when they gain support for gicv3. When KVM is enabled + * it will override the changes we make here, therefore our + * purposes are to make TCG consistent (with 64-bit KVM hosts) + * and to improve SGI efficiency. 
+ */ + if (vms->gic_version == 3) { + clustersz = GICV3_TARGETLIST_BITS; + } else { + clustersz = GIC_TARGETLIST_BITS; + } + } + return arm_cpu_mp_affinity(idx, clustersz); +} + static void machvirt_init(MachineState *machine) { VirtMachineState *vms = VIRT_MACHINE(machine); VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(machine); + MachineClass *mc = MACHINE_GET_CLASS(machine); + const CPUArchIdList *possible_cpus; qemu_irq pic[NUM_IRQS]; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *secure_sysmem = NULL; @@ -1210,7 +1236,6 @@ static void machvirt_init(MachineState *machine) CPUClass *cc; Error *err = NULL; bool firmware_loaded = bios_name || drive_get(IF_PFLASH, 0, 0); - uint8_t clustersz; if (!cpu_model) { cpu_model = "cortex-a15"; @@ -1263,10 +1288,8 @@ static void machvirt_init(MachineState *machine) */ if (vms->gic_version == 3) { virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / 0x20000; - clustersz = GICV3_TARGETLIST_BITS; } else { virt_max_cpus = GIC_NCPU; - clustersz = GIC_TARGETLIST_BITS; } if (max_cpus > virt_max_cpus) { @@ -1324,21 +1347,35 @@ static void machvirt_init(MachineState *machine) exit(1); } - for (n = 0; n < smp_cpus; n++) { - Object *cpuobj = object_new(typename); - if (!vmc->disallow_affinity_adjustment) { - /* Adjust MPIDR like 64-bit KVM hosts, which incorporate the - * GIC's target-list limitations. 32-bit KVM hosts currently - * always create clusters of 4 CPUs, but that is expected to - * change when they gain support for gicv3. When KVM is enabled - * it will override the changes we make here, therefore our - * purposes are to make TCG consistent (with 64-bit KVM hosts) - * and to improve SGI efficiency. - */ - uint8_t aff1 = n / clustersz; - uint8_t aff0 = n % clustersz; - object_property_set_int(cpuobj, (aff1 << ARM_AFF1_SHIFT) | aff0, - "mp-affinity", NULL); + possible_cpus = mc->possible_cpu_arch_ids(machine); + for (n = 0; n < possible_cpus->len; n++) { + Object *cpuobj; + CPUState *cs; + int node_id; + + if (n >= smp_cpus) { + break; + } + + cpuobj = object_new(typename); + object_property_set_int(cpuobj, possible_cpus->cpus[n].arch_id, + "mp-affinity", NULL); + + cs = CPU(cpuobj); + cs->cpu_index = n; + + node_id = possible_cpus->cpus[cs->cpu_index].props.node_id; + if (!possible_cpus->cpus[cs->cpu_index].props.has_node_id) { + /* by default CPUState::numa_node was 0 if it's not set via CLI + * keep it this way for now but in future we probably should + * refuse to start up with incomplete numa mapping */ + node_id = 0; + } + if (cs->numa_node == CPU_UNSET_NUMA_NODE_ID) { + cs->numa_node = node_id; + } else { + /* CPU isn't device_add compatible yet, this shouldn't happen */ + error_setg(&error_abort, "user set node-id not implemented"); } if (!vms->secure) { @@ -1518,6 +1555,46 @@ static void virt_set_gic_version(Object *obj, const char *value, Error **errp) } } +static CpuInstanceProperties +virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; +} + +static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) +{ + int n; + VirtMachineState *vms = VIRT_MACHINE(ms); + + if (ms->possible_cpus) { + assert(ms->possible_cpus->len == max_cpus); + return ms->possible_cpus; + } + + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + + sizeof(CPUArchId) * max_cpus); + ms->possible_cpus->len = max_cpus; + for (n = 0; n 
< ms->possible_cpus->len; n++) { + ms->possible_cpus->cpus[n].arch_id = + virt_cpu_mp_affinity(vms, n); + ms->possible_cpus->cpus[n].props.has_thread_id = true; + ms->possible_cpus->cpus[n].props.thread_id = n; + + /* default distribution of CPUs over NUMA nodes */ + if (nb_numa_nodes) { + /* preset values but do not enable them i.e. 'has_node_id = false', + * numa init code will enable them later if manual mapping wasn't + * present on CLI */ + ms->possible_cpus->cpus[n].props.node_id = n % nb_numa_nodes; + } + } + return ms->possible_cpus; +} + static void virt_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1534,6 +1611,8 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) mc->pci_allow_0_address = true; /* We know we will never create a pre-ARMv7 CPU which needs 1K pages */ mc->minimum_page_bits = 12; + mc->possible_cpu_arch_ids = virt_possible_cpu_arch_ids; + mc->cpu_index_to_instance_props = virt_cpu_index_to_props; } static const TypeInfo virt_machine_info = { diff --git a/hw/core/machine.c b/hw/core/machine.c index ada9eea483..fd6a436064 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -17,8 +17,10 @@ #include "qapi/visitor.h" #include "hw/sysbus.h" #include "sysemu/sysemu.h" +#include "sysemu/numa.h" #include "qemu/error-report.h" #include "qemu/cutils.h" +#include "sysemu/numa.h" static char *machine_get_accel(Object *obj, Error **errp) { @@ -388,6 +390,102 @@ HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine) return head; } +/** + * machine_set_cpu_numa_node: + * @machine: machine object to modify + * @props: specifies which cpu objects to assign to + * numa node specified by @props.node_id + * @errp: if an error occurs, a pointer to an area to store the error + * + * Associate NUMA node specified by @props.node_id with cpu slots that + * match socket/core/thread-ids specified by @props. It's recommended to use + * query-hotpluggable-cpus.props values to specify affected cpu slots, + * which would lead to exact 1:1 mapping of cpu slots to NUMA node. + * + * However for CLI convenience it's possible to pass in subset of properties, + * which would affect all cpu slots that match it. + * Ex for pc machine: + * -smp 4,cores=2,sockets=2 -numa node,nodeid=0 -numa node,nodeid=1 \ + * -numa cpu,node-id=0,socket_id=0 \ + * -numa cpu,node-id=1,socket_id=1 + * will assign all child cores of socket 0 to node 0 and + * of socket 1 to node 1. + * + * On attempt of reassigning (already assigned) cpu slot to another NUMA node, + * return error. + * Empty subset is disallowed and function will return with error in this case. 
+ */ +void machine_set_cpu_numa_node(MachineState *machine, + const CpuInstanceProperties *props, Error **errp) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + bool match = false; + int i; + + if (!mc->possible_cpu_arch_ids) { + error_setg(errp, "mapping of CPUs to NUMA node is not supported"); + return; + } + + /* disabling node mapping is not supported, forbid it */ + assert(props->has_node_id); + + /* force board to initialize possible_cpus if it hasn't been done yet */ + mc->possible_cpu_arch_ids(machine); + + for (i = 0; i < machine->possible_cpus->len; i++) { + CPUArchId *slot = &machine->possible_cpus->cpus[i]; + + /* reject unsupported by board properties */ + if (props->has_thread_id && !slot->props.has_thread_id) { + error_setg(errp, "thread-id is not supported"); + return; + } + + if (props->has_core_id && !slot->props.has_core_id) { + error_setg(errp, "core-id is not supported"); + return; + } + + if (props->has_socket_id && !slot->props.has_socket_id) { + error_setg(errp, "socket-id is not supported"); + return; + } + + /* skip slots with explicit mismatch */ + if (props->has_thread_id && props->thread_id != slot->props.thread_id) { + continue; + } + + if (props->has_core_id && props->core_id != slot->props.core_id) { + continue; + } + + if (props->has_socket_id && props->socket_id != slot->props.socket_id) { + continue; + } + + /* reject assignment if slot is already assigned, for compatibility + * of legacy cpu_index mapping with SPAPR core based mapping do not + * error out if cpu thread and matched core have the same node-id */ + if (slot->props.has_node_id && + slot->props.node_id != props->node_id) { + error_setg(errp, "CPU is already assigned to node-id: %" PRId64, + slot->props.node_id); + return; + } + + /* assign slot to node as it's matched '-numa cpu' key */ + match = true; + slot->props.node_id = props->node_id; + slot->props.has_node_id = props->has_node_id; + } + + if (!match) { + error_setg(errp, "no match found"); + } +} + static void machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -400,6 +498,7 @@ static void machine_class_init(ObjectClass *oc, void *data) * On Linux, each node's border has to be 8MB aligned */ mc->numa_mem_align_shift = 23; + mc->numa_auto_assign_ram = numa_default_auto_assign_ram; object_class_property_add_str(oc, "accel", machine_get_accel, machine_set_accel, &error_abort); @@ -580,6 +679,69 @@ bool machine_mem_merge(MachineState *machine) return machine->mem_merge; } +static char *cpu_slot_to_string(const CPUArchId *cpu) +{ + GString *s = g_string_new(NULL); + if (cpu->props.has_socket_id) { + g_string_append_printf(s, "socket-id: %"PRId64, cpu->props.socket_id); + } + if (cpu->props.has_core_id) { + if (s->len) { + g_string_append_printf(s, ", "); + } + g_string_append_printf(s, "core-id: %"PRId64, cpu->props.core_id); + } + if (cpu->props.has_thread_id) { + if (s->len) { + g_string_append_printf(s, ", "); + } + g_string_append_printf(s, "thread-id: %"PRId64, cpu->props.thread_id); + } + return g_string_free(s, false); +} + +static void machine_numa_validate(MachineState *machine) +{ + int i; + GString *s = g_string_new(NULL); + MachineClass *mc = MACHINE_GET_CLASS(machine); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(machine); + + assert(nb_numa_nodes); + for (i = 0; i < possible_cpus->len; i++) { + const CPUArchId *cpu_slot = &possible_cpus->cpus[i]; + + /* at this point numa mappings are initilized by CLI options + * or with default mappings so it's sufficient to 
list + * all not yet mapped CPUs here */ + /* TODO: make it hard error in future */ + if (!cpu_slot->props.has_node_id) { + char *cpu_str = cpu_slot_to_string(cpu_slot); + g_string_append_printf(s, "%sCPU %d [%s]", s->len ? ", " : "", i, + cpu_str); + g_free(cpu_str); + } + } + if (s->len) { + error_report("warning: CPU(s) not present in any NUMA nodes: %s", + s->str); + error_report("warning: All CPU(s) up to maxcpus should be described " + "in NUMA config, ability to start up with partial NUMA " + "mappings is obsoleted and will be removed in future"); + } + g_string_free(s, true); +} + +void machine_run_board_init(MachineState *machine) +{ + MachineClass *machine_class = MACHINE_GET_CLASS(machine); + + if (nb_numa_nodes) { + machine_numa_validate(machine); + } + machine_class->init(machine); +} + static void machine_class_finalize(ObjectClass *klass, void *data) { MachineClass *mc = MACHINE_CLASS(klass); diff --git a/hw/display/cg3.c b/hw/display/cg3.c index 7ef8a96496..1de15a1d34 100644 --- a/hw/display/cg3.c +++ b/hw/display/cg3.c @@ -94,7 +94,8 @@ static void cg3_update_display(void *opaque) uint32_t dval; int x, y, y_start; unsigned int width, height; - ram_addr_t page, page_min, page_max; + ram_addr_t page; + DirtyBitmapSnapshot *snap = NULL; if (surface_bits_per_pixel(surface) != 32) { return; @@ -103,29 +104,32 @@ static void cg3_update_display(void *opaque) height = s->height; y_start = -1; - page_min = -1; - page_max = 0; - page = 0; pix = memory_region_get_ram_ptr(&s->vram_mem); data = (uint32_t *)surface_data(surface); - memory_region_sync_dirty_bitmap(&s->vram_mem); + if (!s->full_update) { + memory_region_sync_dirty_bitmap(&s->vram_mem); + snap = memory_region_snapshot_and_clear_dirty(&s->vram_mem, 0x0, + memory_region_size(&s->vram_mem), + DIRTY_MEMORY_VGA); + } + for (y = 0; y < height; y++) { - int update = s->full_update; + int update; page = (ram_addr_t)y * width; - update |= memory_region_get_dirty(&s->vram_mem, page, width, - DIRTY_MEMORY_VGA); + + if (s->full_update) { + update = 1; + } else { + update = memory_region_snapshot_get_dirty(&s->vram_mem, snap, page, + width); + } + if (update) { if (y_start < 0) { y_start = y; } - if (page < page_min) { - page_min = page; - } - if (page > page_max) { - page_max = page; - } for (x = 0; x < width; x++) { dval = *pix++; @@ -134,7 +138,7 @@ static void cg3_update_display(void *opaque) } } else { if (y_start >= 0) { - dpy_gfx_update(s->con, 0, y_start, s->width, y - y_start); + dpy_gfx_update(s->con, 0, y_start, width, y - y_start); y_start = -1; } pix += width; @@ -143,17 +147,14 @@ static void cg3_update_display(void *opaque) } s->full_update = 0; if (y_start >= 0) { - dpy_gfx_update(s->con, 0, y_start, s->width, y - y_start); - } - if (page_max >= page_min) { - memory_region_reset_dirty(&s->vram_mem, - page_min, page_max - page_min, DIRTY_MEMORY_VGA); + dpy_gfx_update(s->con, 0, y_start, width, y - y_start); } /* vsync interrupt? */ if (s->regs[0] & CG3_CR_ENABLE_INTS) { s->regs[1] |= CG3_SR_PENDING_INT; qemu_irq_raise(s->irq); } + g_free(snap); } static void cg3_invalidate_display(void *opaque) diff --git a/hw/display/sm501.c b/hw/display/sm501.c index 2094adbc9c..9d254ef2e1 100644 --- a/hw/display/sm501.c +++ b/hw/display/sm501.c @@ -1414,6 +1414,7 @@ static void sm501_update_display(void *opaque) { SM501State *s = (SM501State *)opaque; DisplaySurface *surface = qemu_console_surface(s->con); + DirtyBitmapSnapshot *snap; int y, c_x = 0, c_y = 0; int crt = (s->dc_crt_control & SM501_DC_CRT_CONTROL_SEL) ? 
1 : 0; int width = get_width(s, crt); @@ -1425,9 +1426,7 @@ static void sm501_update_display(void *opaque) draw_hwc_line_func *draw_hwc_line = NULL; int full_update = 0; int y_start = -1; - ram_addr_t page_min = ~0l; - ram_addr_t page_max = 0l; - ram_addr_t offset; + ram_addr_t offset = 0; uint32_t *palette; uint8_t hwc_palette[3 * 3]; uint8_t *hwc_src = NULL; @@ -1479,17 +1478,17 @@ static void sm501_update_display(void *opaque) /* draw each line according to conditions */ memory_region_sync_dirty_bitmap(&s->local_mem_region); + snap = memory_region_snapshot_and_clear_dirty(&s->local_mem_region, + offset, width * height * src_bpp, DIRTY_MEMORY_VGA); for (y = 0, offset = 0; y < height; y++, offset += width * src_bpp) { int update, update_hwc; - ram_addr_t page0 = offset; - ram_addr_t page1 = offset + width * src_bpp - 1; /* check if hardware cursor is enabled and we're within its range */ update_hwc = draw_hwc_line && c_y <= y && y < c_y + SM501_HWC_HEIGHT; update = full_update || update_hwc; /* check dirty flags for each line */ - update |= memory_region_get_dirty(&s->local_mem_region, page0, - page1 - page0, DIRTY_MEMORY_VGA); + update |= memory_region_snapshot_get_dirty(&s->local_mem_region, snap, + offset, width * src_bpp); /* draw line and change status */ if (update) { @@ -1507,12 +1506,6 @@ static void sm501_update_display(void *opaque) if (y_start < 0) { y_start = y; } - if (page0 < page_min) { - page_min = page0; - } - if (page1 > page_max) { - page_max = page1; - } } else { if (y_start >= 0) { /* flush to display */ @@ -1521,18 +1514,12 @@ static void sm501_update_display(void *opaque) } } } + g_free(snap); /* complete flush to display */ if (y_start >= 0) { dpy_gfx_update(s->con, 0, y_start, width, y - y_start); } - - /* clear dirty flags */ - if (page_min != ~0l) { - memory_region_reset_dirty(&s->local_mem_region, - page_min, page_max + TARGET_PAGE_SIZE, - DIRTY_MEMORY_VGA); - } } static const GraphicHwOps sm501_ops = { diff --git a/hw/display/tcx.c b/hw/display/tcx.c index 0e66dcd055..6593c1d6af 100644 --- a/hw/display/tcx.c +++ b/hw/display/tcx.c @@ -104,36 +104,23 @@ static void tcx_set_dirty(TCXState *s, ram_addr_t addr, int len) } } -static int tcx_check_dirty(TCXState *s, ram_addr_t addr, int len) +static int tcx_check_dirty(TCXState *s, DirtyBitmapSnapshot *snap, + ram_addr_t addr, int len) { int ret; - ret = memory_region_get_dirty(&s->vram_mem, addr, len, DIRTY_MEMORY_VGA); + ret = memory_region_snapshot_get_dirty(&s->vram_mem, snap, addr, len); if (s->depth == 24) { - ret |= memory_region_get_dirty(&s->vram_mem, - s->vram24_offset + addr * 4, len * 4, - DIRTY_MEMORY_VGA); - ret |= memory_region_get_dirty(&s->vram_mem, - s->cplane_offset + addr * 4, len * 4, - DIRTY_MEMORY_VGA); + ret |= memory_region_snapshot_get_dirty(&s->vram_mem, snap, + s->vram24_offset + addr * 4, len * 4); + ret |= memory_region_snapshot_get_dirty(&s->vram_mem, snap, + s->cplane_offset + addr * 4, len * 4); } return ret; } -static void tcx_reset_dirty(TCXState *s, ram_addr_t addr, int len) -{ - memory_region_reset_dirty(&s->vram_mem, addr, len, DIRTY_MEMORY_VGA); - - if (s->depth == 24) { - memory_region_reset_dirty(&s->vram_mem, s->vram24_offset + addr * 4, - len * 4, DIRTY_MEMORY_VGA); - memory_region_reset_dirty(&s->vram_mem, s->cplane_offset + addr * 4, - len * 4, DIRTY_MEMORY_VGA); - } -} - static void update_palette_entries(TCXState *s, int start, int end) { DisplaySurface *surface = qemu_console_surface(s->con); @@ -233,7 +220,8 @@ static void tcx_update_display(void *opaque) { TCXState 
*ts = opaque; DisplaySurface *surface = qemu_console_surface(ts->con); - ram_addr_t page, page_min, page_max; + ram_addr_t page; + DirtyBitmapSnapshot *snap = NULL; int y, y_start, dd, ds; uint8_t *d, *s; @@ -243,22 +231,20 @@ static void tcx_update_display(void *opaque) page = 0; y_start = -1; - page_min = -1; - page_max = 0; d = surface_data(surface); s = ts->vram; dd = surface_stride(surface); ds = 1024; memory_region_sync_dirty_bitmap(&ts->vram_mem); + snap = memory_region_snapshot_and_clear_dirty(&ts->vram_mem, 0x0, + memory_region_size(&ts->vram_mem), + DIRTY_MEMORY_VGA); + for (y = 0; y < ts->height; y++, page += ds) { - if (tcx_check_dirty(ts, page, ds)) { + if (tcx_check_dirty(ts, snap, page, ds)) { if (y_start < 0) y_start = y; - if (page < page_min) - page_min = page; - if (page > page_max) - page_max = page; tcx_draw_line32(ts, d, s, ts->width); if (y >= ts->cursy && y < ts->cursy + 32 && ts->cursx < ts->width) { @@ -280,17 +266,15 @@ static void tcx_update_display(void *opaque) dpy_gfx_update(ts->con, 0, y_start, ts->width, y - y_start); } - /* reset modified pages */ - if (page_max >= page_min) { - tcx_reset_dirty(ts, page_min, page_max - page_min); - } + g_free(snap); } static void tcx24_update_display(void *opaque) { TCXState *ts = opaque; DisplaySurface *surface = qemu_console_surface(ts->con); - ram_addr_t page, page_min, page_max; + ram_addr_t page; + DirtyBitmapSnapshot *snap = NULL; int y, y_start, dd, ds; uint8_t *d, *s; uint32_t *cptr, *s24; @@ -301,8 +285,6 @@ static void tcx24_update_display(void *opaque) page = 0; y_start = -1; - page_min = -1; - page_max = 0; d = surface_data(surface); s = ts->vram; s24 = ts->vram24; @@ -311,14 +293,15 @@ static void tcx24_update_display(void *opaque) ds = 1024; memory_region_sync_dirty_bitmap(&ts->vram_mem); + snap = memory_region_snapshot_and_clear_dirty(&ts->vram_mem, 0x0, + memory_region_size(&ts->vram_mem), + DIRTY_MEMORY_VGA); + for (y = 0; y < ts->height; y++, page += ds) { - if (tcx_check_dirty(ts, page, ds)) { + if (tcx_check_dirty(ts, snap, page, ds)) { if (y_start < 0) y_start = y; - if (page < page_min) - page_min = page; - if (page > page_max) - page_max = page; + tcx24_draw_line32(ts, d, s, ts->width, cptr, s24); if (y >= ts->cursy && y < ts->cursy+32 && ts->cursx < ts->width) { tcx_draw_cursor32(ts, d, y, ts->width); @@ -341,10 +324,7 @@ static void tcx24_update_display(void *opaque) dpy_gfx_update(ts->con, 0, y_start, ts->width, y - y_start); } - /* reset modified pages */ - if (page_max >= page_min) { - tcx_reset_dirty(ts, page_min, page_max - page_min); - } + g_free(snap); } static void tcx_invalidate_display(void *opaque) diff --git a/hw/display/vga.c b/hw/display/vga.c index b2516c8d21..dcc95f88e2 100644 --- a/hw/display/vga.c +++ b/hw/display/vga.c @@ -1630,7 +1630,7 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) if (!full_update) { vga_sync_dirty_bitmap(s); snap = memory_region_snapshot_and_clear_dirty(&s->vram, addr1, - bwidth * height, + line_offset * height, DIRTY_MEMORY_VGA); } diff --git a/hw/display/virtio-gpu-3d.c b/hw/display/virtio-gpu-3d.c index f49b7fe8cd..8c106a662d 100644 --- a/hw/display/virtio-gpu-3d.c +++ b/hw/display/virtio-gpu-3d.c @@ -600,6 +600,22 @@ void virtio_gpu_virgl_reset(VirtIOGPU *g) } } +void virtio_gpu_gl_block(void *opaque, bool block) +{ + VirtIOGPU *g = opaque; + + if (block) { + g->renderer_blocked++; + } else { + g->renderer_blocked--; + } + assert(g->renderer_blocked >= 0); + + if (g->renderer_blocked == 0) { + virtio_gpu_process_cmdq(g); + } +} + int 
virtio_gpu_virgl_init(VirtIOGPU *g) { int ret; diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index e1056f34df..cfb5dfa336 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -929,28 +929,14 @@ static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) return 0; } -static void virtio_gpu_gl_block(void *opaque, bool block) -{ - VirtIOGPU *g = opaque; - - if (block) { - g->renderer_blocked++; - } else { - g->renderer_blocked--; - } - assert(g->renderer_blocked >= 0); - - if (g->renderer_blocked == 0) { - virtio_gpu_process_cmdq(g); - } -} - const GraphicHwOps virtio_gpu_ops = { .invalidate = virtio_gpu_invalidate_display, .gfx_update = virtio_gpu_update_display, .text_update = virtio_gpu_text_update, .ui_info = virtio_gpu_ui_info, +#ifdef CONFIG_VIRGL .gl_block = virtio_gpu_gl_block, +#endif }; static const VMStateDescription vmstate_virtio_gpu_scanout = { diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 1d8c645ed3..cc0418f327 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -2335,7 +2335,8 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) srat->reserved1 = cpu_to_le32(1); for (i = 0; i < apic_ids->len; i++) { - int j = numa_get_node_for_cpu(i); + int node_id = apic_ids->cpus[i].props.has_node_id ? + apic_ids->cpus[i].props.node_id : 0; uint32_t apic_id = apic_ids->cpus[i].arch_id; if (apic_id < 255) { @@ -2345,9 +2346,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) core->type = ACPI_SRAT_PROCESSOR_APIC; core->length = sizeof(*core); core->local_apic_id = apic_id; - if (j < nb_numa_nodes) { - core->proximity_lo = j; - } + core->proximity_lo = node_id; memset(core->proximity_hi, 0, 3); core->local_sapic_eid = 0; core->flags = cpu_to_le32(1); @@ -2358,9 +2357,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) core->type = ACPI_SRAT_PROCESSOR_x2APIC; core->length = sizeof(*core); core->x2apic_id = cpu_to_le32(apic_id); - if (j < nb_numa_nodes) { - core->proximity_domain = cpu_to_le32(j); - } + core->proximity_domain = cpu_to_le32(node_id); core->flags = cpu_to_le32(1); } } @@ -2707,6 +2704,10 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) if (pcms->numa_nodes) { acpi_add_table(table_offsets, tables_blob); build_srat(tables_blob, tables->linker, machine); + if (have_numa_distance) { + acpi_add_table(table_offsets, tables_blob); + build_slit(tables_blob, tables->linker); + } } if (acpi_get_mcfg(&mcfg)) { acpi_add_table(table_offsets, tables_blob); diff --git a/hw/i386/pc.c b/hw/i386/pc.c index f3b372a18f..e36a375683 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -747,7 +747,9 @@ static FWCfgState *bochs_bios_init(AddressSpace *as, PCMachineState *pcms) { FWCfgState *fw_cfg; uint64_t *numa_fw_cfg; - int i, j; + int i; + const CPUArchIdList *cpus; + MachineClass *mc = MACHINE_GET_CLASS(pcms); fw_cfg = fw_cfg_init_io_dma(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4, as); fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus); @@ -782,12 +784,12 @@ static FWCfgState *bochs_bios_init(AddressSpace *as, PCMachineState *pcms) */ numa_fw_cfg = g_new0(uint64_t, 1 + pcms->apic_id_limit + nb_numa_nodes); numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes); - for (i = 0; i < max_cpus; i++) { - unsigned int apic_id = x86_cpu_apic_id_from_index(i); + cpus = mc->possible_cpu_arch_ids(MACHINE(pcms)); + for (i = 0; i < cpus->len; i++) { + unsigned int apic_id = cpus->cpus[i].arch_id; assert(apic_id < pcms->apic_id_limit); - j = 
numa_get_node_for_cpu(i); - if (j < nb_numa_nodes) { - numa_fw_cfg[apic_id + 1] = cpu_to_le64(j); + if (cpus->cpus[i].props.has_node_id) { + numa_fw_cfg[apic_id + 1] = cpu_to_le64(cpus->cpus[i].props.node_id); } } for (i = 0; i < nb_numa_nodes; i++) { @@ -1893,6 +1895,7 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { int idx; + int node_id; CPUState *cs; CPUArchId *cpu_slot; X86CPUTopoInfo topo; @@ -1982,6 +1985,22 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev, cs = CPU(cpu); cs->cpu_index = idx; + + node_id = cpu_slot->props.node_id; + if (!cpu_slot->props.has_node_id) { + /* by default CPUState::numa_node was 0 if it's not set via CLI + * keep it this way for now but in future we probably should + * refuse to start up with incomplete numa mapping */ + node_id = 0; + } + if (cs->numa_node == CPU_UNSET_NUMA_NODE_ID) { + cs->numa_node = node_id; + } else if (cs->numa_node != node_id) { + error_setg(errp, "node-id %d must match numa node specified" + "with -numa option for cpu-index %d", + cs->numa_node, cs->cpu_index); + return; + } } static void pc_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, @@ -2243,12 +2262,14 @@ static void pc_machine_reset(void) } } -static unsigned pc_cpu_index_to_socket_id(unsigned cpu_index) +static CpuInstanceProperties +pc_cpu_index_to_props(MachineState *ms, unsigned cpu_index) { - X86CPUTopoInfo topo; - x86_topo_ids_from_idx(smp_cores, smp_threads, cpu_index, - &topo); - return topo.pkg_id; + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; } static const CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *ms) @@ -2280,6 +2301,15 @@ static const CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *ms) ms->possible_cpus->cpus[i].props.core_id = topo.core_id; ms->possible_cpus->cpus[i].props.has_thread_id = true; ms->possible_cpus->cpus[i].props.thread_id = topo.smt_id; + + /* default distribution of CPUs over NUMA nodes */ + if (nb_numa_nodes) { + /* preset values but do not enable them i.e. 
'has_node_id = false', + * numa init code will enable them later if manual mapping wasn't + * present on CLI */ + ms->possible_cpus->cpus[i].props.node_id = + topo.pkg_id % nb_numa_nodes; + } } return ms->possible_cpus; } @@ -2322,7 +2352,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) pcmc->acpi_data_size = 0x20000 + 0x8000; pcmc->save_tsc_khz = true; mc->get_hotplug_handler = pc_get_hotpug_handler; - mc->cpu_index_to_socket_id = pc_cpu_index_to_socket_id; + mc->cpu_index_to_instance_props = pc_cpu_index_to_props; mc->possible_cpu_arch_ids = pc_possible_cpu_arch_ids; mc->has_hotpluggable_cpus = true; mc->default_boot_order = "cad"; diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index 9f102aa388..d468b963fb 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -54,6 +54,7 @@ #endif #include "migration/migration.h" #include "kvm_i386.h" +#include "sysemu/numa.h" #define MAX_IDE_BUS 2 @@ -442,6 +443,7 @@ static void pc_i440fx_2_9_machine_options(MachineClass *m) pc_i440fx_machine_options(m); m->alias = "pc"; m->is_default = 1; + m->numa_auto_assign_ram = numa_legacy_auto_assign_ram; } DEFINE_I440FX_MACHINE(v2_9, "pc-i440fx-2.9", NULL, diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index dd792a8547..66303a78cf 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -47,6 +47,7 @@ #include "hw/usb.h" #include "qemu/error-report.h" #include "migration/migration.h" +#include "sysemu/numa.h" /* ICH9 AHCI has 6 ports */ #define MAX_SATA_PORTS 6 @@ -305,6 +306,7 @@ static void pc_q35_2_9_machine_options(MachineClass *m) { pc_q35_machine_options(m); m->alias = "q35"; + m->numa_auto_assign_ram = numa_legacy_auto_assign_ram; } DEFINE_Q35_MACHINE(v2_9, "pc-q35-2.9", NULL, diff --git a/hw/input/virtio-input-hid.c b/hw/input/virtio-input-hid.c index 3ee0c1814a..46c038110c 100644 --- a/hw/input/virtio-input-hid.c +++ b/hw/input/virtio-input-hid.c @@ -484,12 +484,14 @@ static struct virtio_input_config virtio_tablet_config[] = { .select = VIRTIO_INPUT_CFG_ABS_INFO, .subsel = ABS_X, .size = sizeof(virtio_input_absinfo), - .u.abs.max = const_le32(INPUT_EVENT_ABS_SIZE - 1), + .u.abs.min = const_le32(INPUT_EVENT_ABS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_ABS_MAX), },{ .select = VIRTIO_INPUT_CFG_ABS_INFO, .subsel = ABS_Y, .size = sizeof(virtio_input_absinfo), - .u.abs.max = const_le32(INPUT_EVENT_ABS_SIZE - 1), + .u.abs.min = const_le32(INPUT_EVENT_ABS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_ABS_MAX), }, { /* end of list */ }, }; diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c index 42e0e0ef84..dd93531ae3 100644 --- a/hw/intc/xics_kvm.c +++ b/hw/intc/xics_kvm.c @@ -213,6 +213,7 @@ static void ics_get_kvm_state(ICSState *ics) irq->priority = irq->saved_priority; } + irq->status = 0; if (state & KVM_XICS_PENDING) { if (state & KVM_XICS_LEVEL_SENSITIVE) { irq->status |= XICS_STATUS_ASSERTED; @@ -228,6 +229,12 @@ static void ics_get_kvm_state(ICSState *ics) | XICS_STATUS_REJECTED; } } + if (state & KVM_XICS_PRESENTED) { + irq->status |= XICS_STATUS_PRESENTED; + } + if (state & KVM_XICS_QUEUED) { + irq->status |= XICS_STATUS_QUEUED; + } } } @@ -265,6 +272,12 @@ static int ics_set_kvm_state(ICSState *ics, int version_id) state |= KVM_XICS_PENDING; } } + if (irq->status & XICS_STATUS_PRESENTED) { + state |= KVM_XICS_PRESENTED; + } + if (irq->status & XICS_STATUS_QUEUED) { + state |= KVM_XICS_QUEUED; + } ret = ioctl(kernel_xics_fd, KVM_SET_DEVICE_ATTR, &attr); if (ret != 0) { diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index 68aaedc06d..bae1c0ac99 100644 --- 
a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -80,6 +80,8 @@ #define CLOCKFREQ (266UL * 1000UL * 1000UL) #define BUSFREQ (100UL * 1000UL * 1000UL) +#define NDRV_VGA_FILENAME "qemu_vga.ndrv" + /* UniN device */ static void unin_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) @@ -160,7 +162,8 @@ static void ppc_core99_init(MachineState *machine) MACIOIDEState *macio_ide; BusState *adb_bus; MacIONVRAMState *nvr; - int bios_size; + int bios_size, ndrv_size; + uint8_t *ndrv_file; MemoryRegion *pic_mem, *escc_mem; MemoryRegion *escc_bar = g_new(MemoryRegion, 1); int ppc_boot_device; @@ -494,6 +497,19 @@ static void ppc_core99_init(MachineState *machine) fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_BUSFREQ, BUSFREQ); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_NVRAM_ADDR, nvram_addr); + /* MacOS NDRV VGA driver */ + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, NDRV_VGA_FILENAME); + if (filename) { + ndrv_size = get_image_size(filename); + if (ndrv_size != -1) { + ndrv_file = g_malloc(ndrv_size); + ndrv_size = load_image(filename, ndrv_file); + + fw_cfg_add_file(fw_cfg, "ndrv/qemu_vga.ndrv", ndrv_file, ndrv_size); + } + g_free(filename); + } + qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); } diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c index 5df94e239b..97bb8541d7 100644 --- a/hw/ppc/mac_oldworld.c +++ b/hw/ppc/mac_oldworld.c @@ -53,6 +53,8 @@ #define CLOCKFREQ 266000000UL #define BUSFREQ 66000000UL +#define NDRV_VGA_FILENAME "qemu_vga.ndrv" + static void fw_cfg_boot_set(void *opaque, const char *boot_device, Error **errp) { @@ -99,7 +101,8 @@ static void ppc_heathrow_init(MachineState *machine) MACIOIDEState *macio_ide; DeviceState *dev; BusState *adb_bus; - int bios_size; + int bios_size, ndrv_size; + uint8_t *ndrv_file; MemoryRegion *pic_mem; MemoryRegion *escc_mem, *escc_bar = g_new(MemoryRegion, 1); uint16_t ppc_boot_device; @@ -355,6 +358,19 @@ static void ppc_heathrow_init(MachineState *machine) fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_CLOCKFREQ, CLOCKFREQ); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_BUSFREQ, BUSFREQ); + /* MacOS NDRV VGA driver */ + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, NDRV_VGA_FILENAME); + if (filename) { + ndrv_size = get_image_size(filename); + if (ndrv_size != -1) { + ndrv_file = g_malloc(ndrv_size); + ndrv_size = load_image(filename, ndrv_file); + + fw_cfg_add_file(fw_cfg, "ndrv/qemu_vga.ndrv", ndrv_file, ndrv_size); + } + g_free(filename); + } + qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); } diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 6a498565c7..231ed9735b 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -511,7 +511,7 @@ static void ppc_powernv_reset(void) * This is the internal simulator but it could also be an external * BMC. */ - obj = object_resolve_path_type("", TYPE_IPMI_BMC, NULL); + obj = object_resolve_path_type("", "ipmi-bmc-sim", NULL); if (obj) { pnv->bmc = IPMI_BMC(obj); } diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 80d12d005c..0980d733cd 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -219,7 +219,7 @@ static void spapr_populate_pa_features(CPUPPCState *env, void *fdt, int offset, /* 16: Vector */ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */ - 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 18 - 23 */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ /* 24: Ext. 
Dec, 26: 64 bit ftrs, 28: PM ftrs */ 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */ @@ -855,6 +855,8 @@ static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt) * option vector 5: */ static void spapr_dt_ov5_platform_support(void *fdt, int chosen) { + PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); + char val[2 * 3] = { 24, 0x00, /* Hash/Radix, filled in below. */ 25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */ @@ -870,8 +872,13 @@ static void spapr_dt_ov5_platform_support(void *fdt, int chosen) val[1] = 0x00; /* Hash */ } } else { - /* TODO: TCG case, hash */ - val[1] = 0x00; + if (first_ppc_cpu->env.mmu_model & POWERPC_MMU_V3) { + /* V3 MMU supports both hash and radix (with dynamic switching) */ + val[1] = 0xC0; + } else { + /* Otherwise we can only do hash */ + val[1] = 0x00; + } } _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support", val, sizeof(val))); @@ -2101,8 +2108,8 @@ static void ppc_spapr_init(MachineState *machine) } spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY); - if (kvmppc_has_cap_mmu_radix()) { - /* KVM always allows GTSE with radix... */ + if (!kvm_enabled() || kvmppc_has_cap_mmu_radix()) { + /* KVM and TCG always allow GTSE with radix... */ spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE); } /* ... but not with hash (currently). */ @@ -2824,9 +2831,11 @@ static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev); Error *local_err = NULL; CPUCore *cc = CPU_CORE(dev); + sPAPRCPUCore *sc = SPAPR_CPU_CORE(dev); char *base_core_type = spapr_get_cpu_core_type(machine->cpu_model); const char *type = object_get_typename(OBJECT(dev)); CPUArchId *core_slot; + int node_id; int index; if (dev->hotplugged && !mc->has_hotpluggable_cpus) { @@ -2861,6 +2870,21 @@ static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, goto out; } + node_id = core_slot->props.node_id; + if (!core_slot->props.has_node_id) { + /* by default CPUState::numa_node was 0 if it's not set via CLI + * keep it this way for now but in future we probably should + * refuse to start up with incomplete numa mapping */ + node_id = 0; + } + if (sc->node_id == CPU_UNSET_NUMA_NODE_ID) { + sc->node_id = node_id; + } else if (sc->node_id != node_id) { + error_setg(&local_err, "node-id %d must match numa node specified" + "with -numa option for cpu-index %d", sc->node_id, cc->core_id); + goto out; + } + out: g_free(base_core_type); error_propagate(errp, local_err); @@ -2981,11 +3005,18 @@ static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine, return NULL; } -static unsigned spapr_cpu_index_to_socket_id(unsigned cpu_index) +static CpuInstanceProperties +spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index) { - /* Allocate to NUMA nodes on a "socket" basis (not that concept of - * socket means much for the paravirtualized PAPR platform) */ - return cpu_index / smp_threads / smp_cores; + CPUArchId *core_slot; + MachineClass *mc = MACHINE_GET_CLASS(machine); + + /* make sure possible_cpu are intialized */ + mc->possible_cpu_arch_ids(machine); + /* get CPU core slot containing thread that matches cpu_index */ + core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL); + assert(core_slot); + return core_slot->props; } static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine) @@ -3012,8 +3043,15 @@ static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine) 
machine->possible_cpus->cpus[i].arch_id = core_id; machine->possible_cpus->cpus[i].props.has_core_id = true; machine->possible_cpus->cpus[i].props.core_id = core_id; - /* TODO: add 'has_node/node' here to describe - to which node core belongs */ + + /* default distribution of CPUs over NUMA nodes */ + if (nb_numa_nodes) { + /* preset values but do not enable them i.e. 'has_node_id = false', + * numa init code will enable them later if manual mapping wasn't + * present on CLI */ + machine->possible_cpus->cpus[i].props.node_id = + core_id / smp_threads / smp_cores % nb_numa_nodes; + } } return machine->possible_cpus; } @@ -3138,7 +3176,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) hc->pre_plug = spapr_machine_device_pre_plug; hc->plug = spapr_machine_device_plug; hc->unplug = spapr_machine_device_unplug; - mc->cpu_index_to_socket_id = spapr_cpu_index_to_socket_id; + mc->cpu_index_to_instance_props = spapr_cpu_index_to_props; mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids; hc->unplug_request = spapr_machine_device_unplug_request; @@ -3242,6 +3280,7 @@ static void spapr_machine_2_9_class_options(MachineClass *mc) { spapr_machine_2_10_class_options(mc); SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_9); + mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram; } DEFINE_SPAPR_MACHINE(2_9, "2.9", false); diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 4389ef4c2a..a17ea07ef1 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -176,13 +176,11 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp) const char *typename = object_class_get_name(scc->cpu_class); size_t size = object_type_get_instance_size(typename); Error *local_err = NULL; - int core_node_id = numa_get_node_for_cpu(cc->core_id);; void *obj; int i, j; sc->threads = g_malloc0(size * cc->nr_threads); for (i = 0; i < cc->nr_threads; i++) { - int node_id; char id[32]; CPUState *cs; @@ -192,17 +190,8 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp) cs = CPU(obj); cs->cpu_index = cc->core_id + i; - /* Set NUMA node for the added CPUs */ - node_id = numa_get_node_for_cpu(cs->cpu_index); - if (node_id != core_node_id) { - error_setg(&local_err, "Invalid node-id=%d of thread[cpu-index: %d]" - " on CPU[core-id: %d, node-id: %d], node-id must be the same", - node_id, cs->cpu_index, cc->core_id, core_node_id); - goto err; - } - if (node_id < nb_numa_nodes) { - cs->numa_node = node_id; - } + /* Set NUMA node for the threads belonged to core */ + cs->numa_node = sc->node_id; snprintf(id, sizeof(id), "thread[%d]", i); object_property_add_child(OBJECT(sc), id, obj, &local_err); @@ -263,6 +252,11 @@ static const char *spapr_core_models[] = { "POWER9_v1.0", }; +static Property spapr_cpu_core_properties[] = { + DEFINE_PROP_INT32("node-id", sPAPRCPUCore, node_id, CPU_UNSET_NUMA_NODE_ID), + DEFINE_PROP_END_OF_LIST() +}; + void spapr_cpu_core_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -270,6 +264,7 @@ void spapr_cpu_core_class_init(ObjectClass *oc, void *data) dc->realize = spapr_cpu_core_realize; dc->unrealize = spapr_cpu_core_unrealizefn; + dc->props = spapr_cpu_core_properties; scc->cpu_class = cpu_class_by_name(TYPE_POWERPC_CPU, data); g_assert(scc->cpu_class); } diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 9f18f75b88..0d608d6e28 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -936,7 +936,7 @@ static target_ulong h_register_process_table(PowerPCCPU *cpu, target_ulong opcode, 
target_ulong *args) { - CPUPPCState *env = &cpu->env; + CPUState *cs; target_ulong flags = args[0]; target_ulong proc_tbl = args[1]; target_ulong page_size = args[2]; @@ -992,16 +992,12 @@ static target_ulong h_register_process_table(PowerPCCPU *cpu, spapr_check_setup_free_hpt(spapr, spapr->patb_entry, cproc); spapr->patb_entry = cproc; /* Save new process table */ - if ((flags & FLAG_RADIX) || (flags & FLAG_HASH_PROC_TBL)) { - /* Use Process TBL */ - env->spr[SPR_LPCR] |= LPCR_UPRT; - } else { - env->spr[SPR_LPCR] &= ~LPCR_UPRT; - } - if (flags & FLAG_GTSE) { /* Partition Uses Guest Translation Shootdwn */ - env->spr[SPR_LPCR] |= LPCR_GTSE; - } else { - env->spr[SPR_LPCR] &= ~LPCR_GTSE; + + /* Update the UPRT and GTSE bits in the LPCR for all cpus */ + CPU_FOREACH(cs) { + set_spr(cs, SPR_LPCR, LPCR_UPRT | LPCR_GTSE, + ((flags & (FLAG_RADIX | FLAG_HASH_PROC_TBL)) ? LPCR_UPRT : 0) | + ((flags & FLAG_GTSE) ? LPCR_GTSE : 0)); } if (kvm_enabled()) { diff --git a/hw/usb/dev-hub.c b/hw/usb/dev-hub.c index 9fe7333946..47b7519910 100644 --- a/hw/usb/dev-hub.c +++ b/hw/usb/dev-hub.c @@ -208,6 +208,7 @@ static void usb_hub_wakeup(USBPort *port1) USBHubPort *port = &s->ports[port1->index]; if (port->wPortStatus & PORT_STAT_SUSPEND) { + port->wPortStatus &= ~PORT_STAT_SUSPEND; port->wPortChange |= PORT_STAT_C_SUSPEND; usb_wakeup(s->intr, 0); } diff --git a/hw/usb/dev-serial.c b/hw/usb/dev-serial.c index 6d5137383b..83a4f0e6fb 100644 --- a/hw/usb/dev-serial.c +++ b/hw/usb/dev-serial.c @@ -513,27 +513,18 @@ static USBDevice *usb_serial_init(USBBus *bus, const char *filename) { USBDevice *dev; Chardev *cdrv; - uint32_t vendorid = 0, productid = 0; char label[32]; static int index; while (*filename && *filename != ':') { const char *p; - char *e; + if (strstart(filename, "vendorid=", &p)) { - vendorid = strtol(p, &e, 16); - if (e == p || (*e && *e != ',' && *e != ':')) { - error_report("bogus vendor ID %s", p); - return NULL; - } - filename = e; + error_report("vendorid is not supported anymore"); + return NULL; } else if (strstart(filename, "productid=", &p)) { - productid = strtol(p, &e, 16); - if (e == p || (*e && *e != ',' && *e != ':')) { - error_report("bogus product ID %s", p); - return NULL; - } - filename = e; + error_report("productid is not supported anymore"); + return NULL; } else { error_report("unrecognized serial USB option %s", filename); return NULL; @@ -554,10 +545,7 @@ static USBDevice *usb_serial_init(USBBus *bus, const char *filename) dev = usb_create(bus, "usb-serial"); qdev_prop_set_chr(&dev->qdev, "chardev", cdrv); - if (vendorid) - qdev_prop_set_uint16(&dev->qdev, "vendorid", vendorid); - if (productid) - qdev_prop_set_uint16(&dev->qdev, "productid", productid); + return dev; } diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c index a2d3143bf4..77d8e1137a 100644 --- a/hw/usb/hcd-xhci.c +++ b/hw/usb/hcd-xhci.c @@ -50,7 +50,7 @@ /* Very pessimistic, let's hope it's enough for all cases */ #define EV_QUEUE (((3 * 24) + 16) * MAXSLOTS) -#define TRB_LINK_LIMIT 4 +#define TRB_LINK_LIMIT 32 #define COMMAND_LIMIT 256 #define TRANSFER_LIMIT 256 @@ -1790,9 +1790,6 @@ static void xhci_stall_ep(XHCITransfer *xfer) } } -static int xhci_submit(XHCIState *xhci, XHCITransfer *xfer, - XHCIEPContext *epctx); - static int xhci_setup_packet(XHCITransfer *xfer) { USBEndpoint *ep; @@ -1806,7 +1803,7 @@ static int xhci_setup_packet(XHCITransfer *xfer) ep = xhci_epid_to_usbep(xfer->epctx); if (!ep) { DPRINTF("xhci: slot %d has no device\n", - xfer->slotid); + xfer->epctx->slotid); return -1; } } @@ 
-1980,7 +1977,7 @@ static int xhci_submit(XHCIState *xhci, XHCITransfer *xfer, XHCIEPContext *epctx { uint64_t mfindex; - DPRINTF("xhci_submit(slotid=%d,epid=%d)\n", xfer->slotid, xfer->epid); + DPRINTF("xhci_submit(slotid=%d,epid=%d)\n", epctx->slotid, epctx->epid); xfer->in_xfer = epctx->type>>2; diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c index b001a27f05..ad5ef783a6 100644 --- a/hw/usb/redirect.c +++ b/hw/usb/redirect.c @@ -229,21 +229,10 @@ static void usbredir_log(void *priv, int level, const char *msg) static void usbredir_log_data(USBRedirDevice *dev, const char *desc, const uint8_t *data, int len) { - int i, j, n; - if (dev->debug < usbredirparser_debug_data) { return; } - - for (i = 0; i < len; i += j) { - char buf[128]; - - n = sprintf(buf, "%s", desc); - for (j = 0; j < 8 && i + j < len; j++) { - n += sprintf(buf + n, " %02X", data[i + j]); - } - error_report("%s", buf); - } + qemu_hexdump((char *)data, stderr, desc, len); } /* diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h index 00c21f160c..329a0d0c90 100644 --- a/include/hw/acpi/aml-build.h +++ b/include/hw/acpi/aml-build.h @@ -389,4 +389,5 @@ GCC_FMT_ATTR(2, 3); void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base, uint64_t len, int node, MemoryAffinityFlags flags); +void build_slit(GArray *table_data, BIOSLinker *linker); #endif diff --git a/include/hw/boards.h b/include/hw/boards.h index 31d9c72fb0..76ce0219ff 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -32,6 +32,7 @@ void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner, MachineClass *find_default_machine(void); extern MachineState *current_machine; +void machine_run_board_init(MachineState *machine); bool machine_usb(MachineState *machine); bool machine_kernel_irqchip_allowed(MachineState *machine); bool machine_kernel_irqchip_required(MachineState *machine); @@ -42,6 +43,9 @@ bool machine_dump_guest_core(MachineState *machine); bool machine_mem_merge(MachineState *machine); void machine_register_compat_props(MachineState *machine); HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine); +void machine_set_cpu_numa_node(MachineState *machine, + const CpuInstanceProperties *props, + Error **errp); /** * CPUArchId: @@ -74,7 +78,10 @@ typedef struct { * of HotplugHandler object, which handles hotplug operation * for a given @dev. It may return NULL if @dev doesn't require * any actions to be performed by hotplug handler. - * @cpu_index_to_socket_id: + * @cpu_index_to_instance_props: + * used to provide @cpu_index to socket/core/thread number mapping, allowing + * legacy code to perform maping from cpu_index to topology properties + * Returns: tuple of socket/core/thread ids given cpu_index belongs to. * used to provide @cpu_index to socket number mapping, allowing * a machine to group CPU threads belonging to the same socket/package * Returns: socket number given cpu_index belongs to. 
@@ -136,10 +143,13 @@ struct MachineClass { int minimum_page_bits; bool has_hotpluggable_cpus; int numa_mem_align_shift; + void (*numa_auto_assign_ram)(MachineClass *mc, NodeInfo *nodes, + int nb_nodes, ram_addr_t size); HotplugHandler *(*get_hotplug_handler)(MachineState *machine, DeviceState *dev); - unsigned (*cpu_index_to_socket_id)(unsigned cpu_index); + CpuInstanceProperties (*cpu_index_to_instance_props)(MachineState *machine, + unsigned cpu_index); const CPUArchIdList *(*possible_cpu_arch_ids)(MachineState *machine); }; diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h index c1288f974d..9c5437dabc 100644 --- a/include/hw/ppc/pnv.h +++ b/include/hw/ppc/pnv.h @@ -21,6 +21,7 @@ #include "hw/boards.h" #include "hw/sysbus.h" +#include "hw/ipmi/ipmi.h" #include "hw/ppc/pnv_lpc.h" #include "hw/ppc/pnv_psi.h" #include "hw/ppc/pnv_occ.h" @@ -118,8 +119,6 @@ typedef struct PnvChipClass { #define POWERNV_MACHINE(obj) \ OBJECT_CHECK(PnvMachineState, (obj), TYPE_POWERNV_MACHINE) -typedef struct IPMIBmc IPMIBmc; - typedef struct PnvMachineState { /*< private >*/ MachineState parent_obj; diff --git a/include/hw/ppc/pnv_lpc.h b/include/hw/ppc/pnv_lpc.h index ccf969af94..023b4f0fec 100644 --- a/include/hw/ppc/pnv_lpc.h +++ b/include/hw/ppc/pnv_lpc.h @@ -19,12 +19,12 @@ #ifndef _PPC_PNV_LPC_H #define _PPC_PNV_LPC_H +#include "hw/ppc/pnv_psi.h" + #define TYPE_PNV_LPC "pnv-lpc" #define PNV_LPC(obj) \ OBJECT_CHECK(PnvLpcController, (obj), TYPE_PNV_LPC) -typedef struct PnvPsi PnvPsi; - typedef struct PnvLpcController { DeviceState parent; diff --git a/include/hw/ppc/pnv_occ.h b/include/hw/ppc/pnv_occ.h index f8ec330abf..82f299dc76 100644 --- a/include/hw/ppc/pnv_occ.h +++ b/include/hw/ppc/pnv_occ.h @@ -19,11 +19,11 @@ #ifndef _PPC_PNV_OCC_H #define _PPC_PNV_OCC_H +#include "hw/ppc/pnv_psi.h" + #define TYPE_PNV_OCC "pnv-occ" #define PNV_OCC(obj) OBJECT_CHECK(PnvOCC, (obj), TYPE_PNV_OCC) -typedef struct PnvPsi PnvPsi; - typedef struct PnvOCC { DeviceState xd; diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h index 3c35665221..93051e9ecf 100644 --- a/include/hw/ppc/spapr_cpu_core.h +++ b/include/hw/ppc/spapr_cpu_core.h @@ -27,6 +27,7 @@ typedef struct sPAPRCPUCore { /*< public >*/ void *threads; + int node_id; } sPAPRCPUCore; typedef struct sPAPRCPUCoreClass { diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h index c215dc72a4..05e6acbb35 100644 --- a/include/hw/ppc/xics.h +++ b/include/hw/ppc/xics.h @@ -29,6 +29,7 @@ #define XICS_H #include "hw/qdev.h" +#include "target/ppc/cpu-qom.h" #define XICS_IPI 0x2 #define XICS_BUID 0x1 @@ -46,7 +47,6 @@ typedef struct ICSStateClass ICSStateClass; typedef struct ICSState ICSState; typedef struct ICSIRQState ICSIRQState; typedef struct XICSFabric XICSFabric; -typedef struct PowerPCCPU PowerPCCPU; #define TYPE_ICP "icp" #define ICP(obj) OBJECT_CHECK(ICPState, (obj), TYPE_ICP) @@ -144,6 +144,8 @@ struct ICSIRQState { #define XICS_STATUS_SENT 0x2 #define XICS_STATUS_REJECTED 0x4 #define XICS_STATUS_MASKED_PENDING 0x8 +#define XICS_STATUS_PRESENTED 0x10 +#define XICS_STATUS_QUEUED 0x20 uint8_t status; /* (flags & XICS_FLAGS_IRQ_MASK) == 0 means the interrupt is not allocated */ #define XICS_FLAGS_IRQ_LSI 0x1 diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h index f3ffdceca4..83f474ffc3 100644 --- a/include/hw/virtio/virtio-gpu.h +++ b/include/hw/virtio/virtio-gpu.h @@ -169,6 +169,7 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd); void 
virtio_gpu_virgl_fence_poll(VirtIOGPU *g); void virtio_gpu_virgl_reset(VirtIOGPU *g); +void virtio_gpu_gl_block(void *opaque, bool block); int virtio_gpu_virgl_init(VirtIOGPU *g); #endif diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h index 1881284cb5..3f0926cf40 100644 --- a/include/qemu/bitops.h +++ b/include/qemu/bitops.h @@ -201,16 +201,6 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr, return find_next_zero_bit(addr, size, 0); } -static inline unsigned long hweight_long(unsigned long w) -{ - unsigned long count; - - for (count = 0; w; w >>= 1) { - count += w & 1; - } - return count; -} - /** * rol8 - rotate an 8-bit value left * @word: value to rotate diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h index f08d327aec..7d8505730c 100644 --- a/include/qemu/typedefs.h +++ b/include/qemu/typedefs.h @@ -97,5 +97,6 @@ typedef struct SSIBus SSIBus; typedef struct uWireSlave uWireSlave; typedef struct VirtIODevice VirtIODevice; typedef struct Visitor Visitor; +typedef struct node_info NodeInfo; #endif /* QEMU_TYPEDEFS_H */ diff --git a/include/qom/cpu.h b/include/qom/cpu.h index 5d10359c8f..55214ce131 100644 --- a/include/qom/cpu.h +++ b/include/qom/cpu.h @@ -258,6 +258,8 @@ typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data); struct qemu_work_item; +#define CPU_UNSET_NUMA_NODE_ID -1 + /** * CPUState: * @cpu_index: CPU index (informative). diff --git a/include/sysemu/numa.h b/include/sysemu/numa.h index 8f09dcf918..7ffde5b119 100644 --- a/include/sysemu/numa.h +++ b/include/sysemu/numa.h @@ -8,6 +8,7 @@ #include "hw/boards.h" extern int nb_numa_nodes; /* Number of NUMA nodes */ +extern bool have_numa_distance; struct numa_addr_range { ram_addr_t mem_start; @@ -15,24 +16,23 @@ struct numa_addr_range { QLIST_ENTRY(numa_addr_range) entry; }; -typedef struct node_info { +struct node_info { uint64_t node_mem; - unsigned long *node_cpu; struct HostMemoryBackend *node_memdev; bool present; QLIST_HEAD(, numa_addr_range) addr; /* List to store address ranges */ -} NodeInfo; + uint8_t distance[MAX_NODES]; +}; extern NodeInfo numa_info[MAX_NODES]; -void parse_numa_opts(MachineClass *mc); -void numa_post_machine_init(void); +void parse_numa_opts(MachineState *ms); void query_numa_node_mem(uint64_t node_mem[]); extern QemuOptsList qemu_numa_opts; void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node); void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node); uint32_t numa_get_node(ram_addr_t addr, Error **errp); - -/* on success returns node index in numa_info, - * on failure returns nb_numa_nodes */ -int numa_get_node_for_cpu(int idx); +void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes, + int nb_nodes, ram_addr_t size); +void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes, + int nb_nodes, ram_addr_t size); #endif diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h index 15656b7c36..be9e22c955 100644 --- a/include/sysemu/sysemu.h +++ b/include/sysemu/sysemu.h @@ -166,6 +166,10 @@ extern int mem_prealloc; #define MAX_NODES 128 #define NUMA_NODE_UNASSIGNED MAX_NODES +#define NUMA_DISTANCE_MIN 10 +#define NUMA_DISTANCE_DEFAULT 20 +#define NUMA_DISTANCE_MAX 254 +#define NUMA_DISTANCE_UNREACHABLE 255 #define MAX_OPTION_ROMS 16 typedef struct QEMUOptionRom { diff --git a/include/ui/console.h b/include/ui/console.h index d759338816..7262bef6d3 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -527,4 +527,7 @@ static inline void 
early_gtk_display_init(int opengl) } #endif +/* egl-headless.c */ +void egl_headless_init(void); + #endif diff --git a/include/ui/egl-helpers.h b/include/ui/egl-helpers.h index 88a13e827b..c785d60e91 100644 --- a/include/ui/egl-helpers.h +++ b/include/ui/egl-helpers.h @@ -21,7 +21,8 @@ int egl_get_fd_for_texture(uint32_t tex_id, EGLint *stride, EGLint *fourcc); EGLSurface qemu_egl_init_surface_x11(EGLContext ectx, Window win); -int qemu_egl_init_dpy(EGLNativeDisplayType dpy, bool gles, bool debug); +int qemu_egl_init_dpy_x11(EGLNativeDisplayType dpy); +int qemu_egl_init_dpy_mesa(EGLNativeDisplayType dpy); EGLContext qemu_egl_init_ctx(void); #endif /* EGL_HELPERS_H */ diff --git a/include/ui/input.h b/include/ui/input.h index d06a12dd4c..3cfd0f3363 100644 --- a/include/ui/input.h +++ b/include/ui/input.h @@ -8,7 +8,8 @@ #define INPUT_EVENT_MASK_REL (1<<INPUT_EVENT_KIND_REL) #define INPUT_EVENT_MASK_ABS (1<<INPUT_EVENT_KIND_ABS) -#define INPUT_EVENT_ABS_SIZE 0x8000 +#define INPUT_EVENT_ABS_MIN 0x0000 +#define INPUT_EVENT_ABS_MAX 0x7FFF typedef struct QemuInputHandler QemuInputHandler; typedef struct QemuInputHandlerState QemuInputHandlerState; @@ -54,12 +55,14 @@ void qemu_input_update_buttons(QemuConsole *src, uint32_t *button_map, uint32_t button_old, uint32_t button_new); bool qemu_input_is_absolute(void); -int qemu_input_scale_axis(int value, int size_in, int size_out); +int qemu_input_scale_axis(int value, + int min_in, int max_in, + int min_out, int max_out); InputEvent *qemu_input_event_new_move(InputEventKind kind, InputAxis axis, int value); void qemu_input_queue_rel(QemuConsole *src, InputAxis axis, int value); -void qemu_input_queue_abs(QemuConsole *src, InputAxis axis, - int value, int size); +void qemu_input_queue_abs(QemuConsole *src, InputAxis axis, int value, + int min_in, int max_in); void qemu_input_check_mode_change(void); void qemu_add_mouse_mode_change_notifier(Notifier *notify); diff --git a/numa.c b/numa.c index 39b743bd94..ca731455e9 100644 --- a/numa.c +++ b/numa.c @@ -51,6 +51,7 @@ static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one. 
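The input API change above replaces the single INPUT_EVENT_ABS_SIZE constant with explicit [min, max] ranges for both the source and destination axes. A minimal standalone sketch of such a range-to-range rescale (an illustration of the idea, not the actual ui/input.c code) could look like:

#include <stdint.h>
#include <stdio.h>

/* Linearly map value from [min_in, max_in] onto [min_out, max_out],
 * e.g. onto the [0x0000, 0x7FFF] range now used for absolute events. */
static int scale_axis_example(int value, int min_in, int max_in,
                              int min_out, int max_out)
{
    int64_t range_in = (int64_t)max_in - min_in;

    if (range_in == 0) {
        return min_out; /* degenerate input range */
    }
    return min_out + (int)((int64_t)(value - min_in) *
                           (max_out - min_out) / range_in);
}

int main(void)
{
    /* Map the midpoint of an 800-pixel-wide surface onto the tablet range. */
    printf("0x%04x\n", scale_axis_example(400, 0, 800, 0x0000, 0x7FFF));
    return 0;
}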
* For all nodes, nodeid < max_numa_nodeid */ int nb_numa_nodes; +bool have_numa_distance; NodeInfo numa_info[MAX_NODES]; void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node) @@ -140,10 +141,12 @@ uint32_t numa_get_node(ram_addr_t addr, Error **errp) return -1; } -static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp) +static void parse_numa_node(MachineState *ms, NumaNodeOptions *node, + QemuOpts *opts, Error **errp) { uint16_t nodenr; uint16List *cpus = NULL; + MachineClass *mc = MACHINE_GET_CLASS(ms); if (node->has_nodeid) { nodenr = node->nodeid; @@ -162,7 +165,12 @@ static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp) return; } + if (!mc->cpu_index_to_instance_props) { + error_report("NUMA is not supported by this machine-type"); + exit(1); + } for (cpus = node->cpus; cpus; cpus = cpus->next) { + CpuInstanceProperties props; if (cpus->value >= max_cpus) { error_setg(errp, "CPU index (%" PRIu16 ")" @@ -170,7 +178,10 @@ static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp) cpus->value, max_cpus); return; } - bitmap_set(numa_info[nodenr].node_cpu, cpus->value, 1); + props = mc->cpu_index_to_instance_props(ms, cpus->value); + props.node_id = nodenr; + props.has_node_id = true; + machine_set_cpu_numa_node(ms, &props, &error_fatal); } if (node->has_mem && node->has_memdev) { @@ -212,9 +223,47 @@ static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp) max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1); } +static void parse_numa_distance(NumaDistOptions *dist, Error **errp) +{ + uint16_t src = dist->src; + uint16_t dst = dist->dst; + uint8_t val = dist->val; + + if (src >= MAX_NODES || dst >= MAX_NODES) { + error_setg(errp, + "Invalid node %" PRIu16 + ", max possible could be %" PRIu16, + MAX(src, dst), MAX_NODES); + return; + } + + if (!numa_info[src].present || !numa_info[dst].present) { + error_setg(errp, "Source/Destination NUMA node is missing. 
" + "Please use '-numa node' option to declare it first."); + return; + } + + if (val < NUMA_DISTANCE_MIN) { + error_setg(errp, "NUMA distance (%" PRIu8 ") is invalid, " + "it shouldn't be less than %d.", + val, NUMA_DISTANCE_MIN); + return; + } + + if (src == dst && val != NUMA_DISTANCE_MIN) { + error_setg(errp, "Local distance of node %d should be %d.", + src, NUMA_DISTANCE_MIN); + return; + } + + numa_info[src].distance[dst] = val; + have_numa_distance = true; +} + static int parse_numa(void *opaque, QemuOpts *opts, Error **errp) { NumaOptions *object = NULL; + MachineState *ms = opaque; Error *err = NULL; { @@ -229,12 +278,33 @@ static int parse_numa(void *opaque, QemuOpts *opts, Error **errp) switch (object->type) { case NUMA_OPTIONS_TYPE_NODE: - numa_node_parse(&object->u.node, opts, &err); + parse_numa_node(ms, &object->u.node, opts, &err); if (err) { goto end; } nb_numa_nodes++; break; + case NUMA_OPTIONS_TYPE_DIST: + parse_numa_distance(&object->u.dist, &err); + if (err) { + goto end; + } + break; + case NUMA_OPTIONS_TYPE_CPU: + if (!object->u.cpu.has_node_id) { + error_setg(&err, "Missing mandatory node-id property"); + goto end; + } + if (!numa_info[object->u.cpu.node_id].present) { + error_setg(&err, "Invalid node-id=%" PRId64 ", NUMA node must be " + "defined with -numa node,nodeid=ID before it's used with " + "-numa cpu,node-id=ID", object->u.cpu.node_id); + goto end; + } + + machine_set_cpu_numa_node(ms, qapi_NumaCpuOptions_base(&object->u.cpu), + &err); + break; default: abort(); } @@ -249,60 +319,118 @@ end: return 0; } -static char *enumerate_cpus(unsigned long *cpus, int max_cpus) +/* If all node pair distances are symmetric, then only distances + * in one direction are enough. If there is even one asymmetric + * pair, though, then all distances must be provided. The + * distance from a node to itself is always NUMA_DISTANCE_MIN, + * so providing it is never necessary. + */ +static void validate_numa_distance(void) { - int cpu; - bool first = true; - GString *s = g_string_new(NULL); + int src, dst; + bool is_asymmetrical = false; + + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = src; dst < nb_numa_nodes; dst++) { + if (numa_info[src].distance[dst] == 0 && + numa_info[dst].distance[src] == 0) { + if (src != dst) { + error_report("The distance between node %d and %d is " + "missing, at least one distance value " + "between each nodes should be provided.", + src, dst); + exit(EXIT_FAILURE); + } + } - for (cpu = find_first_bit(cpus, max_cpus); - cpu < max_cpus; - cpu = find_next_bit(cpus, max_cpus, cpu + 1)) { - g_string_append_printf(s, "%s%d", first ? 
"" : " ", cpu); - first = false; + if (numa_info[src].distance[dst] != 0 && + numa_info[dst].distance[src] != 0 && + numa_info[src].distance[dst] != + numa_info[dst].distance[src]) { + is_asymmetrical = true; + } + } + } + + if (is_asymmetrical) { + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = 0; dst < nb_numa_nodes; dst++) { + if (src != dst && numa_info[src].distance[dst] == 0) { + error_report("At least one asymmetrical pair of " + "distances is given, please provide distances " + "for both directions of all node pairs."); + exit(EXIT_FAILURE); + } + } + } } - return g_string_free(s, FALSE); } -static void validate_numa_cpus(void) +static void complete_init_numa_distance(void) { - int i; - unsigned long *seen_cpus = bitmap_new(max_cpus); + int src, dst; - for (i = 0; i < nb_numa_nodes; i++) { - if (bitmap_intersects(seen_cpus, numa_info[i].node_cpu, max_cpus)) { - bitmap_and(seen_cpus, seen_cpus, - numa_info[i].node_cpu, max_cpus); - error_report("CPU(s) present in multiple NUMA nodes: %s", - enumerate_cpus(seen_cpus, max_cpus)); - g_free(seen_cpus); - exit(EXIT_FAILURE); + /* Fixup NUMA distance by symmetric policy because if it is an + * asymmetric distance table, it should be a complete table and + * there would not be any missing distance except local node, which + * is verified by validate_numa_distance above. + */ + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = 0; dst < nb_numa_nodes; dst++) { + if (numa_info[src].distance[dst] == 0) { + if (src == dst) { + numa_info[src].distance[dst] = NUMA_DISTANCE_MIN; + } else { + numa_info[src].distance[dst] = numa_info[dst].distance[src]; + } + } } - bitmap_or(seen_cpus, seen_cpus, - numa_info[i].node_cpu, max_cpus); } +} - if (!bitmap_full(seen_cpus, max_cpus)) { - char *msg; - bitmap_complement(seen_cpus, seen_cpus, max_cpus); - msg = enumerate_cpus(seen_cpus, max_cpus); - error_report("warning: CPU(s) not present in any NUMA nodes: %s", msg); - error_report("warning: All CPU(s) up to maxcpus should be described " - "in NUMA config"); - g_free(msg); +void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes, + int nb_nodes, ram_addr_t size) +{ + int i; + uint64_t usedmem = 0; + + /* Align each node according to the alignment + * requirements of the machine class + */ + + for (i = 0; i < nb_nodes - 1; i++) { + nodes[i].node_mem = (size / nb_nodes) & + ~((1 << mc->numa_mem_align_shift) - 1); + usedmem += nodes[i].node_mem; } - g_free(seen_cpus); + nodes[i].node_mem = size - usedmem; } -void parse_numa_opts(MachineClass *mc) +void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes, + int nb_nodes, ram_addr_t size) { int i; + uint64_t usedmem = 0, node_mem; + uint64_t granularity = size / nb_nodes; + uint64_t propagate = 0; + + for (i = 0; i < nb_nodes - 1; i++) { + node_mem = (granularity + propagate) & + ~((1 << mc->numa_mem_align_shift) - 1); + propagate = granularity + propagate - node_mem; + nodes[i].node_mem = node_mem; + usedmem += node_mem; + } + nodes[i].node_mem = size - usedmem; +} - for (i = 0; i < MAX_NODES; i++) { - numa_info[i].node_cpu = bitmap_new(max_cpus); - } +void parse_numa_opts(MachineState *ms) +{ + int i; + const CPUArchIdList *possible_cpus; + MachineClass *mc = MACHINE_GET_CLASS(ms); - if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, NULL, NULL)) { + if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, NULL)) { exit(1); } @@ -336,17 +464,8 @@ void parse_numa_opts(MachineClass *mc) } } if (i == nb_numa_nodes) { - uint64_t usedmem = 0; - - /* Align each 
node according to the alignment - * requirements of the machine class - */ - for (i = 0; i < nb_numa_nodes - 1; i++) { - numa_info[i].node_mem = (ram_size / nb_numa_nodes) & - ~((1 << mc->numa_mem_align_shift) - 1); - usedmem += numa_info[i].node_mem; - } - numa_info[i].node_mem = ram_size - usedmem; + assert(mc->numa_auto_assign_ram); + mc->numa_auto_assign_ram(mc, numa_info, nb_numa_nodes, ram_size); } numa_total = 0; @@ -366,47 +485,52 @@ void parse_numa_opts(MachineClass *mc) numa_set_mem_ranges(); - for (i = 0; i < nb_numa_nodes; i++) { - if (!bitmap_empty(numa_info[i].node_cpu, max_cpus)) { + /* assign CPUs to nodes using board provided default mapping */ + if (!mc->cpu_index_to_instance_props || !mc->possible_cpu_arch_ids) { + error_report("default CPUs to NUMA node mapping isn't supported"); + exit(1); + } + + possible_cpus = mc->possible_cpu_arch_ids(ms); + for (i = 0; i < possible_cpus->len; i++) { + if (possible_cpus->cpus[i].props.has_node_id) { break; } } - /* Historically VCPUs were assigned in round-robin order to NUMA - * nodes. However it causes issues with guest not handling it nice - * in case where cores/threads from a multicore CPU appear on - * different nodes. So allow boards to override default distribution - * rule grouping VCPUs by socket so that VCPUs from the same socket - * would be on the same node. - */ - if (i == nb_numa_nodes) { + + /* no CPUs are assigned to NUMA nodes */ + if (i == possible_cpus->len) { for (i = 0; i < max_cpus; i++) { - unsigned node_id = i % nb_numa_nodes; - if (mc->cpu_index_to_socket_id) { - node_id = mc->cpu_index_to_socket_id(i) % nb_numa_nodes; - } + CpuInstanceProperties props; + /* fetch default mapping from board and enable it */ + props = mc->cpu_index_to_instance_props(ms, i); + props.has_node_id = true; - set_bit(i, numa_info[node_id].node_cpu); + machine_set_cpu_numa_node(ms, &props, &error_fatal); } } - validate_numa_cpus(); - } else { - numa_set_mem_node_id(0, ram_size, 0); - } -} - -void numa_post_machine_init(void) -{ - CPUState *cpu; - int i; + /* QEMU needs at least all unique node pair distances to build + * the whole NUMA distance table. QEMU treats the distance table + * as symmetric by default, i.e. distance A->B == distance B->A. + * Thus, QEMU is able to complete the distance table + * initialization even though only distance A->B is provided and + * distance B->A is not. QEMU knows the distance of a node to + * itself is always 10, so A->A distances may be omitted. When + * the distances of two nodes of a pair differ, i.e. distance + * A->B != distance B->A, then that means the distance table is + * asymmetric. In this case, the distances for both directions + * of all node pairs are required. + */ + if (have_numa_distance) { + /* Validate enough NUMA distance information was provided. */ + validate_numa_distance(); - CPU_FOREACH(cpu) { - for (i = 0; i < nb_numa_nodes; i++) { - assert(cpu->cpu_index < max_cpus); - if (test_bit(cpu->cpu_index, numa_info[i].node_cpu)) { - cpu->numa_node = i; - } + /* Validation succeeded, now fill in any missing distances. 
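As a concrete illustration of the completion rule implemented above (the local distance is always 10, and a missing reverse distance is mirrored from the forward one), here is a standalone sketch with made-up distance values; the in-tree code additionally validates the table before filling it in:

#include <stdio.h>

/* Toy distance-table completion: 0 means "not given on the command
 * line". Diagonal entries default to 10, missing B->A entries are
 * copied from A->B. Standalone sketch, not the QEMU implementation. */
#define NODES 3

int main(void)
{
    int dist[NODES][NODES] = {
        { 0, 21, 31 },
        { 0,  0, 41 },
        { 0,  0,  0 },
    };

    for (int src = 0; src < NODES; src++) {
        for (int dst = 0; dst < NODES; dst++) {
            if (dist[src][dst] == 0) {
                dist[src][dst] = (src == dst) ? 10 : dist[dst][src];
            }
        }
    }

    for (int src = 0; src < NODES; src++) {
        for (int dst = 0; dst < NODES; dst++) {
            printf("%4d", dist[src][dst]);
        }
        printf("\n");
    }
    return 0;
}

With this input the result is a symmetric matrix with 10 on the diagonal, 21, 31 and 41 mirrored into both directions.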
*/ + complete_init_numa_distance(); } + } else { + numa_set_mem_node_id(0, ram_size, 0); } } @@ -560,20 +684,6 @@ MemdevList *qmp_query_memdev(Error **errp) return list; } -int numa_get_node_for_cpu(int idx) -{ - int i; - - assert(idx < max_cpus); - - for (i = 0; i < nb_numa_nodes; i++) { - if (test_bit(idx, numa_info[i].node_cpu)) { - break; - } - } - return i; -} - void ram_block_notifier_add(RAMBlockNotifier *n) { QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next); diff --git a/pc-bios/README b/pc-bios/README index dcead369bf..ebc699d322 100644 --- a/pc-bios/README +++ b/pc-bios/README @@ -47,3 +47,6 @@ (OpenPower Abstraction Layer) firmware for OpenPOWER systems. It can run an hypervisor OS or simply a host OS on the "baremetal" platform, also known as the PowerNV (Non-Virtualized) platform. + +- QemuMacDrivers (https://github.com/ozbenh/QemuMacDrivers) is a project to + provide virtualised drivers for PPC MacOS guests. diff --git a/pc-bios/qemu_vga.ndrv b/pc-bios/qemu_vga.ndrv new file mode 100644 index 0000000000..6e02f74d61 --- /dev/null +++ b/pc-bios/qemu_vga.ndrv Binary files differdiff --git a/qapi-schema.json b/qapi-schema.json index 5728b7f363..80603cfc51 100644 --- a/qapi-schema.json +++ b/qapi-schema.json @@ -1325,6 +1325,9 @@ # # @thread_id: ID of the underlying host thread # +# @props: properties describing to which node/socket/core/thread +# virtual CPU belongs to, provided if supported by board (since 2.10) +# # @arch: architecture of the cpu, which determines which additional fields # will be listed (since 2.6) # @@ -1335,7 +1338,8 @@ ## { 'union': 'CpuInfo', 'base': {'CPU': 'int', 'current': 'bool', 'halted': 'bool', - 'qom_path': 'str', 'thread_id': 'int', 'arch': 'CpuInfoArch' }, + 'qom_path': 'str', 'thread_id': 'int', + '*props': 'CpuInstanceProperties', 'arch': 'CpuInfoArch' }, 'discriminator': 'arch', 'data': { 'x86': 'CpuInfoX86', 'sparc': 'CpuInfoSPARC', @@ -5682,10 +5686,16 @@ ## # @NumaOptionsType: # +# @node: NUMA nodes configuration +# +# @dist: NUMA distance configuration (since 2.10) +# +# @cpu: property based CPU(s) to node mapping (Since: 2.10) +# # Since: 2.1 ## { 'enum': 'NumaOptionsType', - 'data': [ 'node' ] } + 'data': [ 'node', 'dist', 'cpu' ] } ## # @NumaOptions: @@ -5698,7 +5708,9 @@ 'base': { 'type': 'NumaOptionsType' }, 'discriminator': 'type', 'data': { - 'node': 'NumaNodeOptions' }} + 'node': 'NumaNodeOptions', + 'dist': 'NumaDistOptions', + 'cpu': 'NumaCpuOptions' }} ## # @NumaNodeOptions: @@ -5727,6 +5739,41 @@ '*memdev': 'str' }} ## +# @NumaDistOptions: +# +# Set the distance between 2 NUMA nodes. +# +# @src: source NUMA node. +# +# @dst: destination NUMA node. +# +# @val: NUMA distance from source node to destination node. +# When a node is unreachable from another node, set the distance +# between them to 255. +# +# Since: 2.10 +## +{ 'struct': 'NumaDistOptions', + 'data': { + 'src': 'uint16', + 'dst': 'uint16', + 'val': 'uint8' }} + +## +# @NumaCpuOptions: +# +# Option "-numa cpu" overrides default cpu to node mapping. +# It accepts the same set of cpu properties as returned by +# query-hotpluggable-cpus[].props, where node-id could be used to +# override default node mapping. 
+# +# Since: 2.10 +## +{ 'struct': 'NumaCpuOptions', + 'base': 'CpuInstanceProperties', + 'data' : {} } + +## # @HostMemPolicy: # # Host memory policy types diff --git a/qemu-doc.texi b/qemu-doc.texi index 3dd9eac4f3..de0cc30790 100644 --- a/qemu-doc.texi +++ b/qemu-doc.texi @@ -182,7 +182,7 @@ Gravis Ultrasound GF1 sound card @item CS4231A compatible sound card @item -PCI UHCI USB controller and a virtual USB hub. +PCI UHCI, OHCI, EHCI or XHCI USB controller and a virtual USB-1.1 hub. @end itemize SMP is supported with up to 255 CPUs. @@ -1357,10 +1357,10 @@ monitor (@pxref{pcsys_keys}). @node pcsys_usb @section USB emulation -QEMU emulates a PCI UHCI USB controller. You can virtually plug -virtual USB devices or real host USB devices (experimental, works only -on Linux hosts). QEMU will automatically create and connect virtual USB hubs -as necessary to connect multiple USB devices. +QEMU can emulate a PCI UHCI, OHCI, EHCI or XHCI USB controller. You can +plug virtual USB devices or real host USB devices (only works with certain +host operating systems). QEMU will automatically create and connect virtual +USB hubs as necessary to connect multiple USB devices. @menu * usb_devices:: @@ -1369,53 +1369,64 @@ as necessary to connect multiple USB devices. @node usb_devices @subsection Connecting USB devices -USB devices can be connected with the @option{-usbdevice} commandline option -or the @code{usb_add} monitor command. Available devices are: +USB devices can be connected with the @option{-device usb-...} command line +option or the @code{device_add} monitor command. Available devices are: @table @code -@item mouse +@item usb-mouse Virtual Mouse. This will override the PS/2 mouse emulation when activated. -@item tablet +@item usb-tablet Pointer device that uses absolute coordinates (like a touchscreen). This means QEMU is able to report the mouse position without having to grab the mouse. Also overrides the PS/2 mouse emulation when activated. -@item disk:@var{file} -Mass storage device based on @var{file} (@pxref{disk_images}) -@item host:@var{bus.addr} -Pass through the host device identified by @var{bus.addr} -(Linux only) -@item host:@var{vendor_id:product_id} -Pass through the host device identified by @var{vendor_id:product_id} -(Linux only) -@item wacom-tablet +@item usb-storage,drive=@var{drive_id} +Mass storage device backed by @var{drive_id} (@pxref{disk_images}) +@item usb-uas +USB attached SCSI device, see +@url{http://git.qemu.org/?p=qemu.git;a=blob_plain;f=docs/usb-storage.txt,usb-storage.txt} +for details +@item usb-bot +Bulk-only transport storage device, see +@url{http://git.qemu.org/?p=qemu.git;a=blob_plain;f=docs/usb-storage.txt,usb-storage.txt} +for details here, too +@item usb-mtp,x-root=@var{dir} +Media transfer protocol device, using @var{dir} as root of the file tree +that is presented to the guest. +@item usb-host,hostbus=@var{bus},hostaddr=@var{addr} +Pass through the host device identified by @var{bus} and @var{addr} +@item usb-host,vendorid=@var{vendor},productid=@var{product} +Pass through the host device identified by @var{vendor} and @var{product} ID +@item usb-wacom-tablet Virtual Wacom PenPartner tablet. This device is similar to the @code{tablet} above but it can be used with the tslib library because in addition to touch coordinates it reports touch pressure. -@item keyboard +@item usb-kbd Standard USB keyboard. Will override the PS/2 keyboard (if present). 
-@item serial:[vendorid=@var{vendor_id}][,product_id=@var{product_id}]:@var{dev} +@item usb-serial,chardev=@var{id} Serial converter. This emulates an FTDI FT232BM chip connected to host character -device @var{dev}. The available character devices are the same as for the -@code{-serial} option. The @code{vendorid} and @code{productid} options can be -used to override the default 0403:6001. For instance, -@example -usb_add serial:productid=FA00:tcp:192.168.0.2:4444 -@end example -will connect to tcp port 4444 of ip 192.168.0.2, and plug that to the virtual -serial converter, faking a Matrix Orbital LCD Display (USB ID 0403:FA00). -@item braille +device @var{id}. +@item usb-braille,chardev=@var{id} Braille device. This will use BrlAPI to display the braille output on a real -or fake device. -@item net:@var{options} -Network adapter that supports CDC ethernet and RNDIS protocols. @var{options} -specifies NIC options as with @code{-net nic,}@var{options} (see description). +or fake device referenced by @var{id}. +@item usb-net[,netdev=@var{id}] +Network adapter that supports CDC ethernet and RNDIS protocols. @var{id} +specifies a netdev defined with @code{-netdev @dots{},id=@var{id}}. For instance, user-mode networking can be used with @example -qemu-system-i386 [...OPTIONS...] -net user,vlan=0 -usbdevice net:vlan=0 -@end example -Currently this cannot be used in machines that support PCI NICs. -@item bt[:@var{hci-type}] -Bluetooth dongle whose type is specified in the same format as with +qemu-system-i386 [...] -netdev user,id=net0 -device usb-net,netdev=net0 +@end example +@item usb-ccid +Smartcard reader device +@item usb-audio +USB audio device +@item usb-bt-dongle +Bluetooth dongle for the transport layer of HCI. It is connected to HCI +scatternet 0 by default (corresponds to @code{-bt hci,vlan=0}). +Note that the syntax for the @code{-device usb-bt-dongle} option is not as +useful yet as it was with the legacy @code{-usbdevice} option. So to +configure an USB bluetooth device, you might need to use +"@code{-usbdevice bt}[:@var{hci-type}]" instead. This configures a +bluetooth dongle whose type is specified in the same format as with the @option{-bt hci} option, @pxref{bt-hcis,,allowed HCI types}. If no type is given, the HCI logic corresponds to @code{-bt hci,vlan=0}. This USB device implements the USB Transport Layer of HCI. Example @@ -1460,11 +1471,11 @@ hubs, it won't work). @item Add the device in QEMU by using: @example -usb_add host:1234:5678 +device_add usb-host,vendorid=0x1234,productid=0x5678 @end example -Normally the guest OS should report that a new USB device is -plugged. You can use the option @option{-usbdevice} to do the same. +Normally the guest OS should report that a new USB device is plugged. +You can use the option @option{-device usb-host,...} to do the same. @item Now you can try to use the host USB device in QEMU. 
diff --git a/qemu-options.hx b/qemu-options.hx index f806af9f2d..f07a310eb1 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -139,13 +139,18 @@ ETEXI DEF("numa", HAS_ARG, QEMU_OPTION_numa, "-numa node[,mem=size][,cpus=firstcpu[-lastcpu]][,nodeid=node]\n" - "-numa node[,memdev=id][,cpus=firstcpu[-lastcpu]][,nodeid=node]\n", QEMU_ARCH_ALL) + "-numa node[,memdev=id][,cpus=firstcpu[-lastcpu]][,nodeid=node]\n" + "-numa dist,src=source,dst=destination,val=distance\n", QEMU_ARCH_ALL) STEXI @item -numa node[,mem=@var{size}][,cpus=@var{firstcpu}[-@var{lastcpu}]][,nodeid=@var{node}] @itemx -numa node[,memdev=@var{id}][,cpus=@var{firstcpu}[-@var{lastcpu}]][,nodeid=@var{node}] +@itemx -numa dist,src=@var{source},dst=@var{destination},val=@var{distance} +@itemx -numa cpu,node-id=@var{node}[,socket-id=@var{x}][,core-id=@var{y}][,thread-id=@var{z}] @findex -numa Define a NUMA node and assign RAM and VCPUs to it. +Set the NUMA distance from a source node to a destination node. +Legacy VCPU assignment uses @samp{cpus} option where @var{firstcpu} and @var{lastcpu} are CPU indexes. Each @samp{cpus} option represent a contiguous range of CPU indexes (or a single VCPU if @var{lastcpu} is omitted). A non-contiguous @@ -159,6 +164,24 @@ a NUMA node: -numa node,cpus=0-2,cpus=5 @end example +@samp{cpu} option is a new alternative to @samp{cpus} option +which uses @samp{socket-id|core-id|thread-id} properties to assign +CPU objects to a @var{node} using topology layout properties of CPU. +The set of properties is machine specific, and depends on used +machine type/@samp{smp} options. It could be queried with +@samp{hotpluggable-cpus} monitor command. +@samp{node-id} property specifies @var{node} to which CPU object +will be assigned, it's required for @var{node} to be declared +with @samp{node} option before it's used with @samp{cpu} option. + +For example: +@example +-M pc \ +-smp 1,sockets=2,maxcpus=2 \ +-numa node,nodeid=0 -numa node,nodeid=1 \ +-numa cpu,node-id=0,socket-id=0 -numa cpu,node-id=1,socket-id=1 +@end example + @samp{mem} assigns a given RAM amount to a node. @samp{memdev} assigns RAM from a given memory backend device to a node. If @samp{mem} and @samp{memdev} are omitted in all nodes, RAM is @@ -167,6 +190,17 @@ split equally between them. @samp{mem} and @samp{memdev} are mutually exclusive. Furthermore, if one node uses @samp{memdev}, all of them have to use it. +@var{source} and @var{destination} are NUMA node IDs. +@var{distance} is the NUMA distance from @var{source} to @var{destination}. +The distance from a node to itself is always 10. If any pair of nodes is +given a distance, then all pairs must be given distances. Although, when +distances are only given in one direction for each pair of nodes, then +the distances in the opposite directions are assumed to be the same. If, +however, an asymmetrical pair of distances is given for even one node +pair, then all node pairs must be provided distance values for both +directions, even when they are symmetrical. When a node is unreachable +from another node, set the pair's distance to 255. + Note that the -@option{numa} option doesn't allocate any of the specified resources, it just assigns existing resources to NUMA nodes. 
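When neither mem nor memdev is given, the board splits RAM through the numa_auto_assign_ram hook shown earlier in this patch: every node except the last gets roughly size/nb_nodes rounded down to the machine's alignment, the rounding remainder is carried forward, and the last node absorbs whatever is left. A standalone sketch with made-up numbers (the real alignment shift comes from the machine class, not the constant used here):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    const uint64_t size = 4095ULL << 20;   /* 4095 MiB of guest RAM */
    const int nb_nodes = 4;
    const unsigned align_shift = 23;       /* assume 8 MiB alignment */
    uint64_t granularity = size / nb_nodes;
    uint64_t propagate = 0, usedmem = 0, node_mem;

    for (int i = 0; i < nb_nodes; i++) {
        if (i == nb_nodes - 1) {
            node_mem = size - usedmem;     /* last node takes the rest */
        } else {
            node_mem = (granularity + propagate) &
                       ~((1ULL << align_shift) - 1);
            propagate = granularity + propagate - node_mem;
            usedmem += node_mem;
        }
        printf("node %d: %" PRIu64 " MiB\n", i, node_mem >> 20);
    }
    return 0;
}

With these numbers the four nodes end up with 1016, 1024, 1024 and 1031 MiB respectively, which sums back to the full 4095 MiB.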
This means that one still has to use the @option{-m}, diff --git a/roms/QemuMacDrivers b/roms/QemuMacDrivers new file mode 160000 +Subproject d4e7d7ac663fcb55f1b93575445fcbca372f17a diff --git a/target/arm/cpu.c b/target/arm/cpu.c index b357aee778..c185eb19ac 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -458,6 +458,13 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info) } } +uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz) +{ + uint32_t Aff1 = idx / clustersz; + uint32_t Aff0 = idx % clustersz; + return (Aff1 << ARM_AFF1_SHIFT) | Aff0; +} + static void arm_cpu_initfn(Object *obj) { CPUState *cs = CPU(obj); @@ -709,9 +716,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * so these bits always RAZ. */ if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) { - uint32_t Aff1 = cs->cpu_index / ARM_DEFAULT_CPUS_PER_CLUSTER; - uint32_t Aff0 = cs->cpu_index % ARM_DEFAULT_CPUS_PER_CLUSTER; - cpu->mp_affinity = (Aff1 << ARM_AFF1_SHIFT) | Aff0; + cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index, + ARM_DEFAULT_CPUS_PER_CLUSTER); } if (cpu->reset_hivecs) { @@ -1567,6 +1573,7 @@ static Property arm_cpu_properties[] = { DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0), DEFINE_PROP_UINT64("mp-affinity", ARMCPU, mp_affinity, ARM64_AFFINITY_INVALID), + DEFINE_PROP_INT32("node-id", CPUState, numa_node, CPU_UNSET_NUMA_NODE_ID), DEFINE_PROP_END_OF_LIST() }; diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 1055bfef3d..048faed9b9 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -710,6 +710,8 @@ static inline ARMCPU *arm_env_get_cpu(CPUARMState *env) return container_of(env, ARMCPU, env); } +uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz); + #define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e)) #define ENV_OFFSET offsetof(ARMCPU, env) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 7e87031fad..5e768404a1 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -2635,28 +2635,23 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, X86CPU *cpu = x86_env_get_cpu(env); CPUState *cs = CPU(cpu); uint32_t pkg_offset; + uint32_t limit; - /* test if maximum index reached */ - if (index & 0x80000000) { - if (index > env->cpuid_xlevel) { - if (env->cpuid_xlevel2 > 0) { - /* Handle the Centaur's CPUID instruction. */ - if (index > env->cpuid_xlevel2) { - index = env->cpuid_xlevel2; - } else if (index < 0xC0000000) { - index = env->cpuid_xlevel; - } - } else { - /* Intel documentation states that invalid EAX input will - * return the same information as EAX=cpuid_level - * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) - */ - index = env->cpuid_level; - } - } + /* Calculate & apply limits for different index ranges */ + if (index >= 0xC0000000) { + limit = env->cpuid_xlevel2; + } else if (index >= 0x80000000) { + limit = env->cpuid_xlevel; } else { - if (index > env->cpuid_level) - index = env->cpuid_level; + limit = env->cpuid_level; + } + + if (index > limit) { + /* Intel documentation states that invalid EAX input will + * return the same information as EAX=cpuid_level + * (Intel SDM Vol. 
2A - Instruction Set Reference - CPUID) + */ + index = env->cpuid_level; } switch(index) { @@ -3991,6 +3986,7 @@ static Property x86_cpu_properties[] = { DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1), DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1), #endif + DEFINE_PROP_INT32("node-id", CPUState, numa_node, CPU_UNSET_NUMA_NODE_ID), DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks }, DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false), diff --git a/target/i386/machine.c b/target/i386/machine.c index 78ae2f986b..3cb272948e 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -136,178 +136,48 @@ static const VMStateDescription vmstate_mtrr_var = { #define VMSTATE_MTRR_VARS(_field, _state, _n, _v) \ VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar) -static int put_fpreg_error(QEMUFile *f, void *opaque, size_t size, - VMStateField *field, QJSON *vmdesc) -{ - fprintf(stderr, "call put_fpreg() with invalid arguments\n"); - exit(0); - return 0; -} - -/* XXX: add that in a FPU generic layer */ -union x86_longdouble { - uint64_t mant; - uint16_t exp; -}; +typedef struct x86_FPReg_tmp { + FPReg *parent; + uint64_t tmp_mant; + uint16_t tmp_exp; +} x86_FPReg_tmp; -#define MANTD1(fp) (fp & ((1LL << 52) - 1)) -#define EXPBIAS1 1023 -#define EXPD1(fp) ((fp >> 52) & 0x7FF) -#define SIGND1(fp) ((fp >> 32) & 0x80000000) - -static void fp64_to_fp80(union x86_longdouble *p, uint64_t temp) +static void fpreg_pre_save(void *opaque) { - int e; - /* mantissa */ - p->mant = (MANTD1(temp) << 11) | (1LL << 63); - /* exponent + sign */ - e = EXPD1(temp) - EXPBIAS1 + 16383; - e |= SIGND1(temp) >> 16; - p->exp = e; -} + x86_FPReg_tmp *tmp = opaque; -static int get_fpreg(QEMUFile *f, void *opaque, size_t size, - VMStateField *field) -{ - FPReg *fp_reg = opaque; - uint64_t mant; - uint16_t exp; - - qemu_get_be64s(f, &mant); - qemu_get_be16s(f, &exp); - fp_reg->d = cpu_set_fp80(mant, exp); - return 0; -} - -static int put_fpreg(QEMUFile *f, void *opaque, size_t size, - VMStateField *field, QJSON *vmdesc) -{ - FPReg *fp_reg = opaque; - uint64_t mant; - uint16_t exp; /* we save the real CPU data (in case of MMX usage only 'mant' contains the MMX register */ - cpu_get_fp80(&mant, &exp, fp_reg->d); - qemu_put_be64s(f, &mant); - qemu_put_be16s(f, &exp); - - return 0; + cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d); } -static const VMStateInfo vmstate_fpreg = { - .name = "fpreg", - .get = get_fpreg, - .put = put_fpreg, -}; - -static int get_fpreg_1_mmx(QEMUFile *f, void *opaque, size_t size, - VMStateField *field) +static int fpreg_post_load(void *opaque, int version) { - union x86_longdouble *p = opaque; - uint64_t mant; + x86_FPReg_tmp *tmp = opaque; - qemu_get_be64s(f, &mant); - p->mant = mant; - p->exp = 0xffff; + tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp); return 0; } -static const VMStateInfo vmstate_fpreg_1_mmx = { - .name = "fpreg_1_mmx", - .get = get_fpreg_1_mmx, - .put = put_fpreg_error, -}; - -static int get_fpreg_1_no_mmx(QEMUFile *f, void *opaque, size_t size, - VMStateField *field) -{ - union x86_longdouble *p = opaque; - uint64_t mant; - - qemu_get_be64s(f, &mant); - fp64_to_fp80(p, mant); - return 0; -} - -static const VMStateInfo vmstate_fpreg_1_no_mmx = { - .name = "fpreg_1_no_mmx", - .get = get_fpreg_1_no_mmx, - .put = put_fpreg_error, +static const VMStateDescription vmstate_fpreg_tmp = { + .name = "fpreg_tmp", + .post_load = fpreg_post_load, + .pre_save = 
fpreg_pre_save, + .fields = (VMStateField[]) { + VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp), + VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp), + VMSTATE_END_OF_LIST() + } }; -static bool fpregs_is_0(void *opaque, int version_id) -{ - X86CPU *cpu = opaque; - CPUX86State *env = &cpu->env; - - return (env->fpregs_format_vmstate == 0); -} - -static bool fpregs_is_1_mmx(void *opaque, int version_id) -{ - X86CPU *cpu = opaque; - CPUX86State *env = &cpu->env; - int guess_mmx; - - guess_mmx = ((env->fptag_vmstate == 0xff) && - (env->fpus_vmstate & 0x3800) == 0); - return (guess_mmx && (env->fpregs_format_vmstate == 1)); -} - -static bool fpregs_is_1_no_mmx(void *opaque, int version_id) -{ - X86CPU *cpu = opaque; - CPUX86State *env = &cpu->env; - int guess_mmx; - - guess_mmx = ((env->fptag_vmstate == 0xff) && - (env->fpus_vmstate & 0x3800) == 0); - return (!guess_mmx && (env->fpregs_format_vmstate == 1)); -} - -#define VMSTATE_FP_REGS(_field, _state, _n) \ - VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_0, vmstate_fpreg, FPReg), \ - VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_mmx, vmstate_fpreg_1_mmx, FPReg), \ - VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_no_mmx, vmstate_fpreg_1_no_mmx, FPReg) - -static bool version_is_5(void *opaque, int version_id) -{ - return version_id == 5; -} - -#ifdef TARGET_X86_64 -static bool less_than_7(void *opaque, int version_id) -{ - return version_id < 7; -} - -static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size, - VMStateField *field) -{ - uint64_t *v = pv; - *v = qemu_get_be32(f); - return 0; -} - -static int put_uint64_as_uint32(QEMUFile *f, void *pv, size_t size, - VMStateField *field, QJSON *vmdesc) -{ - uint64_t *v = pv; - qemu_put_be32(f, *v); - - return 0; -} - -static const VMStateInfo vmstate_hack_uint64_as_uint32 = { - .name = "uint64_as_uint32", - .get = get_uint64_as_uint32, - .put = put_uint64_as_uint32, +static const VMStateDescription vmstate_fpreg = { + .name = "fpreg", + .fields = (VMStateField[]) { + VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp), + VMSTATE_END_OF_LIST() + } }; -#define VMSTATE_HACK_UINT32(_f, _s, _t) \ - VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint64_as_uint32, uint64_t) -#endif - static void cpu_pre_save(void *opaque) { X86CPU *cpu = opaque; @@ -356,6 +226,10 @@ static int cpu_post_load(void *opaque, int version_id) return -EINVAL; } + if (env->fpregs_format_vmstate) { + error_report("Unsupported old non-softfloat CPU state"); + return -EINVAL; + } /* * Real mode guest segments register DPL should be zero. * Older KVM version were setting it wrongly. 
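The rework above replaces the hand-rolled .get/.put callbacks with the temporary-struct pattern: pre_save converts the live FPReg into a plain (mantissa, exponent) pair, and post_load rebuilds the register from it. The toy below only illustrates that save/load shape in standard C (frexp/ldexp on a double); it does not use the real VMState API or the 80-bit cpu_get_fp80()/cpu_set_fp80() conversion:

#include <math.h>
#include <stdio.h>

/* Live in-memory form and a plain two-field "wire" form. */
typedef struct { double value; } LiveReg;
typedef struct { double mant; int exp; } WireReg;

static void reg_pre_save(const LiveReg *live, WireReg *wire)
{
    /* Split the value into mantissa and exponent for the wire format. */
    wire->mant = frexp(live->value, &wire->exp);
}

static void reg_post_load(LiveReg *live, const WireReg *wire)
{
    /* Rebuild the in-memory value from the wire fields. */
    live->value = ldexp(wire->mant, wire->exp);
}

int main(void)
{
    LiveReg in = { 3.14159 }, out = { 0 };
    WireReg wire;

    reg_pre_save(&in, &wire);
    reg_post_load(&out, &wire);
    printf("saved mant=%f exp=%d, restored %f\n",
           wire.mant, wire.exp, out.value);
    return 0;
}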
@@ -930,7 +804,7 @@ static const VMStateDescription vmstate_mcg_ext_ctl = { VMStateDescription vmstate_x86_cpu = { .name = "cpu", .version_id = 12, - .minimum_version_id = 3, + .minimum_version_id = 11, .pre_save = cpu_pre_save, .post_load = cpu_post_load, .fields = (VMStateField[]) { @@ -943,7 +817,8 @@ VMStateDescription vmstate_x86_cpu = { VMSTATE_UINT16(env.fpus_vmstate, X86CPU), VMSTATE_UINT16(env.fptag_vmstate, X86CPU), VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU), - VMSTATE_FP_REGS(env.fpregs, X86CPU, 8), + + VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg), VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6), VMSTATE_SEGMENT(env.ldt, X86CPU), @@ -952,16 +827,8 @@ VMStateDescription vmstate_x86_cpu = { VMSTATE_SEGMENT(env.idt, X86CPU), VMSTATE_UINT32(env.sysenter_cs, X86CPU), -#ifdef TARGET_X86_64 - /* Hack: In v7 size changed from 32 to 64 bits on x86_64 */ - VMSTATE_HACK_UINT32(env.sysenter_esp, X86CPU, less_than_7), - VMSTATE_HACK_UINT32(env.sysenter_eip, X86CPU, less_than_7), - VMSTATE_UINTTL_V(env.sysenter_esp, X86CPU, 7), - VMSTATE_UINTTL_V(env.sysenter_eip, X86CPU, 7), -#else VMSTATE_UINTTL(env.sysenter_esp, X86CPU), VMSTATE_UINTTL(env.sysenter_eip, X86CPU), -#endif VMSTATE_UINTTL(env.cr[0], X86CPU), VMSTATE_UINTTL(env.cr[2], X86CPU), @@ -982,46 +849,45 @@ VMStateDescription vmstate_x86_cpu = { VMSTATE_UINT64(env.fmask, X86CPU), VMSTATE_UINT64(env.kernelgsbase, X86CPU), #endif - VMSTATE_UINT32_V(env.smbase, X86CPU, 4), - - VMSTATE_UINT64_V(env.pat, X86CPU, 5), - VMSTATE_UINT32_V(env.hflags2, X86CPU, 5), - - VMSTATE_UINT32_TEST(parent_obj.halted, X86CPU, version_is_5), - VMSTATE_UINT64_V(env.vm_hsave, X86CPU, 5), - VMSTATE_UINT64_V(env.vm_vmcb, X86CPU, 5), - VMSTATE_UINT64_V(env.tsc_offset, X86CPU, 5), - VMSTATE_UINT64_V(env.intercept, X86CPU, 5), - VMSTATE_UINT16_V(env.intercept_cr_read, X86CPU, 5), - VMSTATE_UINT16_V(env.intercept_cr_write, X86CPU, 5), - VMSTATE_UINT16_V(env.intercept_dr_read, X86CPU, 5), - VMSTATE_UINT16_V(env.intercept_dr_write, X86CPU, 5), - VMSTATE_UINT32_V(env.intercept_exceptions, X86CPU, 5), - VMSTATE_UINT8_V(env.v_tpr, X86CPU, 5), + VMSTATE_UINT32(env.smbase, X86CPU), + + VMSTATE_UINT64(env.pat, X86CPU), + VMSTATE_UINT32(env.hflags2, X86CPU), + + VMSTATE_UINT64(env.vm_hsave, X86CPU), + VMSTATE_UINT64(env.vm_vmcb, X86CPU), + VMSTATE_UINT64(env.tsc_offset, X86CPU), + VMSTATE_UINT64(env.intercept, X86CPU), + VMSTATE_UINT16(env.intercept_cr_read, X86CPU), + VMSTATE_UINT16(env.intercept_cr_write, X86CPU), + VMSTATE_UINT16(env.intercept_dr_read, X86CPU), + VMSTATE_UINT16(env.intercept_dr_write, X86CPU), + VMSTATE_UINT32(env.intercept_exceptions, X86CPU), + VMSTATE_UINT8(env.v_tpr, X86CPU), /* MTRRs */ - VMSTATE_UINT64_ARRAY_V(env.mtrr_fixed, X86CPU, 11, 8), - VMSTATE_UINT64_V(env.mtrr_deftype, X86CPU, 8), + VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11), + VMSTATE_UINT64(env.mtrr_deftype, X86CPU), VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8), /* KVM-related states */ - VMSTATE_INT32_V(env.interrupt_injected, X86CPU, 9), - VMSTATE_UINT32_V(env.mp_state, X86CPU, 9), - VMSTATE_UINT64_V(env.tsc, X86CPU, 9), - VMSTATE_INT32_V(env.exception_injected, X86CPU, 11), - VMSTATE_UINT8_V(env.soft_interrupt, X86CPU, 11), - VMSTATE_UINT8_V(env.nmi_injected, X86CPU, 11), - VMSTATE_UINT8_V(env.nmi_pending, X86CPU, 11), - VMSTATE_UINT8_V(env.has_error_code, X86CPU, 11), - VMSTATE_UINT32_V(env.sipi_vector, X86CPU, 11), + VMSTATE_INT32(env.interrupt_injected, X86CPU), + VMSTATE_UINT32(env.mp_state, X86CPU), + VMSTATE_UINT64(env.tsc, X86CPU), 
+ VMSTATE_INT32(env.exception_injected, X86CPU), + VMSTATE_UINT8(env.soft_interrupt, X86CPU), + VMSTATE_UINT8(env.nmi_injected, X86CPU), + VMSTATE_UINT8(env.nmi_pending, X86CPU), + VMSTATE_UINT8(env.has_error_code, X86CPU), + VMSTATE_UINT32(env.sipi_vector, X86CPU), /* MCE */ - VMSTATE_UINT64_V(env.mcg_cap, X86CPU, 10), - VMSTATE_UINT64_V(env.mcg_status, X86CPU, 10), - VMSTATE_UINT64_V(env.mcg_ctl, X86CPU, 10), - VMSTATE_UINT64_ARRAY_V(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4, 10), + VMSTATE_UINT64(env.mcg_cap, X86CPU), + VMSTATE_UINT64(env.mcg_status, X86CPU), + VMSTATE_UINT64(env.mcg_ctl, X86CPU), + VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4), /* rdtscp */ - VMSTATE_UINT64_V(env.tsc_aux, X86CPU, 11), + VMSTATE_UINT64(env.tsc_aux, X86CPU), /* KVM pvclock msr */ - VMSTATE_UINT64_V(env.system_time_msr, X86CPU, 11), - VMSTATE_UINT64_V(env.wall_clock_msr, X86CPU, 11), + VMSTATE_UINT64(env.system_time_msr, X86CPU), + VMSTATE_UINT64(env.wall_clock_msr, X86CPU), /* XSAVE related fields */ VMSTATE_UINT64_V(env.xcr0, X86CPU, 12), VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12), diff --git a/target/ppc/Makefile.objs b/target/ppc/Makefile.objs index f963777277..f92ba67ebd 100644 --- a/target/ppc/Makefile.objs +++ b/target/ppc/Makefile.objs @@ -4,6 +4,7 @@ obj-y += translate.o ifeq ($(CONFIG_SOFTMMU),y) obj-y += machine.o mmu_helper.o mmu-hash32.o monitor.o arch_dump.o obj-$(TARGET_PPC64) += mmu-hash64.o mmu-book3s-v3.o compat.o +obj-$(TARGET_PPC64) += mmu-radix64.o endif obj-$(CONFIG_KVM) += kvm.o obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h index d587e69bbc..b563c45b68 100644 --- a/target/ppc/cpu-models.h +++ b/target/ppc/cpu-models.h @@ -561,6 +561,7 @@ enum { CPU_POWERPC_POWER8NVL_BASE = 0x004C0000, CPU_POWERPC_POWER8NVL_v10 = 0x004C0100, CPU_POWERPC_POWER9_BASE = 0x004E0000, + CPU_POWERPC_POWER9_DD1 = 0x004E0100, CPU_POWERPC_970_v22 = 0x00390202, CPU_POWERPC_970FX_v10 = 0x00391100, CPU_POWERPC_970FX_v20 = 0x003C0200, diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index e0ff0412d6..401e10e7da 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -30,6 +30,8 @@ #define TARGET_LONG_BITS 64 #define TARGET_PAGE_BITS 12 +#define TCG_GUEST_DEFAULT_MO 0 + /* Note that the official physical address space bits is 62-M where M is implementation dependent. I've not looked up M for the set of cpus we emulate at the system level. 
*/ @@ -480,6 +482,8 @@ struct ppc_slb_t { #define DSISR_ISSTORE 0x02000000 /* Not permitted by virtual page class key protection */ #define DSISR_AMR 0x00200000 +/* Unsupported Radix Tree Configuration */ +#define DSISR_R_BADCONFIG 0x00080000 /* SRR1 error code fields */ @@ -1221,6 +1225,7 @@ static inline PowerPCCPU *ppc_env_get_cpu(CPUPPCState *env) PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr); PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr); +PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc); struct PPCVirtualHypervisor { Object parent; diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index f4ee7aacd2..a6bcb47aa2 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -728,6 +728,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) cs->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; + /* Reset the reservation */ + env->reserve_addr = -1; + /* Any interrupt is context synchronizing, check if TCG TLB * needs a delayed flush on ppc64 */ diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index 8574c369e6..51249ce79e 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -2380,6 +2380,17 @@ static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data) #if defined(TARGET_PPC64) pcc->radix_page_info = kvm_get_radix_page_info(); + + if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) { + /* + * POWER9 DD1 has some bugs which make it not really ISA 3.00 + * compliant. More importantly, advertising ISA 3.00 + * architected mode may prevent guests from activating + * necessary DD1 workarounds. + */ + pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07 + | PCR_COMPAT_2_06 | PCR_COMPAT_2_05); + } #endif /* defined(TARGET_PPC64) */ } @@ -2413,18 +2424,6 @@ bool kvmppc_has_cap_mmu_hash_v3(void) return cap_mmu_hash_v3; } -static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc) -{ - ObjectClass *oc = OBJECT_CLASS(pcc); - - while (oc && !object_class_is_abstract(oc)) { - oc = object_class_get_parent(oc); - } - assert(oc); - - return POWERPC_CPU_CLASS(oc); -} - PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void) { uint32_t host_pvr = mfpvr(); diff --git a/target/ppc/mmu-book3s-v3.c b/target/ppc/mmu-book3s-v3.c index 005c96340a..e7798b3582 100644 --- a/target/ppc/mmu-book3s-v3.c +++ b/target/ppc/mmu-book3s-v3.c @@ -22,15 +22,13 @@ #include "cpu.h" #include "mmu-hash64.h" #include "mmu-book3s-v3.h" -#include "qemu/error-report.h" +#include "mmu-radix64.h" int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx) { if (ppc64_radix_guest(cpu)) { /* Guest uses radix */ - /* TODO - Unsupported */ - error_report("Guest Radix Support Unimplemented"); - exit(1); + return ppc_radix64_handle_mmu_fault(cpu, eaddr, rwx, mmu_idx); } else { /* Guest uses hash */ return ppc_hash64_handle_mmu_fault(cpu, eaddr, rwx, mmu_idx); } diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h index 636f6ab95f..56095dab52 100644 --- a/target/ppc/mmu-book3s-v3.h +++ b/target/ppc/mmu-book3s-v3.h @@ -25,6 +25,11 @@ /* Partition Table Entry Fields */ #define PATBE1_GR 0x8000000000000000 +/* Process Table Entry */ +struct prtb_entry { + uint64_t prtbe0, prtbe1; +}; + #ifdef TARGET_PPC64 static inline bool ppc64_use_proc_tbl(PowerPCCPU *cpu) diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c new file mode 100644 index 0000000000..de18c0b69e --- /dev/null +++ b/target/ppc/mmu-radix64.c @@ -0,0 +1,259 @@ +/* + * PowerPC Radix MMU mulation helpers for QEMU. 
+ * + * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "qemu/error-report.h" +#include "sysemu/kvm.h" +#include "kvm_ppc.h" +#include "exec/log.h" +#include "mmu-radix64.h" +#include "mmu-book3s-v3.h" + +static bool ppc_radix64_get_fully_qualified_addr(CPUPPCState *env, vaddr eaddr, + uint64_t *lpid, uint64_t *pid) +{ + /* We don't have HV support yet and shouldn't get here with it set anyway */ + assert(!msr_hv); + + if (!msr_hv) { /* !MSR[HV] -> Guest */ + switch (eaddr & R_EADDR_QUADRANT) { + case R_EADDR_QUADRANT0: /* Guest application */ + *lpid = env->spr[SPR_LPIDR]; + *pid = env->spr[SPR_BOOKS_PID]; + break; + case R_EADDR_QUADRANT1: /* Illegal */ + case R_EADDR_QUADRANT2: + return false; + case R_EADDR_QUADRANT3: /* Guest OS */ + *lpid = env->spr[SPR_LPIDR]; + *pid = 0; /* pid set to 0 -> addresses guest operating system */ + break; + } + } + + return true; +} + +static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + + if (rwx == 2) { /* Instruction Segment Interrupt */ + cs->exception_index = POWERPC_EXCP_ISEG; + } else { /* Data Segment Interrupt */ + cs->exception_index = POWERPC_EXCP_DSEG; + env->spr[SPR_DAR] = eaddr; + } + env->error_code = 0; +} + +static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr, + uint32_t cause) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + + if (rwx == 2) { /* Instruction Storage Interrupt */ + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = cause; + } else { /* Data Storage Interrupt */ + cs->exception_index = POWERPC_EXCP_DSI; + if (rwx == 1) { /* Write -> Store */ + cause |= DSISR_ISSTORE; + } + env->spr[SPR_DSISR] = cause; + env->spr[SPR_DAR] = eaddr; + env->error_code = 0; + } +} + + +static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte, + int *fault_cause, int *prot) +{ + CPUPPCState *env = &cpu->env; + const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC }; + + /* Check Page Attributes (pte58:59) */ + if (((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO) && (rwx == 2)) { + /* + * Radix PTE entries with the non-idempotent I/O attribute are treated + * as guarded storage + */ + *fault_cause |= SRR1_NOEXEC_GUARD; + return true; + } + + /* Determine permissions allowed by Encoded Access Authority */ + if ((pte & R_PTE_EAA_PRIV) && msr_pr) { /* Insufficient Privilege */ + *prot = 0; + } else if (msr_pr || (pte & R_PTE_EAA_PRIV)) { + *prot = ppc_radix64_get_prot_eaa(pte); + } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) */ + *prot = ppc_radix64_get_prot_eaa(pte); + *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */ + } + + /* Check if requested access type is 
allowed */ + if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */ + *fault_cause |= DSISR_PROTFAULT; + return true; + } + + return false; +} + +static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte, + hwaddr pte_addr, int *prot) +{ + CPUState *cs = CPU(cpu); + uint64_t npte; + + npte = pte | R_PTE_R; /* Always set reference bit */ + + if (rwx == 1) { /* Store/Write */ + npte |= R_PTE_C; /* Set change bit */ + } else { + /* + * Treat the page as read-only for now, so that a later write + * will pass through this function again to set the C bit. + */ + *prot &= ~PAGE_WRITE; + } + + if (pte ^ npte) { /* If pte has changed then write it back */ + stq_phys(cs->as, pte_addr, npte); + } +} + +static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, int rwx, vaddr eaddr, + uint64_t base_addr, uint64_t nls, + hwaddr *raddr, int *psize, + int *fault_cause, int *prot, + hwaddr *pte_addr) +{ + CPUState *cs = CPU(cpu); + uint64_t index, pde; + + if (nls < 5) { /* Directory maps less than 2**5 entries */ + *fault_cause |= DSISR_R_BADCONFIG; + return 0; + } + + /* Read page <directory/table> entry from guest address space */ + index = eaddr >> (*psize - nls); /* Shift */ + index &= ((1UL << nls) - 1); /* Mask */ + pde = ldq_phys(cs->as, base_addr + (index * sizeof(pde))); + if (!(pde & R_PTE_VALID)) { /* Invalid Entry */ + *fault_cause |= DSISR_NOPTE; + return 0; + } + + *psize -= nls; + + /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */ + if (pde & R_PTE_LEAF) { + uint64_t rpn = pde & R_PTE_RPN; + uint64_t mask = (1UL << *psize) - 1; + + if (ppc_radix64_check_prot(cpu, rwx, pde, fault_cause, prot)) { + return 0; /* Protection Denied Access */ + } + + /* Or high bits of rpn and low bits to ea to form whole real addr */ + *raddr = (rpn & ~mask) | (eaddr & mask); + *pte_addr = base_addr + (index * sizeof(pde)); + return pde; + } + + /* Next Level of Radix Tree */ + return ppc_radix64_walk_tree(cpu, rwx, eaddr, pde & R_PDE_NLB, + pde & R_PDE_NLS, raddr, psize, + fault_cause, prot, pte_addr); +} + +int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, + int mmu_idx) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + PPCVirtualHypervisorClass *vhc = + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + hwaddr raddr, pte_addr; + uint64_t lpid = 0, pid = 0, offset, size, patbe, prtbe0, pte; + int page_size, prot, fault_cause = 0; + + assert((rwx == 0) || (rwx == 1) || (rwx == 2)); + assert(!msr_hv); /* For now there is no Radix PowerNV Support */ + assert(cpu->vhyp); + assert(ppc64_use_proc_tbl(cpu)); + + /* Real Mode Access */ + if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { + /* In real mode top 4 effective addr bits (mostly) ignored */ + raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; + + tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, + PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, + TARGET_PAGE_SIZE); + return 0; + } + + /* Virtual Mode Access - get the fully qualified address */ + if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) { + ppc_radix64_raise_segi(cpu, rwx, eaddr); + return 1; + } + + /* Get Process Table */ + patbe = vhc->get_patbe(cpu->vhyp); + + /* Index Process Table by PID to Find Corresponding Process Table Entry */ + offset = pid * sizeof(struct prtb_entry); + size = 1ULL << ((patbe & PATBE1_R_PRTS) + 12); + if (offset >= size) { + /* offset exceeds size of the process table */ + ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE); + return 1; + } + prtbe0 = 
ldq_phys(cs->as, (patbe & PATBE1_R_PRTB) + offset); + + /* Walk Radix Tree from Process Table Entry to Convert EA to RA */ + page_size = PRTBE_R_GET_RTS(prtbe0); + pte = ppc_radix64_walk_tree(cpu, rwx, eaddr & R_EADDR_MASK, + prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS, + &raddr, &page_size, &fault_cause, &prot, + &pte_addr); + if (!pte) { + ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause); + return 1; + } + + /* Update Reference and Change Bits */ + ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot); + + tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, + prot, mmu_idx, 1UL << page_size); + return 1; +} diff --git a/target/ppc/mmu-radix64.h b/target/ppc/mmu-radix64.h new file mode 100644 index 0000000000..1d5c7cfea5 --- /dev/null +++ b/target/ppc/mmu-radix64.h @@ -0,0 +1,72 @@ +#ifndef MMU_RADIX64_H +#define MMU_RADIX64_H + +#ifndef CONFIG_USER_ONLY + +/* Radix Quadrants */ +#define R_EADDR_MASK 0x3FFFFFFFFFFFFFFF +#define R_EADDR_QUADRANT 0xC000000000000000 +#define R_EADDR_QUADRANT0 0x0000000000000000 +#define R_EADDR_QUADRANT1 0x4000000000000000 +#define R_EADDR_QUADRANT2 0x8000000000000000 +#define R_EADDR_QUADRANT3 0xC000000000000000 + +/* Radix Partition Table Entry Fields */ +#define PATBE1_R_PRTB 0x0FFFFFFFFFFFF000 +#define PATBE1_R_PRTS 0x000000000000001F + +/* Radix Process Table Entry Fields */ +#define PRTBE_R_GET_RTS(rts) \ + ((((rts >> 58) & 0x18) | ((rts >> 5) & 0x7)) + 31) +#define PRTBE_R_RPDB 0x0FFFFFFFFFFFFF00 +#define PRTBE_R_RPDS 0x000000000000001F + +/* Radix Page Directory/Table Entry Fields */ +#define R_PTE_VALID 0x8000000000000000 +#define R_PTE_LEAF 0x4000000000000000 +#define R_PTE_SW0 0x2000000000000000 +#define R_PTE_RPN 0x01FFFFFFFFFFF000 +#define R_PTE_SW1 0x0000000000000E00 +#define R_GET_SW(sw) (((sw >> 58) & 0x8) | ((sw >> 9) & 0x7)) +#define R_PTE_R 0x0000000000000100 +#define R_PTE_C 0x0000000000000080 +#define R_PTE_ATT 0x0000000000000030 +#define R_PTE_ATT_NORMAL 0x0000000000000000 +#define R_PTE_ATT_SAO 0x0000000000000010 +#define R_PTE_ATT_NI_IO 0x0000000000000020 +#define R_PTE_ATT_TOLERANT_IO 0x0000000000000030 +#define R_PTE_EAA_PRIV 0x0000000000000008 +#define R_PTE_EAA_R 0x0000000000000004 +#define R_PTE_EAA_RW 0x0000000000000002 +#define R_PTE_EAA_X 0x0000000000000001 +#define R_PDE_NLB PRTBE_R_RPDB +#define R_PDE_NLS PRTBE_R_RPDS + +#ifdef TARGET_PPC64 + +int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, + int mmu_idx); + +static inline int ppc_radix64_get_prot_eaa(uint64_t pte) +{ + return (pte & R_PTE_EAA_R ? PAGE_READ : 0) | + (pte & R_PTE_EAA_RW ? PAGE_READ | PAGE_WRITE : 0) | + (pte & R_PTE_EAA_X ? PAGE_EXEC : 0); +} + +static inline int ppc_radix64_get_prot_amr(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + int amr = env->spr[SPR_AMR] >> 62; /* We only care about key0 AMR63:62 */ + int iamr = env->spr[SPR_IAMR] >> 62; /* We only care about key0 IAMR63:62 */ + + return (amr & 0x2 ? 0 : PAGE_WRITE) | /* Access denied if bit is set */ + (amr & 0x1 ? 0 : PAGE_READ) | + (iamr & 0x1 ? 
0 : PAGE_EXEC); +} + +#endif /* TARGET_PPC64 */ + +#endif /* CONFIG_USER_ONLY */ + +#endif /* MMU_RADIX64_H */ diff --git a/target/ppc/translate.c b/target/ppc/translate.c index f40b5a1abf..c0cd64d927 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -73,6 +73,7 @@ static TCGv cpu_cfar; #endif static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32; static TCGv cpu_reserve; +static TCGv cpu_reserve_val; static TCGv cpu_fpscr; static TCGv_i32 cpu_access_type; @@ -181,6 +182,9 @@ void ppc_translate_init(void) cpu_reserve = tcg_global_mem_new(cpu_env, offsetof(CPUPPCState, reserve_addr), "reserve_addr"); + cpu_reserve_val = tcg_global_mem_new(cpu_env, + offsetof(CPUPPCState, reserve_val), + "reserve_val"); cpu_fpscr = tcg_global_mem_new(cpu_env, offsetof(CPUPPCState, fpscr), "fpscr"); @@ -214,6 +218,7 @@ struct DisasContext { bool vsx_enabled; bool spe_enabled; bool tm_enabled; + bool gtse; ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */ int singlestep_enabled; uint64_t insns_flags; @@ -2967,6 +2972,7 @@ static void gen_stswx(DisasContext *ctx) /* eieio */ static void gen_eieio(DisasContext *ctx) { + tcg_gen_mb(TCG_MO_LD_ST | TCG_BAR_SC); } #if !defined(CONFIG_USER_ONLY) @@ -3004,6 +3010,7 @@ static void gen_isync(DisasContext *ctx) if (!ctx->pr) { gen_check_tlb_flush(ctx, false); } + tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); gen_stop_exception(ctx); } @@ -3023,7 +3030,8 @@ static void gen_##name(DisasContext *ctx) \ } \ tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop); \ tcg_gen_mov_tl(cpu_reserve, t0); \ - tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUPPCState, reserve_val)); \ + tcg_gen_mov_tl(cpu_reserve_val, gpr); \ + tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); \ tcg_temp_free(t0); \ } @@ -3155,14 +3163,31 @@ static void gen_conditional_store(DisasContext *ctx, TCGv EA, static void gen_conditional_store(DisasContext *ctx, TCGv EA, int reg, int memop) { - TCGLabel *l1; + TCGLabel *l1 = gen_new_label(); + TCGLabel *l2 = gen_new_label(); + TCGv t0; - tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); - l1 = gen_new_label(); tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1); - tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ); - tcg_gen_qemu_st_tl(cpu_gpr[reg], EA, ctx->mem_idx, memop); + + t0 = tcg_temp_new(); + tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val, + cpu_gpr[reg], ctx->mem_idx, + DEF_MEMOP(memop) | MO_ALIGN); + tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val); + tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT); + tcg_gen_or_tl(t0, t0, cpu_so); + tcg_gen_trunc_tl_i32(cpu_crf[0], t0); + tcg_temp_free(t0); + tcg_gen_br(l2); + gen_set_label(l1); + + /* Address mismatch implies failure. But we still need to provide the + memory barrier semantics of the instruction. 
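/*
 * Illustrative sketch, not part of the patch: the gen_conditional_store
 * rewrite above maps larx/stcx. onto an atomic compare-and-swap of the
 * reserved value.  A minimal standalone model of that idea using C11
 * atomics follows; reserve_addr/reserve_val, emulate_larx and emulate_stcx
 * are names invented for this sketch, not QEMU or PowerPC APIs.  As with
 * the TCG version, an ABA store by another thread still lets the
 * store-conditional succeed, which the architecture permits.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t *reserve_addr;   /* address captured by larx */
static uint64_t          reserve_val;    /* value captured by larx   */

static uint64_t emulate_larx(_Atomic uint64_t *ea)
{
    reserve_addr = ea;
    reserve_val  = atomic_load(ea);      /* load and establish reservation */
    return reserve_val;
}

static bool emulate_stcx(_Atomic uint64_t *ea, uint64_t new_val)
{
    uint64_t expected = reserve_val;

    if (ea != reserve_addr) {
        return false;                    /* address mismatch: stcx. fails */
    }
    /* Succeed only if memory still holds the reserved value. */
    return atomic_compare_exchange_strong(ea, &expected, new_val);
}

int main(void)
{
    _Atomic uint64_t mem = 41;
    uint64_t v = emulate_larx(&mem);

    printf("stcx. %s\n", emulate_stcx(&mem, v + 1) ? "succeeded" : "failed");
    printf("mem = %llu\n", (unsigned long long)atomic_load(&mem));
    return 0;
}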
*/ + tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); + tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); + + gen_set_label(l2); tcg_gen_movi_tl(cpu_reserve, -1); } #endif @@ -3291,6 +3316,7 @@ static void gen_sync(DisasContext *ctx) if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) { gen_check_tlb_flush(ctx, true); } + tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); } /* wait */ @@ -4513,7 +4539,12 @@ static void gen_tlbie(DisasContext *ctx) GEN_PRIV; #else TCGv_i32 t1; - CHK_HV; + + if (ctx->gtse) { + CHK_SV; /* If gtse is set then tblie is supervisor privileged */ + } else { + CHK_HV; /* Else hypervisor privileged */ + } if (NARROW_MODE(ctx)) { TCGv t0 = tcg_temp_new(); @@ -6547,6 +6578,8 @@ GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA), * different ISA versions */ GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE), GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE), +GEN_HANDLER_E(tlbiel, 0x1F, 0x12, 0x08, 0x00100001, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(tlbie, 0x1F, 0x12, 0x09, 0x00100001, PPC_NONE, PPC2_ISA300), GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC), #if defined(TARGET_PPC64) GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI), @@ -7227,6 +7260,7 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb) ctx.tm_enabled = false; } #endif + ctx.gtse = !!(env->spr[SPR_LPCR] & LPCR_GTSE); if ((env->flags & POWERPC_FLAG_SE) && msr_se) ctx.singlestep_enabled = CPU_SINGLE_STEP; else diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c index e82e3e65e1..56a0ab22cf 100644 --- a/target/ppc/translate_init.c +++ b/target/ppc/translate_init.c @@ -8960,7 +8960,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_MEM_TLBSYNC | PPC_64B | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | @@ -10285,6 +10285,18 @@ PowerPCCPU *cpu_ppc_init(const char *cpu_model) return POWERPC_CPU(cpu_generic_init(TYPE_POWERPC_CPU, cpu_model)); } +PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc) +{ + ObjectClass *oc = OBJECT_CLASS(pcc); + + while (oc && !object_class_is_abstract(oc)) { + oc = object_class_get_parent(oc); + } + assert(oc); + + return POWERPC_CPU_CLASS(oc); +} + /* Sort by PVR, ordering special case "host" last. 
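/*
 * Illustrative sketch, not part of the patch: ppc_radix64_walk_tree above
 * descends the radix tree one level at a time, consuming 'nls' bits of the
 * effective address as a directory index and, on a leaf, splicing the RPN
 * with the remaining low address bits.  The standalone code below shows
 * only that arithmetic; function names, the 52-bit tree size and the
 * 13/9/9/9 level split are assumptions chosen for the example.
 */
#include <stdint.h>
#include <stdio.h>

/* One walk level: 'psize' is the number of still-untranslated EA bits. */
static uint64_t radix_level_index(uint64_t eaddr, int *psize, int nls)
{
    uint64_t index = (eaddr >> (*psize - nls)) & ((1ULL << nls) - 1);
    *psize -= nls;                  /* the rest is translated below us */
    return index;
}

/* On a leaf PDE, combine the real page number with the page offset. */
static uint64_t radix_leaf_raddr(uint64_t rpn, uint64_t eaddr, int psize)
{
    uint64_t mask = (1ULL << psize) - 1;
    return (rpn & ~mask) | (eaddr & mask);
}

int main(void)
{
    int psize = 52;                              /* RTS-encoded tree size */
    uint64_t eaddr = 0x0000123456789abcULL;
    int levels[] = { 13, 9, 9, 9 };              /* example directory sizes */

    for (int i = 0; i < 4; i++) {
        printf("level %d index = 0x%llx\n", i,
               (unsigned long long)radix_level_index(eaddr, &psize, levels[i]));
    }
    /* psize is now 12, i.e. a 4 KiB leaf page */
    printf("raddr = 0x%llx\n",
           (unsigned long long)radix_leaf_raddr(0x40000000ULL, eaddr, psize));
    return 0;
}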
*/ static gint ppc_cpu_list_compare(gconstpointer a, gconstpointer b) { @@ -10316,6 +10328,7 @@ static void ppc_cpu_list_entry(gpointer data, gpointer user_data) ObjectClass *oc = data; CPUListState *s = user_data; PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + DeviceClass *family = DEVICE_CLASS(ppc_cpu_get_family_class(pcc)); const char *typename = object_class_get_name(oc); char *name; int i; @@ -10338,8 +10351,18 @@ static void ppc_cpu_list_entry(gpointer data, gpointer user_data) if (alias_oc != oc) { continue; } - (*s->cpu_fprintf)(s->file, "PowerPC %-16s (alias for %s)\n", - alias->alias, name); + /* + * If running with KVM, we might update the family alias later, so + * avoid printing the wrong alias here and use "preferred" instead + */ + if (strcmp(alias->alias, family->desc) == 0) { + (*s->cpu_fprintf)(s->file, + "PowerPC %-16s (alias for preferred %s CPU)\n", + alias->alias, family->desc); + } else { + (*s->cpu_fprintf)(s->file, "PowerPC %-16s (alias for %s)\n", + alias->alias, name); + } } g_free(name); } @@ -10436,14 +10459,6 @@ static bool ppc_cpu_has_work(CPUState *cs) return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); } -static void ppc_cpu_exec_enter(CPUState *cs) -{ - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; - - env->reserve_addr = -1; -} - /* CPUClass::reset() */ static void ppc_cpu_reset(CPUState *s) { @@ -10660,7 +10675,6 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data) cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug; cc->vmsd = &vmstate_ppc_cpu; #endif - cc->cpu_exec_enter = ppc_cpu_exec_enter; #if defined(CONFIG_SOFTMMU) cc->write_elf64_note = ppc64_cpu_write_elf64_note; cc->write_elf32_note = ppc32_cpu_write_elf32_note; diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c index 066dcd17df..a1bf2ba5a7 100644 --- a/target/s390x/cpu.c +++ b/target/s390x/cpu.c @@ -430,6 +430,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data) cc->write_elf64_note = s390_cpu_write_elf64_note; cc->cpu_exec_interrupt = s390_cpu_exec_interrupt; cc->debug_excp_handler = s390x_cpu_debug_excp_handler; + cc->do_unaligned_access = s390x_cpu_do_unaligned_access; #endif cc->disas_set_info = s390_cpu_disas_set_info; diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h index 058ddad83a..240b8a5c22 100644 --- a/target/s390x/cpu.h +++ b/target/s390x/cpu.h @@ -480,6 +480,9 @@ int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, #ifndef CONFIG_USER_ONLY void do_restart_interrupt(CPUS390XState *env); +void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb, uint8_t *ar) @@ -1075,6 +1078,9 @@ struct sysib_322 { #define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1 #define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2 +/* SIGP order code mask corresponding to bit positions 56-63 */ +#define SIGP_ORDER_MASK 0x000000ff + void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr); int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, target_ulong *raddr, int *flags, bool exc); diff --git a/target/s390x/helper.c b/target/s390x/helper.c index 68bd2f9784..997849008f 100644 --- a/target/s390x/helper.c +++ b/target/s390x/helper.c @@ -718,4 +718,20 @@ void s390x_cpu_debug_excp_handler(CPUState *cs) cpu_loop_exit_noexc(cs); } } + +/* Unaligned accesses are only diagnosed with MO_ALIGN. 
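/*
 * Illustrative sketch, not part of the patch: the s390x unaligned-access
 * hook added above only fires for accesses tagged MO_ALIGN (currently the
 * atomic operations), where the architecture calls for a specification
 * exception.  Below is only the underlying power-of-two alignment check;
 * the function name and example addresses are invented for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool naturally_aligned(uint64_t addr, unsigned size)
{
    return (addr & (size - 1)) == 0;    /* size must be a power of two */
}

int main(void)
{
    /* CDSG operands need 16-byte alignment, otherwise: specification exc. */
    printf("0x1008 ok for 16-byte op? %d\n", naturally_aligned(0x1008, 16));
    printf("0x1010 ok for 16-byte op? %d\n", naturally_aligned(0x1010, 16));
    return 0;
}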
At the moment, + this is only for the atomic operations, for which we want to raise a + specification exception. */ +void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + + if (retaddr) { + cpu_restore_state(cs, retaddr); + } + program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER); +} #endif /* CONFIG_USER_ONLY */ diff --git a/target/s390x/helper.h b/target/s390x/helper.h index 9102071d0a..0b70770e4e 100644 --- a/target/s390x/helper.h +++ b/target/s390x/helper.h @@ -25,6 +25,7 @@ DEF_HELPER_3(cxgb, i64, env, s64, i32) DEF_HELPER_3(celgb, i64, env, i64, i32) DEF_HELPER_3(cdlgb, i64, env, i64, i32) DEF_HELPER_3(cxlgb, i64, env, i64, i32) +DEF_HELPER_4(cdsg, void, env, i64, i32, i32) DEF_HELPER_FLAGS_3(aeb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(adb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_5(axb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64) @@ -83,6 +84,8 @@ DEF_HELPER_FLAGS_5(calc_cc, TCG_CALL_NO_RWG_SE, i32, env, i32, i64, i64, i64) DEF_HELPER_FLAGS_2(sfpc, TCG_CALL_NO_RWG, void, env, i64) DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64) DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(stfl, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_2(stfle, i32, env, i64) #ifndef CONFIG_USER_ONLY DEF_HELPER_3(servc, i32, env, i64, i64) diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def index 075ff597c3..55a7c529b4 100644 --- a/target/s390x/insn-data.def +++ b/target/s390x/insn-data.def @@ -239,12 +239,12 @@ D(0xec7d, CLGIJ, RIE_c, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1) /* COMPARE AND SWAP */ - D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, 0) - D(0xeb14, CSY, RSY_a, LD, r3_32u, r1_32u, new, r1_32, cs, 0, 0) - D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, 1) + D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL) + D(0xeb14, CSY, RSY_a, LD, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL) + D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEQ) /* COMPARE DOUBLE AND SWAP */ - D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, 1) - D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, 1) + D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ) + D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ) C(0xeb3e, CDSG, RSY_a, Z, 0, 0, 0, 0, cdsg, 0) /* COMPARE AND TRAP */ @@ -390,20 +390,20 @@ /* LOAD ADDRESS RELATIVE LONG */ C(0xc000, LARL, RIL_b, Z, 0, ri2, 0, r1, mov2, 0) /* LOAD AND ADD */ - C(0xebf8, LAA, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, add, adds32) - C(0xebe8, LAAG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, add, adds64) + D(0xebf8, LAA, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, laa, adds32, MO_TESL) + D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEQ) /* LOAD AND ADD LOGICAL */ - C(0xebfa, LAAL, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, add, addu32) - C(0xebea, LAALG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, add, addu64) + D(0xebfa, LAAL, RSY_a, ILA, r3_32u, a2, new, in2_r1_32, laa, addu32, MO_TEUL) + D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEQ) /* LOAD AND AND */ - C(0xebf4, LAN, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, and, nz32) - C(0xebe4, LANG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, and, nz64) + D(0xebf4, LAN, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lan, nz32, MO_TESL) + D(0xebe4, 
LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEQ) /* LOAD AND EXCLUSIVE OR */ - C(0xebf7, LAX, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, xor, nz32) - C(0xebe7, LAXG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, xor, nz64) + D(0xebf7, LAX, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lax, nz32, MO_TESL) + D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEQ) /* LOAD AND OR */ - C(0xebf6, LAO, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, or, nz32) - C(0xebe6, LAOG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, or, nz64) + D(0xebf6, LAO, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lao, nz32, MO_TESL) + D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEQ) /* LOAD AND TEST */ C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32) C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64) @@ -504,7 +504,9 @@ C(0xb9e2, LOCGR, RRF_c, LOC, r1, r2, r1, 0, loc, 0) C(0xebf2, LOC, RSY_b, LOC, r1, m2_32u, new, r1_32, loc, 0) C(0xebe2, LOCG, RSY_b, LOC, r1, m2_64, r1, 0, loc, 0) -/* LOAD PAIR DISJOINT TODO */ +/* LOAD PAIR DISJOINT */ + D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL) + D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEQ) /* LOAD POSITIVE */ C(0x1000, LPR, RR_a, Z, 0, r2_32s, new, r1_32, abs, abs32) C(0xb900, LPGR, RRE, Z, 0, r2, r1, 0, abs, abs64) @@ -747,6 +749,8 @@ C(0xe33e, STRV, RXY_a, Z, la2, r1_32u, new, m1_32, rev32, 0) C(0xe32f, STRVG, RXY_a, Z, la2, r1_o, new, m1_64, rev64, 0) +/* STORE FACILITY LIST EXTENDED */ + C(0xb2b0, STFLE, S, SFLE, 0, a2, 0, 0, stfle, 0) /* STORE FPC */ C(0xb29c, STFPC, S, Z, 0, a2, new, m2_32, efpc, 0) @@ -843,6 +847,8 @@ /* LOAD CONTROL */ C(0xb700, LCTL, RS_a, Z, 0, a2, 0, 0, lctl, 0) C(0xeb2f, LCTLG, RSY_a, Z, 0, a2, 0, 0, lctlg, 0) +/* LOAD PROGRAM PARAMETER */ + C(0xb280, LPP, S, LPP, 0, m2_64, 0, 0, lpp, 0) /* LOAD PSW */ C(0x8200, LPSW, S, Z, 0, a2, 0, 0, lpsw, 0) /* LOAD PSW EXTENDED */ diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c index 1a249d8359..fb105429be 100644 --- a/target/s390x/kvm.c +++ b/target/s390x/kvm.c @@ -1764,8 +1764,6 @@ static int sigp_set_architecture(S390CPU *cpu, uint32_t param, return SIGP_CC_ORDER_CODE_ACCEPTED; } -#define SIGP_ORDER_MASK 0x000000ff - static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) { CPUS390XState *env = &cpu->env; diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c index 675aba2e44..f6e5bcec5d 100644 --- a/target/s390x/mem_helper.c +++ b/target/s390x/mem_helper.c @@ -23,6 +23,7 @@ #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" +#include "qemu/int128.h" #if !defined(CONFIG_USER_ONLY) #include "hw/s390x/storage-keys.h" @@ -844,6 +845,45 @@ uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array, return cc; } +void HELPER(cdsg)(CPUS390XState *env, uint64_t addr, + uint32_t r1, uint32_t r3) +{ + uintptr_t ra = GETPC(); + Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); + Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); + Int128 oldv; + bool fail; + + if (parallel_cpus) { +#ifndef CONFIG_ATOMIC128 + cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); +#else + int mem_idx = cpu_mmu_index(env, false); + TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); + fail = !int128_eq(oldv, cmpv); +#endif + } else { + uint64_t oldh, oldl; + + oldh = cpu_ldq_data_ra(env, addr + 0, ra); + oldl = cpu_ldq_data_ra(env, addr + 8, ra); + + 
oldv = int128_make128(oldl, oldh); + fail = !int128_eq(oldv, cmpv); + if (fail) { + newv = oldv; + } + + cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra); + cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra); + } + + env->cc_op = fail; + env->regs[r1] = int128_gethi(oldv); + env->regs[r1 + 1] = int128_getlo(oldv); +} + #if !defined(CONFIG_USER_ONLY) void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) { diff --git a/target/s390x/misc_helper.c b/target/s390x/misc_helper.c index eca82441d0..23ec52cf35 100644 --- a/target/s390x/misc_helper.c +++ b/target/s390x/misc_helper.c @@ -517,8 +517,7 @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1, /* Remember: Use "R1 or R1 + 1, whichever is the odd-numbered register" as parameter (input). Status (output) is always R1. */ - /* sigp contains the order code in bit positions 56-63, mask it here. */ - switch (order_code & 0xff) { + switch (order_code & SIGP_ORDER_MASK) { case SIGP_SET_ARCH: /* switch arch */ break; @@ -678,3 +677,62 @@ void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr) } } #endif + +/* The maximum bit defined at the moment is 129. */ +#define MAX_STFL_WORDS 3 + +/* Canonicalize the current cpu's features into the 64-bit words required + by STFLE. Return the index-1 of the max word that is non-zero. */ +static unsigned do_stfle(CPUS390XState *env, uint64_t words[MAX_STFL_WORDS]) +{ + S390CPU *cpu = s390_env_get_cpu(env); + const unsigned long *features = cpu->model->features; + unsigned max_bit = 0; + S390Feat feat; + + memset(words, 0, sizeof(uint64_t) * MAX_STFL_WORDS); + + if (test_bit(S390_FEAT_ZARCH, features)) { + /* z/Architecture is always active if around */ + words[0] = 1ull << (63 - 2); + } + + for (feat = find_first_bit(features, S390_FEAT_MAX); + feat < S390_FEAT_MAX; + feat = find_next_bit(features, S390_FEAT_MAX, feat + 1)) { + const S390FeatDef *def = s390_feat_def(feat); + if (def->type == S390_FEAT_TYPE_STFL) { + unsigned bit = def->bit; + if (bit > max_bit) { + max_bit = bit; + } + assert(bit / 64 < MAX_STFL_WORDS); + words[bit / 64] |= 1ULL << (63 - bit % 64); + } + } + + return max_bit / 64; +} + +void HELPER(stfl)(CPUS390XState *env) +{ + uint64_t words[MAX_STFL_WORDS]; + + do_stfle(env, words); + cpu_stl_data(env, 200, words[0] >> 32); +} + +uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr) +{ + uint64_t words[MAX_STFL_WORDS]; + unsigned count_m1 = env->regs[0] & 0xff; + unsigned max_m1 = do_stfle(env, words); + unsigned i; + + for (i = 0; i <= count_m1; ++i) { + cpu_stq_data(env, addr + 8 * i, words[i]); + } + + env->regs[0] = deposit64(env->regs[0], 0, 8, max_m1); + return (count_m1 >= max_m1 ? 0 : 3); +} diff --git a/target/s390x/translate.c b/target/s390x/translate.c index 01c62176bf..4c48c593cd 100644 --- a/target/s390x/translate.c +++ b/target/s390x/translate.c @@ -1194,6 +1194,7 @@ typedef enum DisasFacility { FAC_SCF, /* store clock fast */ FAC_SFLE, /* store facility list extended */ FAC_ILA, /* interlocked access facility 1 */ + FAC_LPP, /* load-program-parameter */ } DisasFacility; struct DisasInsn { @@ -1517,6 +1518,21 @@ static ExitStatus op_bc(DisasContext *s, DisasOps *o) int imm = is_imm ? 
get_field(s->fields, i2) : 0; DisasCompare c; + /* BCR with R2 = 0 causes no branching */ + if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) { + if (m1 == 14) { + /* Perform serialization */ + /* FIXME: check for fast-BCR-serialization facility */ + tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); + } + if (m1 == 15) { + /* Perform serialization */ + /* FIXME: perform checkpoint-synchronisation */ + tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); + } + return NO_EXIT; + } + disas_jcc(s, &c, m1); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1942,102 +1958,47 @@ static ExitStatus op_cps(DisasContext *s, DisasOps *o) static ExitStatus op_cs(DisasContext *s, DisasOps *o) { - /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */ int d2 = get_field(s->fields, d2); int b2 = get_field(s->fields, b2); - int is_64 = s->insn->data; - TCGv_i64 addr, mem, cc, z; + TCGv_i64 addr, cc; /* Note that in1 = R3 (new value) and in2 = (zero-extended) R1 (expected value). */ - /* Load the memory into the (temporary) output. While the PoO only talks - about moving the memory to R1 on inequality, if we include equality it - means that R1 is equal to the memory in all conditions. */ addr = get_address(s, 0, b2, d2); - if (is_64) { - tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s)); - } else { - tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s)); - } + tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1, + get_mem_index(s), s->insn->data | MO_ALIGN); + tcg_temp_free_i64(addr); /* Are the memory and expected values (un)equal? Note that this setcond produces the output CC value, thus the NE sense of the test. */ cc = tcg_temp_new_i64(); tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out); - - /* If the memory and expected values are equal (CC==0), copy R3 to MEM. - Recall that we are allowed to unconditionally issue the store (and - thus any possible write trap), so (re-)store the original contents - of MEM in case of inequality. */ - z = tcg_const_i64(0); - mem = tcg_temp_new_i64(); - tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out); - if (is_64) { - tcg_gen_qemu_st64(mem, addr, get_mem_index(s)); - } else { - tcg_gen_qemu_st32(mem, addr, get_mem_index(s)); - } - tcg_temp_free_i64(z); - tcg_temp_free_i64(mem); - tcg_temp_free_i64(addr); - - /* Store CC back to cc_op. Wait until after the store so that any - exception gets the old cc_op value. */ tcg_gen_extrl_i64_i32(cc_op, cc); tcg_temp_free_i64(cc); set_cc_static(s); + return NO_EXIT; } static ExitStatus op_cdsg(DisasContext *s, DisasOps *o) { - /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */ int r1 = get_field(s->fields, r1); int r3 = get_field(s->fields, r3); int d2 = get_field(s->fields, d2); int b2 = get_field(s->fields, b2); - TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z; + TCGv_i64 addr; + TCGv_i32 t_r1, t_r3; /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */ + addr = get_address(s, 0, b2, d2); + t_r1 = tcg_const_i32(r1); + t_r3 = tcg_const_i32(r3); + gen_helper_cdsg(cpu_env, addr, t_r1, t_r3); + tcg_temp_free_i64(addr); + tcg_temp_free_i32(t_r1); + tcg_temp_free_i32(t_r3); - addrh = get_address(s, 0, b2, d2); - addrl = get_address(s, 0, b2, d2 + 8); - outh = tcg_temp_new_i64(); - outl = tcg_temp_new_i64(); - - tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s)); - tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s)); - - /* Fold the double-word compare with arithmetic. 
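/*
 * Illustrative sketch, not part of the patch: op_cs above now performs
 * COMPARE AND SWAP as a single atomic cmpxchg.  The old memory value is
 * always returned (it becomes R1), and CC is simply the not-equal test
 * against the expected value.  The standalone C11 model below uses
 * invented names (compare_and_swap, mem, r1, r3), not s390x/QEMU APIs.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static int compare_and_swap(_Atomic uint64_t *mem, uint64_t *r1, uint64_t r3)
{
    uint64_t old = *r1;             /* expected value comes from R1 */
    int swapped = atomic_compare_exchange_strong(mem, &old, r3);

    /* On failure the CAS writes the observed memory value into 'old';
       on success 'old' already equals it, so R1 always gets the old value. */
    *r1 = old;
    return swapped ? 0 : 1;         /* CC: 0 = equal/swapped, 1 = mismatch */
}

int main(void)
{
    _Atomic uint64_t mem = 5;
    uint64_t r1 = 5, r3 = 9;

    printf("cc=%d mem=%llu r1=%llu\n", compare_and_swap(&mem, &r1, r3),
           (unsigned long long)atomic_load(&mem), (unsigned long long)r1);
    return 0;
}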
*/ - cc = tcg_temp_new_i64(); - z = tcg_temp_new_i64(); - tcg_gen_xor_i64(cc, outh, regs[r1]); - tcg_gen_xor_i64(z, outl, regs[r1 + 1]); - tcg_gen_or_i64(cc, cc, z); - tcg_gen_movi_i64(z, 0); - tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z); - - memh = tcg_temp_new_i64(); - meml = tcg_temp_new_i64(); - tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh); - tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl); - tcg_temp_free_i64(z); - - tcg_gen_qemu_st64(memh, addrh, get_mem_index(s)); - tcg_gen_qemu_st64(meml, addrl, get_mem_index(s)); - tcg_temp_free_i64(memh); - tcg_temp_free_i64(meml); - tcg_temp_free_i64(addrh); - tcg_temp_free_i64(addrl); - - /* Save back state now that we've passed all exceptions. */ - tcg_gen_mov_i64(regs[r1], outh); - tcg_gen_mov_i64(regs[r1 + 1], outl); - tcg_gen_extrl_i64_i32(cc_op, cc); - tcg_temp_free_i64(outh); - tcg_temp_free_i64(outl); - tcg_temp_free_i64(cc); set_cc_static(s); return NO_EXIT; } @@ -2363,6 +2324,50 @@ static ExitStatus op_iske(DisasContext *s, DisasOps *o) } #endif +static ExitStatus op_laa(DisasContext *s, DisasOps *o) +{ + /* The real output is indeed the original value in memory; + recompute the addition for the computation of CC. */ + tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s), + s->insn->data | MO_ALIGN); + /* However, we need to recompute the addition for setting CC. */ + tcg_gen_add_i64(o->out, o->in1, o->in2); + return NO_EXIT; +} + +static ExitStatus op_lan(DisasContext *s, DisasOps *o) +{ + /* The real output is indeed the original value in memory; + recompute the addition for the computation of CC. */ + tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s), + s->insn->data | MO_ALIGN); + /* However, we need to recompute the operation for setting CC. */ + tcg_gen_and_i64(o->out, o->in1, o->in2); + return NO_EXIT; +} + +static ExitStatus op_lao(DisasContext *s, DisasOps *o) +{ + /* The real output is indeed the original value in memory; + recompute the addition for the computation of CC. */ + tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s), + s->insn->data | MO_ALIGN); + /* However, we need to recompute the operation for setting CC. */ + tcg_gen_or_i64(o->out, o->in1, o->in2); + return NO_EXIT; +} + +static ExitStatus op_lax(DisasContext *s, DisasOps *o) +{ + /* The real output is indeed the original value in memory; + recompute the addition for the computation of CC. */ + tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s), + s->insn->data | MO_ALIGN); + /* However, we need to recompute the operation for setting CC. 
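/*
 * Illustrative sketch, not part of the patch: the LAA/LAN/LAO/LAX handlers
 * above rely on an atomic fetch-op that returns the *old* memory value (the
 * architectural result placed in R1), then recompute the new value solely
 * to derive the condition code.  Standalone model for LOAD AND ADD below;
 * names are invented and overflow (CC 3) is deliberately not modelled.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static int load_and_add(_Atomic int64_t *mem, int64_t *r1, int64_t r3)
{
    int64_t old = atomic_fetch_add(mem, r3);  /* old value goes to R1 */
    int64_t sum = old + r3;                   /* recomputed for CC only */

    *r1 = old;
    return sum == 0 ? 0 : (sum < 0 ? 1 : 2);  /* adds64-style CC, no overflow */
}

int main(void)
{
    _Atomic int64_t mem = 40;
    int64_t r1, r3 = 2;
    int cc = load_and_add(&mem, &r1, r3);

    printf("old=%lld new=%lld cc=%d\n", (long long)r1,
           (long long)atomic_load(&mem), cc);
    return 0;
}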
*/ + tcg_gen_xor_i64(o->out, o->in1, o->in2); + return NO_EXIT; +} + static ExitStatus op_ldeb(DisasContext *s, DisasOps *o) { gen_helper_ldeb(o->out, cpu_env, o->in2); @@ -2558,6 +2563,7 @@ static ExitStatus op_lctlg(DisasContext *s, DisasOps *o) tcg_temp_free_i32(r3); return NO_EXIT; } + static ExitStatus op_lra(DisasContext *s, DisasOps *o) { check_privileged(s); @@ -2567,6 +2573,14 @@ static ExitStatus op_lra(DisasContext *s, DisasOps *o) return NO_EXIT; } +static ExitStatus op_lpp(DisasContext *s, DisasOps *o) +{ + check_privileged(s); + + tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp)); + return NO_EXIT; +} + static ExitStatus op_lpsw(DisasContext *s, DisasOps *o) { TCGv_i64 t1, t2; @@ -2750,6 +2764,31 @@ static ExitStatus op_lm64(DisasContext *s, DisasOps *o) return NO_EXIT; } +static ExitStatus op_lpd(DisasContext *s, DisasOps *o) +{ + TCGv_i64 a1, a2; + TCGMemOp mop = s->insn->data; + + /* In a parallel context, stop the world and single step. */ + if (parallel_cpus) { + potential_page_fault(s); + gen_exception(EXCP_ATOMIC); + return EXIT_NORETURN; + } + + /* In a serial context, perform the two loads ... */ + a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1)); + a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2)); + tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN); + tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN); + tcg_temp_free_i64(a1); + tcg_temp_free_i64(a2); + + /* ... and indicate that we performed them while interlocked. */ + gen_op_movi_cc(s, 0); + return NO_EXIT; +} + #ifndef CONFIG_USER_ONLY static ExitStatus op_lura(DisasContext *s, DisasOps *o) { @@ -3382,6 +3421,7 @@ static ExitStatus op_sigp(DisasContext *s, DisasOps *o) check_privileged(s); potential_page_fault(s); gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1); + set_cc_static(s); tcg_temp_free_i32(r1); return NO_EXIT; } @@ -3628,15 +3668,8 @@ static ExitStatus op_spt(DisasContext *s, DisasOps *o) static ExitStatus op_stfl(DisasContext *s, DisasOps *o) { - TCGv_i64 f, a; - /* We really ought to have more complete indication of facilities - that we implement. Address this when STFLE is implemented. 
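/*
 * Illustrative sketch, not part of the patch: do_stfle above packs facility
 * bits into 64-bit words with facility bit 0 at the most significant bit of
 * doubleword 0, i.e. bit n lands in words[n / 64] at position 63 - n % 64.
 * MAX_WORDS and the example bit numbers below are chosen for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_WORDS 3

static void set_facility(uint64_t words[MAX_WORDS], unsigned bit)
{
    words[bit / 64] |= 1ULL << (63 - bit % 64);
}

int main(void)
{
    uint64_t words[MAX_WORDS] = { 0 };

    set_facility(words, 2);    /* z/Architecture active: 0x2000000000000000 */
    set_facility(words, 76);   /* an example bit in the second doubleword   */

    for (unsigned i = 0; i < MAX_WORDS; i++) {
        printf("word %u = 0x%016llx\n", i, (unsigned long long)words[i]);
    }
    return 0;
}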
*/ check_privileged(s); - f = tcg_const_i64(0xc0000000); - a = tcg_const_i64(200); - tcg_gen_qemu_st32(f, a, get_mem_index(s)); - tcg_temp_free_i64(f); - tcg_temp_free_i64(a); + gen_helper_stfl(cpu_env); return NO_EXIT; } @@ -3802,6 +3835,14 @@ static ExitStatus op_sturg(DisasContext *s, DisasOps *o) } #endif +static ExitStatus op_stfle(DisasContext *s, DisasOps *o) +{ + potential_page_fault(s); + gen_helper_stfle(cc_op, cpu_env, o->in2); + set_cc_static(s); + return NO_EXIT; +} + static ExitStatus op_st8(DisasContext *s, DisasOps *o) { tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s)); @@ -4420,6 +4461,22 @@ static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o) } #define SPEC_wout_r1_D32 SPEC_r1_even +static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o) +{ + int r3 = get_field(f, r3); + store_reg32_i64(r3, o->out); + store_reg32_i64(r3 + 1, o->out2); +} +#define SPEC_wout_r3_P32 SPEC_r3_even + +static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o) +{ + int r3 = get_field(f, r3); + store_reg(r3, o->out); + store_reg(r3 + 1, o->out2); +} +#define SPEC_wout_r3_P64 SPEC_r3_even + static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o) { store_freg32_i64(get_field(f, r1), o->out); @@ -4486,21 +4543,17 @@ static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o) } #define SPEC_wout_m2_32 0 -static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o) { - /* XXX release reservation */ - tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s)); - store_reg32_i64(get_field(f, r1), o->in2); + store_reg(get_field(f, r1), o->in2); } -#define SPEC_wout_m2_32_r1_atomic 0 +#define SPEC_wout_in2_r1 0 -static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o) { - /* XXX release reservation */ - tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s)); - store_reg(get_field(f, r1), o->in2); + store_reg32_i64(get_field(f, r1), o->in2); } -#define SPEC_wout_m2_64_r1_atomic 0 +#define SPEC_wout_in2_r1_32 0 /* ====================================================================== */ /* The "INput 1" generators. These load the first operand to an insn. 
*/ @@ -4944,24 +4997,6 @@ static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o) } #define SPEC_in2_mri2_64 0 -static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o) -{ - /* XXX should reserve the address */ - in1_la2(s, f, o); - o->in2 = tcg_temp_new_i64(); - tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s)); -} -#define SPEC_in2_m2_32s_atomic 0 - -static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o) -{ - /* XXX should reserve the address */ - in1_la2(s, f, o); - o->in2 = tcg_temp_new_i64(); - tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s)); -} -#define SPEC_in2_m2_64_atomic 0 - static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o) { o->in2 = tcg_const_i64(get_field(f, i2)); diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index 9a481c35dc..9da7e1ed38 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -301,6 +301,7 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data) #ifdef CONFIG_USER_ONLY cc->handle_mmu_fault = superh_cpu_handle_mmu_fault; #else + cc->do_unaligned_access = superh_cpu_do_unaligned_access; cc->get_phys_page_debug = superh_cpu_get_phys_page_debug; #endif cc->disas_set_info = superh_cpu_disas_set_info; diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h index cad8989f7e..6c07c6b24b 100644 --- a/target/sh4/cpu.h +++ b/target/sh4/cpu.h @@ -24,6 +24,7 @@ #include "cpu-qom.h" #define TARGET_LONG_BITS 32 +#define ALIGNED_ONLY /* CPU Subtypes */ #define SH_CPU_SH7750 (1 << 0) @@ -92,14 +93,6 @@ #define DELAY_SLOT (1 << 0) #define DELAY_SLOT_CONDITIONAL (1 << 1) -#define DELAY_SLOT_TRUE (1 << 2) -#define DELAY_SLOT_CLEARME (1 << 3) -/* The dynamic value of the DELAY_SLOT_TRUE flag determines whether the jump - * after the delay slot should be taken or not. It is calculated from SR_T. - * - * It is unclear if it is permitted to modify the SR_T flag in a delay slot. - * The use of DELAY_SLOT_TRUE flag makes us accept such SR_T modification. 
- */ typedef struct tlb_t { uint32_t vpn; /* virtual page number */ @@ -149,7 +142,8 @@ typedef struct CPUSH4State { uint32_t sgr; /* saved global register 15 */ uint32_t dbr; /* debug base register */ uint32_t pc; /* program counter */ - uint32_t delayed_pc; /* target of delayed jump */ + uint32_t delayed_pc; /* target of delayed branch */ + uint32_t delayed_cond; /* condition of delayed branch */ uint32_t mach; /* multiply and accumulate high */ uint32_t macl; /* multiply and accumulate low */ uint32_t pr; /* procedure register */ @@ -222,6 +216,9 @@ void superh_cpu_dump_state(CPUState *cpu, FILE *f, hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int superh_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); int superh_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); +void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); void sh4_translate_init(void); SuperHCPU *cpu_sh4_init(const char *cpu_model); @@ -383,8 +380,7 @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc, { *pc = env->pc; *cs_base = 0; - *flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL - | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */ + *flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) /* Bits 0-1 */ | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */ | (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */ | (env->sr & (1u << SR_FD)) /* Bit 15 */ diff --git a/target/sh4/helper.c b/target/sh4/helper.c index 036c5ca56c..8f8ce81401 100644 --- a/target/sh4/helper.c +++ b/target/sh4/helper.c @@ -168,10 +168,8 @@ void superh_cpu_do_interrupt(CPUState *cs) /* Branch instruction should be executed again before delay slot. */ env->spc -= 2; /* Clear flags for exception/interrupt routine. 
*/ - env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL | DELAY_SLOT_TRUE); + env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); } - if (env->flags & DELAY_SLOT_CLEARME) - env->flags = 0; if (do_exp) { env->expevt = cs->exception_index; diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c index 684d3f3758..528a40ac1d 100644 --- a/target/sh4/op_helper.c +++ b/target/sh4/op_helper.c @@ -24,6 +24,22 @@ #ifndef CONFIG_USER_ONLY +void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + switch (access_type) { + case MMU_INST_FETCH: + case MMU_DATA_LOAD: + cs->exception_index = 0x0e0; + break; + case MMU_DATA_STORE: + cs->exception_index = 0x100; + break; + } + cpu_loop_exit_restore(cs, retaddr); +} + void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { @@ -32,10 +48,7 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, ret = superh_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); if (ret) { /* now we have a real cpu fault */ - if (retaddr) { - cpu_restore_state(cs, retaddr); - } - cpu_loop_exit(cs); + cpu_loop_exit_restore(cs, retaddr); } } @@ -59,10 +72,7 @@ static inline void QEMU_NORETURN raise_exception(CPUSH4State *env, int index, CPUState *cs = CPU(sh_env_get_cpu(env)); cs->exception_index = index; - if (retaddr) { - cpu_restore_state(cs, retaddr); - } - cpu_loop_exit(cs); + cpu_loop_exit_restore(cs, retaddr); } void helper_raise_illegal_instruction(CPUSH4State *env) diff --git a/target/sh4/translate.c b/target/sh4/translate.c index c89a14733f..0bc2f9ff19 100644 --- a/target/sh4/translate.c +++ b/target/sh4/translate.c @@ -37,7 +37,8 @@ typedef struct DisasContext { struct TranslationBlock *tb; target_ulong pc; uint16_t opcode; - uint32_t flags; + uint32_t tbflags; /* should stay unmodified during the TB translation */ + uint32_t envflags; /* should stay in sync with env->flags using TCG ops */ int bstate; int memidx; uint32_t delayed_pc; @@ -49,7 +50,7 @@ typedef struct DisasContext { #if defined(CONFIG_USER_ONLY) #define IS_USER(ctx) 1 #else -#define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD))) +#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD))) #endif enum { @@ -71,7 +72,7 @@ static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst; static TCGv cpu_fregs[32]; /* internal register indexes */ -static TCGv cpu_flags, cpu_delayed_pc; +static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond; #include "exec/gen-icount.h" @@ -146,6 +147,10 @@ void sh4_translate_init(void) cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, delayed_pc), "_delayed_pc_"); + cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env, + offsetof(CPUSH4State, + delayed_cond), + "_delayed_cond_"); cpu_ldst = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, ldst), "_ldst_"); @@ -199,12 +204,23 @@ static void gen_write_sr(TCGv src) { tcg_gen_andi_i32(cpu_sr, src, ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T))); - tcg_gen_shri_i32(cpu_sr_q, src, SR_Q); - tcg_gen_andi_i32(cpu_sr_q, cpu_sr_q, 1); - tcg_gen_shri_i32(cpu_sr_m, src, SR_M); - tcg_gen_andi_i32(cpu_sr_m, cpu_sr_m, 1); - tcg_gen_shri_i32(cpu_sr_t, src, SR_T); - tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1); + tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1); + tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1); + tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1); +} + +static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc) +{ + if (save_pc) { + tcg_gen_movi_i32(cpu_pc, ctx->pc); + } + if 
(ctx->delayed_pc != (uint32_t) -1) { + tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc); + } + if ((ctx->tbflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) + != ctx->envflags) { + tcg_gen_movi_i32(cpu_flags, ctx->envflags); + } } static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) @@ -241,6 +257,7 @@ static void gen_jump(DisasContext * ctx) /* Target is not statically known, it comes necessarily from a delayed jump as immediate jump are conditinal jumps */ tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc); + tcg_gen_discard_i32(cpu_delayed_pc); if (ctx->singlestep_enabled) gen_helper_debug(cpu_env); tcg_gen_exit_tb(0); @@ -249,24 +266,17 @@ static void gen_jump(DisasContext * ctx) } } -static inline void gen_branch_slot(uint32_t delayed_pc, int t) -{ - TCGLabel *label = gen_new_label(); - tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc); - tcg_gen_brcondi_i32(t ? TCG_COND_EQ : TCG_COND_NE, cpu_sr_t, 0, label); - tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE); - gen_set_label(label); -} - /* Immediate conditional jump (bt or bf) */ static void gen_conditional_jump(DisasContext * ctx, target_ulong ift, target_ulong ifnott) { TCGLabel *l1 = gen_new_label(); + gen_save_cpu_state(ctx, false); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1); gen_goto_tb(ctx, 0, ifnott); gen_set_label(l1); gen_goto_tb(ctx, 1, ift); + ctx->bstate = BS_BRANCH; } /* Delayed conditional jump (bt or bf) */ @@ -277,20 +287,14 @@ static void gen_delayed_conditional_jump(DisasContext * ctx) l1 = gen_new_label(); ds = tcg_temp_new(); - tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE); + tcg_gen_mov_i32(ds, cpu_delayed_cond); + tcg_gen_discard_i32(cpu_delayed_cond); tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1); gen_goto_tb(ctx, 1, ctx->pc + 2); gen_set_label(l1); - tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE); gen_jump(ctx); } -static inline void gen_store_flags(uint32_t flags) -{ - tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE); - tcg_gen_ori_i32(cpu_flags, cpu_flags, flags); -} - static inline void gen_load_fpr64(TCGv_i64 t, int reg) { tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]); @@ -298,13 +302,7 @@ static inline void gen_load_fpr64(TCGv_i64 t, int reg) static inline void gen_store_fpr64 (TCGv_i64 t, int reg) { - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_extrl_i64_i32(tmp, t); - tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp); - tcg_gen_shri_i64(t, t, 32); - tcg_gen_extrl_i64_i32(tmp, t); - tcg_gen_mov_i32(cpu_fregs[reg], tmp); - tcg_temp_free_i32(tmp); + tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t); } #define B3_0 (ctx->opcode & 0xf) @@ -317,51 +315,50 @@ static inline void gen_store_fpr64 (TCGv_i64 t, int reg) #define B11_8 ((ctx->opcode >> 8) & 0xf) #define B15_12 ((ctx->opcode >> 12) & 0xf) -#define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\ - && (ctx->flags & (1u << SR_RB))\ +#define REG(x) ((x) < 8 && (ctx->tbflags & (1u << SR_MD))\ + && (ctx->tbflags & (1u << SR_RB))\ ? (cpu_gregs[x + 16]) : (cpu_gregs[x])) -#define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\ - || !(ctx->flags & (1u << SR_RB)))\ +#define ALTREG(x) ((x) < 8 && (!(ctx->tbflags & (1u << SR_MD))\ + || !(ctx->tbflags & (1u << SR_RB)))\ ? (cpu_gregs[x + 16]) : (cpu_gregs[x])) -#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x)) +#define FREG(x) (ctx->tbflags & FPSCR_FR ? (x) ^ 0x10 : (x)) #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe)) -#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x)) +#define XREG(x) (ctx->tbflags & FPSCR_FR ? 
XHACK(x) ^ 0x10 : XHACK(x)) #define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */ #define CHECK_NOT_DELAY_SLOT \ - if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \ - { \ - tcg_gen_movi_i32(cpu_pc, ctx->pc); \ - gen_helper_raise_slot_illegal_instruction(cpu_env); \ - ctx->bstate = BS_BRANCH; \ - return; \ - } - -#define CHECK_PRIVILEGED \ - if (IS_USER(ctx)) { \ - tcg_gen_movi_i32(cpu_pc, ctx->pc); \ - if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \ - gen_helper_raise_slot_illegal_instruction(cpu_env); \ - } else { \ - gen_helper_raise_illegal_instruction(cpu_env); \ - } \ - ctx->bstate = BS_BRANCH; \ - return; \ - } - -#define CHECK_FPU_ENABLED \ - if (ctx->flags & (1u << SR_FD)) { \ - tcg_gen_movi_i32(cpu_pc, ctx->pc); \ - if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \ - gen_helper_raise_slot_fpu_disable(cpu_env); \ - } else { \ - gen_helper_raise_fpu_disable(cpu_env); \ - } \ - ctx->bstate = BS_BRANCH; \ - return; \ - } + if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \ + gen_save_cpu_state(ctx, true); \ + gen_helper_raise_slot_illegal_instruction(cpu_env); \ + ctx->bstate = BS_EXCP; \ + return; \ + } + +#define CHECK_PRIVILEGED \ + if (IS_USER(ctx)) { \ + gen_save_cpu_state(ctx, true); \ + if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \ + gen_helper_raise_slot_illegal_instruction(cpu_env); \ + } else { \ + gen_helper_raise_illegal_instruction(cpu_env); \ + } \ + ctx->bstate = BS_EXCP; \ + return; \ + } + +#define CHECK_FPU_ENABLED \ + if (ctx->tbflags & (1u << SR_FD)) { \ + gen_save_cpu_state(ctx, true); \ + if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \ + gen_helper_raise_slot_fpu_disable(cpu_env); \ + } else { \ + gen_helper_raise_fpu_disable(cpu_env); \ + } \ + ctx->bstate = BS_EXCP; \ + return; \ + } static void _decode_opc(DisasContext * ctx) { @@ -409,7 +406,7 @@ static void _decode_opc(DisasContext * ctx) case 0x000b: /* rts */ CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr); - ctx->flags |= DELAY_SLOT; + ctx->envflags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0028: /* clrmac */ @@ -431,7 +428,7 @@ static void _decode_opc(DisasContext * ctx) CHECK_NOT_DELAY_SLOT gen_write_sr(cpu_ssr); tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc); - ctx->flags |= DELAY_SLOT; + ctx->envflags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0058: /* sets */ @@ -497,15 +494,13 @@ static void _decode_opc(DisasContext * ctx) case 0xa000: /* bra disp */ CHECK_NOT_DELAY_SLOT ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2; - tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc); - ctx->flags |= DELAY_SLOT; + ctx->envflags |= DELAY_SLOT; return; case 0xb000: /* bsr disp */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->pc + 4); ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2; - tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc); - ctx->flags |= DELAY_SLOT; + ctx->envflags |= DELAY_SLOT; return; } @@ -939,7 +934,7 @@ static void _decode_opc(DisasContext * ctx) return; case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED - if (ctx->flags & FPSCR_SZ) { + if (ctx->tbflags & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(fp, XREG(B7_4)); gen_store_fpr64(fp, XREG(B11_8)); @@ -950,7 +945,7 @@ static void _decode_opc(DisasContext * ctx) return; case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED - if (ctx->flags & FPSCR_SZ) { + if (ctx->tbflags & FPSCR_SZ) { TCGv addr_hi = tcg_temp_new(); int fr = XREG(B7_4); tcg_gen_addi_i32(addr_hi, 
REG(B11_8), 4); @@ -966,7 +961,7 @@ static void _decode_opc(DisasContext * ctx) return; case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED - if (ctx->flags & FPSCR_SZ) { + if (ctx->tbflags & FPSCR_SZ) { TCGv addr_hi = tcg_temp_new(); int fr = XREG(B11_8); tcg_gen_addi_i32(addr_hi, REG(B7_4), 4); @@ -980,7 +975,7 @@ static void _decode_opc(DisasContext * ctx) return; case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED - if (ctx->flags & FPSCR_SZ) { + if (ctx->tbflags & FPSCR_SZ) { TCGv addr_hi = tcg_temp_new(); int fr = XREG(B11_8); tcg_gen_addi_i32(addr_hi, REG(B7_4), 4); @@ -998,7 +993,7 @@ static void _decode_opc(DisasContext * ctx) CHECK_FPU_ENABLED TCGv addr = tcg_temp_new_i32(); tcg_gen_subi_i32(addr, REG(B11_8), 4); - if (ctx->flags & FPSCR_SZ) { + if (ctx->tbflags & FPSCR_SZ) { int fr = XREG(B7_4); tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL); tcg_gen_subi_i32(addr, addr, 4); @@ -1015,7 +1010,7 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new_i32(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); - if (ctx->flags & FPSCR_SZ) { + if (ctx->tbflags & FPSCR_SZ) { int fr = XREG(B11_8); tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL); @@ -1034,7 +1029,7 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); - if (ctx->flags & FPSCR_SZ) { + if (ctx->tbflags & FPSCR_SZ) { int fr = XREG(B7_4); tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL); @@ -1056,7 +1051,7 @@ static void _decode_opc(DisasContext * ctx) case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */ { CHECK_FPU_ENABLED - if (ctx->flags & FPSCR_PR) { + if (ctx->tbflags & FPSCR_PR) { TCGv_i64 fp0, fp1; if (ctx->opcode & 0x0110) @@ -1125,7 +1120,7 @@ static void _decode_opc(DisasContext * ctx) case 0xf00e: /* fmac FR0,RM,Rn */ { CHECK_FPU_ENABLED - if (ctx->flags & FPSCR_PR) { + if (ctx->tbflags & FPSCR_PR) { break; /* illegal instruction */ } else { gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env, @@ -1155,25 +1150,23 @@ static void _decode_opc(DisasContext * ctx) return; case 0x8b00: /* bf label */ CHECK_NOT_DELAY_SLOT - gen_conditional_jump(ctx, ctx->pc + 2, - ctx->pc + 4 + B7_0s * 2); - ctx->bstate = BS_BRANCH; + gen_conditional_jump(ctx, ctx->pc + 2, ctx->pc + 4 + B7_0s * 2); return; case 0x8f00: /* bf/s label */ CHECK_NOT_DELAY_SLOT - gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0); - ctx->flags |= DELAY_SLOT_CONDITIONAL; + tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1); + ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2; + ctx->envflags |= DELAY_SLOT_CONDITIONAL; return; case 0x8900: /* bt label */ CHECK_NOT_DELAY_SLOT - gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, - ctx->pc + 2); - ctx->bstate = BS_BRANCH; + gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, ctx->pc + 2); return; case 0x8d00: /* bt/s label */ CHECK_NOT_DELAY_SLOT - gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1); - ctx->flags |= DELAY_SLOT_CONDITIONAL; + tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t); + ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2; + ctx->envflags |= DELAY_SLOT_CONDITIONAL; return; case 0x8800: /* cmp/eq #imm,R0 */ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s); @@ -1281,11 +1274,11 @@ static void _decode_opc(DisasContext * ctx) { TCGv imm; CHECK_NOT_DELAY_SLOT - tcg_gen_movi_i32(cpu_pc, ctx->pc); + gen_save_cpu_state(ctx, true); imm = tcg_const_i32(B7_0); gen_helper_trapa(cpu_env, imm); tcg_temp_free(imm); - 
ctx->bstate = BS_BRANCH; + ctx->bstate = BS_EXCP; } return; case 0xc800: /* tst #imm,R0 */ @@ -1354,14 +1347,14 @@ static void _decode_opc(DisasContext * ctx) case 0x0023: /* braf Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4); - ctx->flags |= DELAY_SLOT; + ctx->envflags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0003: /* bsrf Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->pc + 4); tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr); - ctx->flags |= DELAY_SLOT; + ctx->envflags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x4015: /* cmp/pl Rn */ @@ -1377,14 +1370,14 @@ static void _decode_opc(DisasContext * ctx) case 0x402b: /* jmp @Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8)); - ctx->flags |= DELAY_SLOT; + ctx->envflags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x400b: /* jsr @Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->pc + 4); tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8)); - ctx->flags |= DELAY_SLOT; + ctx->envflags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x400e: /* ldc Rm,SR */ @@ -1508,17 +1501,23 @@ static void _decode_opc(DisasContext * ctx) } ctx->has_movcal = 1; return; - case 0x40a9: - /* MOVUA.L @Rm,R0 (Rm) -> R0 - Load non-boundary-aligned data */ - tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL); - return; - case 0x40e9: - /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm - Load non-boundary-aligned data */ - tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL); - tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); - return; + case 0x40a9: /* movua.l @Rm,R0 */ + /* Load non-boundary-aligned data */ + if (ctx->features & SH_FEATURE_SH4A) { + tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, + MO_TEUL | MO_UNALN); + return; + } + break; + case 0x40e9: /* movua.l @Rm+,R0 */ + /* Load non-boundary-aligned data */ + if (ctx->features & SH_FEATURE_SH4A) { + tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, + MO_TEUL | MO_UNALN); + tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); + return; + } + break; case 0x0029: /* movt Rn */ tcg_gen_mov_i32(REG(B11_8), cpu_sr_t); return; @@ -1576,10 +1575,11 @@ static void _decode_opc(DisasContext * ctx) else break; case 0x00ab: /* synco */ - if (ctx->features & SH_FEATURE_SH4A) - return; - else - break; + if (ctx->features & SH_FEATURE_SH4A) { + tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); + return; + } + break; case 0x4024: /* rotcl Rn */ { TCGv tmp = tcg_temp_new(); @@ -1640,19 +1640,14 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16); return; case 0x401b: /* tas.b @Rn */ - { - TCGv addr, val; - addr = tcg_temp_local_new(); - tcg_gen_mov_i32(addr, REG(B11_8)); - val = tcg_temp_local_new(); - tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB); + { + TCGv val = tcg_const_i32(0x80); + tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val, + ctx->memidx, MO_UB); tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0); - tcg_gen_ori_i32(val, val, 0x80); - tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB); - tcg_temp_free(val); - tcg_temp_free(addr); - } - return; + tcg_temp_free(val); + } + return; case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */ CHECK_FPU_ENABLED tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul); @@ -1663,7 +1658,7 @@ static void _decode_opc(DisasContext * ctx) return; case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */ CHECK_FPU_ENABLED - if (ctx->flags & FPSCR_PR) { + if (ctx->tbflags & FPSCR_PR) { 
TCGv_i64 fp; if (ctx->opcode & 0x0100) break; /* illegal instruction */ @@ -1678,7 +1673,7 @@ static void _decode_opc(DisasContext * ctx) return; case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */ CHECK_FPU_ENABLED - if (ctx->flags & FPSCR_PR) { + if (ctx->tbflags & FPSCR_PR) { TCGv_i64 fp; if (ctx->opcode & 0x0100) break; /* illegal instruction */ @@ -1699,7 +1694,7 @@ static void _decode_opc(DisasContext * ctx) return; case 0xf05d: /* fabs FRn/DRn */ CHECK_FPU_ENABLED - if (ctx->flags & FPSCR_PR) { + if (ctx->tbflags & FPSCR_PR) { if (ctx->opcode & 0x0100) break; /* illegal instruction */ TCGv_i64 fp = tcg_temp_new_i64(); @@ -1713,7 +1708,7 @@ static void _decode_opc(DisasContext * ctx) return; case 0xf06d: /* fsqrt FRn */ CHECK_FPU_ENABLED - if (ctx->flags & FPSCR_PR) { + if (ctx->tbflags & FPSCR_PR) { if (ctx->opcode & 0x0100) break; /* illegal instruction */ TCGv_i64 fp = tcg_temp_new_i64(); @@ -1731,13 +1726,13 @@ static void _decode_opc(DisasContext * ctx) break; case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */ CHECK_FPU_ENABLED - if (!(ctx->flags & FPSCR_PR)) { + if (!(ctx->tbflags & FPSCR_PR)) { tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0); } return; case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */ CHECK_FPU_ENABLED - if (!(ctx->flags & FPSCR_PR)) { + if (!(ctx->tbflags & FPSCR_PR)) { tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000); } return; @@ -1761,7 +1756,7 @@ static void _decode_opc(DisasContext * ctx) return; case 0xf0ed: /* fipr FVm,FVn */ CHECK_FPU_ENABLED - if ((ctx->flags & FPSCR_PR) == 0) { + if ((ctx->tbflags & FPSCR_PR) == 0) { TCGv m, n; m = tcg_const_i32((ctx->opcode >> 8) & 3); n = tcg_const_i32((ctx->opcode >> 10) & 3); @@ -1774,7 +1769,7 @@ static void _decode_opc(DisasContext * ctx) case 0xf0fd: /* ftrv XMTRX,FVn */ CHECK_FPU_ENABLED if ((ctx->opcode & 0x0300) == 0x0100 && - (ctx->flags & FPSCR_PR) == 0) { + (ctx->tbflags & FPSCR_PR) == 0) { TCGv n; n = tcg_const_i32((ctx->opcode >> 10) & 3); gen_helper_ftrv(cpu_env, n); @@ -1788,31 +1783,25 @@ static void _decode_opc(DisasContext * ctx) ctx->opcode, ctx->pc); fflush(stderr); #endif - tcg_gen_movi_i32(cpu_pc, ctx->pc); - if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { + gen_save_cpu_state(ctx, true); + if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { gen_helper_raise_slot_illegal_instruction(cpu_env); } else { gen_helper_raise_illegal_instruction(cpu_env); } - ctx->bstate = BS_BRANCH; + ctx->bstate = BS_EXCP; } static void decode_opc(DisasContext * ctx) { - uint32_t old_flags = ctx->flags; + uint32_t old_flags = ctx->envflags; _decode_opc(ctx); if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { - if (ctx->flags & DELAY_SLOT_CLEARME) { - gen_store_flags(0); - } else { - /* go out of the delay slot */ - uint32_t new_flags = ctx->flags; - new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); - gen_store_flags(new_flags); - } - ctx->flags = 0; + /* go out of the delay slot */ + ctx->envflags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); + tcg_gen_movi_i32(cpu_flags, ctx->envflags); ctx->bstate = BS_BRANCH; if (old_flags & DELAY_SLOT_CONDITIONAL) { gen_delayed_conditional_jump(ctx); @@ -1821,10 +1810,6 @@ static void decode_opc(DisasContext * ctx) } } - - /* go into a delay slot */ - if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) - gen_store_flags(ctx->flags); } void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb) @@ -1838,16 +1823,17 @@ void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb) pc_start = tb->pc; ctx.pc = 
pc_start; - ctx.flags = (uint32_t)tb->flags; + ctx.tbflags = (uint32_t)tb->flags; + ctx.envflags = tb->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL); ctx.bstate = BS_NONE; - ctx.memidx = (ctx.flags & (1u << SR_MD)) == 0 ? 1 : 0; + ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0; /* We don't know if the delayed pc came from a dynamic or static branch, so assume it is a dynamic branch. */ ctx.delayed_pc = -1; /* use delayed pc from env pointer */ ctx.tb = tb; ctx.singlestep_enabled = cs->singlestep_enabled; ctx.features = env->features; - ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA); + ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA); num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; @@ -1860,14 +1846,14 @@ void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb) gen_tb_start(tb); while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) { - tcg_gen_insn_start(ctx.pc, ctx.flags); + tcg_gen_insn_start(ctx.pc, ctx.envflags); num_insns++; if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) { /* We have hit a breakpoint - make sure PC is up-to-date */ - tcg_gen_movi_i32(cpu_pc, ctx.pc); + gen_save_cpu_state(&ctx, true); gen_helper_debug(cpu_env); - ctx.bstate = BS_BRANCH; + ctx.bstate = BS_EXCP; /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order to for it to be properly cleared -- thus we increment the PC here so that @@ -1896,23 +1882,20 @@ void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb) if (tb->cflags & CF_LAST_IO) gen_io_end(); if (cs->singlestep_enabled) { - tcg_gen_movi_i32(cpu_pc, ctx.pc); + gen_save_cpu_state(&ctx, true); gen_helper_debug(cpu_env); } else { switch (ctx.bstate) { case BS_STOP: - /* gen_op_interrupt_restart(); */ - /* fall through */ + gen_save_cpu_state(&ctx, true); + tcg_gen_exit_tb(0); + break; case BS_NONE: - if (ctx.flags) { - gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME); - } + gen_save_cpu_state(&ctx, false); gen_goto_tb(&ctx, 0, ctx.pc); break; case BS_EXCP: - /* gen_op_interrupt_restart(); */ - tcg_gen_exit_tb(0); - break; + /* fall through */ case BS_BRANCH: default: break; @@ -1941,4 +1924,7 @@ void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, { env->pc = data[0]; env->flags = data[1]; + /* Theoretically delayed_pc should also be restored. In practice the + branch instruction is re-executed after exception, so the delayed + branch target will be recomputed. 
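/*
 * Illustrative sketch, not part of the patch: the SH4 rework above replaces
 * the DELAY_SLOT_TRUE flag with a latched delayed_cond value.  bt/s and
 * bf/s record the branch condition and target when the branch is decoded,
 * the delay-slot instruction then executes (and may even clobber SR.T), and
 * only afterwards does the latched condition decide the jump.  The struct
 * layout and function names below are invented, not the CPUSH4State fields.
 */
#include <stdint.h>
#include <stdio.h>

struct cpu {
    uint32_t pc;
    uint32_t sr_t;          /* T bit at branch-decode time */
    uint32_t delayed_pc;    /* latched branch target       */
    uint32_t delayed_cond;  /* latched branch condition    */
};

static void decode_bts(struct cpu *c, int32_t disp)   /* bt/s label */
{
    c->delayed_cond = c->sr_t;               /* taken iff T was set */
    c->delayed_pc   = c->pc + 4 + disp * 2;
}

static void end_of_delay_slot(struct cpu *c)
{
    c->pc = c->delayed_cond ? c->delayed_pc : c->pc + 4;
    c->delayed_cond = 0;
}

int main(void)
{
    struct cpu c = { .pc = 0x1000, .sr_t = 1 };

    decode_bts(&c, 8);      /* branch decoded ...                        */
    c.sr_t = 0;             /* ... delay slot may modify T ...           */
    end_of_delay_slot(&c);  /* ... but the latched condition still wins  */
    printf("pc = 0x%x\n", c.pc);
    return 0;
}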
*/ } diff --git a/tests/Makefile.include b/tests/Makefile.include index 31931c0d77..16ff8f399f 100644 --- a/tests/Makefile.include +++ b/tests/Makefile.include @@ -260,6 +260,7 @@ check-qtest-i386-y += tests/test-filter-mirror$(EXESUF) check-qtest-i386-y += tests/test-filter-redirector$(EXESUF) check-qtest-i386-y += tests/postcopy-test$(EXESUF) check-qtest-i386-y += tests/test-x86-cpuid-compat$(EXESUF) +check-qtest-i386-y += tests/numa-test$(EXESUF) check-qtest-x86_64-y += $(check-qtest-i386-y) gcov-files-i386-y += i386-softmmu/hw/timer/mc146818rtc.c gcov-files-x86_64-y = $(subst i386-softmmu/,x86_64-softmmu/,$(gcov-files-i386-y)) @@ -300,6 +301,7 @@ check-qtest-ppc64-y += tests/test-netfilter$(EXESUF) check-qtest-ppc64-y += tests/test-filter-mirror$(EXESUF) check-qtest-ppc64-y += tests/test-filter-redirector$(EXESUF) check-qtest-ppc64-y += tests/display-vga-test$(EXESUF) +check-qtest-ppc64-y += tests/numa-test$(EXESUF) check-qtest-ppc64-$(CONFIG_EVENTFD) += tests/ivshmem-test$(EXESUF) check-qtest-sh4-y = tests/endianness-test$(EXESUF) @@ -324,6 +326,8 @@ gcov-files-arm-y += arm-softmmu/hw/block/virtio-blk.c check-qtest-arm-y += tests/test-arm-mptimer$(EXESUF) gcov-files-arm-y += hw/timer/arm_mptimer.c +check-qtest-aarch64-y = tests/numa-test$(EXESUF) + check-qtest-microblazeel-y = $(check-qtest-microblaze-y) check-qtest-xtensaeb-y = $(check-qtest-xtensa-y) @@ -753,6 +757,7 @@ tests/vhost-user-bridge$(EXESUF): tests/vhost-user-bridge.o contrib/libvhost-use tests/test-uuid$(EXESUF): tests/test-uuid.o $(test-util-obj-y) tests/test-arm-mptimer$(EXESUF): tests/test-arm-mptimer.o tests/test-qapi-util$(EXESUF): tests/test-qapi-util.o $(test-util-obj-y) +tests/numa-test$(EXESUF): tests/numa-test.o tests/migration/stress$(EXESUF): tests/migration/stress.o $(call quiet-command, $(LINKPROG) -static -O3 $(PTHREAD_LIB) -o $@ $< ,"LINK","$(TARGET_DIR)$@") diff --git a/tests/acpi-test-data/pc/SLIT.cphp b/tests/acpi-test-data/pc/SLIT.cphp new file mode 100644 index 0000000000..74ec3b4b46 --- /dev/null +++ b/tests/acpi-test-data/pc/SLIT.cphp Binary files differdiff --git a/tests/acpi-test-data/pc/SLIT.memhp b/tests/acpi-test-data/pc/SLIT.memhp new file mode 100644 index 0000000000..74ec3b4b46 --- /dev/null +++ b/tests/acpi-test-data/pc/SLIT.memhp Binary files differdiff --git a/tests/acpi-test-data/pc/SRAT.memhp b/tests/acpi-test-data/pc/SRAT.memhp index 66ce9a8981..a7dddf7760 100644 --- a/tests/acpi-test-data/pc/SRAT.memhp +++ b/tests/acpi-test-data/pc/SRAT.memhp Binary files differdiff --git a/tests/acpi-test-data/q35/SLIT.cphp b/tests/acpi-test-data/q35/SLIT.cphp new file mode 100644 index 0000000000..74ec3b4b46 --- /dev/null +++ b/tests/acpi-test-data/q35/SLIT.cphp Binary files differdiff --git a/tests/acpi-test-data/q35/SLIT.memhp b/tests/acpi-test-data/q35/SLIT.memhp new file mode 100644 index 0000000000..74ec3b4b46 --- /dev/null +++ b/tests/acpi-test-data/q35/SLIT.memhp Binary files differdiff --git a/tests/acpi-test-data/q35/SRAT.memhp b/tests/acpi-test-data/q35/SRAT.memhp index 66ce9a8981..a7dddf7760 100644 --- a/tests/acpi-test-data/q35/SRAT.memhp +++ b/tests/acpi-test-data/q35/SRAT.memhp Binary files differdiff --git a/tests/bios-tables-test.c b/tests/bios-tables-test.c index 9c96a67053..4e5c65a022 100644 --- a/tests/bios-tables-test.c +++ b/tests/bios-tables-test.c @@ -723,7 +723,8 @@ static void test_acpi_piix4_tcg_cphp(void) data.machine = MACHINE_PC; data.variant = ".cphp"; test_acpi_one("-smp 2,cores=3,sockets=2,maxcpus=6" - " -numa node -numa node", + " -numa node -numa node" + " -numa 
dist,src=0,dst=1,val=21", &data); free_test_data(&data); } @@ -736,7 +737,8 @@ static void test_acpi_q35_tcg_cphp(void) data.machine = MACHINE_Q35; data.variant = ".cphp"; test_acpi_one(" -smp 2,cores=3,sockets=2,maxcpus=6" - " -numa node -numa node", + " -numa node -numa node" + " -numa dist,src=0,dst=1,val=21", &data); free_test_data(&data); } @@ -785,7 +787,10 @@ static void test_acpi_q35_tcg_memhp(void) memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".memhp"; - test_acpi_one(" -m 128,slots=3,maxmem=1G -numa node", &data); + test_acpi_one(" -m 128,slots=3,maxmem=1G" + " -numa node -numa node" + " -numa dist,src=0,dst=1,val=21", + &data); free_test_data(&data); } @@ -796,7 +801,10 @@ static void test_acpi_piix4_tcg_memhp(void) memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".memhp"; - test_acpi_one(" -m 128,slots=3,maxmem=1G -numa node", &data); + test_acpi_one(" -m 128,slots=3,maxmem=1G" + " -numa node -numa node" + " -numa dist,src=0,dst=1,val=21", + &data); free_test_data(&data); } diff --git a/tests/numa-test.c b/tests/numa-test.c new file mode 100644 index 0000000000..c3475d6d5e --- /dev/null +++ b/tests/numa-test.c @@ -0,0 +1,302 @@ +/* + * NUMA configuration test cases + * + * Copyright (c) 2017 Red Hat Inc. + * Authors: + * Igor Mammedov <imammedo@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "libqtest.h" + +static char *make_cli(const char *generic_cli, const char *test_cli) +{ + return g_strdup_printf("%s %s", generic_cli ? generic_cli : "", test_cli); +} + +static char *hmp_info_numa(void) +{ + QDict *resp; + char *s; + + resp = qmp("{ 'execute': 'human-monitor-command', 'arguments': " + "{ 'command-line': 'info numa '} }"); + g_assert(resp); + g_assert(qdict_haskey(resp, "return")); + s = g_strdup(qdict_get_str(resp, "return")); + g_assert(s); + QDECREF(resp); + return s; +} + +static void test_mon_explicit(const void *data) +{ + char *s; + char *cli; + + cli = make_cli(data, "-smp 8 " + "-numa node,nodeid=0,cpus=0-3 " + "-numa node,nodeid=1,cpus=4-7 "); + qtest_start(cli); + + s = hmp_info_numa(); + g_assert(strstr(s, "node 0 cpus: 0 1 2 3")); + g_assert(strstr(s, "node 1 cpus: 4 5 6 7")); + g_free(s); + + qtest_end(); + g_free(cli); +} + +static void test_mon_default(const void *data) +{ + char *s; + char *cli; + + cli = make_cli(data, "-smp 8 -numa node -numa node"); + qtest_start(cli); + + s = hmp_info_numa(); + g_assert(strstr(s, "node 0 cpus: 0 2 4 6")); + g_assert(strstr(s, "node 1 cpus: 1 3 5 7")); + g_free(s); + + qtest_end(); + g_free(cli); +} + +static void test_mon_partial(const void *data) +{ + char *s; + char *cli; + + cli = make_cli(data, "-smp 8 " + "-numa node,nodeid=0,cpus=0-1 " + "-numa node,nodeid=1,cpus=4-5 "); + qtest_start(cli); + + s = hmp_info_numa(); + g_assert(strstr(s, "node 0 cpus: 0 1 2 3 6 7")); + g_assert(strstr(s, "node 1 cpus: 4 5")); + g_free(s); + + qtest_end(); + g_free(cli); +} + +static QList *get_cpus(QDict **resp) +{ + *resp = qmp("{ 'execute': 'query-cpus' }"); + g_assert(*resp); + g_assert(qdict_haskey(*resp, "return")); + return qdict_get_qlist(*resp, "return"); +} + +static void test_query_cpus(const void *data) +{ + char *cli; + QDict *resp; + QList *cpus; + const QObject *e; + + cli = make_cli(data, "-smp 8 -numa node,cpus=0-3 -numa node,cpus=4-7"); + qtest_start(cli); + cpus = get_cpus(&resp); + g_assert(cpus); + + while ((e = 
qlist_pop(cpus))) { + QDict *cpu, *props; + int64_t cpu_idx, node; + + cpu = qobject_to_qdict(e); + g_assert(qdict_haskey(cpu, "CPU")); + g_assert(qdict_haskey(cpu, "props")); + + cpu_idx = qdict_get_int(cpu, "CPU"); + props = qdict_get_qdict(cpu, "props"); + g_assert(qdict_haskey(props, "node-id")); + node = qdict_get_int(props, "node-id"); + if (cpu_idx >= 0 && cpu_idx < 4) { + g_assert_cmpint(node, ==, 0); + } else { + g_assert_cmpint(node, ==, 1); + } + } + + QDECREF(resp); + qtest_end(); + g_free(cli); +} + +static void pc_numa_cpu(const void *data) +{ + char *cli; + QDict *resp; + QList *cpus; + const QObject *e; + + cli = make_cli(data, "-cpu pentium -smp 8,sockets=2,cores=2,threads=2 " + "-numa node,nodeid=0 -numa node,nodeid=1 " + "-numa cpu,node-id=1,socket-id=0 " + "-numa cpu,node-id=0,socket-id=1,core-id=0 " + "-numa cpu,node-id=0,socket-id=1,core-id=1,thread-id=0 " + "-numa cpu,node-id=1,socket-id=1,core-id=1,thread-id=1"); + qtest_start(cli); + cpus = get_cpus(&resp); + g_assert(cpus); + + while ((e = qlist_pop(cpus))) { + QDict *cpu, *props; + int64_t socket, core, thread, node; + + cpu = qobject_to_qdict(e); + g_assert(qdict_haskey(cpu, "props")); + props = qdict_get_qdict(cpu, "props"); + + g_assert(qdict_haskey(props, "node-id")); + node = qdict_get_int(props, "node-id"); + g_assert(qdict_haskey(props, "socket-id")); + socket = qdict_get_int(props, "socket-id"); + g_assert(qdict_haskey(props, "core-id")); + core = qdict_get_int(props, "core-id"); + g_assert(qdict_haskey(props, "thread-id")); + thread = qdict_get_int(props, "thread-id"); + + if (socket == 0) { + g_assert_cmpint(node, ==, 1); + } else if (socket == 1 && core == 0) { + g_assert_cmpint(node, ==, 0); + } else if (socket == 1 && core == 1 && thread == 0) { + g_assert_cmpint(node, ==, 0); + } else if (socket == 1 && core == 1 && thread == 1) { + g_assert_cmpint(node, ==, 1); + } else { + g_assert(false); + } + } + + QDECREF(resp); + qtest_end(); + g_free(cli); +} + +static void spapr_numa_cpu(const void *data) +{ + char *cli; + QDict *resp; + QList *cpus; + const QObject *e; + + cli = make_cli(data, "-smp 4,cores=4 " + "-numa node,nodeid=0 -numa node,nodeid=1 " + "-numa cpu,node-id=0,core-id=0 " + "-numa cpu,node-id=0,core-id=1 " + "-numa cpu,node-id=0,core-id=2 " + "-numa cpu,node-id=1,core-id=3"); + qtest_start(cli); + cpus = get_cpus(&resp); + g_assert(cpus); + + while ((e = qlist_pop(cpus))) { + QDict *cpu, *props; + int64_t core, node; + + cpu = qobject_to_qdict(e); + g_assert(qdict_haskey(cpu, "props")); + props = qdict_get_qdict(cpu, "props"); + + g_assert(qdict_haskey(props, "node-id")); + node = qdict_get_int(props, "node-id"); + g_assert(qdict_haskey(props, "core-id")); + core = qdict_get_int(props, "core-id"); + + if (core >= 0 && core < 3) { + g_assert_cmpint(node, ==, 0); + } else if (core == 3) { + g_assert_cmpint(node, ==, 1); + } else { + g_assert(false); + } + } + + QDECREF(resp); + qtest_end(); + g_free(cli); +} + +static void aarch64_numa_cpu(const void *data) +{ + char *cli; + QDict *resp; + QList *cpus; + const QObject *e; + + cli = make_cli(data, "-smp 2 " + "-numa node,nodeid=0 -numa node,nodeid=1 " + "-numa cpu,node-id=1,thread-id=0 " + "-numa cpu,node-id=0,thread-id=1"); + qtest_start(cli); + cpus = get_cpus(&resp); + g_assert(cpus); + + while ((e = qlist_pop(cpus))) { + QDict *cpu, *props; + int64_t thread, node; + + cpu = qobject_to_qdict(e); + g_assert(qdict_haskey(cpu, "props")); + props = qdict_get_qdict(cpu, "props"); + + g_assert(qdict_haskey(props, "node-id")); + node = 
qdict_get_int(props, "node-id"); + g_assert(qdict_haskey(props, "thread-id")); + thread = qdict_get_int(props, "thread-id"); + + if (thread == 0) { + g_assert_cmpint(node, ==, 1); + } else if (thread == 1) { + g_assert_cmpint(node, ==, 0); + } else { + g_assert(false); + } + } + + QDECREF(resp); + qtest_end(); + g_free(cli); +} + +int main(int argc, char **argv) +{ + const char *args = NULL; + const char *arch = qtest_get_arch(); + + if (strcmp(arch, "aarch64") == 0) { + args = "-machine virt"; + } + + g_test_init(&argc, &argv, NULL); + + qtest_add_data_func("/numa/mon/default", args, test_mon_default); + qtest_add_data_func("/numa/mon/cpus/explicit", args, test_mon_explicit); + qtest_add_data_func("/numa/mon/cpus/partial", args, test_mon_partial); + qtest_add_data_func("/numa/qmp/cpus/query-cpus", args, test_query_cpus); + + if (!strcmp(arch, "i386") || !strcmp(arch, "x86_64")) { + qtest_add_data_func("/numa/pc/cpu/explicit", args, pc_numa_cpu); + } + + if (!strcmp(arch, "ppc64")) { + qtest_add_data_func("/numa/spapr/cpu/explicit", args, spapr_numa_cpu); + } + + if (!strcmp(arch, "aarch64")) { + qtest_add_data_func("/numa/aarch64/cpu/explicit", args, + aarch64_numa_cpu); + } + + return g_test_run(); +} diff --git a/ui/Makefile.objs b/ui/Makefile.objs index 27566b32f1..aac6ae8bef 100644 --- a/ui/Makefile.objs +++ b/ui/Makefile.objs @@ -33,6 +33,7 @@ common-obj-y += shader.o common-obj-y += console-gl.o common-obj-y += egl-helpers.o common-obj-y += egl-context.o +common-obj-y += egl-headless.o ifeq ($(CONFIG_GTK_GL),y) common-obj-$(CONFIG_GTK) += gtk-gl-area.o else diff --git a/ui/cocoa.m b/ui/cocoa.m index 207555edf7..3a9bc4da5f 100644 --- a/ui/cocoa.m +++ b/ui/cocoa.m @@ -749,8 +749,8 @@ QemuCocoaView *cocoaView; * clicks in the titlebar. */ if ([self screenContainsPoint:p]) { - qemu_input_queue_abs(dcl->con, INPUT_AXIS_X, p.x, screen.width); - qemu_input_queue_abs(dcl->con, INPUT_AXIS_Y, screen.height - p.y, screen.height); + qemu_input_queue_abs(dcl->con, INPUT_AXIS_X, p.x, 0, screen.width); + qemu_input_queue_abs(dcl->con, INPUT_AXIS_Y, screen.height - p.y, 0, screen.height); } } else { qemu_input_queue_rel(dcl->con, INPUT_AXIS_X, (int)[event deltaX]); diff --git a/ui/egl-context.c b/ui/egl-context.c index 3a02b68d1a..2161969abe 100644 --- a/ui/egl-context.c +++ b/ui/egl-context.c @@ -7,9 +7,10 @@ QEMUGLContext qemu_egl_create_context(DisplayChangeListener *dcl, { EGLContext ctx; EGLint ctx_att[] = { - EGL_CONTEXT_CLIENT_VERSION, params->major_ver, - EGL_CONTEXT_MINOR_VERSION_KHR, params->minor_ver, - EGL_NONE + EGL_CONTEXT_OPENGL_PROFILE_MASK, EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT, + EGL_CONTEXT_CLIENT_VERSION, params->major_ver, + EGL_CONTEXT_MINOR_VERSION_KHR, params->minor_ver, + EGL_NONE }; ctx = eglCreateContext(qemu_egl_display, qemu_egl_config, diff --git a/ui/egl-headless.c b/ui/egl-headless.c new file mode 100644 index 0000000000..d8d800f8a6 --- /dev/null +++ b/ui/egl-headless.c @@ -0,0 +1,158 @@ +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "sysemu/sysemu.h" +#include "ui/console.h" +#include "ui/egl-helpers.h" +#include "ui/egl-context.h" + +typedef struct egl_dpy { + DisplayChangeListener dcl; + DisplaySurface *ds; + int width, height; + GLuint texture; + GLuint framebuffer; + GLuint blit_texture; + GLuint blit_framebuffer; + bool y_0_top; +} egl_dpy; + +static void egl_refresh(DisplayChangeListener *dcl) +{ + graphic_hw_update(dcl->con); +} + +static void egl_gfx_update(DisplayChangeListener *dcl, + int x, int y, int w, int h) +{ +} + +static void 
egl_gfx_switch(DisplayChangeListener *dcl, + struct DisplaySurface *new_surface) +{ + egl_dpy *edpy = container_of(dcl, egl_dpy, dcl); + + edpy->ds = new_surface; +} + +static void egl_scanout_disable(DisplayChangeListener *dcl) +{ + egl_dpy *edpy = container_of(dcl, egl_dpy, dcl); + + edpy->texture = 0; + /* XXX: delete framebuffers here ??? */ +} + +static void egl_scanout_texture(DisplayChangeListener *dcl, + uint32_t backing_id, + bool backing_y_0_top, + uint32_t backing_width, + uint32_t backing_height, + uint32_t x, uint32_t y, + uint32_t w, uint32_t h) +{ + egl_dpy *edpy = container_of(dcl, egl_dpy, dcl); + + edpy->texture = backing_id; + edpy->y_0_top = backing_y_0_top; + + /* source framebuffer */ + if (!edpy->framebuffer) { + glGenFramebuffers(1, &edpy->framebuffer); + } + glBindFramebuffer(GL_FRAMEBUFFER_EXT, edpy->framebuffer); + glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, + GL_TEXTURE_2D, edpy->texture, 0); + + /* dest framebuffer */ + if (!edpy->blit_framebuffer) { + glGenFramebuffers(1, &edpy->blit_framebuffer); + glGenTextures(1, &edpy->blit_texture); + edpy->width = 0; + edpy->height = 0; + } + if (edpy->width != backing_width || edpy->height != backing_height) { + edpy->width = backing_width; + edpy->height = backing_height; + glBindTexture(GL_TEXTURE_2D, edpy->blit_texture); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, + edpy->width, edpy->height, + 0, GL_BGRA, GL_UNSIGNED_BYTE, 0); + glBindFramebuffer(GL_FRAMEBUFFER_EXT, edpy->blit_framebuffer); + glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, + GL_TEXTURE_2D, edpy->blit_texture, 0); + } +} + +static void egl_scanout_flush(DisplayChangeListener *dcl, + uint32_t x, uint32_t y, + uint32_t w, uint32_t h) +{ + egl_dpy *edpy = container_of(dcl, egl_dpy, dcl); + GLuint y1, y2; + + if (!edpy->texture || !edpy->ds) { + return; + } + assert(surface_width(edpy->ds) == edpy->width); + assert(surface_height(edpy->ds) == edpy->height); + assert(surface_format(edpy->ds) == PIXMAN_x8r8g8b8); + + /* blit framebuffer, flip if needed */ + glBindFramebuffer(GL_READ_FRAMEBUFFER, edpy->framebuffer); + glBindFramebuffer(GL_DRAW_FRAMEBUFFER, edpy->blit_framebuffer); + glViewport(0, 0, edpy->width, edpy->height); + y1 = edpy->y_0_top ? edpy->height : 0; + y2 = edpy->y_0_top ? 
0 : edpy->height; + glBlitFramebuffer(0, y1, edpy->width, y2, + 0, 0, edpy->width, edpy->height, + GL_COLOR_BUFFER_BIT, GL_NEAREST); + + /* read pixels to surface */ + glBindFramebuffer(GL_READ_FRAMEBUFFER, edpy->blit_framebuffer); + glReadBuffer(GL_COLOR_ATTACHMENT0_EXT); + glReadPixels(0, 0, edpy->width, edpy->height, + GL_BGRA, GL_UNSIGNED_BYTE, surface_data(edpy->ds)); + + /* notify about updates */ + dpy_gfx_update(edpy->dcl.con, x, y, w, h); +} + +static const DisplayChangeListenerOps egl_ops = { + .dpy_name = "egl-headless", + .dpy_refresh = egl_refresh, + .dpy_gfx_update = egl_gfx_update, + .dpy_gfx_switch = egl_gfx_switch, + + .dpy_gl_ctx_create = qemu_egl_create_context, + .dpy_gl_ctx_destroy = qemu_egl_destroy_context, + .dpy_gl_ctx_make_current = qemu_egl_make_context_current, + .dpy_gl_ctx_get_current = qemu_egl_get_current_context, + + .dpy_gl_scanout_disable = egl_scanout_disable, + .dpy_gl_scanout_texture = egl_scanout_texture, + .dpy_gl_update = egl_scanout_flush, +}; + +void egl_headless_init(void) +{ + QemuConsole *con; + egl_dpy *edpy; + int idx; + + if (egl_rendernode_init(NULL) < 0) { + error_report("egl: render node init failed"); + exit(1); + } + + for (idx = 0;; idx++) { + con = qemu_console_lookup_by_index(idx); + if (!con || !qemu_console_is_graphic(con)) { + break; + } + + edpy = g_new0(egl_dpy, 1); + edpy->dcl.con = con; + edpy->dcl.ops = &egl_ops; + register_displaychangelistener(&edpy->dcl); + } +} diff --git a/ui/egl-helpers.c b/ui/egl-helpers.c index b7b6b2e3cc..4a4d3370ee 100644 --- a/ui/egl-helpers.c +++ b/ui/egl-helpers.c @@ -26,18 +26,6 @@ EGLConfig qemu_egl_config; /* ---------------------------------------------------------------------- */ -static bool egl_gles; -static int egl_debug; - -#define egl_dbg(_x ...) \ - do { \ - if (egl_debug) { \ - fprintf(stderr, "egl: " _x); \ - } \ - } while (0); - -/* ---------------------------------------------------------------------- */ - #ifdef CONFIG_OPENGL_DMABUF int qemu_egl_rn_fd; @@ -92,6 +80,7 @@ static int qemu_egl_rendernode_open(const char *rendernode) int egl_rendernode_init(const char *rendernode) { qemu_egl_rn_fd = -1; + int rc; qemu_egl_rn_fd = qemu_egl_rendernode_open(rendernode); if (qemu_egl_rn_fd == -1) { @@ -105,7 +94,11 @@ int egl_rendernode_init(const char *rendernode) goto err; } - qemu_egl_init_dpy((EGLNativeDisplayType)qemu_egl_rn_gbm_dev, false, false); + rc = qemu_egl_init_dpy_mesa((EGLNativeDisplayType)qemu_egl_rn_gbm_dev); + if (rc != 0) { + /* qemu_egl_init_dpy_mesa reports error */ + goto err; + } if (!epoxy_has_egl_extension(qemu_egl_display, "EGL_KHR_surfaceless_context")) { @@ -171,8 +164,6 @@ EGLSurface qemu_egl_init_surface_x11(EGLContext ectx, Window win) EGLSurface esurface; EGLBoolean b; - egl_dbg("eglCreateWindowSurface (x11 win id 0x%lx) ...\n", - (unsigned long) win); esurface = eglCreateWindowSurface(qemu_egl_display, qemu_egl_config, (EGLNativeWindowType)win, NULL); @@ -220,20 +211,19 @@ EGLSurface qemu_egl_init_surface_x11(EGLContext ectx, Window win) * platform extensions (EGL_KHR_platform_gbm and friends) yet it doesn't seem * like mesa will be able to advertise these (even though it can do EGL 1.5). 
*/ -static EGLDisplay qemu_egl_get_display(void *native) +static EGLDisplay qemu_egl_get_display(EGLNativeDisplayType native, + EGLenum platform) { EGLDisplay dpy = EGL_NO_DISPLAY; -#ifdef EGL_MESA_platform_gbm /* In practise any EGL 1.5 implementation would support the EXT extension */ if (epoxy_has_egl_extension(NULL, "EGL_EXT_platform_base")) { PFNEGLGETPLATFORMDISPLAYEXTPROC getPlatformDisplayEXT = (void *) eglGetProcAddress("eglGetPlatformDisplayEXT"); - if (getPlatformDisplayEXT) { - dpy = getPlatformDisplayEXT(EGL_PLATFORM_GBM_MESA, native, NULL); + if (getPlatformDisplayEXT && platform != 0) { + dpy = getPlatformDisplayEXT(platform, native, NULL); } } -#endif if (dpy == EGL_NO_DISPLAY) { /* fallback */ @@ -242,7 +232,8 @@ static EGLDisplay qemu_egl_get_display(void *native) return dpy; } -int qemu_egl_init_dpy(EGLNativeDisplayType dpy, bool gles, bool debug) +static int qemu_egl_init_dpy(EGLNativeDisplayType dpy, + EGLenum platform) { static const EGLint conf_att_gl[] = { EGL_SURFACE_TYPE, EGL_WINDOW_BIT, @@ -253,75 +244,66 @@ int qemu_egl_init_dpy(EGLNativeDisplayType dpy, bool gles, bool debug) EGL_ALPHA_SIZE, 0, EGL_NONE, }; - static const EGLint conf_att_gles[] = { - EGL_SURFACE_TYPE, EGL_WINDOW_BIT, - EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, - EGL_RED_SIZE, 5, - EGL_GREEN_SIZE, 5, - EGL_BLUE_SIZE, 5, - EGL_ALPHA_SIZE, 0, - EGL_NONE, - }; EGLint major, minor; EGLBoolean b; EGLint n; - if (debug) { - egl_debug = 1; - setenv("EGL_LOG_LEVEL", "debug", true); - setenv("LIBGL_DEBUG", "verbose", true); - } - - egl_dbg("qemu_egl_get_display (dpy %p) ...\n", dpy); - qemu_egl_display = qemu_egl_get_display(dpy); + qemu_egl_display = qemu_egl_get_display(dpy, platform); if (qemu_egl_display == EGL_NO_DISPLAY) { error_report("egl: eglGetDisplay failed"); return -1; } - egl_dbg("eglInitialize ...\n"); b = eglInitialize(qemu_egl_display, &major, &minor); if (b == EGL_FALSE) { error_report("egl: eglInitialize failed"); return -1; } - egl_dbg("eglBindAPI ...\n"); - b = eglBindAPI(gles ? EGL_OPENGL_ES_API : EGL_OPENGL_API); + b = eglBindAPI(EGL_OPENGL_API); if (b == EGL_FALSE) { error_report("egl: eglBindAPI failed"); return -1; } - egl_dbg("eglChooseConfig ...\n"); - b = eglChooseConfig(qemu_egl_display, - gles ? conf_att_gles : conf_att_gl, + b = eglChooseConfig(qemu_egl_display, conf_att_gl, &qemu_egl_config, 1, &n); if (b == EGL_FALSE || n != 1) { error_report("egl: eglChooseConfig failed"); return -1; } - - egl_gles = gles; return 0; } +int qemu_egl_init_dpy_x11(EGLNativeDisplayType dpy) +{ +#ifdef EGL_KHR_platform_x11 + return qemu_egl_init_dpy(dpy, EGL_PLATFORM_X11_KHR); +#else + return qemu_egl_init_dpy(dpy, 0); +#endif +} + +int qemu_egl_init_dpy_mesa(EGLNativeDisplayType dpy) +{ +#ifdef EGL_MESA_platform_gbm + return qemu_egl_init_dpy(dpy, EGL_PLATFORM_GBM_MESA); +#else + return qemu_egl_init_dpy(dpy, 0); +#endif +} + EGLContext qemu_egl_init_ctx(void) { static const EGLint ctx_att_gl[] = { + EGL_CONTEXT_OPENGL_PROFILE_MASK, EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT, EGL_NONE }; - static const EGLint ctx_att_gles[] = { - EGL_CONTEXT_CLIENT_VERSION, 2, - EGL_NONE - }; - EGLContext ectx; EGLBoolean b; - egl_dbg("eglCreateContext ...\n"); ectx = eglCreateContext(qemu_egl_display, qemu_egl_config, EGL_NO_CONTEXT, - egl_gles ? 
ctx_att_gles : ctx_att_gl); + ctx_att_gl); if (ectx == EGL_NO_CONTEXT) { error_report("egl: eglCreateContext failed"); return NULL; diff --git a/ui/gtk-egl.c b/ui/gtk-egl.c index d53288f027..cf48cca259 100644 --- a/ui/gtk-egl.c +++ b/ui/gtk-egl.c @@ -246,7 +246,7 @@ void gtk_egl_init(void) GdkDisplay *gdk_display = gdk_display_get_default(); Display *x11_display = gdk_x11_display_get_xdisplay(gdk_display); - if (qemu_egl_init_dpy(x11_display, false, false) < 0) { + if (qemu_egl_init_dpy_x11(x11_display) < 0) { return; } diff --git a/ui/gtk.c b/ui/gtk.c index 7479ceef35..0213ad0efc 100644 --- a/ui/gtk.c +++ b/ui/gtk.c @@ -912,9 +912,9 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion, return TRUE; } qemu_input_queue_abs(vc->gfx.dcl.con, INPUT_AXIS_X, x, - surface_width(vc->gfx.ds)); + 0, surface_width(vc->gfx.ds)); qemu_input_queue_abs(vc->gfx.dcl.con, INPUT_AXIS_Y, y, - surface_height(vc->gfx.ds)); + 0, surface_height(vc->gfx.ds)); qemu_input_event_sync(); } else if (s->last_set && s->ptr_owner == vc) { qemu_input_queue_rel(vc->gfx.dcl.con, INPUT_AXIS_X, x - s->last_x); diff --git a/ui/input-linux.c b/ui/input-linux.c index dc0613ca1f..49d52a69cc 100644 --- a/ui/input-linux.c +++ b/ui/input-linux.c @@ -169,6 +169,10 @@ struct InputLinux { bool has_abs_x; int num_keys; int num_btns; + int abs_x_min; + int abs_x_max; + int abs_y_min; + int abs_y_max; struct input_event event; int read_offset; @@ -314,6 +318,18 @@ static void input_linux_handle_mouse(InputLinux *il, struct input_event *event) break; } break; + case EV_ABS: + switch (event->code) { + case ABS_X: + qemu_input_queue_abs(NULL, INPUT_AXIS_X, event->value, + il->abs_x_min, il->abs_x_max); + break; + case ABS_Y: + qemu_input_queue_abs(NULL, INPUT_AXIS_Y, event->value, + il->abs_y_min, il->abs_y_max); + break; + } + break; case EV_SYN: qemu_input_event_sync(); if (il->wheel != 0) { @@ -351,7 +367,7 @@ static void input_linux_event(void *opaque) if (il->num_keys) { input_linux_handle_keyboard(il, &il->event); } - if (il->has_rel_x && il->num_btns) { + if ((il->has_rel_x || il->has_abs_x) && il->num_btns) { input_linux_handle_mouse(il, &il->event); } } @@ -364,6 +380,7 @@ static void input_linux_complete(UserCreatable *uc, Error **errp) uint8_t keymap[KEY_CNT / 8], keystate[KEY_CNT / 8]; unsigned int i; int rc, ver; + struct input_absinfo absinfo; if (!il->evdev) { error_setg(errp, "no input device specified"); @@ -402,6 +419,12 @@ static void input_linux_complete(UserCreatable *uc, Error **errp) rc = ioctl(il->fd, EVIOCGBIT(EV_ABS, sizeof(absmap)), &absmap); if (absmap & (1 << ABS_X)) { il->has_abs_x = true; + rc = ioctl(il->fd, EVIOCGABS(ABS_X), &absinfo); + il->abs_x_min = absinfo.minimum; + il->abs_x_max = absinfo.maximum; + rc = ioctl(il->fd, EVIOCGABS(ABS_Y), &absinfo); + il->abs_y_min = absinfo.minimum; + il->abs_y_max = absinfo.maximum; } } diff --git a/ui/input.c b/ui/input.c index 830f912f99..290ca9f54d 100644 --- a/ui/input.c +++ b/ui/input.c @@ -166,6 +166,11 @@ void qmp_input_send_event(bool has_device, const char *device, qemu_input_event_sync(); } +static int qemu_input_transform_invert_abs_value(int value) +{ + return (int64_t)INPUT_EVENT_ABS_MAX - value + INPUT_EVENT_ABS_MIN; +} + static void qemu_input_transform_abs_rotate(InputEvent *evt) { InputMoveEvent *move = evt->u.abs.data; @@ -175,16 +180,16 @@ static void qemu_input_transform_abs_rotate(InputEvent *evt) move->axis = INPUT_AXIS_Y; } else if (move->axis == INPUT_AXIS_Y) { move->axis = INPUT_AXIS_X; - move->value = INPUT_EVENT_ABS_SIZE 
- 1 - move->value; + move->value = qemu_input_transform_invert_abs_value(move->value); } break; case 180: - move->value = INPUT_EVENT_ABS_SIZE - 1 - move->value; + move->value = qemu_input_transform_invert_abs_value(move->value); break; case 270: if (move->axis == INPUT_AXIS_X) { move->axis = INPUT_AXIS_Y; - move->value = INPUT_EVENT_ABS_SIZE - 1 - move->value; + move->value = qemu_input_transform_invert_abs_value(move->value); } else if (move->axis == INPUT_AXIS_Y) { move->axis = INPUT_AXIS_X; } @@ -467,12 +472,17 @@ bool qemu_input_is_absolute(void) return (s != NULL) && (s->handler->mask & INPUT_EVENT_MASK_ABS); } -int qemu_input_scale_axis(int value, int size_in, int size_out) +int qemu_input_scale_axis(int value, + int min_in, int max_in, + int min_out, int max_out) { - if (size_in < 2) { - return size_out / 2; + int64_t range_in = (int64_t)max_in - min_in; + int64_t range_out = (int64_t)max_out - min_out; + + if (range_in < 1) { + return min_out + range_out / 2; } - return (int64_t)value * (size_out - 1) / (size_in - 1); + return ((int64_t)value - min_in) * range_out / range_in + min_out; } InputEvent *qemu_input_event_new_move(InputEventKind kind, @@ -496,10 +506,13 @@ void qemu_input_queue_rel(QemuConsole *src, InputAxis axis, int value) qapi_free_InputEvent(evt); } -void qemu_input_queue_abs(QemuConsole *src, InputAxis axis, int value, int size) +void qemu_input_queue_abs(QemuConsole *src, InputAxis axis, int value, + int min_in, int max_in) { InputEvent *evt; - int scaled = qemu_input_scale_axis(value, size, INPUT_EVENT_ABS_SIZE); + int scaled = qemu_input_scale_axis(value, min_in, max_in, + INPUT_EVENT_ABS_MIN, + INPUT_EVENT_ABS_MAX); evt = qemu_input_event_new_move(INPUT_EVENT_KIND_ABS, axis, scaled); qemu_input_event_send(src, evt); qapi_free_InputEvent(evt); diff --git a/ui/sdl.c b/ui/sdl.c index 37c21a00fb..b35a67855f 100644 --- a/ui/sdl.c +++ b/ui/sdl.c @@ -490,9 +490,9 @@ static void sdl_send_mouse_event(int dx, int dy, int x, int y, int state) if (qemu_input_is_absolute()) { qemu_input_queue_abs(dcl->con, INPUT_AXIS_X, x, - real_screen->w); + 0, real_screen->w); qemu_input_queue_abs(dcl->con, INPUT_AXIS_Y, y, - real_screen->h); + 0, real_screen->h); } else { if (guest_cursor) { x -= guest_x; diff --git a/ui/sdl2.c b/ui/sdl2.c index faf9bdff5c..21de05200e 100644 --- a/ui/sdl2.c +++ b/ui/sdl2.c @@ -298,8 +298,8 @@ static void sdl_send_mouse_event(struct sdl2_console *scon, int dx, int dy, } } } - qemu_input_queue_abs(scon->dcl.con, INPUT_AXIS_X, off_x + x, max_w); - qemu_input_queue_abs(scon->dcl.con, INPUT_AXIS_Y, off_y + y, max_h); + qemu_input_queue_abs(scon->dcl.con, INPUT_AXIS_X, off_x + x, 0, max_w); + qemu_input_queue_abs(scon->dcl.con, INPUT_AXIS_Y, off_y + y, 0, max_h); } else { if (guest_cursor) { x -= guest_x; diff --git a/ui/spice-input.c b/ui/spice-input.c index 8eeebdbb2e..86293dd2ce 100644 --- a/ui/spice-input.c +++ b/ui/spice-input.c @@ -172,8 +172,8 @@ static void tablet_position(SpiceTabletInstance* sin, int x, int y, QemuSpicePointer *pointer = container_of(sin, QemuSpicePointer, tablet); spice_update_buttons(pointer, 0, buttons_state); - qemu_input_queue_abs(NULL, INPUT_AXIS_X, x, pointer->width); - qemu_input_queue_abs(NULL, INPUT_AXIS_Y, y, pointer->height); + qemu_input_queue_abs(NULL, INPUT_AXIS_X, x, 0, pointer->width); + qemu_input_queue_abs(NULL, INPUT_AXIS_Y, y, 0, pointer->height); qemu_input_event_sync(); } diff --git a/ui/vnc-enc-zrle.c b/ui/vnc-enc-zrle.c index 5489870e70..fd63d4f688 100644 --- a/ui/vnc-enc-zrle.c +++ b/ui/vnc-enc-zrle.c @@ 
-163,7 +163,6 @@ static void zrle_choose_palette_rle(VncState *vs, int w, int h, if (packed_bytes < estimated_bytes) { *use_rle = false; *use_palette = true; - estimated_bytes = packed_bytes; } } } diff --git a/ui/vnc.c b/ui/vnc.c index 9c4edcdbf5..47b49c7318 100644 --- a/ui/vnc.c +++ b/ui/vnc.c @@ -1556,8 +1556,8 @@ static void pointer_event(VncState *vs, int button_mask, int x, int y) } if (vs->absolute) { - qemu_input_queue_abs(con, INPUT_AXIS_X, x, width); - qemu_input_queue_abs(con, INPUT_AXIS_Y, y, height); + qemu_input_queue_abs(con, INPUT_AXIS_X, x, 0, width); + qemu_input_queue_abs(con, INPUT_AXIS_Y, y, 0, height); } else if (vnc_has_feature(vs, VNC_FEATURE_POINTER_TYPE_CHANGE)) { qemu_input_queue_rel(con, INPUT_AXIS_X, x - 0x7FFF); qemu_input_queue_rel(con, INPUT_AXIS_Y, y - 0x7FFF); @@ -2061,15 +2061,15 @@ static void set_pixel_format(VncState *vs, int bits_per_pixel, } vs->client_pf.rmax = red_max ? red_max : 0xFF; - vs->client_pf.rbits = hweight_long(red_max); + vs->client_pf.rbits = ctpopl(red_max); vs->client_pf.rshift = red_shift; vs->client_pf.rmask = red_max << red_shift; vs->client_pf.gmax = green_max ? green_max : 0xFF; - vs->client_pf.gbits = hweight_long(green_max); + vs->client_pf.gbits = ctpopl(green_max); vs->client_pf.gshift = green_shift; vs->client_pf.gmask = green_max << green_shift; vs->client_pf.bmax = blue_max ? blue_max : 0xFF; - vs->client_pf.bbits = hweight_long(blue_max); + vs->client_pf.bbits = ctpopl(blue_max); vs->client_pf.bshift = blue_shift; vs->client_pf.bmask = blue_max << blue_shift; vs->client_pf.bits_per_pixel = bits_per_pixel; diff --git a/vl.c b/vl.c index ce2b392d8b..5c9b40eb1c 100644 --- a/vl.c +++ b/vl.c @@ -2050,6 +2050,7 @@ typedef enum DisplayType { DT_SDL, DT_COCOA, DT_GTK, + DT_EGL, DT_NONE, } DisplayType; @@ -2127,6 +2128,15 @@ static DisplayType select_display(const char *p) error_report("VNC requires a display argument vnc=<display>"); exit(1); } + } else if (strstart(p, "egl-headless", &opts)) { +#ifdef CONFIG_OPENGL + request_opengl = 1; + display_opengl = 1; + display = DT_EGL; +#else + fprintf(stderr, "egl support is disabled\n"); + exit(1); +#endif } else if (strstart(p, "curses", &opts)) { #ifdef CONFIG_CURSES display = DT_CURSES; @@ -4504,7 +4514,7 @@ int main(int argc, char **argv, char **envp) default_drive(default_floppy, snapshot, IF_FLOPPY, 0, FD_OPTS); default_drive(default_sdcard, snapshot, IF_SD, 0, SD_OPTS); - parse_numa_opts(machine_class); + parse_numa_opts(current_machine); if (qemu_opts_foreach(qemu_find_opts("mon"), mon_init_func, NULL, NULL)) { @@ -4560,7 +4570,7 @@ int main(int argc, char **argv, char **envp) current_machine->boot_order = boot_order; current_machine->cpu_model = cpu_model; - machine_class->init(current_machine); + machine_run_board_init(current_machine); realtime_init(); @@ -4593,8 +4603,6 @@ int main(int argc, char **argv, char **envp) cpu_synchronize_all_post_init(); - numa_post_machine_init(); - rom_reset_order_override(); /* @@ -4660,6 +4668,12 @@ int main(int argc, char **argv, char **envp) qemu_spice_display_init(); } +#ifdef CONFIG_OPENGL + if (display_type == DT_EGL) { + egl_headless_init(); + } +#endif + if (foreach_device_config(DEV_GDB, gdbserver_start) < 0) { exit(1); } |
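
The sh4 translator hunks above replace the ad-hoc tcg_gen_movi_i32(cpu_pc, ...) and gen_store_flags() sequences with calls to gen_save_cpu_state(), whose definition is not part of the quoted hunks. Below is a minimal sketch of what such a helper is assumed to emit, reconstructed from its call sites; cpu_delayed_pc and the exact store conditions are assumptions, not the actual target/sh4 implementation.

    /* Hypothetical reconstruction of gen_save_cpu_state(); the real helper in
     * target/sh4/translate.c may differ.  It writes translation-time state back
     * to the CPU's TCG globals before raising an exception or leaving the TB. */
    static void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
    {
        if (save_pc) {
            tcg_gen_movi_i32(cpu_pc, ctx->pc);                  /* current PC */
        }
        if (ctx->delayed_pc != (uint32_t)-1) {
            tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);  /* assumed global */
        }
        /* Only the DELAY_SLOT* bits live in ctx->envflags now; everything else
         * (FPSCR.PR, SR.MD, ...) is read from ctx->tbflags at translation time
         * and never stored back. */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }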
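
The bios-tables-test changes add "-numa dist,src=0,dst=1,val=21" to the cphp and memhp variants and introduce matching SLIT.cphp/SLIT.memhp reference blobs. As a worked example of the locality matrix this is expected to encode (an assumption based on the conventional ACPI local distance of 10 and symmetric completion of the unspecified 1->0 entry, not data read from the binary blobs):

    /* Assumed 2x2 SLIT entry matrix for
     *   -numa node -numa node -numa dist,src=0,dst=1,val=21
     */
    static const unsigned char slit_entries[2][2] = {
        /*           to node 0, to node 1 */
        /* from 0 */ { 10,        21 },
        /* from 1 */ { 21,        10 },
    };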
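
Usage note for the new egl-headless display, inferred from the vl.c and ui/egl-headless.c hunks rather than quoted from this series: "-display egl-headless" initialises OpenGL on a DRM render node and registers one DisplayChangeListener per graphical console; each scanout is blitted (flipped when y_0_top is set) into an offscreen framebuffer and read back into the ordinary DisplaySurface with glReadPixels(), so a 2D frontend enabled alongside it, such as "-vnc :0" or Spice, can presumably show guest virgl/OpenGL output without opening any local window.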
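
The ui input changes above replace the single "size" parameter of qemu_input_queue_abs()/qemu_input_scale_axis() with an explicit [min, max] input range, so evdev devices whose absolute axes do not start at 0 (see the EVIOCGABS handling in input-linux.c) are scaled correctly. A self-contained sketch of the arithmetic follows, with the scaling function copied from the hunk; the INPUT_EVENT_ABS_MIN/INPUT_EVENT_ABS_MAX values and the example evdev range are illustrative assumptions:

    #include <stdio.h>
    #include <stdint.h>

    #define INPUT_EVENT_ABS_MIN 0x0000   /* assumed value for the demo */
    #define INPUT_EVENT_ABS_MAX 0x7FFF   /* assumed value for the demo */

    /* Same formula as the new ui/input.c implementation: map [min_in, max_in]
     * linearly onto [min_out, max_out]. */
    static int qemu_input_scale_axis(int value,
                                     int min_in, int max_in,
                                     int min_out, int max_out)
    {
        int64_t range_in = (int64_t)max_in - min_in;
        int64_t range_out = (int64_t)max_out - min_out;

        if (range_in < 1) {
            return min_out + range_out / 2;
        }
        return ((int64_t)value - min_in) * range_out / range_in + min_out;
    }

    int main(void)
    {
        /* A tablet whose EVIOCGABS(ABS_X) reports minimum=1024, maximum=5120. */
        int in[] = { 1024, 3072, 5120 };
        for (int i = 0; i < 3; i++) {
            printf("%d -> %d\n", in[i],
                   qemu_input_scale_axis(in[i], 1024, 5120,
                                         INPUT_EVENT_ABS_MIN,
                                         INPUT_EVENT_ABS_MAX));
        }
        /* prints: 1024 -> 0, 3072 -> 16383, 5120 -> 32767 */
        return 0;
    }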
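
The vnc.c set_pixel_format() hunk swaps hweight_long() for ctpopl(); both count the bits set in the channel maximum, so the derived per-channel depths are unchanged. A tiny demonstration, using the GCC/Clang builtin as a stand-in for QEMU's helper (assumed to come from qemu/host-utils.h):

    #include <stdio.h>

    /* Stand-in for ctpopl(): population count of an unsigned long. */
    static int ctpopl_demo(unsigned long val)
    {
        return __builtin_popcountl(val);
    }

    int main(void)
    {
        /* A 5-6-5 client pixel format advertises these channel maxima. */
        printf("rbits=%d gbits=%d bbits=%d\n",
               ctpopl_demo(0x1ful), ctpopl_demo(0x3ful), ctpopl_demo(0x1ful));
        /* prints: rbits=5 gbits=6 bbits=5 */
        return 0;
    }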