Diffstat (limited to 'hw')
43 files changed, 2922 insertions, 237 deletions
diff --git a/hw/acpi/acpi_generic_initiator.c b/hw/acpi/acpi_generic_initiator.c new file mode 100644 index 0000000000..17b9a052f5 --- /dev/null +++ b/hw/acpi/acpi_generic_initiator.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved + */ + +#include "qemu/osdep.h" +#include "hw/acpi/acpi_generic_initiator.h" +#include "hw/acpi/aml-build.h" +#include "hw/boards.h" +#include "hw/pci/pci_device.h" +#include "qemu/error-report.h" + +typedef struct AcpiGenericInitiatorClass { + ObjectClass parent_class; +} AcpiGenericInitiatorClass; + +OBJECT_DEFINE_TYPE_WITH_INTERFACES(AcpiGenericInitiator, acpi_generic_initiator, + ACPI_GENERIC_INITIATOR, OBJECT, + { TYPE_USER_CREATABLE }, + { NULL }) + +OBJECT_DECLARE_SIMPLE_TYPE(AcpiGenericInitiator, ACPI_GENERIC_INITIATOR) + +static void acpi_generic_initiator_init(Object *obj) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + + gi->node = MAX_NODES; + gi->pci_dev = NULL; +} + +static void acpi_generic_initiator_finalize(Object *obj) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + + g_free(gi->pci_dev); +} + +static void acpi_generic_initiator_set_pci_device(Object *obj, const char *val, + Error **errp) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + + gi->pci_dev = g_strdup(val); +} + +static void acpi_generic_initiator_set_node(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + MachineState *ms = MACHINE(qdev_get_machine()); + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + if (value >= MAX_NODES) { + error_printf("%s: Invalid NUMA node specified\n", + TYPE_ACPI_GENERIC_INITIATOR); + exit(1); + } + + gi->node = value; + ms->numa_state->nodes[gi->node].has_gi = true; +} + +static void acpi_generic_initiator_class_init(ObjectClass *oc, void *data) +{ + object_class_property_add_str(oc, "pci-dev", NULL, + acpi_generic_initiator_set_pci_device); + object_class_property_add(oc, "node", "int", NULL, + acpi_generic_initiator_set_node, NULL, NULL); +} + +/* + * ACPI 6.3: + * Table 5-78 Generic Initiator Affinity Structure + */ +static void +build_srat_generic_pci_initiator_affinity(GArray *table_data, int node, + PCIDeviceHandle *handle) +{ + uint8_t index; + + build_append_int_noprefix(table_data, 5, 1); /* Type */ + build_append_int_noprefix(table_data, 32, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 1); /* Reserved */ + build_append_int_noprefix(table_data, 1, 1); /* Device Handle Type: PCI */ + build_append_int_noprefix(table_data, node, 4); /* Proximity Domain */ + + /* Device Handle - PCI */ + build_append_int_noprefix(table_data, handle->segment, 2); + build_append_int_noprefix(table_data, handle->bdf, 2); + for (index = 0; index < 12; index++) { + build_append_int_noprefix(table_data, 0, 1); + } + + build_append_int_noprefix(table_data, GEN_AFFINITY_ENABLED, 4); /* Flags */ + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ +} + +static int build_all_acpi_generic_initiators(Object *obj, void *opaque) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + AcpiGenericInitiator *gi; + GArray *table_data = opaque; + PCIDeviceHandle dev_handle; + PCIDevice *pci_dev; + Object *o; + + if (!object_dynamic_cast(obj, TYPE_ACPI_GENERIC_INITIATOR)) { + return 0; + } + + gi = ACPI_GENERIC_INITIATOR(obj); + if (gi->node >= ms->numa_state->num_nodes) { + error_printf("%s: Specified node 
%d is invalid.\n", + TYPE_ACPI_GENERIC_INITIATOR, gi->node); + exit(1); + } + + o = object_resolve_path_type(gi->pci_dev, TYPE_PCI_DEVICE, NULL); + if (!o) { + error_printf("%s: Specified device must be a PCI device.\n", + TYPE_ACPI_GENERIC_INITIATOR); + exit(1); + } + + pci_dev = PCI_DEVICE(o); + + dev_handle.segment = 0; + dev_handle.bdf = PCI_BUILD_BDF(pci_bus_num(pci_get_bus(pci_dev)), + pci_dev->devfn); + + build_srat_generic_pci_initiator_affinity(table_data, + gi->node, &dev_handle); + + return 0; +} + +void build_srat_generic_pci_initiator(GArray *table_data) +{ + object_child_foreach_recursive(object_get_root(), + build_all_acpi_generic_initiators, + table_data); +} diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c index 3042d223c8..9b1662b6b8 100644 --- a/hw/acpi/hmat.c +++ b/hw/acpi/hmat.c @@ -78,6 +78,7 @@ static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb, uint32_t *initiator_list) { int i, index; + uint32_t initiator_to_index[MAX_NODES] = {}; HMAT_LB_Data *lb_data; uint16_t *entry_list; uint32_t base; @@ -121,6 +122,8 @@ static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb, /* Initiator Proximity Domain List */ for (i = 0; i < num_initiator; i++) { build_append_int_noprefix(table_data, initiator_list[i], 4); + /* Reverse mapping for array possitions */ + initiator_to_index[initiator_list[i]] = i; } /* Target Proximity Domain List */ @@ -132,7 +135,8 @@ static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb, entry_list = g_new0(uint16_t, num_initiator * num_target); for (i = 0; i < hmat_lb->list->len; i++) { lb_data = &g_array_index(hmat_lb->list, HMAT_LB_Data, i); - index = lb_data->initiator * num_target + lb_data->target; + index = initiator_to_index[lb_data->initiator] * num_target + + lb_data->target; entry_list[index] = (uint16_t)(lb_data->data / hmat_lb->base); } @@ -204,6 +208,13 @@ static void hmat_build_table_structs(GArray *table_data, NumaState *numa_state) build_append_int_noprefix(table_data, 0, 4); /* Reserved */ for (i = 0; i < numa_state->num_nodes; i++) { + /* + * Linux rejects whole HMAT table if a node with no memory + * has one of these structures listing it as a target. 
+ */ + if (!numa_state->nodes[i].node_mem) { + continue; + } flags = 0; if (numa_state->nodes[i].initiator < MAX_NODES) { @@ -214,7 +225,7 @@ static void hmat_build_table_structs(GArray *table_data, NumaState *numa_state) } for (i = 0; i < numa_state->num_nodes; i++) { - if (numa_state->nodes[i].has_cpu) { + if (numa_state->nodes[i].has_cpu || numa_state->nodes[i].has_gi) { initiator_list[num_initiator++] = i; } } diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build index 5441c9b1e4..fa5c07db90 100644 --- a/hw/acpi/meson.build +++ b/hw/acpi/meson.build @@ -1,5 +1,6 @@ acpi_ss = ss.source_set() acpi_ss.add(files( + 'acpi_generic_initiator.c', 'acpi_interface.c', 'aml-build.c', 'bios-linker-loader.c', diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index 6a1bde61ce..c3ccfef026 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -57,6 +57,7 @@ #include "migration/vmstate.h" #include "hw/acpi/ghes.h" #include "hw/acpi/viot.h" +#include "hw/acpi/acpi_generic_initiator.h" #include "hw/virtio/virtio-acpi.h" #include "target/arm/multiprocessing.h" @@ -504,6 +505,8 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) } } + build_srat_generic_pci_initiator(table_data); + if (ms->nvdimms_state->is_enabled) { nvdimm_build_srat(table_data); } diff --git a/hw/arm/virt.c b/hw/arm/virt.c index 0af1943697..e5cd935232 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -85,11 +85,28 @@ #include "hw/char/pl011.h" #include "qemu/guest-random.h" +static GlobalProperty arm_virt_compat[] = { + { TYPE_VIRTIO_IOMMU_PCI, "aw-bits", "48" }, +}; +static const size_t arm_virt_compat_len = G_N_ELEMENTS(arm_virt_compat); + +/* + * This cannot be called from the virt_machine_class_init() because + * TYPE_VIRT_MACHINE is abstract and mc->compat_props g_ptr_array_new() + * only is called on virt non abstract class init. + */ +static void arm_virt_compat_set(MachineClass *mc) +{ + compat_props_add(mc->compat_props, arm_virt_compat, + arm_virt_compat_len); +} + #define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \ static void virt_##major##_##minor##_class_init(ObjectClass *oc, \ void *data) \ { \ MachineClass *mc = MACHINE_CLASS(oc); \ + arm_virt_compat_set(mc); \ virt_machine_##major##_##minor##_options(mc); \ mc->desc = "QEMU " # major "." 
# minor " ARM Virtual Machine"; \ if (latest) { \ diff --git a/hw/audio/virtio-snd.c b/hw/audio/virtio-snd.c index ea2aeaef14..e604d8f30c 100644 --- a/hw/audio/virtio-snd.c +++ b/hw/audio/virtio-snd.c @@ -243,12 +243,13 @@ static void virtio_snd_handle_pcm_info(VirtIOSound *s, memset(&pcm_info[i].padding, 0, 5); } + cmd->payload_size = sizeof(virtio_snd_pcm_info) * count; cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_OK); iov_from_buf(cmd->elem->in_sg, cmd->elem->in_num, sizeof(virtio_snd_hdr), pcm_info, - sizeof(virtio_snd_pcm_info) * count); + cmd->payload_size); } /* @@ -749,7 +750,8 @@ process_cmd(VirtIOSound *s, virtio_snd_ctrl_command *cmd) 0, &cmd->resp, sizeof(virtio_snd_hdr)); - virtqueue_push(cmd->vq, cmd->elem, sizeof(virtio_snd_hdr)); + virtqueue_push(cmd->vq, cmd->elem, + sizeof(virtio_snd_hdr) + cmd->payload_size); virtio_notify(VIRTIO_DEVICE(s), cmd->vq); } @@ -808,6 +810,7 @@ static void virtio_snd_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) cmd->elem = elem; cmd->vq = vq; cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_OK); + /* implicit cmd->payload_size = 0; */ QTAILQ_INSERT_TAIL(&s->cmdq, cmd, next); elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); } diff --git a/hw/core/machine.c b/hw/core/machine.c index f64dc5c432..37ede0e7d4 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -30,10 +30,13 @@ #include "exec/confidential-guest-support.h" #include "hw/virtio/virtio-pci.h" #include "hw/virtio/virtio-net.h" +#include "hw/virtio/virtio-iommu.h" #include "audio/audio.h" GlobalProperty hw_compat_8_2[] = { { "migration", "zero-page-detection", "legacy"}, + { TYPE_VIRTIO_IOMMU_PCI, "granule", "4k" }, + { TYPE_VIRTIO_IOMMU_PCI, "aw-bits", "64" }, }; const size_t hw_compat_8_2_len = G_N_ELEMENTS(hw_compat_8_2); @@ -102,6 +105,7 @@ GlobalProperty hw_compat_5_2[] = { { "PIIX4_PM", "smm-compat", "on"}, { "virtio-blk-device", "report-discard-granularity", "off" }, { "virtio-net-pci-base", "vectors", "3"}, + { "nvme", "msix-exclusive-bar", "on"}, }; const size_t hw_compat_5_2_len = G_N_ELEMENTS(hw_compat_5_2); diff --git a/hw/core/numa.c b/hw/core/numa.c index 81d2124349..f8ce332cfe 100644 --- a/hw/core/numa.c +++ b/hw/core/numa.c @@ -227,7 +227,8 @@ void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node, node->target, numa_state->num_nodes); return; } - if (!numa_info[node->initiator].has_cpu) { + if (!numa_info[node->initiator].has_cpu && + !numa_info[node->initiator].has_gi) { error_setg(errp, "Invalid initiator=%d, it isn't an " "initiator proximity domain", node->initiator); return; diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index f52073b7c8..d79d6f4b53 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -966,7 +966,7 @@ const PropertyInfo qdev_prop_off_auto_pcibar = { .set_default_value = qdev_propinfo_set_default_value_enum, }; -/* --- PCIELinkSpeed 2_5/5/8/16 -- */ +/* --- PCIELinkSpeed 2_5/5/8/16/32/64 -- */ static void get_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) @@ -988,6 +988,12 @@ static void get_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, case QEMU_PCI_EXP_LNK_16GT: speed = PCIE_LINK_SPEED_16; break; + case QEMU_PCI_EXP_LNK_32GT: + speed = PCIE_LINK_SPEED_32; + break; + case QEMU_PCI_EXP_LNK_64GT: + speed = PCIE_LINK_SPEED_64; + break; default: /* Unreachable */ abort(); @@ -1021,6 +1027,12 @@ static void set_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, case PCIE_LINK_SPEED_16: *p = 
QEMU_PCI_EXP_LNK_16GT; break; + case PCIE_LINK_SPEED_32: + *p = QEMU_PCI_EXP_LNK_32GT; + break; + case PCIE_LINK_SPEED_64: + *p = QEMU_PCI_EXP_LNK_64GT; + break; default: /* Unreachable */ abort(); @@ -1029,7 +1041,7 @@ static void set_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, const PropertyInfo qdev_prop_pcie_link_speed = { .name = "PCIELinkSpeed", - .description = "2_5/5/8/16", + .description = "2_5/5/8/16/32/64", .enum_table = &PCIELinkSpeed_lookup, .get = get_prop_pcielinkspeed, .set = set_prop_pcielinkspeed, diff --git a/hw/cxl/cxl-component-utils.c b/hw/cxl/cxl-component-utils.c index 84ab503325..cd116c0401 100644 --- a/hw/cxl/cxl-component-utils.c +++ b/hw/cxl/cxl-component-utils.c @@ -297,6 +297,7 @@ void cxl_component_register_init_common(uint32_t *reg_state, caps = 3; break; case CXL2_ROOT_PORT: + case CXL2_RC: /* + Extended Security, + Snoop */ caps = 5; break; @@ -326,8 +327,19 @@ void cxl_component_register_init_common(uint32_t *reg_state, CXL_##reg##_REGISTERS_OFFSET); \ } while (0) + switch (type) { + case CXL2_DEVICE: + case CXL2_TYPE3_DEVICE: + case CXL2_LOGICAL_DEVICE: + case CXL2_ROOT_PORT: + case CXL2_UPSTREAM_PORT: + case CXL2_DOWNSTREAM_PORT: init_cap_reg(RAS, 2, CXL_RAS_CAPABILITY_VERSION); - ras_init_common(reg_state, write_msk); + ras_init_common(reg_state, write_msk); + break; + default: + break; + } init_cap_reg(LINK, 4, CXL_LINK_CAPABILITY_VERSION); @@ -335,9 +347,10 @@ void cxl_component_register_init_common(uint32_t *reg_state, return; } - init_cap_reg(HDM, 5, CXL_HDM_CAPABILITY_VERSION); - hdm_init_common(reg_state, write_msk, type); - + if (type != CXL2_ROOT_PORT) { + init_cap_reg(HDM, 5, CXL_HDM_CAPABILITY_VERSION); + hdm_init_common(reg_state, write_msk, type); + } if (caps < 5) { return; } diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 15242b9096..53f804ac16 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -68,6 +68,7 @@ #include "hw/acpi/utils.h" #include "hw/acpi/pci.h" #include "hw/acpi/cxl.h" +#include "hw/acpi/acpi_generic_initiator.h" #include "qom/qom-qobject.h" #include "hw/i386/amd_iommu.h" @@ -2046,6 +2047,8 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) build_srat_memory(table_data, 0, 0, 0, MEM_AFFINITY_NOFLAGS); } + build_srat_generic_pci_initiator(table_data); + /* * Entry is required for Windows to enable memory hotplug in OS * and for Linux to enable SWIOTLB when booted with less than diff --git a/hw/i386/pc.c b/hw/i386/pc.c index f5ff970acf..feb7a93083 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -425,9 +425,10 @@ static void set_boot_dev(PCMachineState *pcms, MC146818RtcState *s, static void pc_boot_set(void *opaque, const char *boot_device, Error **errp) { - PCMachineState *pcms = PC_MACHINE(current_machine); + PCMachineState *pcms = opaque; + X86MachineState *x86ms = X86_MACHINE(pcms); - set_boot_dev(pcms, opaque, boot_device, errp); + set_boot_dev(pcms, MC146818_RTC(x86ms->rtc), boot_device, errp); } static void pc_cmos_init_floppy(MC146818RtcState *rtc_state, ISADevice *floppy) @@ -569,14 +570,6 @@ static void pc_cmos_init_late(PCMachineState *pcms) mc146818rtc_set_cmos_data(s, 0x39, val); pc_cmos_init_floppy(s, pc_find_fdc0()); -} - -void pc_cmos_init(PCMachineState *pcms, - ISADevice *rtc) -{ - int val; - X86MachineState *x86ms = X86_MACHINE(pcms); - MC146818RtcState *s = MC146818_RTC(rtc); /* various important CMOS locations needed by PC/Bochs bios */ @@ -613,22 +606,10 @@ void pc_cmos_init(PCMachineState *pcms, mc146818rtc_set_cmos_data(s, 0x5c, 
val >> 8); mc146818rtc_set_cmos_data(s, 0x5d, val >> 16); - object_property_add_link(OBJECT(pcms), "rtc_state", - TYPE_ISA_DEVICE, - (Object **)&x86ms->rtc, - object_property_allow_set_link, - OBJ_PROP_LINK_STRONG); - object_property_set_link(OBJECT(pcms), "rtc_state", OBJECT(s), - &error_abort); - - set_boot_dev(pcms, s, MACHINE(pcms)->boot_config.order, &error_fatal); - val = 0; val |= 0x02; /* FPU is there */ val |= 0x04; /* PS/2 mouse installed */ mc146818rtc_set_cmos_data(s, REG_EQUIPMENT_BYTE, val); - - /* hard drives and FDC are handled by pc_cmos_init_late() */ } static void handle_a20_line_change(void *opaque, int irq, int level) @@ -718,7 +699,8 @@ void xen_load_linux(PCMachineState *pcms) assert(MACHINE(pcms)->kernel_filename != NULL); - fw_cfg = fw_cfg_init_io(FW_CFG_IO_BASE); + fw_cfg = fw_cfg_init_io_dma(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4, + &address_space_memory); fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); rom_set_fw(fw_cfg); @@ -1260,7 +1242,9 @@ void pc_basic_device_init(struct PCMachineState *pcms, } #endif - qemu_register_boot_set(pc_boot_set, rtc_state); + qemu_register_boot_set(pc_boot_set, pcms); + set_boot_dev(pcms, MC146818_RTC(rtc_state), + MACHINE(pcms)->boot_config.order, &error_fatal); if (!xen_enabled() && (x86ms->pit == ON_OFF_AUTO_AUTO || x86ms->pit == ON_OFF_AUTO_ON)) { @@ -1750,6 +1734,7 @@ static void pc_machine_initfn(Object *obj) pcms->fd_bootchk = true; pcms->default_bus_bypass_iommu = false; + pc_system_flash_create(pcms); pcms->pcspk = isa_new(TYPE_PC_SPEAKER); object_property_add_alias(OBJECT(pcms), "pcspk-audiodev", OBJECT(pcms->pcspk), "audiodev"); diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index 319bc4b180..c9a6c0aa68 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -228,6 +228,7 @@ static void pc_init1(MachineState *machine, const char *pci_type) assert(machine->ram_size == x86ms->below_4g_mem_size + x86ms->above_4g_mem_size); + pc_system_flash_cleanup_unused(pcms); if (machine->kernel_filename != NULL) { /* For xen HVM direct kernel boot, load linux here */ xen_load_linux(pcms); @@ -343,8 +344,6 @@ static void pc_init1(MachineState *machine, const char *pci_type) } #endif - pc_cmos_init(pcms, x86ms->rtc); - if (piix4_pm) { smi_irq = qemu_allocate_irq(pc_acpi_smi_interrupt, first_cpu, 0); diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 45a4102e75..8a427c4647 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -45,6 +45,7 @@ #include "hw/i386/pc.h" #include "hw/i386/amd_iommu.h" #include "hw/i386/intel_iommu.h" +#include "hw/virtio/virtio-iommu.h" #include "hw/display/ramfb.h" #include "hw/ide/pci.h" #include "hw/ide/ahci-pci.h" @@ -63,6 +64,12 @@ /* ICH9 AHCI has 6 ports */ #define MAX_SATA_PORTS 6 +static GlobalProperty pc_q35_compat_defaults[] = { + { TYPE_VIRTIO_IOMMU_PCI, "aw-bits", "39" }, +}; +static const size_t pc_q35_compat_defaults_len = + G_N_ELEMENTS(pc_q35_compat_defaults); + struct ehci_companions { const char *name; int func; @@ -311,8 +318,6 @@ static void pc_q35_init(MachineState *machine) smbus_eeprom_init(pcms->smbus, 8, NULL, 0); } - pc_cmos_init(pcms, x86ms->rtc); - /* the rest devices to which pci devfn is automatically assigned */ pc_vga_init(isa_bus, pcms->pcibus); pc_nic_init(pcmc, isa_bus, pcms->pcibus); @@ -350,12 +355,14 @@ static void pc_q35_machine_options(MachineClass *m) m->default_nic = "e1000e"; m->default_kernel_irqchip_split = false; m->no_floppy = 1; - m->max_cpus = 1024; + m->max_cpus = 4096; m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); 
machine_class_allow_dynamic_sysbus_dev(m, TYPE_AMD_IOMMU_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_INTEL_IOMMU_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE); + compat_props_add(m->compat_props, + pc_q35_compat_defaults, pc_q35_compat_defaults_len); } static void pc_q35_9_0_machine_options(MachineClass *m) @@ -371,6 +378,7 @@ static void pc_q35_8_2_machine_options(MachineClass *m) { pc_q35_9_0_machine_options(m); m->alias = NULL; + m->max_cpus = 1024; compat_props_add(m->compat_props, hw_compat_8_2, hw_compat_8_2_len); compat_props_add(m->compat_props, pc_compat_8_2, pc_compat_8_2_len); } diff --git a/hw/i386/pc_sysfw.c b/hw/i386/pc_sysfw.c index b02e285579..3efabbbab2 100644 --- a/hw/i386/pc_sysfw.c +++ b/hw/i386/pc_sysfw.c @@ -91,7 +91,19 @@ static PFlashCFI01 *pc_pflash_create(PCMachineState *pcms, return PFLASH_CFI01(dev); } -static void pc_system_flash_cleanup_unused(PCMachineState *pcms) +void pc_system_flash_create(PCMachineState *pcms) +{ + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + + if (pcmc->pci_enabled) { + pcms->flash[0] = pc_pflash_create(pcms, "system.flash0", + "pflash0"); + pcms->flash[1] = pc_pflash_create(pcms, "system.flash1", + "pflash1"); + } +} + +void pc_system_flash_cleanup_unused(PCMachineState *pcms) { char *prop_name; int i; @@ -198,9 +210,6 @@ void pc_system_firmware_init(PCMachineState *pcms, return; } - pcms->flash[0] = pc_pflash_create(pcms, "system.flash0", "pflash0"); - pcms->flash[1] = pc_pflash_create(pcms, "system.flash1", "pflash1"); - /* Map legacy -drive if=pflash to machine properties */ for (i = 0; i < ARRAY_SIZE(pcms->flash); i++) { pflash_cfi01_legacy_drive(pcms->flash[i], diff --git a/hw/net/igb.c b/hw/net/igb.c index 0b5c31a58b..9b37523d6d 100644 --- a/hw/net/igb.c +++ b/hw/net/igb.c @@ -488,12 +488,10 @@ static void igb_pci_uninit(PCIDevice *pci_dev) static void igb_qdev_reset_hold(Object *obj) { - PCIDevice *d = PCI_DEVICE(obj); IGBState *s = IGB(obj); trace_e1000e_cb_qdev_reset_hold(); - pcie_sriov_pf_disable_vfs(d); igb_core_reset(&s->core); } diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 403a693baf..9959f1932b 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -2039,6 +2039,22 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, goto err; } + /* Mark dirty page's bitmap of guest memory */ + if (vdev->lm_logging_ctrl == LM_ENABLE) { + uint64_t chunk = elem->in_addr[i] / VHOST_LOG_CHUNK; + /* Get chunk index */ + BitmapMemoryRegionCaches *caches = qatomic_rcu_read(&vdev->caches); + uint64_t index = chunk / 8; + uint64_t shift = chunk % 8; + uint8_t val = 0; + address_space_read_cached(&caches->bitmap, index, &val, + sizeof(val)); + val |= 1 << shift; + address_space_write_cached(&caches->bitmap, index, &val, + sizeof(val)); + address_space_cache_invalidate(&caches->bitmap, index, sizeof(val)); + } + elems[i] = elem; lens[i] = total; i++; diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index 76fe039704..c2b17de987 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -2855,7 +2855,7 @@ static inline uint16_t nvme_check_copy_mcl(NvmeNamespace *ns, uint32_t nlb; nvme_copy_source_range_parse(iocb->ranges, idx, iocb->format, NULL, &nlb, NULL, NULL, NULL); - copy_len += nlb + 1; + copy_len += nlb; } if (copy_len > ns->id_ns.mcl) { @@ -5642,6 +5642,10 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req) } QEMU_PACKED uuid = {}; struct { NvmeIdNsDescr hdr; + uint8_t 
v[NVME_NIDL_NGUID]; + } QEMU_PACKED nguid = {}; + struct { + NvmeIdNsDescr hdr; uint64_t v; } QEMU_PACKED eui64 = {}; struct { @@ -5668,6 +5672,14 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req) pos += sizeof(uuid); } + if (!nvme_nguid_is_null(&ns->params.nguid)) { + nguid.hdr.nidt = NVME_NIDT_NGUID; + nguid.hdr.nidl = NVME_NIDL_NGUID; + memcpy(nguid.v, ns->params.nguid.data, NVME_NIDL_NGUID); + memcpy(pos, &nguid, sizeof(nguid)); + pos += sizeof(nguid); + } + if (ns->params.eui64) { eui64.hdr.nidt = NVME_NIDT_EUI64; eui64.hdr.nidl = NVME_NIDL_EUI64; @@ -7114,10 +7126,6 @@ static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst) sctrl = &n->sec_ctrl_list.sec[i]; nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); } - - if (rst != NVME_RESET_CONTROLLER) { - pcie_sriov_pf_disable_vfs(pci_dev); - } } if (rst != NVME_RESET_CONTROLLER) { @@ -7798,6 +7806,11 @@ static bool nvme_check_params(NvmeCtrl *n, Error **errp) } if (n->pmr.dev) { + if (params->msix_exclusive_bar) { + error_setg(errp, "not enough BARs available to enable PMR"); + return false; + } + if (host_memory_backend_is_mapped(n->pmr.dev)) { error_setg(errp, "can't use already busy memdev: %s", object_get_canonical_path_component(OBJECT(n->pmr.dev))); @@ -8003,13 +8016,18 @@ static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev) memory_region_set_enabled(&n->pmr.dev->mr, false); } -static uint64_t nvme_bar_size(unsigned total_queues, unsigned total_irqs, - unsigned *msix_table_offset, - unsigned *msix_pba_offset) +static uint64_t nvme_mbar_size(unsigned total_queues, unsigned total_irqs, + unsigned *msix_table_offset, + unsigned *msix_pba_offset) { - uint64_t bar_size, msix_table_size, msix_pba_size; + uint64_t bar_size, msix_table_size; bar_size = sizeof(NvmeBar) + 2 * total_queues * NVME_DB_SIZE; + + if (total_irqs == 0) { + goto out; + } + bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB); if (msix_table_offset) { @@ -8024,11 +8042,10 @@ static uint64_t nvme_bar_size(unsigned total_queues, unsigned total_irqs, *msix_pba_offset = bar_size; } - msix_pba_size = QEMU_ALIGN_UP(total_irqs, 64) / 8; - bar_size += msix_pba_size; + bar_size += QEMU_ALIGN_UP(total_irqs, 64) / 8; - bar_size = pow2ceil(bar_size); - return bar_size; +out: + return pow2ceil(bar_size); } static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset) @@ -8036,7 +8053,7 @@ static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset) uint16_t vf_dev_id = n->params.use_intel_id ? 
PCI_DEVICE_ID_INTEL_NVME : PCI_DEVICE_ID_REDHAT_NVME; NvmePriCtrlCap *cap = &n->pri_ctrl_cap; - uint64_t bar_size = nvme_bar_size(le16_to_cpu(cap->vqfrsm), + uint64_t bar_size = nvme_mbar_size(le16_to_cpu(cap->vqfrsm), le16_to_cpu(cap->vifrsm), NULL, NULL); @@ -8075,7 +8092,7 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) ERRP_GUARD(); uint8_t *pci_conf = pci_dev->config; uint64_t bar_size; - unsigned msix_table_offset, msix_pba_offset; + unsigned msix_table_offset = 0, msix_pba_offset = 0; int ret; pci_conf[PCI_INTERRUPT_PIN] = 1; @@ -8097,24 +8114,38 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) pcie_ari_init(pci_dev, 0x100); } - /* add one to max_ioqpairs to account for the admin queue pair */ - bar_size = nvme_bar_size(n->params.max_ioqpairs + 1, n->params.msix_qsize, - &msix_table_offset, &msix_pba_offset); + if (n->params.msix_exclusive_bar && !pci_is_vf(pci_dev)) { + bar_size = nvme_mbar_size(n->params.max_ioqpairs + 1, 0, NULL, NULL); + memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", + bar_size); + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem); + ret = msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp); + } else { + assert(n->params.msix_qsize >= 1); - memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size); - memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", - msix_table_offset); - memory_region_add_subregion(&n->bar0, 0, &n->iomem); + /* add one to max_ioqpairs to account for the admin queue pair */ + bar_size = nvme_mbar_size(n->params.max_ioqpairs + 1, + n->params.msix_qsize, &msix_table_offset, + &msix_pba_offset); - if (pci_is_vf(pci_dev)) { - pcie_sriov_vf_register_bar(pci_dev, 0, &n->bar0); - } else { - pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | - PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); + memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size); + memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", + msix_table_offset); + memory_region_add_subregion(&n->bar0, 0, &n->iomem); + + if (pci_is_vf(pci_dev)) { + pcie_sriov_vf_register_bar(pci_dev, 0, &n->bar0); + } else { + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); + } + + ret = msix_init(pci_dev, n->params.msix_qsize, + &n->bar0, 0, msix_table_offset, + &n->bar0, 0, msix_pba_offset, 0, errp); } - ret = msix_init(pci_dev, n->params.msix_qsize, - &n->bar0, 0, msix_table_offset, - &n->bar0, 0, msix_pba_offset, 0, errp); + if (ret == -ENOTSUP) { /* report that msix is not supported, but do not error out */ warn_report_err(*errp); @@ -8309,9 +8340,15 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp) if (pci_is_vf(pci_dev)) { /* * VFs derive settings from the parent. PF's lifespan exceeds - * that of VF's, so it's safe to share params.serial. + * that of VF's. */ memcpy(&n->params, &pn->params, sizeof(NvmeParams)); + + /* + * Set PF's serial value to a new string memory to prevent 'serial' + * property object release of PF when a VF is removed from the system. 
+ */ + n->params.serial = g_strdup(pn->params.serial); n->subsys = pn->subsys; } @@ -8412,6 +8449,8 @@ static Property nvme_props[] = { params.sriov_max_vi_per_vf, 0), DEFINE_PROP_UINT8("sriov_max_vq_per_vf", NvmeCtrl, params.sriov_max_vq_per_vf, 0), + DEFINE_PROP_BOOL("msix-exclusive-bar", NvmeCtrl, params.msix_exclusive_bar, + false), DEFINE_PROP_END_OF_LIST(), }; @@ -8466,36 +8505,26 @@ static void nvme_pci_reset(DeviceState *qdev) nvme_ctrl_reset(n, NVME_RESET_FUNCTION); } -static void nvme_sriov_pre_write_ctrl(PCIDevice *dev, uint32_t address, - uint32_t val, int len) +static void nvme_sriov_post_write_config(PCIDevice *dev, uint16_t old_num_vfs) { NvmeCtrl *n = NVME(dev); NvmeSecCtrlEntry *sctrl; - uint16_t sriov_cap = dev->exp.sriov_cap; - uint32_t off = address - sriov_cap; - int i, num_vfs; + int i; - if (!sriov_cap) { - return; - } - - if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) { - if (!(val & PCI_SRIOV_CTRL_VFE)) { - num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF); - for (i = 0; i < num_vfs; i++) { - sctrl = &n->sec_ctrl_list.sec[i]; - nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); - } - } + for (i = pcie_sriov_num_vfs(dev); i < old_num_vfs; i++) { + sctrl = &n->sec_ctrl_list.sec[i]; + nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); } } static void nvme_pci_write_config(PCIDevice *dev, uint32_t address, uint32_t val, int len) { - nvme_sriov_pre_write_ctrl(dev, address, val, len); + uint16_t old_num_vfs = pcie_sriov_num_vfs(dev); + pci_default_write_config(dev, address, val, len); pcie_cap_flr_write_config(dev, address, val, len); + nvme_sriov_post_write_config(dev, old_num_vfs); } static const VMStateDescription nvme_vmstate = { diff --git a/hw/nvme/meson.build b/hw/nvme/meson.build index 1a6a2ca2f3..7d5caa53c2 100644 --- a/hw/nvme/meson.build +++ b/hw/nvme/meson.build @@ -1 +1 @@ -system_ss.add(when: 'CONFIG_NVME_PCI', if_true: files('ctrl.c', 'dif.c', 'ns.c', 'subsys.c')) +system_ss.add(when: 'CONFIG_NVME_PCI', if_true: files('ctrl.c', 'dif.c', 'ns.c', 'subsys.c', 'nguid.c')) \ No newline at end of file diff --git a/hw/nvme/nguid.c b/hw/nvme/nguid.c new file mode 100644 index 0000000000..829832bd9f --- /dev/null +++ b/hw/nvme/nguid.c @@ -0,0 +1,187 @@ +/* + * QEMU NVMe NGUID functions + * + * Copyright 2024 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/visitor.h" +#include "qemu/ctype.h" +#include "nvme.h" + +#define NGUID_SEPARATOR '-' + +#define NGUID_VALUE_AUTO "auto" + +#define NGUID_FMT \ + "%02hhx%02hhx%02hhx%02hhx" \ + "%02hhx%02hhx%02hhx%02hhx" \ + "%02hhx%02hhx%02hhx%02hhx" \ + "%02hhx%02hhx%02hhx%02hhx" + +#define NGUID_STR_LEN (2 * NGUID_LEN + 1) + +bool nvme_nguid_is_null(const NvmeNGUID *nguid) +{ + static NvmeNGUID null_nguid; + return memcmp(nguid, &null_nguid, sizeof(NvmeNGUID)) == 0; +} + +static void nvme_nguid_generate(NvmeNGUID *out) +{ + int i; + uint32_t x; + + QEMU_BUILD_BUG_ON((NGUID_LEN % sizeof(x)) != 0); + + for (i = 0; i < NGUID_LEN; i += sizeof(x)) { + x = g_random_int(); + memcpy(&out->data[i], &x, sizeof(x)); + } +} + +/* + * The Linux Kernel typically prints the NGUID of an NVMe namespace using the + * same format as the UUID. For instance: + * + * $ cat /sys/class/block/nvme0n1/nguid + * e9accd3b-8390-4e13-167c-f0593437f57d + * + * When there is no UUID but there is NGUID the Kernel will print the NGUID as + * wwid and it won't use the UUID format: + * + * $ cat /sys/class/block/nvme0n1/wwid + * eui.e9accd3b83904e13167cf0593437f57d + * + * The NGUID has different fields compared to the UUID, so the grouping used in + * the UUID format has no relation with the 3 fields of the NGUID. + * + * This implementation won't expect a strict format as the UUID one and instead + * it will admit any string of hexadecimal digits. Byte groups could be created + * using the '-' separator. The number of bytes needs to be exactly 16 and the + * separator '-' has to be exactly in a byte boundary. The following are + * examples of accepted formats for the NGUID string: + * + * nguid="e9accd3b-8390-4e13-167c-f0593437f57d" + * nguid="e9accd3b83904e13167cf0593437f57d" + * nguid="FEDCBA9876543210-ABCDEF-0123456789" + */ +static bool nvme_nguid_is_valid(const char *str) +{ + int i; + int digit_count = 0; + + for (i = 0; i < strlen(str); i++) { + const char c = str[i]; + if (qemu_isxdigit(c)) { + digit_count++; + continue; + } + if (c == NGUID_SEPARATOR) { + /* + * We need to make sure the separator is in a byte boundary, the + * string does not start with the separator and they are not back to + * back "--". 
+ */ + if ((i > 0) && (str[i - 1] != NGUID_SEPARATOR) && + (digit_count % 2) == 0) { + continue; + } + } + return false; + } + /* + * The string should have the correct byte length and not finish with the + * separator + */ + return (digit_count == (2 * NGUID_LEN)) && (str[i - 1] != NGUID_SEPARATOR); +} + +static int nvme_nguid_parse(const char *str, NvmeNGUID *nguid) +{ + uint8_t *id = &nguid->data[0]; + int ret = 0; + int i; + const char *ptr = str; + + if (!nvme_nguid_is_valid(str)) { + return -1; + } + + for (i = 0; i < NGUID_LEN; i++) { + ret = sscanf(ptr, "%02hhx", &id[i]); + if (ret != 1) { + return -1; + } + ptr += 2; + if (*ptr == NGUID_SEPARATOR) { + ptr++; + } + } + + return 0; +} + +/* + * When converted back to string this implementation will use a raw hex number + * with no separators, for instance: + * + * "e9accd3b83904e13167cf0593437f57d" + */ +static void nvme_nguid_stringify(const NvmeNGUID *nguid, char *out) +{ + const uint8_t *id = &nguid->data[0]; + snprintf(out, NGUID_STR_LEN, NGUID_FMT, + id[0], id[1], id[2], id[3], id[4], id[5], id[6], id[7], + id[8], id[9], id[10], id[11], id[12], id[13], id[14], id[15]); +} + +static void get_nguid(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + NvmeNGUID *nguid = object_field_prop_ptr(obj, prop); + char buffer[NGUID_STR_LEN]; + char *p = buffer; + + nvme_nguid_stringify(nguid, buffer); + + visit_type_str(v, name, &p, errp); +} + +static void set_nguid(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + NvmeNGUID *nguid = object_field_prop_ptr(obj, prop); + char *str; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + + if (!strcmp(str, NGUID_VALUE_AUTO)) { + nvme_nguid_generate(nguid); + } else if (nvme_nguid_parse(str, nguid) < 0) { + error_set_from_qdev_prop_error(errp, EINVAL, obj, name, str); + } + g_free(str); +} + +const PropertyInfo qdev_prop_nguid = { + .name = "str", + .description = + "NGUID or \"" NGUID_VALUE_AUTO "\" for random value", + .get = get_nguid, + .set = set_nguid, +}; diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c index 0eabcf5cf5..ea8db175db 100644 --- a/hw/nvme/ns.c +++ b/hw/nvme/ns.c @@ -89,6 +89,7 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp) id_ns->mcl = cpu_to_le32(ns->params.mcl); id_ns->msrc = ns->params.msrc; id_ns->eui64 = cpu_to_be64(ns->params.eui64); + memcpy(&id_ns->nguid, &ns->params.nguid.data, sizeof(id_ns->nguid)); ds = 31 - clz32(ns->blkconf.logical_block_size); ms = ns->params.ms; @@ -797,6 +798,7 @@ static Property nvme_ns_props[] = { DEFINE_PROP_BOOL("shared", NvmeNamespace, params.shared, true), DEFINE_PROP_UINT32("nsid", NvmeNamespace, params.nsid, 0), DEFINE_PROP_UUID_NODEFAULT("uuid", NvmeNamespace, params.uuid), + DEFINE_PROP_NGUID_NODEFAULT("nguid", NvmeNamespace, params.nguid), DEFINE_PROP_UINT64("eui64", NvmeNamespace, params.eui64, 0), DEFINE_PROP_UINT16("ms", NvmeNamespace, params.ms, 0), DEFINE_PROP_UINT8("mset", NvmeNamespace, params.mset, 0), diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h index 5f2ae7b28b..bed8191bd5 100644 --- a/hw/nvme/nvme.h +++ b/hw/nvme/nvme.h @@ -171,13 +171,27 @@ static const uint8_t nvme_fdp_evf_shifts[FDP_EVT_MAX] = { [FDP_EVT_RUH_IMPLICIT_RU_CHANGE] = 33, }; +#define NGUID_LEN 16 + +typedef struct { + uint8_t data[NGUID_LEN]; +} NvmeNGUID; + +bool nvme_nguid_is_null(const NvmeNGUID *nguid); + +extern const PropertyInfo qdev_prop_nguid; + +#define DEFINE_PROP_NGUID_NODEFAULT(_name, _state, _field) \ + DEFINE_PROP(_name, 
_state, _field, qdev_prop_nguid, NvmeNGUID) + typedef struct NvmeNamespaceParams { - bool detached; - bool shared; - uint32_t nsid; - QemuUUID uuid; - uint64_t eui64; - bool eui64_default; + bool detached; + bool shared; + uint32_t nsid; + QemuUUID uuid; + NvmeNGUID nguid; + uint64_t eui64; + bool eui64_default; uint16_t ms; uint8_t mset; @@ -522,6 +536,7 @@ typedef struct NvmeParams { uint16_t sriov_vi_flexible; uint8_t sriov_max_vq_per_vf; uint8_t sriov_max_vi_per_vf; + bool msix_exclusive_bar; } NvmeParams; typedef struct NvmeCtrl { diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c index 535889f7c2..0411ad31ea 100644 --- a/hw/pci-bridge/pci_expander_bridge.c +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -290,7 +290,7 @@ static void pxb_cxl_dev_reset(DeviceState *dev) uint32_t *write_msk = cxl_cstate->crb.cache_mem_regs_write_mask; int dsp_count = 0; - cxl_component_register_init_common(reg_state, write_msk, CXL2_ROOT_PORT); + cxl_component_register_init_common(reg_state, write_msk, CXL2_RC); /* * The CXL specification allows for host bridges with no HDM decoders * if they only have a single root port. diff --git a/hw/pci/pci.c b/hw/pci/pci.c index 6496d027ca..e7a39cb203 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -409,6 +409,7 @@ static void pci_do_device_reset(PCIDevice *dev) msi_reset(dev); msix_reset(dev); + pcie_sriov_pf_reset(dev); } /* diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index f56079acf5..4b2f0805c6 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -171,6 +171,14 @@ static void pcie_cap_fill_slot_lnk(PCIDevice *dev) pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, PCI_EXP_LNKCAP2_SLS_16_0GB); } + if (s->speed > QEMU_PCI_EXP_LNK_16GT) { + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, + PCI_EXP_LNKCAP2_SLS_32_0GB); + } + if (s->speed > QEMU_PCI_EXP_LNK_32GT) { + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, + PCI_EXP_LNKCAP2_SLS_64_0GB); + } } } diff --git a/hw/pci/pcie_sriov.c b/hw/pci/pcie_sriov.c index a1fe65f5d8..e9b23221d7 100644 --- a/hw/pci/pcie_sriov.c +++ b/hw/pci/pcie_sriov.c @@ -176,6 +176,9 @@ static void register_vfs(PCIDevice *dev) assert(sriov_cap > 0); num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF); + if (num_vfs > pci_get_word(dev->config + sriov_cap + PCI_SRIOV_TOTAL_VF)) { + return; + } dev->exp.sriov_pf.vf = g_new(PCIDevice *, num_vfs); @@ -212,7 +215,6 @@ static void unregister_vfs(PCIDevice *dev) g_free(dev->exp.sriov_pf.vf); dev->exp.sriov_pf.vf = NULL; dev->exp.sriov_pf.num_vfs = 0; - pci_set_word(dev->config + dev->exp.sriov_cap + PCI_SRIOV_NUM_VF, 0); } void pcie_sriov_config_write(PCIDevice *dev, uint32_t address, @@ -246,16 +248,28 @@ void pcie_sriov_config_write(PCIDevice *dev, uint32_t address, } -/* Reset SR/IOV VF Enable bit to trigger an unregister of all VFs */ -void pcie_sriov_pf_disable_vfs(PCIDevice *dev) +/* Reset SR/IOV */ +void pcie_sriov_pf_reset(PCIDevice *dev) { uint16_t sriov_cap = dev->exp.sriov_cap; - if (sriov_cap) { - uint32_t val = pci_get_byte(dev->config + sriov_cap + PCI_SRIOV_CTRL); - if (val & PCI_SRIOV_CTRL_VFE) { - val &= ~PCI_SRIOV_CTRL_VFE; - pcie_sriov_config_write(dev, sriov_cap + PCI_SRIOV_CTRL, val, 1); - } + if (!sriov_cap) { + return; + } + + pci_set_word(dev->config + sriov_cap + PCI_SRIOV_CTRL, 0); + unregister_vfs(dev); + + pci_set_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF, 0); + + /* + * Default is to use 4K pages, software can modify it + * to any of the supported bits + */ + pci_set_word(dev->config + sriov_cap + 
PCI_SRIOV_SYS_PGSIZE, 0x1); + + for (uint16_t i = 0; i < PCI_NUM_REGIONS; i++) { + pci_set_quad(dev->config + sriov_cap + PCI_SRIOV_BAR + i * 4, + dev->exp.sriov_pf.vf_bar_type[i]); } } diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index c2f2cc27be..6e3a5ccdec 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -133,7 +133,7 @@ static int get_cpus_node(void *fdt) * device tree, used in XSCOM to address cores and in interrupt * servers. */ -static void pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt) +static int pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt) { PowerPCCPU *cpu = pc->threads[0]; CPUState *cs = CPU(cpu); @@ -141,32 +141,31 @@ static void pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt) int smt_threads = CPU_CORE(pc)->nr_threads; CPUPPCState *env = &cpu->env; PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); + PnvChipClass *pnv_cc = PNV_CHIP_GET_CLASS(chip); g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads); int i; + uint32_t pir; uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), 0xffffffff, 0xffffffff}; uint32_t tbfreq = PNV_TIMEBASE_FREQ; uint32_t cpufreq = 1000000000; uint32_t page_sizes_prop[64]; size_t page_sizes_prop_size; - const uint8_t pa_features[] = { 24, 0, - 0xf6, 0x3f, 0xc7, 0xc0, 0x80, 0xf0, - 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, - 0x80, 0x00, 0x80, 0x00, 0x80, 0x00 }; int offset; char *nodename; int cpus_offset = get_cpus_node(fdt); - nodename = g_strdup_printf("%s@%x", dc->fw_name, pc->pir); + pir = pnv_cc->chip_pir(chip, pc->hwid, 0); + + nodename = g_strdup_printf("%s@%x", dc->fw_name, pir); offset = fdt_add_subnode(fdt, cpus_offset, nodename); _FDT(offset); g_free(nodename); _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", chip->chip_id))); - _FDT((fdt_setprop_cell(fdt, offset, "reg", pc->pir))); - _FDT((fdt_setprop_cell(fdt, offset, "ibm,pir", pc->pir))); + _FDT((fdt_setprop_cell(fdt, offset, "reg", pir))); + _FDT((fdt_setprop_cell(fdt, offset, "ibm,pir", pir))); _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu"))); _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR]))); @@ -236,20 +235,21 @@ static void pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt) page_sizes_prop, page_sizes_prop_size))); } - _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", - pa_features, sizeof(pa_features)))); - /* Build interrupt servers properties */ for (i = 0; i < smt_threads; i++) { - servers_prop[i] = cpu_to_be32(pc->pir + i); + servers_prop[i] = cpu_to_be32(pnv_cc->chip_pir(chip, pc->hwid, i)); } _FDT((fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s", servers_prop, sizeof(*servers_prop) * smt_threads))); + + return offset; } -static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t pir, +static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t hwid, uint32_t nr_threads) { + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip); + uint32_t pir = pcc->chip_pir(chip, hwid, 0); uint64_t addr = PNV_ICP_BASE(chip) | (pir << 12); char *name; const char compat[] = "IBM,power8-icp\0IBM,ppc-xicp"; @@ -263,6 +263,7 @@ static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t pir, rsize = sizeof(uint64_t) * 2 * nr_threads; reg = g_malloc(rsize); for (i = 0; i < nr_threads; i++) { + /* We know P8 PIR is linear with thread id */ reg[i * 2] = cpu_to_be64(addr | ((pir + i) * 0x1000)); reg[i * 2 + 1] = cpu_to_be64(0x1000); } @@ -299,6 +300,17 @@ PnvChip *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb) return chip; } +/* + * Same as spapr pa_features_207 except pnv always enables CI largepages bit. 
+ * HTM is always enabled because TCG does implement HTM, it's just a + * degenerate implementation. + */ +static const uint8_t pa_features_207[] = { 24, 0, + 0xf6, 0x3f, 0xc7, 0xc0, 0x00, 0xf0, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00 }; + static void pnv_chip_power8_dt_populate(PnvChip *chip, void *fdt) { static const char compat[] = "ibm,power8-xscom\0ibm,xscom"; @@ -311,11 +323,15 @@ static void pnv_chip_power8_dt_populate(PnvChip *chip, void *fdt) for (i = 0; i < chip->nr_cores; i++) { PnvCore *pnv_core = chip->cores[i]; + int offset; + + offset = pnv_dt_core(chip, pnv_core, fdt); - pnv_dt_core(chip, pnv_core, fdt); + _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", + pa_features_207, sizeof(pa_features_207)))); /* Interrupt Control Presenters (ICP). One per core. */ - pnv_dt_icp(chip, fdt, pnv_core->pir, CPU_CORE(pnv_core)->nr_threads); + pnv_dt_icp(chip, fdt, pnv_core->hwid, CPU_CORE(pnv_core)->nr_threads); } if (chip->ram_size) { @@ -323,6 +339,35 @@ static void pnv_chip_power8_dt_populate(PnvChip *chip, void *fdt) } } +/* + * Same as spapr pa_features_300 except pnv always enables CI largepages bit. + */ +static const uint8_t pa_features_300[] = { 66, 0, + /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: CILRG|fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ + /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */ + 0xf6, 0x3f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */ + /* 6: DS207 */ + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ + /* 16: Vector */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ + /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 18 - 23 */ + /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ + /* 32: LE atomic, 34: EBB + ext EBB */ + 0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ + /* 40: Radix MMU */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */ + /* 42: PM, 44: PC RA, 46: SC vec'd */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ + /* 48: SIMD, 50: QP BFP, 52: String */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */ + /* 54: DecFP, 56: DecI, 58: SHA */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ + /* 60: NM atomic, 62: RNG */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ +}; + static void pnv_chip_power9_dt_populate(PnvChip *chip, void *fdt) { static const char compat[] = "ibm,power9-xscom\0ibm,xscom"; @@ -335,8 +380,12 @@ static void pnv_chip_power9_dt_populate(PnvChip *chip, void *fdt) for (i = 0; i < chip->nr_cores; i++) { PnvCore *pnv_core = chip->cores[i]; + int offset; - pnv_dt_core(chip, pnv_core, fdt); + offset = pnv_dt_core(chip, pnv_core, fdt); + + _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", + pa_features_300, sizeof(pa_features_300)))); } if (chip->ram_size) { @@ -346,6 +395,40 @@ static void pnv_chip_power9_dt_populate(PnvChip *chip, void *fdt) pnv_dt_lpc(chip, fdt, 0, PNV9_LPCM_BASE(chip), PNV9_LPCM_SIZE); } +/* + * Same as spapr pa_features_31 except pnv always enables CI largepages bit, + * always disables copy/paste. + */ +static const uint8_t pa_features_31[] = { 74, 0, + /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: CILRG|fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ + /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */ + 0xf6, 0x3f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */ + /* 6: DS207 */ + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ + /* 16: Vector */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ + /* 18: Vec. Scalar, 20: Vec. 
XOR */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ + /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ + /* 32: LE atomic, 34: EBB + ext EBB */ + 0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ + /* 40: Radix MMU */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */ + /* 42: PM, 44: PC RA, 46: SC vec'd */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ + /* 48: SIMD, 50: QP BFP, 52: String */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */ + /* 54: DecFP, 56: DecI, 58: SHA */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ + /* 60: NM atomic, 62: RNG */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ + /* 68: DEXCR[SBHE|IBRTPDUS|SRAPD|NPHIE|PHIE] */ + 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 66 - 71 */ + /* 72: [P]HASHST/[P]HASHCHK */ + 0x80, 0x00, /* 72 - 73 */ +}; + static void pnv_chip_power10_dt_populate(PnvChip *chip, void *fdt) { static const char compat[] = "ibm,power10-xscom\0ibm,xscom"; @@ -358,8 +441,12 @@ static void pnv_chip_power10_dt_populate(PnvChip *chip, void *fdt) for (i = 0; i < chip->nr_cores; i++) { PnvCore *pnv_core = chip->cores[i]; + int offset; + + offset = pnv_dt_core(chip, pnv_core, fdt); - pnv_dt_core(chip, pnv_core, fdt); + _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", + pa_features_31, sizeof(pa_features_31)))); } if (chip->ram_size) { @@ -995,9 +1082,10 @@ static void pnv_init(MachineState *machine) * 25:28 Core number * 29:31 Thread ID */ -static uint32_t pnv_chip_core_pir_p8(PnvChip *chip, uint32_t core_id) +static uint32_t pnv_chip_pir_p8(PnvChip *chip, uint32_t core_id, + uint32_t thread_id) { - return (chip->chip_id << 7) | (core_id << 3); + return (chip->chip_id << 7) | (core_id << 3) | thread_id; } static void pnv_chip_power8_intc_create(PnvChip *chip, PowerPCCPU *cpu, @@ -1049,14 +1137,37 @@ static void pnv_chip_power8_intc_print_info(PnvChip *chip, PowerPCCPU *cpu, * * We only care about the lower bits. uint32_t is fine for the moment. */ -static uint32_t pnv_chip_core_pir_p9(PnvChip *chip, uint32_t core_id) +static uint32_t pnv_chip_pir_p9(PnvChip *chip, uint32_t core_id, + uint32_t thread_id) { - return (chip->chip_id << 8) | (core_id << 2); + if (chip->nr_threads == 8) { + return (chip->chip_id << 8) | ((thread_id & 1) << 2) | (core_id << 3) | + (thread_id >> 1); + } else { + return (chip->chip_id << 8) | (core_id << 2) | thread_id; + } } -static uint32_t pnv_chip_core_pir_p10(PnvChip *chip, uint32_t core_id) +/* + * 0:48 Reserved - Read as zeroes + * 49:52 Node ID + * 53:55 Chip ID + * 56 Reserved - Read as zero + * 57:59 Quad ID + * 60 Core Chiplet Pair ID + * 61:63 Thread/Core Chiplet ID t0-t2 + * + * We only care about the lower bits. uint32_t is fine for the moment. 
+ */ +static uint32_t pnv_chip_pir_p10(PnvChip *chip, uint32_t core_id, + uint32_t thread_id) { - return (chip->chip_id << 8) | (core_id << 2); + if (chip->nr_threads == 8) { + return (chip->chip_id << 8) | ((core_id / 4) << 4) | + ((core_id % 2) << 3) | thread_id; + } else { + return (chip->chip_id << 8) | (core_id << 2) | thread_id; + } } static void pnv_chip_power9_intc_create(PnvChip *chip, PowerPCCPU *cpu, @@ -1235,7 +1346,7 @@ static void pnv_chip_icp_realize(Pnv8Chip *chip8, Error **errp) int core_hwid = CPU_CORE(pnv_core)->core_id; for (j = 0; j < CPU_CORE(pnv_core)->nr_threads; j++) { - uint32_t pir = pcc->core_pir(chip, core_hwid) + j; + uint32_t pir = pcc->chip_pir(chip, core_hwid, j); PnvICPState *icp = PNV_ICP(xics_icp_get(chip8->xics, pir)); memory_region_add_subregion(&chip8->icp_mmio, pir << 12, @@ -1348,7 +1459,7 @@ static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x221ef04980000000ull; /* P8 Murano DD2.1 */ k->cores_mask = POWER8E_CORE_MASK; k->num_phbs = 3; - k->core_pir = pnv_chip_core_pir_p8; + k->chip_pir = pnv_chip_pir_p8; k->intc_create = pnv_chip_power8_intc_create; k->intc_reset = pnv_chip_power8_intc_reset; k->intc_destroy = pnv_chip_power8_intc_destroy; @@ -1372,7 +1483,7 @@ static void pnv_chip_power8_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x220ea04980000000ull; /* P8 Venice DD2.0 */ k->cores_mask = POWER8_CORE_MASK; k->num_phbs = 3; - k->core_pir = pnv_chip_core_pir_p8; + k->chip_pir = pnv_chip_pir_p8; k->intc_create = pnv_chip_power8_intc_create; k->intc_reset = pnv_chip_power8_intc_reset; k->intc_destroy = pnv_chip_power8_intc_destroy; @@ -1396,7 +1507,7 @@ static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x120d304980000000ull; /* P8 Naples DD1.0 */ k->cores_mask = POWER8_CORE_MASK; k->num_phbs = 4; - k->core_pir = pnv_chip_core_pir_p8; + k->chip_pir = pnv_chip_pir_p8; k->intc_create = pnv_chip_power8_intc_create; k->intc_reset = pnv_chip_power8_intc_reset; k->intc_destroy = pnv_chip_power8_intc_destroy; @@ -1669,7 +1780,7 @@ static void pnv_chip_power9_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x220d104900008000ull; /* P9 Nimbus DD2.0 */ k->cores_mask = POWER9_CORE_MASK; - k->core_pir = pnv_chip_core_pir_p9; + k->chip_pir = pnv_chip_pir_p9; k->intc_create = pnv_chip_power9_intc_create; k->intc_reset = pnv_chip_power9_intc_reset; k->intc_destroy = pnv_chip_power9_intc_destroy; @@ -1981,7 +2092,7 @@ static void pnv_chip_power10_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x120da04900008000ull; /* P10 DD1.0 (with NX) */ k->cores_mask = POWER10_CORE_MASK; - k->core_pir = pnv_chip_core_pir_p10; + k->chip_pir = pnv_chip_pir_p10; k->intc_create = pnv_chip_power10_intc_create; k->intc_reset = pnv_chip_power10_intc_reset; k->intc_destroy = pnv_chip_power10_intc_destroy; @@ -2071,8 +2182,8 @@ static void pnv_chip_core_realize(PnvChip *chip, Error **errp) chip->nr_threads, &error_fatal); object_property_set_int(OBJECT(pnv_core), CPU_CORE_PROP_CORE_ID, core_hwid, &error_fatal); - object_property_set_int(OBJECT(pnv_core), "pir", - pcc->core_pir(chip, core_hwid), &error_fatal); + object_property_set_int(OBJECT(pnv_core), "hwid", core_hwid, + &error_fatal); object_property_set_int(OBJECT(pnv_core), "hrmor", pnv->fw_load_addr, &error_fatal); object_property_set_link(OBJECT(pnv_core), "chip", OBJECT(chip), diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c index 8c7afe037f..f40ab721d6 100644 --- a/hw/ppc/pnv_core.c +++ 
b/hw/ppc/pnv_core.c @@ -226,7 +226,7 @@ static void pnv_core_cpu_realize(PnvCore *pc, PowerPCCPU *cpu, Error **errp, int thread_index) { CPUPPCState *env = &cpu->env; - int core_pir; + int core_hwid; ppc_spr_t *pir = &env->spr_cb[SPR_PIR]; ppc_spr_t *tir = &env->spr_cb[SPR_TIR]; Error *local_err = NULL; @@ -242,10 +242,10 @@ static void pnv_core_cpu_realize(PnvCore *pc, PowerPCCPU *cpu, Error **errp, return; } - core_pir = object_property_get_uint(OBJECT(pc), "pir", &error_abort); + core_hwid = object_property_get_uint(OBJECT(pc), "hwid", &error_abort); tir->default_value = thread_index; - pir->default_value = core_pir + thread_index; + pir->default_value = pcc->chip_pir(pc->chip, core_hwid, thread_index); /* Set time-base frequency to 512 MHz */ cpu_ppc_tb_init(env, PNV_TIMEBASE_FREQ); @@ -342,7 +342,7 @@ static void pnv_core_unrealize(DeviceState *dev) } static Property pnv_core_properties[] = { - DEFINE_PROP_UINT32("pir", PnvCore, pir, 0), + DEFINE_PROP_UINT32("hwid", PnvCore, hwid, 0), DEFINE_PROP_UINT64("hrmor", PnvCore, hrmor, 0), DEFINE_PROP_LINK("chip", PnvCore, chip, TYPE_PNV_CHIP, PnvChip *), DEFINE_PROP_END_OF_LIST(), diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index fadb8f5239..e6fa5580c0 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -633,6 +633,16 @@ void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value) ((uint64_t)value << 32) | tb); } +void cpu_ppc_increase_tb_by_offset(CPUPPCState *env, int64_t offset) +{ + env->tb_env->tb_offset += offset; +} + +void cpu_ppc_decrease_tb_by_offset(CPUPPCState *env, int64_t offset) +{ + env->tb_env->tb_offset -= offset; +} + uint64_t cpu_ppc_load_vtb(CPUPPCState *env) { ppc_tb_t *tb_env = env->tb_env; diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 394091830d..c417f9dd52 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -233,29 +233,66 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr, PowerPCCPU *cpu, void *fdt, int offset) { + /* + * SSO (SAO) ordering is supported on KVM and thread=single hosts, + * but not MTTCG, so disable it. To advertise it, a cap would have + * to be added, or support implemented for MTTCG. + * + * Copy/paste is not supported by TCG, so it is not advertised. KVM + * can execute them but it has no accelerator drivers which are usable, + * so there isn't much need for it anyway. + */ + + /* These should be kept in sync with pnv */ uint8_t pa_features_206[] = { 6, 0, - 0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 }; + 0xf6, 0x1f, 0xc7, 0x00, 0x00, 0xc0 }; uint8_t pa_features_207[] = { 24, 0, - 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, + 0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x00, 0x00 }; uint8_t pa_features_300[] = { 66, 0, /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ - /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */ - 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */ + /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */ + 0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */ + /* 6: DS207 */ + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ + /* 16: Vector */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ + /* 18: Vec. Scalar, 20: Vec. XOR */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ + /* 24: Ext. 
Dec, 26: 64 bit ftrs, 28: PM ftrs */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ + /* 32: LE atomic, 34: EBB + ext EBB */ + 0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ + /* 40: Radix MMU */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */ + /* 42: PM, 44: PC RA, 46: SC vec'd */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ + /* 48: SIMD, 50: QP BFP, 52: String */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */ + /* 54: DecFP, 56: DecI, 58: SHA */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ + /* 60: NM atomic, 62: RNG */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ + }; + /* 3.1 removes SAO, HTM support */ + uint8_t pa_features_31[] = { 74, 0, + /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ + /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */ + 0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */ /* 6: DS207 */ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ /* 16: Vector */ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ - /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */ + /* 18: Vec. Scalar, 20: Vec. XOR */ 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */ 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ - /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */ - 0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ - /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */ - 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */ + /* 32: LE atomic, 34: EBB + ext EBB */ + 0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ + /* 40: Radix MMU */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */ /* 42: PM, 44: PC RA, 46: SC vec'd */ 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ /* 48: SIMD, 50: QP BFP, 52: String */ @@ -264,6 +301,10 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ /* 60: NM atomic, 62: RNG */ 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ + /* 68: DEXCR[SBHE|IBRTPDUS|SRAPD|NPHIE|PHIE] */ + 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 66 - 71 */ + /* 72: [P]HASHST/[P]HASHCHK */ + 0x80, 0x00, /* 72 - 73 */ }; uint8_t *pa_features = NULL; size_t pa_size; @@ -280,6 +321,10 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr, pa_features = pa_features_300; pa_size = sizeof(pa_features_300); } + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, cpu->compat_pvr)) { + pa_features = pa_features_31; + pa_size = sizeof(pa_features_31); + } if (!pa_features) { return; } @@ -1362,7 +1407,6 @@ void spapr_init_all_lpcrs(target_ulong value, target_ulong mask) } } - static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu, target_ulong lpid, ppc_v3_pate_t *entry) { @@ -1375,33 +1419,16 @@ static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu, /* Copy PATE1:GR into PATE0:HR */ entry->dw0 = spapr->patb_entry & PATE0_HR; entry->dw1 = spapr->patb_entry; - + return true; } else { - uint64_t patb, pats; - - assert(lpid != 0); - - patb = spapr->nested_ptcr & PTCR_PATB; - pats = spapr->nested_ptcr & PTCR_PATS; - - /* Check if partition table is properly aligned */ - if (patb & MAKE_64BIT_MASK(0, pats + 12)) { - return false; - } - - /* Calculate number of entries */ - pats = 1ull << (pats + 12 - 4); - if (pats <= lpid) { - return false; + if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) { + return spapr_get_pate_nested_hv(spapr, cpu, lpid, entry); + } else if (spapr_nested_api(spapr) == NESTED_API_PAPR) { + return spapr_get_pate_nested_papr(spapr, cpu, lpid, entry); + } else { + 
g_assert_not_reached(); } - - /* Grab entry */ - patb += 16 * lpid; - entry->dw0 = ldq_phys(CPU(cpu)->as, patb); - entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8); } - - return true; } #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2)) @@ -1689,6 +1716,7 @@ static void spapr_machine_reset(MachineState *machine, ShutdownCause reason) pef_kvm_reset(machine->cgs, &error_fatal); spapr_caps_apply(spapr); + spapr_nested_reset(spapr); first_ppc_cpu = POWERPC_CPU(first_cpu); if (kvm_enabled() && kvmppc_has_cap_mmu_radix() && @@ -2138,6 +2166,7 @@ static const VMStateDescription vmstate_spapr = { &vmstate_spapr_cap_fwnmi, &vmstate_spapr_fwnmi, &vmstate_spapr_cap_rpt_invalidate, + &vmstate_spapr_cap_nested_papr, NULL } }; @@ -4702,6 +4731,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND; smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */ smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF; + smc->default_caps.caps[SPAPR_CAP_NESTED_PAPR] = SPAPR_CAP_OFF; smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON; smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON; smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON; diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c index cc91d59c57..0a15415a1d 100644 --- a/hw/ppc/spapr_caps.c +++ b/hw/ppc/spapr_caps.c @@ -484,6 +484,50 @@ static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr, error_append_hint(errp, "Try appending -machine cap-nested-hv=off " "or use threads=1 with -smp\n"); } + if (spapr_nested_api(spapr) && + spapr_nested_api(spapr) != NESTED_API_KVM_HV) { + error_setg(errp, "Nested-HV APIs are mutually exclusive"); + error_append_hint(errp, "Please use either cap-nested-hv or " + "cap-nested-papr to proceed.\n"); + return; + } else { + spapr->nested.api = NESTED_API_KVM_HV; + } + } +} + +static void cap_nested_papr_apply(SpaprMachineState *spapr, + uint8_t val, Error **errp) +{ + ERRP_GUARD(); + PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + CPUPPCState *env = &cpu->env; + + if (!val) { + /* capability disabled by default */ + return; + } + + if (tcg_enabled()) { + if (!(env->insns_flags2 & PPC2_ISA300)) { + error_setg(errp, "Nested-PAPR only supported on POWER9 and later"); + error_append_hint(errp, + "Try appending -machine cap-nested-papr=off\n"); + return; + } + if (spapr_nested_api(spapr) && + spapr_nested_api(spapr) != NESTED_API_PAPR) { + error_setg(errp, "Nested-HV APIs are mutually exclusive"); + error_append_hint(errp, "Please use either cap-nested-hv or " + "cap-nested-papr to proceed.\n"); + return; + } else { + spapr->nested.api = NESTED_API_PAPR; + } + } else if (kvm_enabled()) { + error_setg(errp, "KVM implementation does not support Nested-PAPR"); + error_append_hint(errp, + "Try appending -machine cap-nested-papr=off\n"); } } @@ -732,6 +776,15 @@ SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = { .type = "bool", .apply = cap_nested_kvm_hv_apply, }, + [SPAPR_CAP_NESTED_PAPR] = { + .name = "nested-papr", + .description = "Allow Nested HV (PAPR API)", + .index = SPAPR_CAP_NESTED_PAPR, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_nested_papr_apply, + }, [SPAPR_CAP_LARGE_DECREMENTER] = { .name = "large-decr", .description = "Allow Large Decrementer", @@ -916,6 +969,7 @@ SPAPR_CAP_MIG_STATE(sbbc, SPAPR_CAP_SBBC); SPAPR_CAP_MIG_STATE(ibs, SPAPR_CAP_IBS); SPAPR_CAP_MIG_STATE(hpt_maxpagesize, SPAPR_CAP_HPT_MAXPAGESIZE); 
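The two nested-HV implementations are mutually exclusive per machine: cap_nested_kvm_hv_apply and cap_nested_papr_apply above each record their API in spapr->nested.api and refuse to apply if the other already has. As an illustrative usage sketch (option names taken from the capability table and the error hints above, the rest of the command line is assumed), an L1 is started with either -machine pseries,cap-nested-hv=on for the original KVM-HV API or -machine pseries,cap-nested-papr=on for the new PAPR API; the latter is only accepted under TCG, since cap_nested_papr_apply rejects KVM. With cap-nested-papr enabled, the L1 drives its guests through the H_GUEST_* hypercalls registered further down in spapr_nested.c (get/set capabilities, create guest, create vCPU, set/get state, run vCPU).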
SPAPR_CAP_MIG_STATE(nested_kvm_hv, SPAPR_CAP_NESTED_KVM_HV); +SPAPR_CAP_MIG_STATE(nested_papr, SPAPR_CAP_NESTED_PAPR); SPAPR_CAP_MIG_STATE(large_decr, SPAPR_CAP_LARGE_DECREMENTER); SPAPR_CAP_MIG_STATE(ccf_assist, SPAPR_CAP_CCF_ASSIST); SPAPR_CAP_MIG_STATE(fwnmi, SPAPR_CAP_FWNMI); diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 40b7c52f7f..e7c9edd033 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -39,9 +39,13 @@ static void spapr_reset_vcpu(PowerPCCPU *cpu) /* * "PowerPC Processor binding to IEEE 1275" defines the initial MSR state - * as 32bit (MSR_SF=0) in "8.2.1. Initial Register Values". + * as 32bit (MSR_SF=0) with MSR_ME=1 and MSR_FP=1 in "8.2.1. Initial + * Register Values". This can also be found in "LoPAPR 1.1" "C.9.2.1 + * Initial Register Values". */ env->msr &= ~(1ULL << MSR_SF); + env->msr |= (1ULL << MSR_ME) | (1ULL << MSR_FP); + env->spr[SPR_HIOR] = 0; lpcr = env->spr[SPR_LPCR]; @@ -394,10 +398,8 @@ static const TypeInfo spapr_cpu_core_type_infos[] = { DEFINE_SPAPR_CPU_CORE_TYPE("power8_v2.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power8e_v2.1"), DEFINE_SPAPR_CPU_CORE_TYPE("power8nvl_v1.0"), - DEFINE_SPAPR_CPU_CORE_TYPE("power9_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.2"), - DEFINE_SPAPR_CPU_CORE_TYPE("power10_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power10_v2.0"), #ifdef CONFIG_KVM DEFINE_SPAPR_CPU_CORE_TYPE("host"), diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 75c2d12978..5e1d020e3d 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -1525,6 +1525,28 @@ void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn) *slot = fn; } +void spapr_unregister_hypercall(target_ulong opcode) +{ + spapr_hcall_fn *slot; + + if (opcode <= MAX_HCALL_OPCODE) { + assert((opcode & 0x3) == 0); + + slot = &papr_hypercall_table[opcode / 4]; + } else if (opcode >= SVM_HCALL_BASE && opcode <= SVM_HCALL_MAX) { + /* we only have SVM-related hcall numbers assigned in multiples of 4 */ + assert((opcode & 0x3) == 0); + + slot = &svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4]; + } else { + assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX)); + + slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE]; + } + + *slot = NULL; +} + target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode, target_ulong *args) { @@ -1638,8 +1660,6 @@ static void hypercall_register_types(void) spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support); spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt); - - spapr_register_nested(); } type_init(hypercall_register_types) diff --git a/hw/ppc/spapr_nested.c b/hw/ppc/spapr_nested.c index 121aa96ddc..936659b4c0 100644 --- a/hw/ppc/spapr_nested.c +++ b/hw/ppc/spapr_nested.c @@ -6,8 +6,85 @@ #include "hw/ppc/spapr.h" #include "hw/ppc/spapr_cpu_core.h" #include "hw/ppc/spapr_nested.h" +#include "mmu-book3s-v3.h" +#include "cpu-models.h" +#include "qemu/log.h" + +void spapr_nested_reset(SpaprMachineState *spapr) +{ + if (spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) { + spapr_unregister_nested_hv(); + spapr_register_nested_hv(); + } else if (spapr_get_cap(spapr, SPAPR_CAP_NESTED_PAPR)) { + spapr->nested.capabilities_set = false; + spapr_unregister_nested_papr(); + spapr_register_nested_papr(); + spapr_nested_gsb_init(); + } else { + spapr->nested.api = 0; + } +} + +uint8_t spapr_nested_api(SpaprMachineState *spapr) +{ + return spapr->nested.api; +} #ifdef CONFIG_TCG + +bool spapr_get_pate_nested_hv(SpaprMachineState 
*spapr, PowerPCCPU *cpu, + target_ulong lpid, ppc_v3_pate_t *entry) +{ + uint64_t patb, pats; + + assert(lpid != 0); + + patb = spapr->nested.ptcr & PTCR_PATB; + pats = spapr->nested.ptcr & PTCR_PATS; + + /* Check if partition table is properly aligned */ + if (patb & MAKE_64BIT_MASK(0, pats + 12)) { + return false; + } + + /* Calculate number of entries */ + pats = 1ull << (pats + 12 - 4); + if (pats <= lpid) { + return false; + } + + /* Grab entry */ + patb += 16 * lpid; + entry->dw0 = ldq_phys(CPU(cpu)->as, patb); + entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8); + return true; +} + +static +SpaprMachineStateNestedGuest *spapr_get_nested_guest(SpaprMachineState *spapr, + target_ulong guestid) +{ + SpaprMachineStateNestedGuest *guest; + + guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid)); + return guest; +} + +bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, + target_ulong lpid, ppc_v3_pate_t *entry) +{ + SpaprMachineStateNestedGuest *guest; + assert(lpid != 0); + guest = spapr_get_nested_guest(spapr, lpid); + if (!guest) { + return false; + } + + entry->dw0 = guest->parttbl[0]; + entry->dw1 = guest->parttbl[1]; + return true; +} + #define PRTS_MASK 0x1f static target_ulong h_set_ptbl(PowerPCCPU *cpu, @@ -25,7 +102,7 @@ static target_ulong h_set_ptbl(PowerPCCPU *cpu, return H_PARAMETER; } - spapr->nested_ptcr = ptcr; /* Save new partition table */ + spapr->nested.ptcr = ptcr; /* Save new partition table */ return H_SUCCESS; } @@ -59,6 +136,7 @@ static target_ulong h_copy_tofrom_guest(PowerPCCPU *cpu, static void nested_save_state(struct nested_ppc_state *save, PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); memcpy(save->gpr, env->gpr, sizeof(save->gpr)); @@ -85,13 +163,79 @@ static void nested_save_state(struct nested_ppc_state *save, PowerPCCPU *cpu) save->pidr = env->spr[SPR_BOOKS_PID]; save->ppr = env->spr[SPR_PPR]; - save->tb_offset = env->tb_env->tb_offset; + if (spapr_nested_api(spapr) == NESTED_API_PAPR) { + save->amor = env->spr[SPR_AMOR]; + save->dawr0 = env->spr[SPR_DAWR0]; + save->dawrx0 = env->spr[SPR_DAWRX0]; + save->ciabr = env->spr[SPR_CIABR]; + save->purr = env->spr[SPR_PURR]; + save->spurr = env->spr[SPR_SPURR]; + save->ic = env->spr[SPR_IC]; + save->vtb = env->spr[SPR_VTB]; + save->hdar = env->spr[SPR_HDAR]; + save->hdsisr = env->spr[SPR_HDSISR]; + save->heir = env->spr[SPR_HEIR]; + save->asdr = env->spr[SPR_ASDR]; + save->dawr1 = env->spr[SPR_DAWR1]; + save->dawrx1 = env->spr[SPR_DAWRX1]; + save->dexcr = env->spr[SPR_DEXCR]; + save->hdexcr = env->spr[SPR_HDEXCR]; + save->hashkeyr = env->spr[SPR_HASHKEYR]; + save->hashpkeyr = env->spr[SPR_HASHPKEYR]; + memcpy(save->vsr, env->vsr, sizeof(save->vsr)); + save->ebbhr = env->spr[SPR_EBBHR]; + save->tar = env->spr[SPR_TAR]; + save->ebbrr = env->spr[SPR_EBBRR]; + save->bescr = env->spr[SPR_BESCR]; + save->iamr = env->spr[SPR_IAMR]; + save->amr = env->spr[SPR_AMR]; + save->uamor = env->spr[SPR_UAMOR]; + save->dscr = env->spr[SPR_DSCR]; + save->fscr = env->spr[SPR_FSCR]; + save->pspb = env->spr[SPR_PSPB]; + save->ctrl = env->spr[SPR_CTRL]; + save->vrsave = env->spr[SPR_VRSAVE]; + save->dar = env->spr[SPR_DAR]; + save->dsisr = env->spr[SPR_DSISR]; + save->pmc1 = env->spr[SPR_POWER_PMC1]; + save->pmc2 = env->spr[SPR_POWER_PMC2]; + save->pmc3 = env->spr[SPR_POWER_PMC3]; + save->pmc4 = env->spr[SPR_POWER_PMC4]; + save->pmc5 = env->spr[SPR_POWER_PMC5]; + save->pmc6 = env->spr[SPR_POWER_PMC6]; + save->mmcr0 = 
env->spr[SPR_POWER_MMCR0]; + save->mmcr1 = env->spr[SPR_POWER_MMCR1]; + save->mmcr2 = env->spr[SPR_POWER_MMCR2]; + save->mmcra = env->spr[SPR_POWER_MMCRA]; + save->sdar = env->spr[SPR_POWER_SDAR]; + save->siar = env->spr[SPR_POWER_SIAR]; + save->sier = env->spr[SPR_POWER_SIER]; + save->vscr = ppc_get_vscr(env); + save->fpscr = env->fpscr; + } else if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) { + save->tb_offset = env->tb_env->tb_offset; + } +} + +static void nested_post_load_state(CPUPPCState *env, CPUState *cs) +{ + /* + * compute hflags and possible interrupts. + */ + hreg_compute_hflags(env); + ppc_maybe_interrupt(env); + /* + * Nested HV does not tag TLB entries between L1 and L2, so must + * flush on transition. + */ + tlb_flush(cs); + env->reserve_addr = -1; /* Reset the reservation */ } static void nested_load_state(PowerPCCPU *cpu, struct nested_ppc_state *load) { - CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); memcpy(env->gpr, load->gpr, sizeof(env->gpr)); @@ -118,20 +262,58 @@ static void nested_load_state(PowerPCCPU *cpu, struct nested_ppc_state *load) env->spr[SPR_BOOKS_PID] = load->pidr; env->spr[SPR_PPR] = load->ppr; - env->tb_env->tb_offset = load->tb_offset; - - /* - * MSR updated, compute hflags and possible interrupts. - */ - hreg_compute_hflags(env); - ppc_maybe_interrupt(env); - - /* - * Nested HV does not tag TLB entries between L1 and L2, so must - * flush on transition. - */ - tlb_flush(cs); - env->reserve_addr = -1; /* Reset the reservation */ + if (spapr_nested_api(spapr) == NESTED_API_PAPR) { + env->spr[SPR_AMOR] = load->amor; + env->spr[SPR_DAWR0] = load->dawr0; + env->spr[SPR_DAWRX0] = load->dawrx0; + env->spr[SPR_CIABR] = load->ciabr; + env->spr[SPR_PURR] = load->purr; + env->spr[SPR_SPURR] = load->purr; + env->spr[SPR_IC] = load->ic; + env->spr[SPR_VTB] = load->vtb; + env->spr[SPR_HDAR] = load->hdar; + env->spr[SPR_HDSISR] = load->hdsisr; + env->spr[SPR_HEIR] = load->heir; + env->spr[SPR_ASDR] = load->asdr; + env->spr[SPR_DAWR1] = load->dawr1; + env->spr[SPR_DAWRX1] = load->dawrx1; + env->spr[SPR_DEXCR] = load->dexcr; + env->spr[SPR_HDEXCR] = load->hdexcr; + env->spr[SPR_HASHKEYR] = load->hashkeyr; + env->spr[SPR_HASHPKEYR] = load->hashpkeyr; + memcpy(env->vsr, load->vsr, sizeof(env->vsr)); + env->spr[SPR_EBBHR] = load->ebbhr; + env->spr[SPR_TAR] = load->tar; + env->spr[SPR_EBBRR] = load->ebbrr; + env->spr[SPR_BESCR] = load->bescr; + env->spr[SPR_IAMR] = load->iamr; + env->spr[SPR_AMR] = load->amr; + env->spr[SPR_UAMOR] = load->uamor; + env->spr[SPR_DSCR] = load->dscr; + env->spr[SPR_FSCR] = load->fscr; + env->spr[SPR_PSPB] = load->pspb; + env->spr[SPR_CTRL] = load->ctrl; + env->spr[SPR_VRSAVE] = load->vrsave; + env->spr[SPR_DAR] = load->dar; + env->spr[SPR_DSISR] = load->dsisr; + env->spr[SPR_POWER_PMC1] = load->pmc1; + env->spr[SPR_POWER_PMC2] = load->pmc2; + env->spr[SPR_POWER_PMC3] = load->pmc3; + env->spr[SPR_POWER_PMC4] = load->pmc4; + env->spr[SPR_POWER_PMC5] = load->pmc5; + env->spr[SPR_POWER_PMC6] = load->pmc6; + env->spr[SPR_POWER_MMCR0] = load->mmcr0; + env->spr[SPR_POWER_MMCR1] = load->mmcr1; + env->spr[SPR_POWER_MMCR2] = load->mmcr2; + env->spr[SPR_POWER_MMCRA] = load->mmcra; + env->spr[SPR_POWER_SDAR] = load->sdar; + env->spr[SPR_POWER_SIAR] = load->siar; + env->spr[SPR_POWER_SIER] = load->sier; + ppc_store_vscr(env, load->vscr); + ppc_store_fpscr(env, load->fpscr); + } else if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) { + env->tb_env->tb_offset = load->tb_offset; 
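/*
 * (Illustrative note, not part of the patch.) Timebase handling differs
 * between the two APIs: NESTED_API_KVM_HV saves/restores the L2 tb_offset
 * directly in this state copy, while NESTED_API_PAPR leaves it out here
 * and instead applies the per-vCPU offset around guest entry/exit using
 * the cpu_ppc_increase_tb_by_offset()/cpu_ppc_decrease_tb_by_offset()
 * helpers added to hw/ppc/ppc.c above (see nested_papr_load_l2() and
 * spapr_exit_nested_papr() below).
 */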
+ } } /* @@ -146,6 +328,7 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu, { PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); CPUPPCState *env = &cpu->env; + CPUState *cs = CPU(cpu); SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); struct nested_ppc_state l2_state; target_ulong hv_ptr = args[0]; @@ -157,7 +340,7 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu, struct kvmppc_pt_regs *regs; hwaddr len; - if (spapr->nested_ptcr == 0) { + if (spapr->nested.ptcr == 0) { return H_NOT_AVAILABLE; } @@ -244,6 +427,7 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu, * Switch to the nested guest environment and start the "hdec" timer. */ nested_load_state(cpu, &l2_state); + nested_post_load_state(env, cs); hdec = hv_state.hdec_expiry - now; cpu_ppc_hdecr_init(env); @@ -272,9 +456,10 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu, return env->gpr[3]; } -void spapr_exit_nested(PowerPCCPU *cpu, int excp) +static void spapr_exit_nested_hv(PowerPCCPU *cpu, int excp) { CPUPPCState *env = &cpu->env; + CPUState *cs = CPU(cpu); SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); struct nested_ppc_state l2_state; target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4]; @@ -284,8 +469,6 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp) struct kvmppc_pt_regs *regs; hwaddr len; - assert(spapr_cpu->in_nested); - nested_save_state(&l2_state, cpu); hsrr0 = env->spr[SPR_HSRR0]; hsrr1 = env->spr[SPR_HSRR1]; @@ -298,6 +481,7 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp) */ assert(env->spr[SPR_LPIDR] != 0); nested_load_state(cpu, spapr_cpu->nested_host_state); + nested_post_load_state(env, cs); env->gpr[3] = env->excp_vectors[excp]; /* hcall return value */ cpu_ppc_hdecr_exit(env); @@ -375,21 +559,1347 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp) address_space_unmap(CPU(cpu)->as, regs, len, len, true); } -void spapr_register_nested(void) +static bool spapr_nested_vcpu_check(SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid, bool inoutbuf) +{ + struct SpaprMachineStateNestedGuestVcpu *vcpu; + /* + * Perform sanity checks for the provided vcpuid of a guest. + * For now, ensure its valid, allocated and enabled for use. 
+ */ + + if (vcpuid >= PAPR_NESTED_GUEST_VCPU_MAX) { + return false; + } + + if (!(vcpuid < guest->nr_vcpus)) { + return false; + } + + vcpu = &guest->vcpus[vcpuid]; + if (!vcpu->enabled) { + return false; + } + + if (!inoutbuf) { + return true; + } + + /* Check to see if the in/out buffers are registered */ + if (vcpu->runbufin.addr && vcpu->runbufout.addr) { + return true; + } + + return false; +} + +static void *get_vcpu_state_ptr(SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid) +{ + assert(spapr_nested_vcpu_check(guest, vcpuid, false)); + return &guest->vcpus[vcpuid].state; +} + +static void *get_vcpu_ptr(SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid) +{ + assert(spapr_nested_vcpu_check(guest, vcpuid, false)); + return &guest->vcpus[vcpuid]; +} + +static void *get_guest_ptr(SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid) +{ + return guest; /* for GSBE_NESTED */ +} + +/* + * set=1 means the L1 is trying to set some state + * set=0 means the L1 is trying to get some state + */ +static void copy_state_8to8(void *a, void *b, bool set) +{ + /* set takes from the Big endian element_buf and sets internal buffer */ + + if (set) { + *(uint64_t *)a = be64_to_cpu(*(uint64_t *)b); + } else { + *(uint64_t *)b = cpu_to_be64(*(uint64_t *)a); + } +} + +static void copy_state_4to4(void *a, void *b, bool set) +{ + if (set) { + *(uint32_t *)a = be32_to_cpu(*(uint32_t *)b); + } else { + *(uint32_t *)b = cpu_to_be32(*((uint32_t *)a)); + } +} + +static void copy_state_16to16(void *a, void *b, bool set) +{ + uint64_t *src, *dst; + + if (set) { + src = b; + dst = a; + + dst[1] = be64_to_cpu(src[0]); + dst[0] = be64_to_cpu(src[1]); + } else { + src = a; + dst = b; + + dst[1] = cpu_to_be64(src[0]); + dst[0] = cpu_to_be64(src[1]); + } +} + +static void copy_state_4to8(void *a, void *b, bool set) +{ + if (set) { + *(uint64_t *)a = (uint64_t) be32_to_cpu(*(uint32_t *)b); + } else { + *(uint32_t *)b = cpu_to_be32((uint32_t) (*((uint64_t *)a))); + } +} + +static void copy_state_pagetbl(void *a, void *b, bool set) +{ + uint64_t *pagetbl; + uint64_t *buf; /* 3 double words */ + uint64_t rts; + + assert(set); + + pagetbl = a; + buf = b; + + *pagetbl = be64_to_cpu(buf[0]); + /* as per ISA section 6.7.6.1 */ + *pagetbl |= PATE0_HR; /* Host Radix bit is 1 */ + + /* RTS */ + rts = be64_to_cpu(buf[1]); + assert(rts == 52); + rts = rts - 31; /* since radix tree size = 2^(RTS+31) */ + *pagetbl |= ((rts & 0x7) << 5); /* RTS2 is bit 56:58 */ + *pagetbl |= (((rts >> 3) & 0x3) << 61); /* RTS1 is bit 1:2 */ + + /* RPDS {Size = 2^(RPDS+3) , RPDS >=5} */ + *pagetbl |= 63 - clz64(be64_to_cpu(buf[2])) - 3; +} + +static void copy_state_proctbl(void *a, void *b, bool set) +{ + uint64_t *proctbl; + uint64_t *buf; /* 2 double words */ + + assert(set); + + proctbl = a; + buf = b; + /* PRTB: Process Table Base */ + *proctbl = be64_to_cpu(buf[0]); + /* PRTS: Process Table Size = 2^(12+PRTS) */ + if (be64_to_cpu(buf[1]) == (1ULL << 12)) { + *proctbl |= 0; + } else if (be64_to_cpu(buf[1]) == (1ULL << 24)) { + *proctbl |= 12; + } else { + g_assert_not_reached(); + } +} + +static void copy_state_runbuf(void *a, void *b, bool set) +{ + uint64_t *buf; /* 2 double words */ + struct SpaprMachineStateNestedGuestVcpuRunBuf *runbuf; + + assert(set); + + runbuf = a; + buf = b; + + runbuf->addr = be64_to_cpu(buf[0]); + assert(runbuf->addr); + + /* per spec */ + assert(be64_to_cpu(buf[1]) <= 16384); + + /* + * This will also hit in the input buffer but should be fine for + * now. If not we can split this function. 
+ */ + assert(be64_to_cpu(buf[1]) >= VCPU_OUT_BUF_MIN_SZ); + + runbuf->size = be64_to_cpu(buf[1]); +} + +/* tell the L1 how big we want the output vcpu run buffer */ +static void out_buf_min_size(void *a, void *b, bool set) +{ + uint64_t *buf; /* 1 double word */ + + assert(!set); + + buf = b; + + buf[0] = cpu_to_be64(VCPU_OUT_BUF_MIN_SZ); +} + +static void copy_logical_pvr(void *a, void *b, bool set) +{ + SpaprMachineStateNestedGuest *guest; + uint32_t *buf; /* 1 word */ + uint32_t *pvr_logical_ptr; + uint32_t pvr_logical; + target_ulong pcr = 0; + + pvr_logical_ptr = a; + buf = b; + + if (!set) { + buf[0] = cpu_to_be32(*pvr_logical_ptr); + return; + } + + pvr_logical = be32_to_cpu(buf[0]); + + *pvr_logical_ptr = pvr_logical; + + if (*pvr_logical_ptr) { + switch (*pvr_logical_ptr) { + case CPU_POWERPC_LOGICAL_3_10: + pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00; + break; + case CPU_POWERPC_LOGICAL_3_00: + pcr = PCR_COMPAT_3_00; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "Could not set PCR for LPVR=0x%08x\n", + *pvr_logical_ptr); + return; + } + } + + guest = container_of(pvr_logical_ptr, + struct SpaprMachineStateNestedGuest, + pvr_logical); + for (int i = 0; i < guest->nr_vcpus; i++) { + guest->vcpus[i].state.pcr = ~pcr | HVMASK_PCR; + } +} + +static void copy_tb_offset(void *a, void *b, bool set) +{ + SpaprMachineStateNestedGuest *guest; + uint64_t *buf; /* 1 double word */ + uint64_t *tb_offset_ptr; + uint64_t tb_offset; + + tb_offset_ptr = a; + buf = b; + + if (!set) { + buf[0] = cpu_to_be64(*tb_offset_ptr); + return; + } + + tb_offset = be64_to_cpu(buf[0]); + /* need to copy this to the individual tb_offset for each vcpu */ + guest = container_of(tb_offset_ptr, + struct SpaprMachineStateNestedGuest, + tb_offset); + for (int i = 0; i < guest->nr_vcpus; i++) { + guest->vcpus[i].tb_offset = tb_offset; + } +} + +static void copy_state_hdecr(void *a, void *b, bool set) +{ + uint64_t *buf; /* 1 double word */ + uint64_t *hdecr_expiry_tb; + + hdecr_expiry_tb = a; + buf = b; + + if (!set) { + buf[0] = cpu_to_be64(*hdecr_expiry_tb); + return; + } + + *hdecr_expiry_tb = be64_to_cpu(buf[0]); +} + +struct guest_state_element_type guest_state_element_types[] = { + GUEST_STATE_ELEMENT_NOP(GSB_HV_VCPU_IGNORED_ID, 0), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR0, gpr[0]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR1, gpr[1]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR2, gpr[2]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR3, gpr[3]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR4, gpr[4]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR5, gpr[5]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR6, gpr[6]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR7, gpr[7]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR8, gpr[8]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR9, gpr[9]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR10, gpr[10]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR11, gpr[11]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR12, gpr[12]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR13, gpr[13]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR14, gpr[14]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR15, gpr[15]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR16, gpr[16]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR17, gpr[17]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR18, gpr[18]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR19, gpr[19]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR20, gpr[20]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR21, gpr[21]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR22, gpr[22]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR23, gpr[23]), 
+ GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR24, gpr[24]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR25, gpr[25]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR26, gpr[26]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR27, gpr[27]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR28, gpr[28]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR29, gpr[29]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR30, gpr[30]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR31, gpr[31]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_NIA, nip), + GSE_ENV_DWM(GSB_VCPU_SPR_MSR, msr, HVMASK_MSR), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTR, ctr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_LR, lr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_XER, xer), + GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_CR, cr), + GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_MMCR3), + GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_SIER2), + GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_SIER3), + GUEST_STATE_ELEMENT_NOP_W(GSB_VCPU_SPR_WORT), + GSE_ENV_DWM(GSB_VCPU_SPR_LPCR, lpcr, HVMASK_LPCR), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_AMOR, amor), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HFSCR, hfscr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAWR0, dawr0), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DAWRX0, dawrx0), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CIABR, ciabr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_PURR, purr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPURR, spurr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_IC, ic), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_VTB, vtb), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HDAR, hdar), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_HDSISR, hdsisr), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_HEIR, heir), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_ASDR, asdr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SRR0, srr0), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SRR1, srr1), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG0, sprg0), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG1, sprg1), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG2, sprg2), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG3, sprg3), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PIDR, pidr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CFAR, cfar), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_PPR, ppr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAWR1, dawr1), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DAWRX1, dawrx1), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DEXCR, dexcr), + GSE_ENV_DWM(GSB_VCPU_SPR_HDEXCR, hdexcr, HVMASK_HDEXCR), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HASHKEYR, hashkeyr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HASHPKEYR, hashpkeyr), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR0, vsr[0]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR1, vsr[1]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR2, vsr[2]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR3, vsr[3]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR4, vsr[4]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR5, vsr[5]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR6, vsr[6]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR7, vsr[7]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR8, vsr[8]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR9, vsr[9]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR10, vsr[10]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR11, vsr[11]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR12, vsr[12]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR13, vsr[13]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR14, vsr[14]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR15, vsr[15]), + 
GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR16, vsr[16]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR17, vsr[17]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR18, vsr[18]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR19, vsr[19]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR20, vsr[20]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR21, vsr[21]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR22, vsr[22]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR23, vsr[23]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR24, vsr[24]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR25, vsr[25]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR26, vsr[26]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR27, vsr[27]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR28, vsr[28]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR29, vsr[29]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR30, vsr[30]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR31, vsr[31]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR32, vsr[32]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR33, vsr[33]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR34, vsr[34]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR35, vsr[35]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR36, vsr[36]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR37, vsr[37]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR38, vsr[38]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR39, vsr[39]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR40, vsr[40]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR41, vsr[41]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR42, vsr[42]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR43, vsr[43]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR44, vsr[44]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR45, vsr[45]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR46, vsr[46]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR47, vsr[47]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR48, vsr[48]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR49, vsr[49]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR50, vsr[50]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR51, vsr[51]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR52, vsr[52]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR53, vsr[53]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR54, vsr[54]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR55, vsr[55]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR56, vsr[56]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR57, vsr[57]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR58, vsr[58]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR59, vsr[59]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR60, vsr[60]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR61, vsr[61]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR62, vsr[62]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR63, vsr[63]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_EBBHR, ebbhr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_TAR, tar), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_EBBRR, ebbrr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_BESCR, bescr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_IAMR, iamr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_AMR, amr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_UAMOR, uamor), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DSCR, dscr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FSCR, fscr), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PSPB, pspb), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTRL, ctrl), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_VRSAVE, vrsave), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAR, dar), + 
GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DSISR, dsisr), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC1, pmc1), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC2, pmc2), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC3, pmc3), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC4, pmc4), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC5, pmc5), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC6, pmc6), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR0, mmcr0), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR1, mmcr1), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR2, mmcr2), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCRA, mmcra), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SDAR , sdar), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SIAR , siar), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SIER , sier), + GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_VSCR, vscr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FPSCR, fpscr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_DEC_EXPIRE_TB, dec_expiry_tb), + GSBE_NESTED(GSB_PART_SCOPED_PAGETBL, 0x18, parttbl[0], copy_state_pagetbl), + GSBE_NESTED(GSB_PROCESS_TBL, 0x10, parttbl[1], copy_state_proctbl), + GSBE_NESTED(GSB_VCPU_LPVR, 0x4, pvr_logical, copy_logical_pvr), + GSBE_NESTED_MSK(GSB_TB_OFFSET, 0x8, tb_offset, copy_tb_offset, + HVMASK_TB_OFFSET), + GSBE_NESTED_VCPU(GSB_VCPU_IN_BUFFER, 0x10, runbufin, copy_state_runbuf), + GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUFFER, 0x10, runbufout, copy_state_runbuf), + GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUF_MIN_SZ, 0x8, runbufout, out_buf_min_size), + GSBE_NESTED_VCPU(GSB_VCPU_HDEC_EXPIRY_TB, 0x8, hdecr_expiry_tb, + copy_state_hdecr) +}; + +void spapr_nested_gsb_init(void) +{ + struct guest_state_element_type *type; + + /* Init the guest state elements lookup table, flags for now */ + for (int i = 0; i < ARRAY_SIZE(guest_state_element_types); i++) { + type = &guest_state_element_types[i]; + + assert(type->id <= GSB_LAST); + if (type->id >= GSB_VCPU_SPR_HDAR) + /* 0xf000 - 0xf005 Thread + RO */ + type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY; + else if (type->id >= GSB_VCPU_IN_BUFFER) + /* 0x0c00 - 0xf000 Thread + RW */ + type->flags = 0; + else if (type->id >= GSB_VCPU_LPVR) + /* 0x0003 - 0x0bff Guest + RW */ + type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE; + else if (type->id >= GSB_HV_VCPU_STATE_SIZE) + /* 0x0001 - 0x0002 Guest + RO */ + type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY | + GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE; + } +} + +static struct guest_state_element *guest_state_element_next( + struct guest_state_element *element, + int64_t *len, + int64_t *num_elements) +{ + uint16_t size; + + /* size is of element->value[] only. Not whole guest_state_element */ + size = be16_to_cpu(element->size); + + if (len) { + *len -= size + offsetof(struct guest_state_element, value); + } + + if (num_elements) { + *num_elements -= 1; + } + + return (struct guest_state_element *)(element->value + size); +} + +static +struct guest_state_element_type *guest_state_element_type_find(uint16_t id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(guest_state_element_types); i++) + if (id == guest_state_element_types[i].id) { + return &guest_state_element_types[i]; + } + + return NULL; +} + +static void log_element(struct guest_state_element *element, + struct guest_state_request *gsr) +{ + qemu_log_mask(LOG_GUEST_ERROR, "h_guest_%s_state id:0x%04x size:0x%04x", + gsr->flags & GUEST_STATE_REQUEST_SET ? 
"set" : "get", + be16_to_cpu(element->id), be16_to_cpu(element->size)); + qemu_log_mask(LOG_GUEST_ERROR, "buf:0x%016"PRIx64" ...\n", + be64_to_cpu(*(uint64_t *)element->value)); +} + +static bool guest_state_request_check(struct guest_state_request *gsr) +{ + int64_t num_elements, len = gsr->len; + struct guest_state_buffer *gsb = gsr->gsb; + struct guest_state_element *element; + struct guest_state_element_type *type; + uint16_t id, size; + + /* gsb->num_elements = 0 == 32 bits long */ + assert(len >= 4); + + num_elements = be32_to_cpu(gsb->num_elements); + element = gsb->elements; + len -= sizeof(gsb->num_elements); + + /* Walk the buffer to validate the length */ + while (num_elements) { + + id = be16_to_cpu(element->id); + size = be16_to_cpu(element->size); + + if (false) { + log_element(element, gsr); + } + /* buffer size too small */ + if (len < 0) { + return false; + } + + type = guest_state_element_type_find(id); + if (!type) { + qemu_log_mask(LOG_GUEST_ERROR, "Element ID %04x unknown\n", id); + log_element(element, gsr); + return false; + } + + if (id == GSB_HV_VCPU_IGNORED_ID) { + goto next_element; + } + + if (size != type->size) { + qemu_log_mask(LOG_GUEST_ERROR, "Size mismatch. Element ID:%04x." + "Size Exp:%i Got:%i\n", id, type->size, size); + log_element(element, gsr); + return false; + } + + if ((type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY) && + (gsr->flags & GUEST_STATE_REQUEST_SET)) { + qemu_log_mask(LOG_GUEST_ERROR, "Trying to set a read-only Element " + "ID:%04x.\n", id); + return false; + } + + if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE) { + /* guest wide element type */ + if (!(gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE)) { + qemu_log_mask(LOG_GUEST_ERROR, "trying to set a guest wide " + "Element ID:%04x.\n", id); + return false; + } + } else { + /* thread wide element type */ + if (gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE) { + qemu_log_mask(LOG_GUEST_ERROR, "trying to set a thread wide " + "Element ID:%04x.\n", id); + return false; + } + } +next_element: + element = guest_state_element_next(element, &len, &num_elements); + + } + return true; +} + +static bool is_gsr_invalid(struct guest_state_request *gsr, + struct guest_state_element *element, + struct guest_state_element_type *type) +{ + if ((gsr->flags & GUEST_STATE_REQUEST_SET) && + (*(uint64_t *)(element->value) & ~(type->mask))) { + log_element(element, gsr); + qemu_log_mask(LOG_GUEST_ERROR, "L1 can't set reserved bits " + "(allowed mask: 0x%08"PRIx64")\n", type->mask); + return true; + } + return false; +} + +static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + target_ulong flags = args[0]; + + if (flags) { /* don't handle any flags capabilities for now */ + return H_PARAMETER; + } + + /* P10 capabilities */ + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, + spapr->max_compat_pvr)) { + env->gpr[4] |= H_GUEST_CAPABILITIES_P10_MODE; + } + + /* P9 capabilities */ + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, + spapr->max_compat_pvr)) { + env->gpr[4] |= H_GUEST_CAPABILITIES_P9_MODE; + } + + return H_SUCCESS; +} + +static target_ulong h_guest_set_capabilities(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + target_ulong flags = args[0]; + target_ulong capabilities = args[1]; + env->gpr[4] = 0; + + if (flags) { /* don't handle any flags capabilities for now */ + return 
H_PARAMETER; + } + + if (capabilities & H_GUEST_CAPABILITIES_COPY_MEM) { + env->gpr[4] = 1; + return H_P2; /* isn't supported */ + } + + /* + * If there are no capabilities configured, set the R5 to the index of + * the first supported Power Processor Mode + */ + if (!capabilities) { + env->gpr[4] = 1; + + /* set R5 to the first supported Power Processor Mode */ + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, + spapr->max_compat_pvr)) { + env->gpr[5] = H_GUEST_CAP_P10_MODE_BMAP; + } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, + spapr->max_compat_pvr)) { + env->gpr[5] = H_GUEST_CAP_P9_MODE_BMAP; + } + + return H_P2; + } + + /* + * If an invalid capability is set, R5 should contain the index of the + * invalid capability bit + */ + if (capabilities & ~H_GUEST_CAP_VALID_MASK) { + env->gpr[4] = 1; + + /* Set R5 to the index of the invalid capability */ + env->gpr[5] = 63 - ctz64(capabilities); + + return H_P2; + } + + if (!spapr->nested.capabilities_set) { + spapr->nested.capabilities_set = true; + spapr->nested.pvr_base = env->spr[SPR_PVR]; + return H_SUCCESS; + } else { + return H_STATE; + } +} + +static void +destroy_guest_helper(gpointer value) +{ + struct SpaprMachineStateNestedGuest *guest = value; + g_free(guest->vcpus); + g_free(guest); +} + +static target_ulong h_guest_create(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + target_ulong flags = args[0]; + target_ulong continue_token = args[1]; + uint64_t guestid; + int nguests = 0; + struct SpaprMachineStateNestedGuest *guest; + + if (flags) { /* don't handle any flags for now */ + return H_UNSUPPORTED_FLAG; + } + + if (continue_token != -1) { + return H_P2; + } + + if (!spapr->nested.capabilities_set) { + return H_STATE; + } + + if (!spapr->nested.guests) { + spapr->nested.guests = g_hash_table_new_full(NULL, + NULL, + NULL, + destroy_guest_helper); + } + + nguests = g_hash_table_size(spapr->nested.guests); + + if (nguests == PAPR_NESTED_GUEST_MAX) { + return H_NO_MEM; + } + + /* Lookup for available guestid */ + for (guestid = 1; guestid < PAPR_NESTED_GUEST_MAX; guestid++) { + if (!(g_hash_table_lookup(spapr->nested.guests, + GINT_TO_POINTER(guestid)))) { + break; + } + } + + if (guestid == PAPR_NESTED_GUEST_MAX) { + return H_NO_MEM; + } + + guest = g_try_new0(struct SpaprMachineStateNestedGuest, 1); + if (!guest) { + return H_NO_MEM; + } + + guest->pvr_logical = spapr->nested.pvr_base; + g_hash_table_insert(spapr->nested.guests, GINT_TO_POINTER(guestid), guest); + env->gpr[4] = guestid; + + return H_SUCCESS; +} + +static target_ulong h_guest_delete(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong flags = args[0]; + target_ulong guestid = args[1]; + struct SpaprMachineStateNestedGuest *guest; + + /* + * handle flag deleteAllGuests, if set: + * guestid is ignored and all guests are deleted + * + */ + if (flags & ~H_GUEST_DELETE_ALL_FLAG) { + return H_UNSUPPORTED_FLAG; /* other flag bits reserved */ + } else if (flags & H_GUEST_DELETE_ALL_FLAG) { + g_hash_table_destroy(spapr->nested.guests); + return H_SUCCESS; + } + + guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid)); + if (!guest) { + return H_P2; + } + + g_hash_table_remove(spapr->nested.guests, GINT_TO_POINTER(guestid)); + + return H_SUCCESS; +} + +static target_ulong h_guest_create_vcpu(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong 
flags = args[0]; + target_ulong guestid = args[1]; + target_ulong vcpuid = args[2]; + SpaprMachineStateNestedGuest *guest; + + if (flags) { /* don't handle any flags for now */ + return H_UNSUPPORTED_FLAG; + } + + guest = spapr_get_nested_guest(spapr, guestid); + if (!guest) { + return H_P2; + } + + if (vcpuid < guest->nr_vcpus) { + qemu_log_mask(LOG_UNIMP, "vcpuid " TARGET_FMT_ld " already in use.", + vcpuid); + return H_IN_USE; + } + /* linear vcpuid allocation only */ + assert(vcpuid == guest->nr_vcpus); + + if (guest->nr_vcpus >= PAPR_NESTED_GUEST_VCPU_MAX) { + return H_P3; + } + + SpaprMachineStateNestedGuestVcpu *vcpus, *curr_vcpu; + vcpus = g_try_renew(struct SpaprMachineStateNestedGuestVcpu, + guest->vcpus, + guest->nr_vcpus + 1); + if (!vcpus) { + return H_NO_MEM; + } + guest->vcpus = vcpus; + curr_vcpu = &vcpus[guest->nr_vcpus]; + memset(curr_vcpu, 0, sizeof(SpaprMachineStateNestedGuestVcpu)); + + curr_vcpu->enabled = true; + guest->nr_vcpus++; + + return H_SUCCESS; +} + +static target_ulong getset_state(SpaprMachineStateNestedGuest *guest, + uint64_t vcpuid, + struct guest_state_request *gsr) +{ + void *ptr; + uint16_t id; + struct guest_state_element *element; + struct guest_state_element_type *type; + int64_t lenleft, num_elements; + + lenleft = gsr->len; + + if (!guest_state_request_check(gsr)) { + return H_P3; + } + + num_elements = be32_to_cpu(gsr->gsb->num_elements); + element = gsr->gsb->elements; + /* Process the elements */ + while (num_elements) { + type = NULL; + /* log_element(element, gsr); */ + + id = be16_to_cpu(element->id); + if (id == GSB_HV_VCPU_IGNORED_ID) { + goto next_element; + } + + type = guest_state_element_type_find(id); + assert(type); + + /* Get pointer to guest data to get/set */ + if (type->location && type->copy) { + ptr = type->location(guest, vcpuid); + assert(ptr); + if (!~(type->mask) && is_gsr_invalid(gsr, element, type)) { + return H_INVALID_ELEMENT_VALUE; + } + type->copy(ptr + type->offset, element->value, + gsr->flags & GUEST_STATE_REQUEST_SET ? true : false); + } + +next_element: + element = guest_state_element_next(element, &lenleft, &num_elements); + } + + return H_SUCCESS; +} + +static target_ulong map_and_getset_state(PowerPCCPU *cpu, + SpaprMachineStateNestedGuest *guest, + uint64_t vcpuid, + struct guest_state_request *gsr) +{ + target_ulong rc; + int64_t len; + bool is_write; + + len = gsr->len; + /* only get_state would require write access to the provided buffer */ + is_write = (gsr->flags & GUEST_STATE_REQUEST_SET) ? 
false : true; + gsr->gsb = address_space_map(CPU(cpu)->as, gsr->buf, (uint64_t *)&len, + is_write, MEMTXATTRS_UNSPECIFIED); + if (!gsr->gsb) { + rc = H_P3; + goto out1; + } + + if (len != gsr->len) { + rc = H_P3; + goto out1; + } + + rc = getset_state(guest, vcpuid, gsr); + +out1: + address_space_unmap(CPU(cpu)->as, gsr->gsb, len, is_write, len); + return rc; +} + +static target_ulong h_guest_getset_state(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong *args, + bool set) +{ + target_ulong flags = args[0]; + target_ulong lpid = args[1]; + target_ulong vcpuid = args[2]; + target_ulong buf = args[3]; + target_ulong buflen = args[4]; + struct guest_state_request gsr; + SpaprMachineStateNestedGuest *guest; + + guest = spapr_get_nested_guest(spapr, lpid); + if (!guest) { + return H_P2; + } + gsr.buf = buf; + assert(buflen <= GSB_MAX_BUF_SIZE); + gsr.len = buflen; + gsr.flags = 0; + if (flags & H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) { + gsr.flags |= GUEST_STATE_REQUEST_GUEST_WIDE; + } + if (flags & !H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) { + return H_PARAMETER; /* flag not supported yet */ + } + + if (set) { + gsr.flags |= GUEST_STATE_REQUEST_SET; + } + return map_and_getset_state(cpu, guest, vcpuid, &gsr); +} + +static target_ulong h_guest_set_state(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + return h_guest_getset_state(cpu, spapr, args, true); +} + +static target_ulong h_guest_get_state(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + return h_guest_getset_state(cpu, spapr, args, false); +} + +static void exit_nested_store_l2(PowerPCCPU *cpu, int excp, + SpaprMachineStateNestedGuestVcpu *vcpu) +{ + CPUPPCState *env = &cpu->env; + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + target_ulong now, hdar, hdsisr, asdr; + + assert(sizeof(env->gpr) == sizeof(vcpu->state.gpr)); /* sanity check */ + + now = cpu_ppc_load_tbl(env); /* L2 timebase */ + now -= vcpu->tb_offset; /* L1 timebase */ + vcpu->state.dec_expiry_tb = now - cpu_ppc_load_decr(env); + cpu_ppc_store_decr(env, spapr_cpu->nested_host_state->dec_expiry_tb - now); + /* backup hdar, hdsisr, asdr if reqd later below */ + hdar = vcpu->state.hdar; + hdsisr = vcpu->state.hdsisr; + asdr = vcpu->state.asdr; + + nested_save_state(&vcpu->state, cpu); + + if (excp == POWERPC_EXCP_MCHECK || + excp == POWERPC_EXCP_RESET || + excp == POWERPC_EXCP_SYSCALL) { + vcpu->state.nip = env->spr[SPR_SRR0]; + vcpu->state.msr = env->spr[SPR_SRR1] & env->msr_mask; + } else { + vcpu->state.nip = env->spr[SPR_HSRR0]; + vcpu->state.msr = env->spr[SPR_HSRR1] & env->msr_mask; + } + + /* hdar, hdsisr, asdr should be retained unless certain exceptions */ + if ((excp != POWERPC_EXCP_HDSI) && (excp != POWERPC_EXCP_HISI)) { + vcpu->state.asdr = asdr; + } else if (excp != POWERPC_EXCP_HDSI) { + vcpu->state.hdar = hdar; + vcpu->state.hdsisr = hdsisr; + } +} + +static int get_exit_ids(uint64_t srr0, uint16_t ids[16]) +{ + int nr; + + switch (srr0) { + case 0xc00: + nr = 10; + ids[0] = GSB_VCPU_GPR3; + ids[1] = GSB_VCPU_GPR4; + ids[2] = GSB_VCPU_GPR5; + ids[3] = GSB_VCPU_GPR6; + ids[4] = GSB_VCPU_GPR7; + ids[5] = GSB_VCPU_GPR8; + ids[6] = GSB_VCPU_GPR9; + ids[7] = GSB_VCPU_GPR10; + ids[8] = GSB_VCPU_GPR11; + ids[9] = GSB_VCPU_GPR12; + break; + case 0xe00: + nr = 5; + ids[0] = GSB_VCPU_SPR_HDAR; + ids[1] = GSB_VCPU_SPR_HDSISR; + ids[2] = GSB_VCPU_SPR_ASDR; + ids[3] = GSB_VCPU_SPR_NIA; + ids[4] = GSB_VCPU_SPR_MSR; + break; + case 0xe20: + nr = 4; + ids[0] = 
GSB_VCPU_SPR_HDAR; + ids[1] = GSB_VCPU_SPR_ASDR; + ids[2] = GSB_VCPU_SPR_NIA; + ids[3] = GSB_VCPU_SPR_MSR; + break; + case 0xe40: + nr = 3; + ids[0] = GSB_VCPU_SPR_HEIR; + ids[1] = GSB_VCPU_SPR_NIA; + ids[2] = GSB_VCPU_SPR_MSR; + break; + case 0xf80: + nr = 3; + ids[0] = GSB_VCPU_SPR_HFSCR; + ids[1] = GSB_VCPU_SPR_NIA; + ids[2] = GSB_VCPU_SPR_MSR; + break; + default: + nr = 0; + break; + } + + return nr; +} + +static void exit_process_output_buffer(PowerPCCPU *cpu, + SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid, + target_ulong *r3) +{ + SpaprMachineStateNestedGuestVcpu *vcpu = &guest->vcpus[vcpuid]; + struct guest_state_request gsr; + struct guest_state_buffer *gsb; + struct guest_state_element *element; + struct guest_state_element_type *type; + int exit_id_count = 0; + uint16_t exit_cause_ids[16]; + hwaddr len; + + len = vcpu->runbufout.size; + gsb = address_space_map(CPU(cpu)->as, vcpu->runbufout.addr, &len, true, + MEMTXATTRS_UNSPECIFIED); + if (!gsb || len != vcpu->runbufout.size) { + address_space_unmap(CPU(cpu)->as, gsb, len, true, len); + *r3 = H_P2; + return; + } + + exit_id_count = get_exit_ids(*r3, exit_cause_ids); + + /* Create a buffer of elements to send back */ + gsb->num_elements = cpu_to_be32(exit_id_count); + element = gsb->elements; + for (int i = 0; i < exit_id_count; i++) { + type = guest_state_element_type_find(exit_cause_ids[i]); + assert(type); + element->id = cpu_to_be16(exit_cause_ids[i]); + element->size = cpu_to_be16(type->size); + element = guest_state_element_next(element, NULL, NULL); + } + gsr.gsb = gsb; + gsr.len = VCPU_OUT_BUF_MIN_SZ; + gsr.flags = 0; /* get + never guest wide */ + getset_state(guest, vcpuid, &gsr); + + address_space_unmap(CPU(cpu)->as, gsb, len, true, len); + return; +} + +static +void spapr_exit_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, int excp) +{ + CPUPPCState *env = &cpu->env; + CPUState *cs = CPU(cpu); + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + target_ulong r3_return = env->excp_vectors[excp]; /* hcall return value */ + target_ulong lpid = 0, vcpuid = 0; + struct SpaprMachineStateNestedGuestVcpu *vcpu = NULL; + struct SpaprMachineStateNestedGuest *guest = NULL; + + lpid = spapr_cpu->nested_host_state->gpr[5]; + vcpuid = spapr_cpu->nested_host_state->gpr[6]; + guest = spapr_get_nested_guest(spapr, lpid); + assert(guest); + spapr_nested_vcpu_check(guest, vcpuid, false); + vcpu = &guest->vcpus[vcpuid]; + + exit_nested_store_l2(cpu, excp, vcpu); + /* do the output buffer for run_vcpu*/ + exit_process_output_buffer(cpu, guest, vcpuid, &r3_return); + + assert(env->spr[SPR_LPIDR] != 0); + nested_load_state(cpu, spapr_cpu->nested_host_state); + cpu_ppc_decrease_tb_by_offset(env, vcpu->tb_offset); + env->gpr[3] = H_SUCCESS; + env->gpr[4] = r3_return; + nested_post_load_state(env, cs); + cpu_ppc_hdecr_exit(env); + + spapr_cpu->in_nested = false; + g_free(spapr_cpu->nested_host_state); + spapr_cpu->nested_host_state = NULL; +} + +void spapr_exit_nested(PowerPCCPU *cpu, int excp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + assert(spapr_cpu->in_nested); + if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) { + spapr_exit_nested_hv(cpu, excp); + } else if (spapr_nested_api(spapr) == NESTED_API_PAPR) { + spapr_exit_nested_papr(spapr, cpu, excp); + } else { + g_assert_not_reached(); + } +} + +static void nested_papr_load_l2(PowerPCCPU *cpu, + CPUPPCState *env, + SpaprMachineStateNestedGuestVcpu *vcpu, + target_ulong now) +{ + 
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + target_ulong lpcr, lpcr_mask, hdec; + lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER; + + assert(vcpu); + assert(sizeof(env->gpr) == sizeof(vcpu->state.gpr)); + nested_load_state(cpu, &vcpu->state); + lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | + (vcpu->state.lpcr & lpcr_mask); + lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE; + lpcr &= ~LPCR_LPES0; + env->spr[SPR_LPCR] = lpcr & pcc->lpcr_mask; + + hdec = vcpu->hdecr_expiry_tb - now; + cpu_ppc_store_decr(env, vcpu->state.dec_expiry_tb - now); + cpu_ppc_hdecr_init(env); + cpu_ppc_store_hdecr(env, hdec); + + cpu_ppc_increase_tb_by_offset(env, vcpu->tb_offset); +} + +static void nested_papr_run_vcpu(PowerPCCPU *cpu, + uint64_t lpid, + SpaprMachineStateNestedGuestVcpu *vcpu) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + CPUPPCState *env = &cpu->env; + CPUState *cs = CPU(cpu); + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + target_ulong now = cpu_ppc_load_tbl(env); + + assert(env->spr[SPR_LPIDR] == 0); + assert(spapr->nested.api); /* ensure API version is initialized */ + spapr_cpu->nested_host_state = g_try_new(struct nested_ppc_state, 1); + assert(spapr_cpu->nested_host_state); + nested_save_state(spapr_cpu->nested_host_state, cpu); + spapr_cpu->nested_host_state->dec_expiry_tb = now - cpu_ppc_load_decr(env); + nested_papr_load_l2(cpu, env, vcpu, now); + env->spr[SPR_LPIDR] = lpid; /* post load l2 */ + + spapr_cpu->in_nested = true; + nested_post_load_state(env, cs); +} + +static target_ulong h_guest_run_vcpu(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + target_ulong flags = args[0]; + target_ulong lpid = args[1]; + target_ulong vcpuid = args[2]; + struct SpaprMachineStateNestedGuestVcpu *vcpu; + struct guest_state_request gsr; + SpaprMachineStateNestedGuest *guest; + target_ulong rc; + + if (flags) /* don't handle any flags for now */ + return H_PARAMETER; + + guest = spapr_get_nested_guest(spapr, lpid); + if (!guest) { + return H_P2; + } + if (!spapr_nested_vcpu_check(guest, vcpuid, true)) { + return H_P3; + } + + if (guest->parttbl[0] == 0) { + /* At least need a partition scoped radix tree */ + return H_NOT_AVAILABLE; + } + + vcpu = &guest->vcpus[vcpuid]; + + /* Read run_vcpu input buffer to update state */ + gsr.buf = vcpu->runbufin.addr; + gsr.len = vcpu->runbufin.size; + gsr.flags = GUEST_STATE_REQUEST_SET; /* Thread wide + writing */ + rc = map_and_getset_state(cpu, guest, vcpuid, &gsr); + if (rc == H_SUCCESS) { + nested_papr_run_vcpu(cpu, lpid, vcpu); + } else { + env->gpr[3] = rc; + } + return env->gpr[3]; +} + +void spapr_register_nested_hv(void) { spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_set_ptbl); spapr_register_hypercall(KVMPPC_H_ENTER_NESTED, h_enter_nested); spapr_register_hypercall(KVMPPC_H_TLB_INVALIDATE, h_tlb_invalidate); spapr_register_hypercall(KVMPPC_H_COPY_TOFROM_GUEST, h_copy_tofrom_guest); } + +void spapr_unregister_nested_hv(void) +{ + spapr_unregister_hypercall(KVMPPC_H_SET_PARTITION_TABLE); + spapr_unregister_hypercall(KVMPPC_H_ENTER_NESTED); + spapr_unregister_hypercall(KVMPPC_H_TLB_INVALIDATE); + spapr_unregister_hypercall(KVMPPC_H_COPY_TOFROM_GUEST); +} + +void spapr_register_nested_papr(void) +{ + spapr_register_hypercall(H_GUEST_GET_CAPABILITIES, + h_guest_get_capabilities); + spapr_register_hypercall(H_GUEST_SET_CAPABILITIES, + h_guest_set_capabilities); + spapr_register_hypercall(H_GUEST_CREATE, 
h_guest_create); + spapr_register_hypercall(H_GUEST_DELETE, h_guest_delete); + spapr_register_hypercall(H_GUEST_CREATE_VCPU, h_guest_create_vcpu); + spapr_register_hypercall(H_GUEST_SET_STATE, h_guest_set_state); + spapr_register_hypercall(H_GUEST_GET_STATE, h_guest_get_state); + spapr_register_hypercall(H_GUEST_RUN_VCPU, h_guest_run_vcpu); +} + +void spapr_unregister_nested_papr(void) +{ + spapr_unregister_hypercall(H_GUEST_GET_CAPABILITIES); + spapr_unregister_hypercall(H_GUEST_SET_CAPABILITIES); + spapr_unregister_hypercall(H_GUEST_CREATE); + spapr_unregister_hypercall(H_GUEST_DELETE); + spapr_unregister_hypercall(H_GUEST_CREATE_VCPU); + spapr_unregister_hypercall(H_GUEST_SET_STATE); + spapr_unregister_hypercall(H_GUEST_GET_STATE); + spapr_unregister_hypercall(H_GUEST_RUN_VCPU); +} + #else void spapr_exit_nested(PowerPCCPU *cpu, int excp) { g_assert_not_reached(); } -void spapr_register_nested(void) +void spapr_register_nested_hv(void) { /* DO NOTHING */ } + +void spapr_unregister_nested_hv(void) +{ + /* DO NOTHING */ +} + +bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu, + target_ulong lpid, ppc_v3_pate_t *entry) +{ + return false; +} + +bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, + target_ulong lpid, ppc_v3_pate_t *entry) +{ + return false; +} + +void spapr_register_nested_papr(void) +{ + /* DO NOTHING */ +} + +void spapr_unregister_nested_papr(void) +{ + /* DO NOTHING */ +} + +void spapr_nested_gsb_init(void) +{ + /* DO NOTHING */ +} + #endif diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index a3c4e52ce9..e3d5d8f2e2 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -121,6 +121,16 @@ struct type8_instance { }; static QTAILQ_HEAD(, type8_instance) type8 = QTAILQ_HEAD_INITIALIZER(type8); +/* type 9 instance for parsing */ +struct type9_instance { + const char *slot_designation, *pcidev; + uint8_t slot_type, slot_data_bus_width, current_usage, slot_length, + slot_characteristics1, slot_characteristics2; + uint16_t slot_id; + QTAILQ_ENTRY(type9_instance) next; +}; +static QTAILQ_HEAD(, type9_instance) type9 = QTAILQ_HEAD_INITIALIZER(type9); + static struct { size_t nvalues; char **values; @@ -380,6 +390,59 @@ static const QemuOptDesc qemu_smbios_type8_opts[] = { { /* end of list */ } }; +static const QemuOptDesc qemu_smbios_type9_opts[] = { + { + .name = "type", + .type = QEMU_OPT_NUMBER, + .help = "SMBIOS element type", + }, + { + .name = "slot_designation", + .type = QEMU_OPT_STRING, + .help = "string number for reference designation", + }, + { + .name = "slot_type", + .type = QEMU_OPT_NUMBER, + .help = "connector type", + }, + { + .name = "slot_data_bus_width", + .type = QEMU_OPT_NUMBER, + .help = "port type", + }, + { + .name = "current_usage", + .type = QEMU_OPT_NUMBER, + .help = "current usage", + }, + { + .name = "slot_length", + .type = QEMU_OPT_NUMBER, + .help = "system slot length", + }, + { + .name = "slot_id", + .type = QEMU_OPT_NUMBER, + .help = "system slot id", + }, + { + .name = "slot_characteristics1", + .type = QEMU_OPT_NUMBER, + .help = "slot characteristics1, see the spec", + }, + { + .name = "slot_characteristics2", + .type = QEMU_OPT_NUMBER, + .help = "slot characteristics2, see the spec", + }, + { + .name = "pci_device", + .type = QEMU_OPT_STRING, + .help = "PCI device, if provided." 
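For reference, a hedged usage sketch of the new table type (the values are purely illustrative; the option names mirror the descriptor list above and the numeric fields take the raw enumeration values from the SMBIOS "System Slots" chapter):

    -smbios type=9,slot_designation=SLOT1,slot_type=0x06,slot_data_bus_width=0x0A,current_usage=0x03,slot_length=0x04,slot_id=1,slot_characteristics1=0x04,slot_characteristics2=0x01

If the PCI device option is also supplied, smbios_build_type_9_table() below fills the segment/bus/device numbers from that device and requires it to sit on the PCI root bus; otherwise those fields are populated with 0xFF, as the spec requires for slots without bus/device/function information.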
+ static const QemuOptDesc qemu_smbios_type11_opts[] = { { .name = "type", @@ -609,6 +672,7 @@ bool smbios_skip_table(uint8_t type, bool required_table) #define T2_BASE 0x200 #define T3_BASE 0x300 #define T4_BASE 0x400 +#define T9_BASE 0x900 #define T11_BASE 0xe00 #define T16_BASE 0x1000 @@ -807,6 +871,65 @@ static void smbios_build_type_8_table(void) } } + +static void smbios_build_type_9_table(Error **errp) +{ + unsigned instance = 0; + struct type9_instance *t9; + + QTAILQ_FOREACH(t9, &type9, next) { + SMBIOS_BUILD_TABLE_PRE(9, T9_BASE + instance, true); + + SMBIOS_TABLE_SET_STR(9, slot_designation, t9->slot_designation); + t->slot_type = t9->slot_type; + t->slot_data_bus_width = t9->slot_data_bus_width; + t->current_usage = t9->current_usage; + t->slot_length = t9->slot_length; + t->slot_id = t9->slot_id; + t->slot_characteristics1 = t9->slot_characteristics1; + t->slot_characteristics2 = t9->slot_characteristics2; + + if (t9->pcidev) { + PCIDevice *pdev = NULL; + int rc = pci_qdev_find_device(t9->pcidev, &pdev); + if (rc != 0) { + error_setg(errp, + "No PCI device %s for SMBIOS type 9 entry %s", + t9->pcidev, t9->slot_designation); + return; + } + /* + * We only handle the case where the device is attached to + * the PCI root bus. The general case is more complex as + * bridges are enumerated later and the table would need + * to be updated at this moment. + */ + if (!pci_bus_is_root(pci_get_bus(pdev))) { + error_setg(errp, + "Cannot create type 9 entry for PCI device %s: " + "not attached to the root bus", + t9->pcidev); + return; + } + t->segment_group_number = cpu_to_le16(0); + t->bus_number = pci_dev_bus_num(pdev); + t->device_number = pdev->devfn; + } else { + /* + * Per the SMBIOS spec, for slots that are not of the PCI, AGP, PCI-X, + * or PCI-Express type and do not have bus/device/function + * information, 0FFh should be populated in the fields of Segment + * Group Number, Bus Number, and Device/Function Number.
+ */ + t->segment_group_number = 0xff; + t->bus_number = 0xff; + t->device_number = 0xff; + } + + SMBIOS_BUILD_TABLE_POST; + instance++; + } +} + static void smbios_build_type_11_table(void) { char count_str[128]; @@ -1126,6 +1249,7 @@ void smbios_get_tables(MachineState *ms, } smbios_build_type_8_table(); + smbios_build_type_9_table(errp); smbios_build_type_11_table(); #define MAX_DIMM_SZ (16 * GiB) @@ -1460,6 +1584,24 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) t8_i->port_type = qemu_opt_get_number(opts, "port_type", 0); QTAILQ_INSERT_TAIL(&type8, t8_i, next); return; + case 9: { + if (!qemu_opts_validate(opts, qemu_smbios_type9_opts, errp)) { + return; + } + struct type9_instance *t; + t = g_new0(struct type9_instance, 1); + save_opt(&t->slot_designation, opts, "slot_designation"); + t->slot_type = qemu_opt_get_number(opts, "slot_type", 0); + t->slot_data_bus_width = qemu_opt_get_number(opts, "slot_data_bus_width", 0); + t->current_usage = qemu_opt_get_number(opts, "current_usage", 0); + t->slot_length = qemu_opt_get_number(opts, "slot_length", 0); + t->slot_id = qemu_opt_get_number(opts, "slot_id", 0); + t->slot_characteristics1 = qemu_opt_get_number(opts, "slot_characteristics1", 0); + t->slot_characteristics2 = qemu_opt_get_number(opts, "slot_characteristics2", 0); + save_opt(&t->pcidev, opts, "pcidev"); + QTAILQ_INSERT_TAIL(&type9, t, next); + return; + } case 11: if (!qemu_opts_validate(opts, qemu_smbios_type11_opts, errp)) { return; diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index 77905d1994..13b6991179 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -30,6 +30,7 @@ vhost_user_write(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32"" vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p" # vhost-vdpa.c +vhost_vdpa_skipped_memory_section(int is_ram, int is_iommu, int is_protected, int is_ram_device, uint64_t first, uint64_t last, int page_mask) "is_ram=%d, is_iommu=%d, is_protected=%d, is_ram_device=%d iova_min=0x%"PRIx64" iova_last=0x%"PRIx64" page_mask=0x%x" vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8 vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8 vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 @@ -57,8 +58,8 @@ vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d" vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size: %llu refcnt: %d fd: %d log: %p" vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags: 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64 vhost_vdpa_set_vring_num(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u" -vhost_vdpa_set_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u" -vhost_vdpa_get_vring_base(void *dev, unsigned int 
index, unsigned int num) "dev: %p index: %u num: %u" +vhost_vdpa_set_dev_vring_base(void *dev, unsigned int index, unsigned int num, bool svq) "dev: %p index: %u num: %u svq: %d" +vhost_vdpa_get_vring_base(void *dev, unsigned int index, unsigned int num, bool svq) "dev: %p index: %u num: %u svq: %d" vhost_vdpa_set_vring_kick(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d" vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d" vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64 @@ -111,7 +112,7 @@ virtio_iommu_device_reset(void) "reset!" virtio_iommu_system_reset(void) "system reset!" virtio_iommu_get_features(uint64_t features) "device supports features=0x%"PRIx64 virtio_iommu_device_status(uint8_t status) "driver status = %d" -virtio_iommu_get_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_start, uint32_t domain_end, uint32_t probe_size, uint8_t bypass) "page_size_mask=0x%"PRIx64" input range start=0x%"PRIx64" input range end=0x%"PRIx64" domain range start=%d domain range end=%d probe_size=0x%x bypass=0x%x" +virtio_iommu_get_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_start, uint32_t domain_end, uint32_t probe_size, uint8_t bypass) "page_size_mask=0x%"PRIx64" input range start=0x%"PRIx64" input range end=0x%"PRIx64" domain range start=%u domain range end=%u probe_size=0x%x bypass=0x%x" virtio_iommu_set_config(uint8_t bypass) "bypass=0x%x" virtio_iommu_attach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d" virtio_iommu_detach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d" diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index 1af8621481..cdf9af4a4b 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -1610,11 +1610,27 @@ vhost_user_backend_handle_shared_object_add(struct vhost_dev *dev, } static int -vhost_user_backend_handle_shared_object_remove(VhostUserShared *object) +vhost_user_backend_handle_shared_object_remove(struct vhost_dev *dev, + VhostUserShared *object) { QemuUUID uuid; memcpy(uuid.data, object->uuid, sizeof(object->uuid)); + switch (virtio_object_type(&uuid)) { + case TYPE_VHOST_DEV: + { + struct vhost_dev *owner = virtio_lookup_vhost_device(&uuid); + if (dev != owner) { + /* Not allowed to remove non-owned entries */ + return 0; + } + break; + } + default: + /* Not allowed to remove non-owned entries */ + return 0; + } + return virtio_remove_resource(&uuid); } @@ -1793,7 +1809,8 @@ static gboolean backend_read(QIOChannel *ioc, GIOCondition condition, ret = vhost_user_backend_handle_shared_object_add(dev, &payload.object); break; case VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE: - ret = vhost_user_backend_handle_shared_object_remove(&payload.object); + ret = vhost_user_backend_handle_shared_object_remove(dev, + &payload.object); break; case VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP: ret = vhost_user_backend_handle_shared_object_lookup(dev->opaque, ioc, diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index ddae494ca8..3bcd05cc22 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -47,12 +47,17 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, int page_mask) { Int128 llend; + bool is_ram = memory_region_is_ram(section->mr); + bool is_iommu = memory_region_is_iommu(section->mr); + bool is_protected = memory_region_is_protected(section->mr); - if ((!memory_region_is_ram(section->mr) && - !memory_region_is_iommu(section->mr)) 
|| - memory_region_is_protected(section->mr) || - /* vhost-vDPA doesn't allow MMIO to be mapped */ - memory_region_is_ram_device(section->mr)) { + /* vhost-vDPA doesn't allow MMIO to be mapped */ + bool is_ram_device = memory_region_is_ram_device(section->mr); + + if ((!is_ram && !is_iommu) || is_protected || is_ram_device) { + trace_vhost_vdpa_skipped_memory_section(is_ram, is_iommu, is_protected, + is_ram_device, iova_min, + iova_max, page_mask); return true; } @@ -69,7 +74,7 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, * size that maps to the kernel */ - if (!memory_region_is_iommu(section->mr)) { + if (!is_iommu) { llend = vhost_vdpa_section_end(section, page_mask); if (int128_gt(llend, int128_make64(iova_max))) { error_report("RAM section out of device range (max=0x%" PRIx64 @@ -555,6 +560,11 @@ static bool vhost_vdpa_first_dev(struct vhost_dev *dev) return v->index == 0; } +static bool vhost_vdpa_last_dev(struct vhost_dev *dev) +{ + return dev->vq_index + dev->nvqs == dev->vq_index_end; +} + static int vhost_vdpa_get_dev_features(struct vhost_dev *dev, uint64_t *features) { @@ -965,7 +975,10 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config, static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev, struct vhost_vring_state *ring) { - trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num); + struct vhost_vdpa *v = dev->opaque; + + trace_vhost_vdpa_set_dev_vring_base(dev, ring->index, ring->num, + v->shadow_vqs_enabled); return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring); } @@ -1315,7 +1328,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); } - if (dev->vq_index + dev->nvqs != dev->vq_index_end) { + if (!vhost_vdpa_last_dev(dev)) { return 0; } @@ -1337,7 +1350,7 @@ static void vhost_vdpa_reset_status(struct vhost_dev *dev) { struct vhost_vdpa *v = dev->opaque; - if (dev->vq_index + dev->nvqs != dev->vq_index_end) { + if (!vhost_vdpa_last_dev(dev)) { return; } @@ -1407,6 +1420,7 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev, if (v->shadow_vqs_enabled) { ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index); + trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, true); return 0; } @@ -1419,7 +1433,7 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev, } ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring); - trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num); + trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, false); return ret; } @@ -1447,7 +1461,15 @@ static int vhost_vdpa_set_vring_call(struct vhost_dev *dev, /* Remember last call fd because we can switch to SVQ anytime. */ vhost_svq_set_svq_call_fd(svq, file->fd); - if (v->shadow_vqs_enabled) { + /* + * When SVQ is transitioning to off, shadow_vqs_enabled has + * not been set back to false yet, but the underlying call fd + * will have to switch back to the guest notifier to signal the + * passthrough virtqueues. In other situations, SVQ's own call + * fd shall be used to signal the device model. 
+ */ + if (v->shadow_vqs_enabled && + v->shared->svq_switching != SVQ_TSTATE_DISABLING) { return 0; } diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 86623d55a5..1326c6ec41 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -29,6 +29,7 @@ #include "sysemu/reset.h" #include "sysemu/sysemu.h" #include "qemu/reserved-region.h" +#include "qemu/units.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "trace.h" @@ -1115,8 +1116,8 @@ static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr, } /* - * The default mask (TARGET_PAGE_MASK) is the smallest supported guest granule, - * for example 0xfffffffffffff000. When an assigned device has page size + * The default mask depends on the "granule" property. For example, with + * 4k granule, it is -(4 * KiB). When an assigned device has page size * restrictions due to the hardware IOMMU configuration, apply this restriction * to the mask. */ @@ -1313,8 +1314,32 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp) * in vfio realize */ s->config.bypass = s->boot_bypass; - s->config.page_size_mask = qemu_target_page_mask(); - s->config.input_range.end = UINT64_MAX; + if (s->aw_bits < 32 || s->aw_bits > 64) { + error_setg(errp, "aw-bits must be within [32,64]"); + return; + } + s->config.input_range.end = + s->aw_bits == 64 ? UINT64_MAX : BIT_ULL(s->aw_bits) - 1; + + switch (s->granule_mode) { + case GRANULE_MODE_4K: + s->config.page_size_mask = -(4 * KiB); + break; + case GRANULE_MODE_8K: + s->config.page_size_mask = -(8 * KiB); + break; + case GRANULE_MODE_16K: + s->config.page_size_mask = -(16 * KiB); + break; + case GRANULE_MODE_64K: + s->config.page_size_mask = -(64 * KiB); + break; + case GRANULE_MODE_HOST: + s->config.page_size_mask = qemu_real_host_page_mask(); + break; + default: + error_setg(errp, "Unsupported granule mode"); + } s->config.domain_range.end = UINT32_MAX; s->config.probe_size = VIOMMU_PROBE_SIZE; @@ -1522,6 +1547,9 @@ static Property virtio_iommu_properties[] = { DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus, TYPE_PCI_BUS, PCIBus *), DEFINE_PROP_BOOL("boot-bypass", VirtIOIOMMU, boot_bypass, true), + DEFINE_PROP_GRANULE_MODE("granule", VirtIOIOMMU, granule_mode, + GRANULE_MODE_HOST), + DEFINE_PROP_UINT8("aw-bits", VirtIOIOMMU, aw_bits, 64), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index cb6940fc0e..eaaf86402c 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -1442,6 +1442,155 @@ int virtio_pci_add_shm_cap(VirtIOPCIProxy *proxy, return virtio_pci_add_mem_cap(proxy, &cap.cap); } +/* Called within call_rcu(). 
*/ +static void bitmap_free_region_cache(BitmapMemoryRegionCaches *caches) +{ + assert(caches != NULL); + address_space_cache_destroy(&caches->bitmap); + g_free(caches); +} + +static void lm_disable(VirtIODevice *vdev) +{ + BitmapMemoryRegionCaches *caches; + caches = qatomic_read(&vdev->caches); + qatomic_rcu_set(&vdev->caches, NULL); + if (caches) { + call_rcu(caches, bitmap_free_region_cache, rcu); + } +} + +static void lm_enable(VirtIODevice *vdev) +{ + BitmapMemoryRegionCaches *old = vdev->caches; + BitmapMemoryRegionCaches *new = NULL; + hwaddr addr, end, size; + int64_t len; + + addr = vdev->lm_base_addr_low | ((hwaddr)(vdev->lm_base_addr_high) << 32); + end = vdev->lm_end_addr_low | ((hwaddr)(vdev->lm_end_addr_high) << 32); + size = end - addr; + if (size <= 0) { + error_report("Invalid lm size."); + return; + } + + new = g_new0(BitmapMemoryRegionCaches, 1); + len = address_space_cache_init(&new->bitmap, vdev->dma_as, addr, size, + true); + if (len < size) { + virtio_error(vdev, "Cannot map bitmap"); + goto err_bitmap; + } + qatomic_rcu_set(&vdev->caches, new); + + if (old) { + call_rcu(old, bitmap_free_region_cache, rcu); + } + + return; + +err_bitmap: + address_space_cache_destroy(&new->bitmap); + g_free(new); +} + +static uint64_t virtio_pci_lm_read(void *opaque, hwaddr addr, + unsigned size) +{ + VirtIOPCIProxy *proxy = opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + hwaddr offset_end = LM_VRING_STATE_OFFSET + + virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX; + uint32_t val; + int qid; + + if (vdev == NULL) { + return UINT64_MAX; + } + switch (addr) { + case LM_LOGGING_CTRL: + val = vdev->lm_logging_ctrl; + break; + case LM_BASE_ADDR_LOW: + val = vdev->lm_base_addr_low; + break; + case LM_BASE_ADDR_HIGH: + val = vdev->lm_base_addr_high; + break; + case LM_END_ADDR_LOW: + val = vdev->lm_end_addr_low; + break; + case LM_END_ADDR_HIGH: + val = vdev->lm_end_addr_high; + break; + default: + if (addr >= LM_VRING_STATE_OFFSET && addr <= offset_end) { + qid = (addr - LM_VRING_STATE_OFFSET) / + virtio_pci_queue_mem_mult(proxy); + val = virtio_queue_get_vring_states(vdev, qid); + } else + val = 0; + + break; + } + + return val; +} + +static void virtio_pci_lm_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + VirtIOPCIProxy *proxy = opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + hwaddr offset_end = LM_VRING_STATE_OFFSET + + virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX; + int qid; + + if (vdev == NULL) { + return; + } + + switch (addr) { + case LM_LOGGING_CTRL: + vdev->lm_logging_ctrl = val; + switch (val) { + case LM_DISABLE: + lm_disable(vdev); + break; + case LM_ENABLE: + lm_enable(vdev); + break; + default: + virtio_error(vdev, "Unsupported LM_LOGGING_CTRL value: %"PRIx64, + val); + break; + }; + + break; + case LM_BASE_ADDR_LOW: + vdev->lm_base_addr_low = val; + break; + case LM_BASE_ADDR_HIGH: + vdev->lm_base_addr_high = val; + break; + case LM_END_ADDR_LOW: + vdev->lm_end_addr_low = val; + break; + case LM_END_ADDR_HIGH: + vdev->lm_end_addr_high = val; + break; + default: + if (addr >= LM_VRING_STATE_OFFSET && addr <= offset_end) { + qid = (addr - LM_VRING_STATE_OFFSET) / + virtio_pci_queue_mem_mult(proxy); + virtio_queue_set_vring_states(vdev, qid, val); + } else + virtio_error(vdev, "Unsupported addr: %"PRIx64, addr); + break; + } +} + +static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr, unsigned size) { @@ -1823,6 +1972,15 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy, },
.endianness = DEVICE_LITTLE_ENDIAN, }; + static const MemoryRegionOps lm_ops = { + .read = virtio_pci_lm_read, + .write = virtio_pci_lm_write, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, + }; g_autoptr(GString) name = g_string_new(NULL); g_string_printf(name, "virtio-pci-common-%s", vdev_name); @@ -1859,6 +2017,14 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy, proxy, name->str, proxy->notify_pio.size); + if (proxy->flags & VIRTIO_PCI_FLAG_VDPA) { + g_string_printf(name, "virtio-pci-lm-%s", vdev_name); + memory_region_init_io(&proxy->lm.mr, OBJECT(proxy), + &lm_ops, + proxy, + name->str, + proxy->lm.size); + } } static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy, @@ -2021,6 +2187,10 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp) virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap); virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap); virtio_pci_modern_mem_region_map(proxy, &proxy->notify, ¬ify.cap); + if (proxy->flags & VIRTIO_PCI_FLAG_VDPA) { + memory_region_add_subregion(&proxy->modern_bar, + proxy->lm.offset, &proxy->lm.mr); + } if (modern_pio) { memory_region_init(&proxy->io_bar, OBJECT(proxy), @@ -2090,6 +2260,9 @@ static void virtio_pci_device_unplugged(DeviceState *d) virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr); virtio_pci_modern_mem_region_unmap(proxy, &proxy->device); virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify); + if (proxy->flags & VIRTIO_PCI_FLAG_VDPA) { + memory_region_del_subregion(&proxy->modern_bar, &proxy->lm.mr); + } if (modern_pio) { virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio); } @@ -2144,9 +2317,17 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp) proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG; /* subclasses can enforce modern, so do this unconditionally */ - memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci", - /* PCI BAR regions must be powers of 2 */ - pow2ceil(proxy->notify.offset + proxy->notify.size)); + if (!(proxy->flags & VIRTIO_PCI_FLAG_VDPA)) { + memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci", + /* PCI BAR regions must be powers of 2 */ + pow2ceil(proxy->notify.offset + proxy->notify.size)); + } else { + proxy->lm.offset = proxy->notify.offset + proxy->notify.size; + proxy->lm.size = 0x20 + VIRTIO_QUEUE_MAX * 4; + memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci", + /* PCI BAR regions must be powers of 2 */ + pow2ceil(proxy->lm.offset + proxy->lm.size)); + } if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) { proxy->disable_legacy = pcie_port ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; @@ -2301,6 +2482,8 @@ static Property virtio_pci_properties[] = { VIRTIO_PCI_FLAG_INIT_FLR_BIT, true), DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_AER_BIT, false), + DEFINE_PROP_BIT("vdpa", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_VDPA_BIT, false), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index d229755eae..fb6b4ccd83 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -3368,6 +3368,18 @@ static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev, return vdev->vq[n].last_avail_idx; } +static uint32_t virtio_queue_split_get_vring_states(VirtIODevice *vdev, + int n) +{ + struct VirtQueue *vq = &vdev->vq[n]; + uint16_t avail, used; + + avail = vq->last_avail_idx; + used = vq->used_idx; + + return avail | (uint32_t)used << 16; +} + unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n) { if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { @@ -3377,6 +3389,33 @@ unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n) } } +unsigned int virtio_queue_get_vring_states(VirtIODevice *vdev, int n) +{ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return -1; + } else { + return virtio_queue_split_get_vring_states(vdev, n); + } +} + +static void virtio_queue_split_set_vring_states(VirtIODevice *vdev, + int n, uint32_t idx) +{ + struct VirtQueue *vq = &vdev->vq[n]; + vq->last_avail_idx = (uint16_t)(idx & 0xffff); + vq->shadow_avail_idx = (uint16_t)(idx & 0xffff); + vq->used_idx = (uint16_t)(idx >> 16); +} + +void virtio_queue_set_vring_states(VirtIODevice *vdev, int n, uint32_t idx) +{ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return; + } else { + virtio_queue_split_set_vring_states(vdev, n, idx); + } +} + static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev, int n, unsigned int idx) { diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c index 4f956d048e..7f59080ba7 100644 --- a/hw/xen/xen-mapcache.c +++ b/hw/xen/xen-mapcache.c @@ -476,11 +476,37 @@ static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer) g_free(entry); } -void xen_invalidate_map_cache_entry(uint8_t *buffer) +typedef struct XenMapCacheData { + Coroutine *co; + uint8_t *buffer; +} XenMapCacheData; + +static void xen_invalidate_map_cache_entry_bh(void *opaque) { + XenMapCacheData *data = opaque; + mapcache_lock(); - xen_invalidate_map_cache_entry_unlocked(buffer); + xen_invalidate_map_cache_entry_unlocked(data->buffer); mapcache_unlock(); + + aio_co_wake(data->co); +} + +void coroutine_mixed_fn xen_invalidate_map_cache_entry(uint8_t *buffer) +{ + if (qemu_in_coroutine()) { + XenMapCacheData data = { + .co = qemu_coroutine_self(), + .buffer = buffer, + }; + aio_bh_schedule_oneshot(qemu_get_current_aio_context(), + xen_invalidate_map_cache_entry_bh, &data); + qemu_coroutine_yield(); + } else { + mapcache_lock(); + xen_invalidate_map_cache_entry_unlocked(buffer); + mapcache_unlock(); + } } void xen_invalidate_map_cache(void) diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c index ba4cd78238..3edaeab1e3 100644 --- a/hw/xen/xen_pt_config_init.c +++ b/hw/xen/xen_pt_config_init.c @@ -292,7 +292,10 @@ static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s, uint32_t *data) { /* read PCI_HEADER_TYPE */ - *data = reg->init_val | 0x80; + *data = reg->init_val; + if ((PCI_DEVICE(s)->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) { + *data |= PCI_HEADER_TYPE_MULTI_FUNCTION; + } return 0; } @@ -677,7 
+680,7 @@ static XenPTRegInfo xen_pt_emu_reg_header0[] = { .size = 1, .init_val = 0x00, .ro_mask = 0xFF, - .emu_mask = 0x00, + .emu_mask = PCI_HEADER_TYPE_MULTI_FUNCTION, .init = xen_pt_header_type_reg_init, .u.b.read = xen_pt_byte_reg_read, .u.b.write = xen_pt_byte_reg_write,
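A minimal, self-contained illustration of why the emu_mask change in this last hunk matters, under the assumption (matching the usual read-merge semantics of xen_pt's register framework) that bits set in emu_mask are served from QEMU's emulated copy of the register while the remaining bits are read from the physical device. With PCI_HEADER_TYPE_MULTI_FUNCTION emulated, the header-type bit the guest sees follows QEMU's own QEMU_PCI_CAP_MULTIFUNCTION setting rather than whatever the host function reports. MERGE() below mimics that assumed merge; it is not the actual xen_pt implementation:

#include <stdint.h>
#include <stdio.h>

#define PCI_HEADER_TYPE_MULTI_FUNCTION 0x80

/* assumed merge: emulated bits from 'emu', the rest from 'host' */
#define MERGE(host, emu, emu_mask) \
    ((uint8_t)(((host) & ~(emu_mask)) | ((emu) & (emu_mask))))

int main(void)
{
    uint8_t host = 0x80; /* physical function reports multifunction     */
    uint8_t emu  = 0x00; /* QEMU exposes it as a single-function device */

    /* emu_mask bit clear: the merge picks the host's bit */
    printf("emu_mask=0x00: guest reads 0x%02x\n", MERGE(host, emu, 0x00));
    /* emu_mask bit set: the merge picks QEMU's emulated bit */
    printf("emu_mask=0x80: guest reads 0x%02x\n",
           MERGE(host, emu, PCI_HEADER_TYPE_MULTI_FUNCTION));
    return 0;
}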