Diffstat (limited to 'hw')
200 files changed, 4443 insertions, 2024 deletions
diff --git a/hw/9pfs/trace-events b/hw/9pfs/trace-events index 881e4c4dd8..c0a0a4ab5d 100644 --- a/hw/9pfs/trace-events +++ b/hw/9pfs/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/9pfs/virtio-9p.c +# 9p.c v9fs_rcancel(uint16_t tag, uint8_t id) "tag %d id %d" v9fs_rerror(uint16_t tag, uint8_t id, int err) "tag %d id %d err %d" v9fs_version(uint16_t tag, uint8_t id, int32_t msize, char* version) "tag %d id %d msize %d version %s" diff --git a/hw/Kconfig b/hw/Kconfig index d5ecd02070..88b9f15007 100644 --- a/hw/Kconfig +++ b/hw/Kconfig @@ -26,6 +26,7 @@ source pci-bridge/Kconfig source pci-host/Kconfig source pcmcia/Kconfig source pci/Kconfig +source rdma/Kconfig source scsi/Kconfig source sd/Kconfig source smbios/Kconfig diff --git a/hw/acpi/bios-linker-loader.c b/hw/acpi/bios-linker-loader.c index d16b8bbcb1..626c04a39f 100644 --- a/hw/acpi/bios-linker-loader.c +++ b/hw/acpi/bios-linker-loader.c @@ -283,6 +283,8 @@ void bios_linker_loader_add_pointer(BIOSLinker *linker, const BiosLinkerFileEntry *source_file = bios_linker_find_file(linker, src_file); + assert(dst_file); + assert(source_file); assert(dst_patched_offset < dst_file->blob->len); assert(dst_patched_offset + dst_patched_size <= dst_file->blob->len); assert(src_offset < source_file->blob->len); diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c index c5d8646abc..e53dfe1ee3 100644 --- a/hw/acpi/ich9.c +++ b/hw/acpi/ich9.c @@ -483,13 +483,24 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm, Error **errp) NULL); } +void ich9_pm_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) && + !lpc->pm.acpi_memory_hotplug.is_enabled) + error_setg(errp, + "memory hotplug is not enabled: %s.memory-hotplug-support " + "is not set", object_get_typename(OBJECT(lpc))); +} + void ich9_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev); - if (lpc->pm.acpi_memory_hotplug.is_enabled && - object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { nvdimm_acpi_plug_cb(hotplug_dev, dev); } else { diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c index e53b2cb681..9fdad6dc3f 100644 --- a/hw/acpi/nvdimm.c +++ b/hw/acpi/nvdimm.c @@ -382,7 +382,7 @@ nvdimm_build_structure_caps(GArray *structures, uint32_t capabilities) nfit_caps->capabilities = cpu_to_le32(capabilities); } -static GArray *nvdimm_build_device_structure(AcpiNVDIMMState *state) +static GArray *nvdimm_build_device_structure(NVDIMMState *state) { GSList *device_list = nvdimm_get_device_list(); GArray *structures = g_array_new(false, true /* clear */, 1); @@ -416,7 +416,7 @@ static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf) fit_buf->fit = g_array_new(false, true /* clear */, 1); } -static void nvdimm_build_fit_buffer(AcpiNVDIMMState *state) +static void nvdimm_build_fit_buffer(NVDIMMState *state) { NvdimmFitBuffer *fit_buf = &state->fit_buf; @@ -425,12 +425,12 @@ static void nvdimm_build_fit_buffer(AcpiNVDIMMState *state) fit_buf->dirty = true; } -void nvdimm_plug(AcpiNVDIMMState *state) +void nvdimm_plug(NVDIMMState *state) { nvdimm_build_fit_buffer(state); } -static void nvdimm_build_nfit(AcpiNVDIMMState *state, GArray *table_offsets, +static void nvdimm_build_nfit(NVDIMMState *state, GArray *table_offsets, GArray 
*table_data, BIOSLinker *linker) { NvdimmFitBuffer *fit_buf = &state->fit_buf; @@ -570,7 +570,7 @@ nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr) #define NVDIMM_QEMU_RSVD_HANDLE_ROOT 0x10000 /* Read FIT data, defined in docs/specs/acpi_nvdimm.txt. */ -static void nvdimm_dsm_func_read_fit(AcpiNVDIMMState *state, NvdimmDsmIn *in, +static void nvdimm_dsm_func_read_fit(NVDIMMState *state, NvdimmDsmIn *in, hwaddr dsm_mem_addr) { NvdimmFitBuffer *fit_buf = &state->fit_buf; @@ -619,7 +619,7 @@ exit: } static void -nvdimm_dsm_handle_reserved_root_method(AcpiNVDIMMState *state, +nvdimm_dsm_handle_reserved_root_method(NVDIMMState *state, NvdimmDsmIn *in, hwaddr dsm_mem_addr) { switch (in->function) { @@ -863,7 +863,7 @@ nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size) static void nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { - AcpiNVDIMMState *state = opaque; + NVDIMMState *state = opaque; NvdimmDsmIn *in; hwaddr dsm_mem_addr = val; @@ -925,7 +925,7 @@ void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev) } } -void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io, +void nvdimm_init_acpi_state(NVDIMMState *state, MemoryRegion *io, FWCfgState *fw_cfg, Object *owner) { memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state, @@ -992,7 +992,7 @@ static void nvdimm_build_common_dsm(Aml *dev) field = aml_field(NVDIMM_DSM_IOPORT, AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE); aml_append(field, aml_named_field(NVDIMM_DSM_NOTIFY, - sizeof(uint32_t) * BITS_PER_BYTE)); + NVDIMM_ACPI_IO_LEN * BITS_PER_BYTE)); aml_append(method, field); /* @@ -1086,7 +1086,7 @@ static void nvdimm_build_common_dsm(Aml *dev) */ aml_append(method, aml_store(handle, aml_name(NVDIMM_DSM_HANDLE))); aml_append(method, aml_store(aml_arg(1), aml_name(NVDIMM_DSM_REVISION))); - aml_append(method, aml_store(aml_arg(2), aml_name(NVDIMM_DSM_FUNCTION))); + aml_append(method, aml_store(function, aml_name(NVDIMM_DSM_FUNCTION))); /* * The fourth parameter (Arg3) of _DSM is a package which contains @@ -1260,7 +1260,7 @@ static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots) } static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data, - BIOSLinker *linker, GArray *dsm_dma_arrea, + BIOSLinker *linker, GArray *dsm_dma_area, uint32_t ram_slots) { Aml *ssdt, *sb_scope, *dev; @@ -1307,7 +1307,7 @@ static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data, NVDIMM_ACPI_MEM_ADDR); bios_linker_loader_alloc(linker, - NVDIMM_DSM_MEM_FILE, dsm_dma_arrea, + NVDIMM_DSM_MEM_FILE, dsm_dma_area, sizeof(NvdimmDsmIn), false /* high memory */); bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, mem_addr_offset, sizeof(uint32_t), @@ -1319,7 +1319,7 @@ static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data, } void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data, - BIOSLinker *linker, AcpiNVDIMMState *state, + BIOSLinker *linker, NVDIMMState *state, uint32_t ram_slots) { GSList *device_list; diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c index 7b98121070..9c079d6834 100644 --- a/hw/acpi/piix4.c +++ b/hw/acpi/piix4.c @@ -380,9 +380,17 @@ static void piix4_pm_powerdown_req(Notifier *n, void *opaque) static void piix4_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { + PIIX4PMState *s = PIIX4_PM(hotplug_dev); + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { acpi_pcihp_device_pre_plug_cb(hotplug_dev, dev, errp); - } else if 
(!object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) && + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + if (!s->acpi_memory_hotplug.is_enabled) { + error_setg(errp, + "memory hotplug is not enabled: %s.memory-hotplug-support " + "is not set", object_get_typename(OBJECT(s))); + } + } else if ( !object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { error_setg(errp, "acpi: device pre plug request for not supported" " device type: %s", object_get_typename(OBJECT(dev))); @@ -394,8 +402,7 @@ static void piix4_device_plug_cb(HotplugHandler *hotplug_dev, { PIIX4PMState *s = PIIX4_PM(hotplug_dev); - if (s->acpi_memory_hotplug.is_enabled && - object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { nvdimm_acpi_plug_cb(hotplug_dev, dev); } else { diff --git a/hw/acpi/trace-events b/hw/acpi/trace-events index df0024f8b2..6272d8a9e7 100644 --- a/hw/acpi/trace-events +++ b/hw/acpi/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/acpi/memory_hotplug.c +# memory_hotplug.c mhp_acpi_invalid_slot_selected(uint32_t slot) "0x%"PRIx32 mhp_acpi_ejecting_invalid_slot(uint32_t slot) "0x%"PRIx32 mhp_acpi_read_addr_lo(uint32_t slot, uint32_t addr) "slot[0x%"PRIx32"] addr lo: 0x%"PRIx32 @@ -17,7 +17,7 @@ mhp_acpi_clear_remove_evt(uint32_t slot) "slot[0x%"PRIx32"] clear remove event" mhp_acpi_pc_dimm_deleted(uint32_t slot) "slot[0x%"PRIx32"] pc-dimm deleted" mhp_acpi_pc_dimm_delete_failed(uint32_t slot) "slot[0x%"PRIx32"] pc-dimm delete failed" -# hw/acpi/cpu.c +# cpu.c cpuhp_acpi_invalid_idx_selected(uint32_t idx) "0x%"PRIx32 cpuhp_acpi_read_flags(uint32_t idx, uint8_t flags) "idx[0x%"PRIx32"] flags: 0x%"PRIx8 cpuhp_acpi_write_idx(uint32_t idx) "set active cpu idx: 0x%"PRIx32 @@ -31,6 +31,6 @@ cpuhp_acpi_ejecting_cpu(uint32_t idx) "0x%"PRIx32 cpuhp_acpi_write_ost_ev(uint32_t slot, uint32_t ev) "idx[0x%"PRIx32"] OST EVENT: 0x%"PRIx32 cpuhp_acpi_write_ost_status(uint32_t slot, uint32_t st) "idx[0x%"PRIx32"] OST STATUS: 0x%"PRIx32 -# hw/acpi/tco.c +# tco.c tco_timer_reload(int ticks, int msec) "ticks=%d (%d ms)" tco_timer_expired(int timeouts_no, bool strap, bool no_reboot) "timeouts_no=%d no_reboot=%d/%d" diff --git a/hw/alpha/Kconfig b/hw/alpha/Kconfig index 22cefd9577..15c59ff264 100644 --- a/hw/alpha/Kconfig +++ b/hw/alpha/Kconfig @@ -2,6 +2,7 @@ config DP264 bool imply PCI_DEVICES imply TEST_DEVICES + imply E1000_PCI select I82374 select I8254 select I8259 diff --git a/hw/alpha/trace-events b/hw/alpha/trace-events index 46024cca0b..5b8315f27f 100644 --- a/hw/alpha/trace-events +++ b/hw/alpha/trace-events @@ -1,4 +1,4 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/alpha/pci.c +# pci.c alpha_pci_iack_write(void) "" diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index 996812498d..1c23ebd992 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -19,6 +19,8 @@ #include "hw/arm/aspeed_soc.h" #include "hw/boards.h" #include "hw/i2c/smbus_eeprom.h" +#include "hw/misc/pca9552.h" +#include "hw/misc/tmp105.h" #include "qemu/log.h" #include "sysemu/block-backend.h" #include "hw/loader.h" @@ -267,7 +269,8 @@ static void ast2500_evb_i2c_init(AspeedBoardState *bmc) eeprom_buf); /* The AST2500 EVB expects a LM75 but a TMP105 is compatible */ - i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 7), "tmp105", 0x4d); + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 7), + TYPE_TMP105, 0x4d); /* The AST2500 EVB does not have an RTC. 
Let's pretend that one is * plugged on the I2C bus header */ @@ -288,13 +291,15 @@ static void witherspoon_bmc_i2c_init(AspeedBoardState *bmc) AspeedSoCState *soc = &bmc->soc; uint8_t *eeprom_buf = g_malloc0(8 * 1024); - i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 3), "pca9552", 0x60); + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 3), TYPE_PCA9552, + 0x60); i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 4), "tmp423", 0x4c); i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 5), "tmp423", 0x4c); /* The Witherspoon expects a TMP275 but a TMP105 is compatible */ - i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 9), "tmp105", 0x4a); + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 9), TYPE_TMP105, + 0x4a); /* The witherspoon board expects Epson RX8900 I2C RTC but a ds1338 is * good enough */ @@ -302,7 +307,7 @@ static void witherspoon_bmc_i2c_init(AspeedBoardState *bmc) smbus_eeprom_init_one(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 11), 0x51, eeprom_buf); - i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 11), "pca9552", + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 11), TYPE_PCA9552, 0x60); } diff --git a/hw/arm/exynos4_boards.c b/hw/arm/exynos4_boards.c index 750162cc95..ea8100f65a 100644 --- a/hw/arm/exynos4_boards.c +++ b/hw/arm/exynos4_boards.c @@ -32,6 +32,7 @@ #include "hw/arm/arm.h" #include "exec/address-spaces.h" #include "hw/arm/exynos4210.h" +#include "hw/net/lan9118.h" #include "hw/boards.h" #undef DEBUG @@ -92,7 +93,7 @@ static void lan9215_init(uint32_t base, qemu_irq irq) /* This should be a 9215 but the 9118 is close enough */ if (nd_table[0].used) { qemu_check_nic_model(&nd_table[0], "lan9118"); - dev = qdev_create(NULL, "lan9118"); + dev = qdev_create(NULL, TYPE_LAN9118); qdev_set_nic_properties(dev, &nd_table[0]); qdev_prop_set_uint32(dev, "mode_16bit", 1); qdev_init_nofail(dev); diff --git a/hw/arm/gumstix.c b/hw/arm/gumstix.c index 79886ce378..343cbfd7da 100644 --- a/hw/arm/gumstix.c +++ b/hw/arm/gumstix.c @@ -40,7 +40,7 @@ #include "hw/arm/pxa.h" #include "net/net.h" #include "hw/block/flash.h" -#include "hw/devices.h" +#include "hw/net/smc91c111.h" #include "hw/boards.h" #include "exec/address-spaces.h" #include "sysemu/qtest.h" diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c index 4eceebb9ea..0b6f24465e 100644 --- a/hw/arm/integratorcp.c +++ b/hw/arm/integratorcp.c @@ -12,10 +12,10 @@ #include "qemu-common.h" #include "cpu.h" #include "hw/sysbus.h" -#include "hw/devices.h" #include "hw/boards.h" #include "hw/arm/arm.h" #include "hw/misc/arm_integrator_debug.h" +#include "hw/net/smc91c111.h" #include "net/net.h" #include "exec/address-spaces.h" #include "sysemu/sysemu.h" diff --git a/hw/arm/kzm.c b/hw/arm/kzm.c index 864c7bd411..139934c4ec 100644 --- a/hw/arm/kzm.c +++ b/hw/arm/kzm.c @@ -22,7 +22,7 @@ #include "qemu/error-report.h" #include "exec/address-spaces.h" #include "net/net.h" -#include "hw/devices.h" +#include "hw/net/lan9118.h" #include "hw/char/serial.h" #include "sysemu/qtest.h" diff --git a/hw/arm/mainstone.c b/hw/arm/mainstone.c index e96738ad26..c1cec59037 100644 --- a/hw/arm/mainstone.c +++ b/hw/arm/mainstone.c @@ -18,7 +18,7 @@ #include "hw/arm/pxa.h" #include "hw/arm/arm.h" #include "net/net.h" -#include "hw/devices.h" +#include "hw/net/smc91c111.h" #include "hw/boards.h" #include "hw/block/flash.h" #include "hw/sysbus.h" diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c index f79f090a4a..7832408bb7 100644 --- a/hw/arm/mps2-tz.c +++ b/hw/arm/mps2-tz.c @@ -56,6 +56,7 @@ #include 
"hw/arm/armsse.h" #include "hw/dma/pl080.h" #include "hw/ssi/pl022.h" +#include "hw/net/lan9118.h" #include "net/net.h" #include "hw/core/split-irq.h" @@ -244,7 +245,7 @@ static MemoryRegion *make_eth_dev(MPS2TZMachineState *mms, void *opaque, * except that it doesn't support the checksum-offload feature. */ qemu_check_nic_model(nd, "lan9118"); - mms->lan9118 = qdev_create(NULL, "lan9118"); + mms->lan9118 = qdev_create(NULL, TYPE_LAN9118); qdev_set_nic_properties(mms->lan9118, nd); qdev_init_nofail(mms->lan9118); diff --git a/hw/arm/mps2.c b/hw/arm/mps2.c index e3d698ba6c..54b7395849 100644 --- a/hw/arm/mps2.c +++ b/hw/arm/mps2.c @@ -36,7 +36,7 @@ #include "hw/timer/cmsdk-apb-timer.h" #include "hw/timer/cmsdk-apb-dualtimer.h" #include "hw/misc/mps2-scc.h" -#include "hw/devices.h" +#include "hw/net/lan9118.h" #include "net/net.h" typedef enum MPS2FPGAType { diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c index 906b7ca22d..303f7a31e1 100644 --- a/hw/arm/nseries.c +++ b/hw/arm/nseries.c @@ -30,7 +30,10 @@ #include "ui/console.h" #include "hw/boards.h" #include "hw/i2c/i2c.h" -#include "hw/devices.h" +#include "hw/display/blizzard.h" +#include "hw/input/tsc2xxx.h" +#include "hw/misc/cbus.h" +#include "hw/misc/tmp105.h" #include "hw/block/flash.h" #include "hw/hw.h" #include "hw/bt.h" @@ -218,7 +221,7 @@ static void n8x0_i2c_setup(struct n800_s *s) qemu_register_powerdown_notifier(&n8x0_system_powerdown_notifier); /* Attach a TMP105 PM chip (A0 wired to ground) */ - dev = i2c_create_slave(i2c, "tmp105", N8X0_TMP105_ADDR); + dev = i2c_create_slave(i2c, TYPE_TMP105, N8X0_TMP105_ADDR); qdev_connect_gpio_out(dev, 0, tmp_irq); } diff --git a/hw/arm/omap2.c b/hw/arm/omap2.c index 94dffb2f57..446223906e 100644 --- a/hw/arm/omap2.c +++ b/hw/arm/omap2.c @@ -273,7 +273,7 @@ static void omap_eac_format_update(struct omap_eac_s *s) * does I2S specify it? */ /* All register writes are 16 bits so we we store 16-bit samples * in the buffers regardless of AGCFR[B8_16] value. 
*/ - fmt.fmt = AUD_FMT_U16; + fmt.fmt = AUDIO_FORMAT_U16; s->codec.in_voice = AUD_open_in(&s->codec.card, s->codec.in_voice, "eac.codec.in", s, omap_eac_in_cb, &fmt); diff --git a/hw/arm/palm.c b/hw/arm/palm.c index 285f43709d..139d27d1cc 100644 --- a/hw/arm/palm.c +++ b/hw/arm/palm.c @@ -26,7 +26,7 @@ #include "hw/arm/omap.h" #include "hw/boards.h" #include "hw/arm/arm.h" -#include "hw/devices.h" +#include "hw/input/tsc2xxx.h" #include "hw/loader.h" #include "exec/address-spaces.h" #include "cpu.h" diff --git a/hw/arm/realview.c b/hw/arm/realview.c index 242f5a87b6..05a244df25 100644 --- a/hw/arm/realview.c +++ b/hw/arm/realview.c @@ -14,7 +14,8 @@ #include "hw/sysbus.h" #include "hw/arm/arm.h" #include "hw/arm/primecell.h" -#include "hw/devices.h" +#include "hw/net/lan9118.h" +#include "hw/net/smc91c111.h" #include "hw/pci/pci.h" #include "net/net.h" #include "sysemu/sysemu.h" diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index bbf4b8721a..e94be6db6c 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -412,10 +412,10 @@ inline void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr) /* Unmap all notifiers of all mr's */ void smmu_inv_notifiers_all(SMMUState *s) { - SMMUNotifierNode *node; + SMMUDevice *sdev; - QLIST_FOREACH(node, &s->notifiers_list, next) { - smmu_inv_notifiers_mr(&node->sdev->iommu); + QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) { + smmu_inv_notifiers_mr(&sdev->iommu); } } diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 8c4e99fecc..fd8ec7860e 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -828,10 +828,10 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, /* invalidate an asid/iova tuple in all mr's */ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova) { - SMMUNotifierNode *node; + SMMUDevice *sdev; - QLIST_FOREACH(node, &s->notifiers_list, next) { - IOMMUMemoryRegion *mr = &node->sdev->iommu; + QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) { + IOMMUMemoryRegion *mr = &sdev->iommu; IOMMUNotifier *n; trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova); @@ -1472,8 +1472,6 @@ static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu, SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu); SMMUv3State *s3 = sdev->smmu; SMMUState *s = &(s3->smmu_state); - SMMUNotifierNode *node = NULL; - SMMUNotifierNode *next_node = NULL; if (new & IOMMU_NOTIFIER_MAP) { int bus_num = pci_bus_num(sdev->bus); @@ -1485,22 +1483,10 @@ static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu, if (old == IOMMU_NOTIFIER_NONE) { trace_smmuv3_notify_flag_add(iommu->parent_obj.name); - node = g_malloc0(sizeof(*node)); - node->sdev = sdev; - QLIST_INSERT_HEAD(&s->notifiers_list, node, next); - return; - } - - /* update notifier node with new flags */ - QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) { - if (node->sdev == sdev) { - if (new == IOMMU_NOTIFIER_NONE) { - trace_smmuv3_notify_flag_del(iommu->parent_obj.name); - QLIST_REMOVE(node, next); - g_free(node); - } - return; - } + QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next); + } else if (new == IOMMU_NOTIFIER_NONE) { + trace_smmuv3_notify_flag_del(iommu->parent_obj.name); + QLIST_REMOVE(sdev, next); } } diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c index 05f86749f4..5059aedbaa 100644 --- a/hw/arm/stellaris.c +++ b/hw/arm/stellaris.c @@ -12,7 +12,6 @@ #include "hw/sysbus.h" #include "hw/ssi/ssi.h" #include "hw/arm/arm.h" -#include "hw/devices.h" #include "qemu/timer.h" #include "hw/i2c/i2c.h" #include 
"net/net.h" @@ -22,6 +21,7 @@ #include "sysemu/sysemu.h" #include "hw/arm/armv7m.h" #include "hw/char/pl011.h" +#include "hw/input/gamepad.h" #include "hw/watchdog/cmsdk-apb-watchdog.h" #include "hw/misc/unimp.h" #include "cpu.h" diff --git a/hw/arm/tosa.c b/hw/arm/tosa.c index eef9d427e7..9a1247797f 100644 --- a/hw/arm/tosa.c +++ b/hw/arm/tosa.c @@ -16,10 +16,10 @@ #include "hw/hw.h" #include "hw/arm/pxa.h" #include "hw/arm/arm.h" -#include "hw/devices.h" #include "hw/arm/sharpsl.h" #include "hw/pcmcia.h" #include "hw/boards.h" +#include "hw/display/tc6393xb.h" #include "hw/i2c/i2c.h" #include "hw/ssi/ssi.h" #include "hw/sysbus.h" diff --git a/hw/arm/trace-events b/hw/arm/trace-events index 27b11d655d..0acedcedc6 100644 --- a/hw/arm/trace-events +++ b/hw/arm/trace-events @@ -1,25 +1,21 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/arm/virt-acpi-build.c +# virt-acpi-build.c virt_acpi_setup(void) "No fw cfg or ACPI disabled. Bailing out." -# hw/arm/smmu-common.c +# smmu-common.c smmu_add_mr(const char *name) "%s" -smmu_page_walk(int stage, uint64_t baseaddr, int first_level, uint64_t start, uint64_t end) "stage=%d, baseaddr=0x%"PRIx64", first level=%d, start=0x%"PRIx64", end=0x%"PRIx64 -smmu_lookup_table(int level, uint64_t baseaddr, int granule_sz, uint64_t start, uint64_t end, int flags, uint64_t subpage_size) "level=%d baseaddr=0x%"PRIx64" granule=%d, start=0x%"PRIx64" end=0x%"PRIx64" flags=%d subpage_size=0x%"PRIx64 smmu_ptw_level(int level, uint64_t iova, size_t subpage_size, uint64_t baseaddr, uint32_t offset, uint64_t pte) "level=%d iova=0x%"PRIx64" subpage_sz=0x%zx baseaddr=0x%"PRIx64" offset=%d => pte=0x%"PRIx64 smmu_ptw_invalid_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" offset=%d pte=0x%"PRIx64 smmu_ptw_page_pte(int stage, int level, uint64_t iova, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t address) "stage=%d level=%d iova=0x%"PRIx64" base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" page address = 0x%"PRIx64 smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t iova, uint64_t gpa, int bsize_mb) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" iova=0x%"PRIx64" block address = 0x%"PRIx64" block size = %d MiB" smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64 -smmu_iotlb_cache_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" -smmu_iotlb_cache_miss(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" smmu_iotlb_inv_all(void) "IOTLB invalidate all" smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d" smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64 smmu_inv_notifiers_mr(const char *name) "iommu mr=%s" -#hw/arm/smmuv3.c +# smmuv3.c smmuv3_read_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)" smmuv3_trigger_irq(int irq) "irq=%d" smmuv3_write_gerror(uint32_t toggled, uint32_t gerror) "toggled=0x%x, new GERROR=0x%x" @@ -29,12 +25,7 @@ smmuv3_cmdq_consume(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t con smmuv3_cmdq_opcode(const char *opcode) "<--- %s" smmuv3_cmdq_consume_out(uint32_t 
prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "prod:%d, cons:%d, prod_wrap:%d, cons_wrap:%d " smmuv3_cmdq_consume_error(const char *cmd_name, uint8_t cmd_error) "Error on %s command execution: %d" -smmuv3_update(bool is_empty, uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "q empty:%d prod:%d cons:%d p.wrap:%d p.cons:%d" -smmuv3_update_check_cmd(int error) "cmdq not enabled or error :0x%x" smmuv3_write_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)" -smmuv3_write_mmio_idr(uint64_t addr, uint64_t val) "write to RO/Unimpl reg 0x%"PRIx64" val64:0x%"PRIx64 -smmuv3_write_mmio_evtq_cons_bef_clear(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "Before clearing interrupt prod:0x%x cons:0x%x prod.w:%d cons.w:%d" -smmuv3_write_mmio_evtq_cons_after_clear(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "after clearing interrupt prod:0x%x cons:0x%x prod.w:%d cons.w:%d" smmuv3_record_event(const char *type, uint32_t sid) "%s sid=%d" smmuv3_find_ste(uint16_t sid, uint32_t features, uint16_t sid_split) "SID:0x%x features:0x%x, sid_split:0x%x" smmuv3_find_ste_2lvl(uint64_t strtab_base, uint64_t l1ptr, int l1_ste_offset, uint64_t l2ptr, int l2_ste_offset, int max_l2_ste) "strtab_base:0x%"PRIx64" l1ptr:0x%"PRIx64" l1_off:0x%x, l2ptr:0x%"PRIx64" l2_off:0x%x max_l2_ste:%d" @@ -55,6 +46,8 @@ smmuv3_cmdq_tlbi_nh_va(int vmid, int asid, uint64_t addr, bool leaf) "vmid =%d a smmuv3_cmdq_tlbi_nh_vaa(int vmid, uint64_t addr) "vmid =%d addr=0x%"PRIx64 smmuv3_cmdq_tlbi_nh(void) "" smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d" +smmu_iotlb_cache_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" +smmu_iotlb_cache_miss(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid %d" smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s" smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s" diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c index d67181810a..25166e1517 100644 --- a/hw/arm/versatilepb.c +++ b/hw/arm/versatilepb.c @@ -13,7 +13,7 @@ #include "cpu.h" #include "hw/sysbus.h" #include "hw/arm/arm.h" -#include "hw/devices.h" +#include "hw/net/smc91c111.h" #include "net/net.h" #include "sysemu/sysemu.h" #include "hw/pci/pci.h" diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c index f07134c424..d8634f3dd2 100644 --- a/hw/arm/vexpress.c +++ b/hw/arm/vexpress.c @@ -28,7 +28,7 @@ #include "hw/sysbus.h" #include "hw/arm/arm.h" #include "hw/arm/primecell.h" -#include "hw/devices.h" +#include "hw/net/lan9118.h" #include "hw/i2c/i2c.h" #include "net/net.h" #include "sysemu/sysemu.h" diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index d7e2e4885b..bf9c0bc2f4 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -405,7 +405,7 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) its->identifiers[0] = 0; /* MADT translation_id */ if (vms->iommu == VIRT_IOMMU_SMMUV3) { - int irq = vms->irqmap[VIRT_SMMU]; + int irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE; /* SMMUv3 node */ smmu_offset = iort_node_offset + node_size; @@ -560,8 +560,8 @@ build_mcfg(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) /* Only a single 
allocation so no need to play with segments */ mcfg->allocation[0].pci_segment = cpu_to_le16(0); mcfg->allocation[0].start_bus_number = 0; - mcfg->allocation[0].end_bus_number = (memmap[ecam_id].size - / PCIE_MMCFG_SIZE_MIN) - 1; + mcfg->allocation[0].end_bus_number = + PCIE_MMCFG_BUS(memmap[ecam_id].size - 1); build_header(linker, table_data, (void *)(table_data->data + mcfg_start), "MCFG", table_data->len - mcfg_start, 1, NULL, NULL); diff --git a/hw/arm/virt.c b/hw/arm/virt.c index ce2664a30b..16ba67f7a7 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -1978,10 +1978,17 @@ static void machvirt_machine_init(void) } type_init(machvirt_machine_init); +static void virt_machine_4_1_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE_AS_LATEST(4, 1) + static void virt_machine_4_0_options(MachineClass *mc) { + virt_machine_4_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); } -DEFINE_VIRT_MACHINE_AS_LATEST(4, 0) +DEFINE_VIRT_MACHINE(4, 0) static void virt_machine_3_1_options(MachineClass *mc) { diff --git a/hw/audio/ac97.c b/hw/audio/ac97.c index d799533aa9..2265622d44 100644 --- a/hw/audio/ac97.c +++ b/hw/audio/ac97.c @@ -365,7 +365,7 @@ static void open_voice (AC97LinkState *s, int index, int freq) as.freq = freq; as.nchannels = 2; - as.fmt = AUD_FMT_S16; + as.fmt = AUDIO_FORMAT_S16; as.endianness = 0; if (freq > 0) { diff --git a/hw/audio/adlib.c b/hw/audio/adlib.c index 97b876c7e0..0957780a3d 100644 --- a/hw/audio/adlib.c +++ b/hw/audio/adlib.c @@ -269,7 +269,7 @@ static void adlib_realizefn (DeviceState *dev, Error **errp) as.freq = s->freq; as.nchannels = SHIFT; - as.fmt = AUD_FMT_S16; + as.fmt = AUDIO_FORMAT_S16; as.endianness = AUDIO_HOST_ENDIANNESS; AUD_register_card ("adlib", &s->card); diff --git a/hw/audio/cs4231a.c b/hw/audio/cs4231a.c index 9089dcb47e..62da75eefe 100644 --- a/hw/audio/cs4231a.c +++ b/hw/audio/cs4231a.c @@ -288,7 +288,7 @@ static void cs_reset_voices (CSState *s, uint32_t val) switch ((val >> 5) & ((s->dregs[MODE_And_ID] & MODE2) ? 7 : 3)) { case 0: - as.fmt = AUD_FMT_U8; + as.fmt = AUDIO_FORMAT_U8; s->shift = as.nchannels == 2; break; @@ -298,7 +298,7 @@ static void cs_reset_voices (CSState *s, uint32_t val) case 3: s->tab = ALawDecompressTable; x_law: - as.fmt = AUD_FMT_S16; + as.fmt = AUDIO_FORMAT_S16; as.endianness = AUDIO_HOST_ENDIANNESS; s->shift = as.nchannels == 2; break; @@ -307,7 +307,7 @@ static void cs_reset_voices (CSState *s, uint32_t val) as.endianness = 1; /* fall through */ case 2: - as.fmt = AUD_FMT_S16; + as.fmt = AUDIO_FORMAT_S16; s->shift = as.nchannels; break; diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c index 97789a0771..a5314d66fd 100644 --- a/hw/audio/es1370.c +++ b/hw/audio/es1370.c @@ -414,14 +414,14 @@ static void es1370_update_voices (ES1370State *s, uint32_t ctl, uint32_t sctl) i, new_freq, 1 << (new_fmt & 1), - (new_fmt & 2) ? AUD_FMT_S16 : AUD_FMT_U8, + (new_fmt & 2) ? AUDIO_FORMAT_S16 : AUDIO_FORMAT_U8, d->shift); if (new_freq) { struct audsettings as; as.freq = new_freq; as.nchannels = 1 << (new_fmt & 1); - as.fmt = (new_fmt & 2) ? AUD_FMT_S16 : AUD_FMT_U8; + as.fmt = (new_fmt & 2) ? 
AUDIO_FORMAT_S16 : AUDIO_FORMAT_U8; as.endianness = 0; if (i == ADC_CHANNEL) { diff --git a/hw/audio/gus.c b/hw/audio/gus.c index 8e0b27e0f2..b3e2a7fdd5 100644 --- a/hw/audio/gus.c +++ b/hw/audio/gus.c @@ -251,7 +251,7 @@ static void gus_realizefn (DeviceState *dev, Error **errp) as.freq = s->freq; as.nchannels = 2; - as.fmt = AUD_FMT_S16; + as.fmt = AUDIO_FORMAT_S16; as.endianness = GUS_ENDIANNESS; s->voice = AUD_open_out ( diff --git a/hw/audio/hda-codec.c b/hw/audio/hda-codec.c index 617a1c1016..c25bfa38b1 100644 --- a/hw/audio/hda-codec.c +++ b/hw/audio/hda-codec.c @@ -99,9 +99,9 @@ static void hda_codec_parse_fmt(uint32_t format, struct audsettings *as) } switch (format & AC_FMT_BITS_MASK) { - case AC_FMT_BITS_8: as->fmt = AUD_FMT_S8; break; - case AC_FMT_BITS_16: as->fmt = AUD_FMT_S16; break; - case AC_FMT_BITS_32: as->fmt = AUD_FMT_S32; break; + case AC_FMT_BITS_8: as->fmt = AUDIO_FORMAT_S8; break; + case AC_FMT_BITS_16: as->fmt = AUDIO_FORMAT_S16; break; + case AC_FMT_BITS_32: as->fmt = AUDIO_FORMAT_S32; break; } as->nchannels = ((format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT) + 1; @@ -134,12 +134,12 @@ static void hda_codec_parse_fmt(uint32_t format, struct audsettings *as) /* -------------------------------------------------------------------------- */ static const char *fmt2name[] = { - [ AUD_FMT_U8 ] = "PCM-U8", - [ AUD_FMT_S8 ] = "PCM-S8", - [ AUD_FMT_U16 ] = "PCM-U16", - [ AUD_FMT_S16 ] = "PCM-S16", - [ AUD_FMT_U32 ] = "PCM-U32", - [ AUD_FMT_S32 ] = "PCM-S32", + [ AUDIO_FORMAT_U8 ] = "PCM-U8", + [ AUDIO_FORMAT_S8 ] = "PCM-S8", + [ AUDIO_FORMAT_U16 ] = "PCM-U16", + [ AUDIO_FORMAT_S16 ] = "PCM-S16", + [ AUDIO_FORMAT_U32 ] = "PCM-U32", + [ AUDIO_FORMAT_S32 ] = "PCM-S32", }; typedef struct HDAAudioState HDAAudioState; diff --git a/hw/audio/lm4549.c b/hw/audio/lm4549.c index a46f2301af..af8b22b541 100644 --- a/hw/audio/lm4549.c +++ b/hw/audio/lm4549.c @@ -185,7 +185,7 @@ void lm4549_write(lm4549_state *s, struct audsettings as; as.freq = value; as.nchannels = 2; - as.fmt = AUD_FMT_S16; + as.fmt = AUDIO_FORMAT_S16; as.endianness = 0; s->voice = AUD_open_out( @@ -255,7 +255,7 @@ static int lm4549_post_load(void *opaque, int version_id) struct audsettings as; as.freq = freq; as.nchannels = 2; - as.fmt = AUD_FMT_S16; + as.fmt = AUDIO_FORMAT_S16; as.endianness = 0; s->voice = AUD_open_out( @@ -292,7 +292,7 @@ void lm4549_init(lm4549_state *s, lm4549_callback data_req_cb, void* opaque) /* Open a default voice */ as.freq = 48000; as.nchannels = 2; - as.fmt = AUD_FMT_S16; + as.fmt = AUDIO_FORMAT_S16; as.endianness = 0; s->voice = AUD_open_out( diff --git a/hw/audio/milkymist-ac97.c b/hw/audio/milkymist-ac97.c index bc8db71ae0..90cce1e6ed 100644 --- a/hw/audio/milkymist-ac97.c +++ b/hw/audio/milkymist-ac97.c @@ -308,7 +308,7 @@ static void milkymist_ac97_realize(DeviceState *dev, Error **errp) as.freq = 48000; as.nchannels = 2; - as.fmt = AUD_FMT_S16; + as.fmt = AUDIO_FORMAT_S16; as.endianness = 1; s->voice_in = AUD_open_in(&s->card, s->voice_in, diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c index b80a62ce90..9c7fd74aeb 100644 --- a/hw/audio/pcspk.c +++ b/hw/audio/pcspk.c @@ -57,6 +57,7 @@ typedef struct { } PCSpkState; static const char *s_spk = "pcspk"; +static PCSpkState *pcspk_state; static inline void generate_samples(PCSpkState *s) { @@ -110,6 +111,22 @@ static void pcspk_callback(void *opaque, int free) } } +static int pcspk_audio_init(ISABus *bus) +{ + PCSpkState *s = pcspk_state; + struct audsettings as = {PCSPK_SAMPLE_RATE, 1, AUDIO_FORMAT_U8, 0}; + + 
AUD_register_card(s_spk, &s->card); + + s->voice = AUD_open_out(&s->card, s->voice, s_spk, s, pcspk_callback, &as); + if (!s->voice) { + AUD_log(s_spk, "Could not open voice\n"); + return -1; + } + + return 0; +} + static uint64_t pcspk_io_read(void *opaque, hwaddr addr, unsigned size) { @@ -162,20 +179,12 @@ static void pcspk_initfn(Object *obj) static void pcspk_realizefn(DeviceState *dev, Error **errp) { - struct audsettings as = {PCSPK_SAMPLE_RATE, 1, AUD_FMT_U8, 0}; ISADevice *isadev = ISA_DEVICE(dev); PCSpkState *s = PC_SPEAKER(dev); isa_register_ioport(isadev, &s->ioport, s->iobase); - AUD_register_card(s_spk, &s->card); - - s->voice = AUD_open_out(&s->card, s->voice, s_spk, s, pcspk_callback, &as); - if (!s->voice) { - error_setg(errp, "Initializing audio voice failed"); - AUD_remove_card(&s->card); - return; - } + pcspk_state = s; } static bool migrate_needed(void *opaque) @@ -212,6 +221,9 @@ static void pcspk_class_initfn(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_SOUND, dc->categories); dc->vmsd = &vmstate_spk; dc->props = pcspk_properties; + /* Reason: realize sets global pcspk_state */ + /* Reason: pit object link */ + dc->user_creatable = false; } static const TypeInfo pcspk_info = { @@ -222,12 +234,6 @@ static const TypeInfo pcspk_info = { .class_init = pcspk_class_initfn, }; -static int pcspk_audio_init(ISABus *bus) -{ - isa_create_simple(bus, TYPE_PC_SPEAKER); - return 0; -} - static void pcspk_register(void) { type_register_static(&pcspk_info); diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c index c5b9bf79e8..65ea0cd938 100644 --- a/hw/audio/sb16.c +++ b/hw/audio/sb16.c @@ -66,7 +66,7 @@ typedef struct SB16State { int fmt_stereo; int fmt_signed; int fmt_bits; - audfmt_e fmt; + AudioFormat fmt; int dma_auto; int block_size; int fifo; @@ -224,7 +224,7 @@ static void continue_dma8 (SB16State *s) static void dma_cmd8 (SB16State *s, int mask, int dma_len) { - s->fmt = AUD_FMT_U8; + s->fmt = AUDIO_FORMAT_U8; s->use_hdma = 0; s->fmt_bits = 8; s->fmt_signed = 0; @@ -319,18 +319,18 @@ static void dma_cmd (SB16State *s, uint8_t cmd, uint8_t d0, int dma_len) if (16 == s->fmt_bits) { if (s->fmt_signed) { - s->fmt = AUD_FMT_S16; + s->fmt = AUDIO_FORMAT_S16; } else { - s->fmt = AUD_FMT_U16; + s->fmt = AUDIO_FORMAT_U16; } } else { if (s->fmt_signed) { - s->fmt = AUD_FMT_S8; + s->fmt = AUDIO_FORMAT_S8; } else { - s->fmt = AUD_FMT_U8; + s->fmt = AUDIO_FORMAT_U8; } } @@ -852,7 +852,7 @@ static void legacy_reset (SB16State *s) as.freq = s->freq; as.nchannels = 1; - as.fmt = AUD_FMT_U8; + as.fmt = AUDIO_FORMAT_U8; as.endianness = 0; s->voice = AUD_open_out ( diff --git a/hw/audio/trace-events b/hw/audio/trace-events index 5891b4e2b9..60556b4a97 100644 --- a/hw/audio/trace-events +++ b/hw/audio/trace-events @@ -1,12 +1,12 @@ # See docs/devel/tracing.txt for syntax documentation. 
-# hw/audio/cs4231.c +# cs4231.c cs4231_mem_readl_dreg(uint32_t reg, uint32_t ret) "read dreg %d: 0x%02x" cs4231_mem_readl_reg(uint32_t reg, uint32_t ret) "read reg %d: 0x%08x" cs4231_mem_writel_reg(uint32_t reg, uint32_t old, uint32_t val) "write reg %d: 0x%08x -> 0x%08x" cs4231_mem_writel_dreg(uint32_t reg, uint32_t old, uint32_t val) "write dreg %d: 0x%02x -> 0x%02x" -# hw/audio/milkymist-ac97.c +# milkymist-ac97.c milkymist_ac97_memory_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_ac97_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_ac97_pulse_irq_crrequest(void) "Pulse IRQ CR request" @@ -18,7 +18,7 @@ milkymist_ac97_in_cb_transferred(int transferred) "transferred %d" milkymist_ac97_out_cb(int free, uint32_t remaining) "free %d remaining %u" milkymist_ac97_out_cb_transferred(int transferred) "transferred %d" -# hw/audio/hda-codec.c +# hda-codec.c hda_audio_running(const char *stream, int nr, bool running) "st %s, nr %d, run %d" hda_audio_format(const char *stream, int chan, const char *fmt, int freq) "st %s, %d x %s @ %d Hz" hda_audio_adjust(const char *stream, int pos) "st %s, pos %d" diff --git a/hw/audio/wm8750.c b/hw/audio/wm8750.c index 169b006ade..ca0ad73caf 100644 --- a/hw/audio/wm8750.c +++ b/hw/audio/wm8750.c @@ -201,7 +201,7 @@ static void wm8750_set_format(WM8750State *s) in_fmt.endianness = 0; in_fmt.nchannels = 2; in_fmt.freq = s->adc_hz; - in_fmt.fmt = AUD_FMT_S16; + in_fmt.fmt = AUDIO_FORMAT_S16; s->adc_voice[0] = AUD_open_in(&s->card, s->adc_voice[0], CODEC ".input1", s, wm8750_audio_in_cb, &in_fmt); @@ -214,7 +214,7 @@ static void wm8750_set_format(WM8750State *s) out_fmt.endianness = 0; out_fmt.nchannels = 2; out_fmt.freq = s->dac_hz; - out_fmt.fmt = AUD_FMT_S16; + out_fmt.fmt = AUDIO_FORMAT_S16; s->dac_voice[0] = AUD_open_out(&s->card, s->dac_voice[0], CODEC ".speaker", s, wm8750_audio_out_cb, &out_fmt); @@ -681,7 +681,7 @@ uint32_t wm8750_adc_dat(void *opaque) if (s->idx_in >= sizeof(s->data_in)) { wm8750_in_load(s); if (s->idx_in >= sizeof(s->data_in)) { - return 0x80008000; /* silence in AUD_FMT_S16 sample format */ + return 0x80008000; /* silence in AUDIO_FORMAT_S16 sample format */ } } diff --git a/hw/block/block.c b/hw/block/block.c index cf0eb826f1..bf56c7612b 100644 --- a/hw/block/block.c +++ b/hw/block/block.c @@ -13,7 +13,53 @@ #include "hw/block/block.h" #include "qapi/error.h" #include "qapi/qapi-types-block.h" -#include "qemu/error-report.h" + +/* + * Read the entire contents of @blk into @buf. + * @blk's contents must be @size bytes, and @size must be at most + * BDRV_REQUEST_MAX_BYTES. + * On success, return true. + * On failure, store an error through @errp and return false. + * Note that the error messages do not identify the block backend. + * TODO Since callers don't either, this can result in confusing + * errors. + * This function not intended for actual block devices, which read on + * demand. It's for things like memory devices that (ab)use a block + * backend to provide persistence. 
+ */ +bool blk_check_size_and_read_all(BlockBackend *blk, void *buf, hwaddr size, + Error **errp) +{ + int64_t blk_len; + int ret; + + blk_len = blk_getlength(blk); + if (blk_len < 0) { + error_setg_errno(errp, -blk_len, + "can't get size of block backend"); + return false; + } + if (blk_len != size) { + error_setg(errp, "device requires %" HWADDR_PRIu " bytes, " + "block backend provides %" PRIu64 " bytes", + size, blk_len); + return false; + } + + /* + * We could loop for @size > BDRV_REQUEST_MAX_BYTES, but if we + * ever get to the point we want to read *gigabytes* here, we + * should probably rework the device to be more like an actual + * block device and read only on demand. + */ + assert(size <= BDRV_REQUEST_MAX_BYTES); + ret = blk_pread(blk, 0, buf, size); + if (ret < 0) { + error_setg_errno(errp, -ret, "can't read block backend"); + return false; + } + return true; +} void blkconf_blocksizes(BlockConf *conf) { diff --git a/hw/block/dataplane/trace-events b/hw/block/dataplane/trace-events index 1a7ea277b0..843cc4e7b1 100644 --- a/hw/block/dataplane/trace-events +++ b/hw/block/dataplane/trace-events @@ -1,5 +1,5 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/block/dataplane/virtio-blk.c +# virtio-blk.c virtio_blk_data_plane_start(void *s) "dataplane %p" virtio_blk_data_plane_stop(void *s) "dataplane %p" diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c index f1523c5b45..bb8f1186e4 100644 --- a/hw/block/dataplane/xen-block.c +++ b/hw/block/dataplane/xen-block.c @@ -49,7 +49,6 @@ struct XenBlockDataPlane { unsigned int *ring_ref; unsigned int nr_ring_ref; void *sring; - int64_t file_blk; int protocol; blkif_back_rings_t rings; int more_work; @@ -168,7 +167,7 @@ static int xen_block_parse_request(XenBlockRequest *request) goto err; } - request->start = request->req.sector_number * dataplane->file_blk; + request->start = request->req.sector_number * XEN_BLKIF_SECTOR_SIZE; for (i = 0; i < request->req.nr_segments; i++) { if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) { error_report("error: nr_segments too big"); @@ -178,14 +177,14 @@ static int xen_block_parse_request(XenBlockRequest *request) error_report("error: first > last sector"); goto err; } - if (request->req.seg[i].last_sect * dataplane->file_blk >= + if (request->req.seg[i].last_sect * XEN_BLKIF_SECTOR_SIZE >= XC_PAGE_SIZE) { error_report("error: page crossing"); goto err; } len = (request->req.seg[i].last_sect - - request->req.seg[i].first_sect + 1) * dataplane->file_blk; + request->req.seg[i].first_sect + 1) * XEN_BLKIF_SECTOR_SIZE; request->size += len; } if (request->start + request->size > blk_getlength(dataplane->blk)) { @@ -205,7 +204,6 @@ static int xen_block_copy_request(XenBlockRequest *request) XenDevice *xendev = dataplane->xendev; XenDeviceGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST]; int i, count; - int64_t file_blk = dataplane->file_blk; bool to_domain = (request->req.operation == BLKIF_OP_READ); void *virt = request->buf; Error *local_err = NULL; @@ -220,16 +218,17 @@ static int xen_block_copy_request(XenBlockRequest *request) if (to_domain) { segs[i].dest.foreign.ref = request->req.seg[i].gref; segs[i].dest.foreign.offset = request->req.seg[i].first_sect * - file_blk; + XEN_BLKIF_SECTOR_SIZE; segs[i].source.virt = virt; } else { segs[i].source.foreign.ref = request->req.seg[i].gref; segs[i].source.foreign.offset = request->req.seg[i].first_sect * - file_blk; + XEN_BLKIF_SECTOR_SIZE; segs[i].dest.virt = virt; } segs[i].len = (request->req.seg[i].last_sect - - 
request->req.seg[i].first_sect + 1) * file_blk; + request->req.seg[i].first_sect + 1) * + XEN_BLKIF_SECTOR_SIZE; virt += segs[i].len; } @@ -331,22 +330,22 @@ static bool xen_block_split_discard(XenBlockRequest *request, XenBlockDataPlane *dataplane = request->dataplane; int64_t byte_offset; int byte_chunk; - uint64_t byte_remaining, limit; + uint64_t byte_remaining; uint64_t sec_start = sector_number; uint64_t sec_count = nr_sectors; /* Wrap around, or overflowing byte limit? */ if (sec_start + sec_count < sec_count || - sec_start + sec_count > INT64_MAX / dataplane->file_blk) { + sec_start + sec_count > INT64_MAX / XEN_BLKIF_SECTOR_SIZE) { return false; } - limit = BDRV_REQUEST_MAX_SECTORS * dataplane->file_blk; - byte_offset = sec_start * dataplane->file_blk; - byte_remaining = sec_count * dataplane->file_blk; + byte_offset = sec_start * XEN_BLKIF_SECTOR_SIZE; + byte_remaining = sec_count * XEN_BLKIF_SECTOR_SIZE; do { - byte_chunk = byte_remaining > limit ? limit : byte_remaining; + byte_chunk = byte_remaining > BDRV_REQUEST_MAX_BYTES ? + BDRV_REQUEST_MAX_BYTES : byte_remaining; request->aio_inflight++; blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk, xen_block_complete_aio, request); @@ -632,7 +631,6 @@ XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev, XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1); dataplane->xendev = xendev; - dataplane->file_blk = conf->logical_block_size; dataplane->blk = conf->blk; QLIST_INIT(&dataplane->inflight); diff --git a/hw/block/nvme.c b/hw/block/nvme.c index 8325b5e88a..7caf92532a 100644 --- a/hw/block/nvme.c +++ b/hw/block/nvme.c @@ -324,8 +324,8 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd, const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds; uint64_t slba = le64_to_cpu(rw->slba); uint32_t nlb = le16_to_cpu(rw->nlb) + 1; - uint64_t aio_slba = slba << (data_shift - BDRV_SECTOR_BITS); - uint32_t aio_nlb = nlb << (data_shift - BDRV_SECTOR_BITS); + uint64_t offset = slba << data_shift; + uint32_t count = nlb << data_shift; if (unlikely(slba + nlb > ns->id_ns.nsze)) { trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze); @@ -335,7 +335,7 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd, req->has_sg = false; block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0, BLOCK_ACCT_WRITE); - req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, aio_slba, aio_nlb, + req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count, BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req); return NVME_NO_COMPLETE; } diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c index 125f70b8e4..16dfae14b8 100644 --- a/hw/block/pflash_cfi01.c +++ b/hw/block/pflash_cfi01.c @@ -38,6 +38,7 @@ #include "qemu/osdep.h" #include "hw/hw.h" +#include "hw/block/block.h" #include "hw/block/flash.h" #include "sysemu/block-backend.h" #include "qapi/error.h" @@ -730,13 +731,6 @@ static void pflash_cfi01_realize(DeviceState *dev, Error **errp) } device_len = sector_len_per_device * blocks_per_device; - /* XXX: to be fixed */ -#if 0 - if (total_len != (8 * 1024 * 1024) && total_len != (16 * 1024 * 1024) && - total_len != (32 * 1024 * 1024) && total_len != (64 * 1024 * 1024)) - return NULL; -#endif - memory_region_init_rom_device( &pfl->mem, OBJECT(dev), &pflash_cfi01_ops, @@ -763,12 +757,9 @@ static void pflash_cfi01_realize(DeviceState *dev, Error **errp) } if (pfl->blk) { - /* read the initial flash content */ - ret = blk_pread(pfl->blk, 0, pfl->storage, total_len); - - if (ret < 0) { + if 
(!blk_check_size_and_read_all(pfl->blk, pfl->storage, total_len, + errp)) { vmstate_unregister_ram(&pfl->mem, DEVICE(pfl)); - error_setg(errp, "failed to read the initial flash content"); return; } } diff --git a/hw/block/pflash_cfi02.c b/hw/block/pflash_cfi02.c index c9db430611..f2c6201f81 100644 --- a/hw/block/pflash_cfi02.c +++ b/hw/block/pflash_cfi02.c @@ -37,6 +37,7 @@ #include "qemu/osdep.h" #include "hw/hw.h" +#include "hw/block/block.h" #include "hw/block/flash.h" #include "qapi/error.h" #include "qemu/timer.h" @@ -550,12 +551,6 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp) } chip_len = pfl->sector_len * pfl->nb_blocs; - /* XXX: to be fixed */ -#if 0 - if (total_len != (8 * 1024 * 1024) && total_len != (16 * 1024 * 1024) && - total_len != (32 * 1024 * 1024) && total_len != (64 * 1024 * 1024)) - return NULL; -#endif memory_region_init_rom_device(&pfl->orig_mem, OBJECT(pfl), pfl->be ? &pflash_cfi02_ops_be : &pflash_cfi02_ops_le, @@ -581,11 +576,9 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp) } if (pfl->blk) { - /* read the initial flash content */ - ret = blk_pread(pfl->blk, 0, pfl->storage, chip_len); - if (ret < 0) { + if (!blk_check_size_and_read_all(pfl->blk, pfl->storage, chip_len, + errp)) { vmstate_unregister_ram(&pfl->orig_mem, DEVICE(pfl)); - error_setg(errp, "failed to read the initial flash content"); return; } } diff --git a/hw/block/trace-events b/hw/block/trace-events index 8020f9226a..b92039a573 100644 --- a/hw/block/trace-events +++ b/hw/block/trace-events @@ -1,10 +1,11 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/block/fdc.c +# fdc.c fdc_ioport_read(uint8_t reg, uint8_t value) "read reg 0x%02x val 0x%02x" fdc_ioport_write(uint8_t reg, uint8_t value) "write reg 0x%02x val 0x%02x" -# hw/block/pflash_cfi0?.c +# pflash_cfi02.c +# pflash_cfi01.c pflash_reset(void) "reset" pflash_read(uint64_t offset, uint8_t cmd, int width, uint8_t wcycle) "offset:0x%04"PRIx64" cmd:0x%02x width:%d wcycle:%u" pflash_write(uint64_t offset, uint32_t value, int width, uint8_t wcycle) "offset:0x%04"PRIx64" value:0x%03x width:%d wcycle:%u" @@ -17,18 +18,18 @@ pflash_manufacturer_id(uint16_t id) "Read Manufacturer ID: 0x%04x" pflash_device_id(uint16_t id) "Read Device ID: 0x%04x" pflash_device_info(uint64_t offset) "Read Device Information offset:0x%04"PRIx64 -# hw/block/virtio-blk.c +# virtio-blk.c virtio_blk_req_complete(void *vdev, void *req, int status) "vdev %p req %p status %d" virtio_blk_rw_complete(void *vdev, void *req, int ret) "vdev %p req %p ret %d" virtio_blk_handle_write(void *vdev, void *req, uint64_t sector, size_t nsectors) "vdev %p req %p sector %"PRIu64" nsectors %zu" virtio_blk_handle_read(void *vdev, void *req, uint64_t sector, size_t nsectors) "vdev %p req %p sector %"PRIu64" nsectors %zu" virtio_blk_submit_multireq(void *vdev, void *mrb, int start, int num_reqs, uint64_t offset, size_t size, bool is_write) "vdev %p mrb %p start %d num_reqs %d offset %"PRIu64" size %zu is_write %d" -# hw/block/hd-geometry.c +# hd-geometry.c hd_geometry_lchs_guess(void *blk, int cyls, int heads, int secs) "blk %p LCHS %d %d %d" hd_geometry_guess(void *blk, uint32_t cyls, uint32_t heads, uint32_t secs, int trans) "blk %p CHS %u %u %u trans %d" -# hw/block/nvme.c +# nvme.c # nvme traces for successful events nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u" nvme_irq_pin(void) "pulsing IRQ pin" @@ -63,9 +64,7 @@ nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size" nvme_err_invalid_prplist_ent(uint64_t 
prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64"" nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64"" nvme_err_invalid_prp2_missing(void) "PRP2 is null and more data to be transferred" -nvme_err_invalid_field(void) "invalid field" nvme_err_invalid_prp(void) "invalid PRP" -nvme_err_invalid_sgl(void) "invalid SGL" nvme_err_invalid_ns(uint32_t ns, uint32_t limit) "invalid namespace %u not within 1-%u" nvme_err_invalid_opc(uint8_t opc) "invalid opcode 0x%"PRIx8"" nvme_err_invalid_admin_opc(uint8_t opc) "invalid admin opcode 0x%"PRIx8"" @@ -121,7 +120,7 @@ nvme_ub_db_wr_invalid_cqhead(uint32_t qid, uint16_t new_head) "completion queue nvme_ub_db_wr_invalid_sq(uint32_t qid) "submission queue doorbell write for nonexistent queue, sqid=%"PRIu32", ignoring" nvme_ub_db_wr_invalid_sqtail(uint32_t qid, uint16_t new_tail) "submission queue doorbell write value beyond queue size, sqid=%"PRIu32", new_head=%"PRIu16", ignoring" -# hw/block/xen-block.c +# xen-block.c xen_block_realize(const char *type, uint32_t disk, uint32_t partition) "%s d%up%u" xen_block_connect(const char *type, uint32_t disk, uint32_t partition) "%s d%up%u" xen_block_disconnect(const char *type, uint32_t disk, uint32_t partition) "%s d%up%u" diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index 44ac814016..28b81368f7 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -128,6 +128,21 @@ static void vhost_user_blk_start(VirtIODevice *vdev) } s->dev.acked_features = vdev->guest_features; + + if (!s->inflight->addr) { + ret = vhost_dev_get_inflight(&s->dev, s->queue_size, s->inflight); + if (ret < 0) { + error_report("Error get inflight: %d", -ret); + goto err_guest_notifiers; + } + } + + ret = vhost_dev_set_inflight(&s->dev, s->inflight); + if (ret < 0) { + error_report("Error set inflight: %d", -ret); + goto err_guest_notifiers; + } + ret = vhost_dev_start(&s->dev, vdev); if (ret < 0) { error_report("Error starting vhost: %d", -ret); @@ -249,11 +264,17 @@ static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) } } +static void vhost_user_blk_reset(VirtIODevice *vdev) +{ + VHostUserBlk *s = VHOST_USER_BLK(vdev); + + vhost_dev_free_inflight(s->inflight); +} + static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostUserBlk *s = VHOST_USER_BLK(vdev); - VhostUserState *user; struct vhost_virtqueue *vqs = NULL; int i, ret; @@ -272,15 +293,10 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) return; } - user = vhost_user_init(); - if (!user) { - error_setg(errp, "vhost-user-blk: failed to init vhost_user"); + if (!vhost_user_init(&s->vhost_user, &s->chardev, errp)) { return; } - user->chr = &s->chardev; - s->vhost_user = user; - virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, sizeof(struct virtio_blk_config)); @@ -289,6 +305,8 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) vhost_user_blk_handle_output); } + s->inflight = g_new0(struct vhost_inflight, 1); + s->dev.nvqs = s->num_queues; s->dev.vqs = g_new(struct vhost_virtqueue, s->dev.nvqs); s->dev.vq_index = 0; @@ -297,7 +315,7 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) vhost_dev_set_config_notifier(&s->dev, &blk_ops); - ret = vhost_dev_init(&s->dev, s->vhost_user, VHOST_BACKEND_TYPE_USER, 0); + ret = vhost_dev_init(&s->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0); if (ret < 0) { error_setg(errp, "vhost-user-blk: vhost 
initialization failed: %s", strerror(-ret)); @@ -321,11 +339,9 @@ vhost_err: vhost_dev_cleanup(&s->dev); virtio_err: g_free(vqs); + g_free(s->inflight); virtio_cleanup(vdev); - - vhost_user_cleanup(user); - g_free(user); - s->vhost_user = NULL; + vhost_user_cleanup(&s->vhost_user); } static void vhost_user_blk_device_unrealize(DeviceState *dev, Error **errp) @@ -336,14 +352,11 @@ static void vhost_user_blk_device_unrealize(DeviceState *dev, Error **errp) vhost_user_blk_set_status(vdev, 0); vhost_dev_cleanup(&s->dev); + vhost_dev_free_inflight(s->inflight); g_free(vqs); + g_free(s->inflight); virtio_cleanup(vdev); - - if (s->vhost_user) { - vhost_user_cleanup(s->vhost_user); - g_free(s->vhost_user); - s->vhost_user = NULL; - } + vhost_user_cleanup(&s->vhost_user); } static void vhost_user_blk_instance_init(Object *obj) @@ -386,6 +399,7 @@ static void vhost_user_blk_class_init(ObjectClass *klass, void *data) vdc->set_config = vhost_user_blk_set_config; vdc->get_features = vhost_user_blk_get_features; vdc->set_status = vhost_user_blk_set_status; + vdc->reset = vhost_user_blk_reset; } static const TypeInfo vhost_user_blk_info = { diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c index 70fc2455e8..ef635be4c2 100644 --- a/hw/block/xen-block.c +++ b/hw/block/xen-block.c @@ -149,7 +149,7 @@ static void xen_block_set_size(XenBlockDevice *blockdev) const char *type = object_get_typename(OBJECT(blockdev)); XenBlockVdev *vdev = &blockdev->props.vdev; BlockConf *conf = &blockdev->props.conf; - int64_t sectors = blk_getlength(conf->blk) / conf->logical_block_size; + int64_t sectors = blk_getlength(conf->blk) / XEN_BLKIF_SECTOR_SIZE; XenDevice *xendev = XEN_DEVICE(blockdev); trace_xen_block_size(type, vdev->disk, vdev->partition, sectors); @@ -223,6 +223,12 @@ static void xen_block_realize(XenDevice *xendev, Error **errp) blkconf_blocksizes(conf); + if (conf->logical_block_size != XEN_BLKIF_SECTOR_SIZE) { + error_setg(errp, "logical_block_size != %u not supported", + XEN_BLKIF_SECTOR_SIZE); + return; + } + if (conf->logical_block_size > conf->physical_block_size) { error_setg( errp, "logical_block_size > physical_block_size not supported"); @@ -232,8 +238,14 @@ static void xen_block_realize(XenDevice *xendev, Error **errp) blk_set_dev_ops(conf->blk, &xen_block_dev_ops, blockdev); blk_set_guest_block_size(conf->blk, conf->logical_block_size); - if (conf->discard_granularity > 0) { + if (conf->discard_granularity == -1) { + conf->discard_granularity = conf->physical_block_size; + } + + if (blk_get_flags(conf->blk) & BDRV_O_UNMAP) { xen_device_backend_printf(xendev, "feature-discard", "%u", 1); + xen_device_backend_printf(xendev, "discard-granularity", "%u", + conf->discard_granularity); } xen_device_backend_printf(xendev, "feature-flush-cache", "%u", 1); @@ -247,7 +259,7 @@ static void xen_block_realize(XenDevice *xendev, Error **errp) blockdev->device_type); xen_device_backend_printf(xendev, "sector-size", "%u", - conf->logical_block_size); + XEN_BLKIF_SECTOR_SIZE); xen_block_set_size(blockdev); @@ -755,6 +767,7 @@ static XenBlockDrive *xen_block_drive_create(const char *id, drive->id = g_strdup(id); file_layer = qdict_new(); + driver_layer = qdict_new(); qdict_put_str(file_layer, "driver", "file"); qdict_put_str(file_layer, "filename", filename); @@ -771,7 +784,7 @@ static XenBlockDrive *xen_block_drive_create(const char *id, QDict *cache_qdict = qdict_new(); qdict_put_bool(cache_qdict, "direct", true); - qdict_put_obj(file_layer, "cache", QOBJECT(cache_qdict)); + qdict_put(file_layer, "cache", 
cache_qdict); qdict_put_str(file_layer, "aio", "native"); } @@ -782,6 +795,7 @@ static XenBlockDrive *xen_block_drive_create(const char *id, if (!qemu_strtoul(discard_enable, NULL, 2, &value) && !!value) { qdict_put_str(file_layer, "discard", "unmap"); + qdict_put_str(driver_layer, "discard", "unmap"); } } @@ -791,12 +805,10 @@ static XenBlockDrive *xen_block_drive_create(const char *id, */ qdict_put_str(file_layer, "locking", "off"); - driver_layer = qdict_new(); - qdict_put_str(driver_layer, "driver", driver); g_free(driver); - qdict_put_obj(driver_layer, "file", QOBJECT(file_layer)); + qdict_put(driver_layer, "file", file_layer); g_assert(!drive->node_name); drive->node_name = xen_block_blockdev_add(drive->id, driver_layer, diff --git a/hw/block/xen_blkif.h b/hw/block/xen_blkif.h index 3e6e1ea365..a353693ea0 100644 --- a/hw/block/xen_blkif.h +++ b/hw/block/xen_blkif.h @@ -143,4 +143,6 @@ static inline void blkif_get_x86_64_req(blkif_request_t *dst, } } +#define XEN_BLKIF_SECTOR_SIZE 512 + #endif /* XEN_BLKIF_H */ diff --git a/hw/char/Makefile.objs b/hw/char/Makefile.objs index c4947d7ae7..cf086e7114 100644 --- a/hw/char/Makefile.objs +++ b/hw/char/Makefile.objs @@ -2,7 +2,7 @@ common-obj-$(CONFIG_IPACK) += ipoctal232.o common-obj-$(CONFIG_ESCC) += escc.o common-obj-$(CONFIG_NRF51_SOC) += nrf51_uart.o common-obj-$(CONFIG_PARALLEL) += parallel.o -common-obj-$(CONFIG_PARALLEL) += parallel-isa.o +common-obj-$(CONFIG_ISA_BUS) += parallel-isa.o common-obj-$(CONFIG_PL011) += pl011.o common-obj-$(CONFIG_SERIAL) += serial.o common-obj-$(CONFIG_SERIAL_ISA) += serial-isa.o diff --git a/hw/char/parallel-isa.c b/hw/char/parallel-isa.c index 639e179585..a043832e72 100644 --- a/hw/char/parallel-isa.c +++ b/hw/char/parallel-isa.c @@ -1,6 +1,9 @@ /* * QEMU Parallel PORT (ISA bus helpers) * + * These functions reside in a separate file since they also might be + * required for linking when compiling QEMU without CONFIG_PARALLEL. + * * Copyright (c) 2003 Fabrice Bellard * * SPDX-License-Identifier: MIT diff --git a/hw/char/trace-events b/hw/char/trace-events index de34a74399..2ce7f2f998 100644 --- a/hw/char/trace-events +++ b/hw/char/trace-events @@ -1,47 +1,47 @@ # See docs/devel/tracing.txt for syntax documentation. 
-# hw/char/parallel.c +# parallel.c parallel_ioport_read(const char *desc, uint16_t addr, uint8_t value) "read [%s] addr 0x%02x val 0x%02x" parallel_ioport_write(const char *desc, uint16_t addr, uint8_t value) "write [%s] addr 0x%02x val 0x%02x" -# hw/char/serial.c +# serial.c serial_ioport_read(uint16_t addr, uint8_t value) "read addr 0x%02x val 0x%02x" serial_ioport_write(uint16_t addr, uint8_t value) "write addr 0x%02x val 0x%02x" -# hw/char/virtio-serial-bus.c +# virtio-serial-bus.c virtio_serial_send_control_event(unsigned int port, uint16_t event, uint16_t value) "port %u, event %u, value %u" virtio_serial_throttle_port(unsigned int port, bool throttle) "port %u, throttle %d" virtio_serial_handle_control_message(uint16_t event, uint16_t value) "event %u, value %u" virtio_serial_handle_control_message_port(unsigned int port) "port %u" -# hw/char/virtio-console.c +# virtio-console.c virtio_console_flush_buf(unsigned int port, size_t len, ssize_t ret) "port %u, in_len %zu, out_len %zd" virtio_console_chr_read(unsigned int port, int size) "port %u, size %d" virtio_console_chr_event(unsigned int port, int event) "port %u, event %d" -# hw/char/grlib_apbuart.c +# grlib_apbuart.c grlib_apbuart_event(int event) "event:%d" grlib_apbuart_writel_unknown(uint64_t addr, uint32_t value) "addr 0x%"PRIx64" value 0x%x" grlib_apbuart_readl_unknown(uint64_t addr) "addr 0x%"PRIx64 -# hw/char/lm32_juart.c +# lm32_juart.c lm32_juart_get_jtx(uint32_t value) "jtx 0x%08x" lm32_juart_set_jtx(uint32_t value) "jtx 0x%08x" lm32_juart_get_jrx(uint32_t value) "jrx 0x%08x" lm32_juart_set_jrx(uint32_t value) "jrx 0x%08x" -# hw/char/lm32_uart.c +# lm32_uart.c lm32_uart_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" lm32_uart_memory_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" lm32_uart_irq_state(int level) "irq state %d" -# hw/char/milkymist-uart.c +# milkymist-uart.c milkymist_uart_memory_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_uart_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_uart_raise_irq(void) "Raise IRQ" milkymist_uart_lower_irq(void) "Lower IRQ" -# hw/char/escc.c +# escc.c escc_put_queue(char channel, int b) "channel %c put: 0x%02x" escc_get_queue(char channel, int val) "channel %c get 0x%02x" escc_update_irq(int irq) "IRQ = %d" @@ -56,7 +56,7 @@ escc_sunkbd_event_out(int ch) "Translated keycode 0x%2.2x" escc_kbd_command(int val) "Command %d" escc_sunmouse_event(int dx, int dy, int buttons_state) "dx=%d dy=%d buttons=0x%01x" -# hw/char/pl011.c +# pl011.c pl011_irq_state(int level) "irq state %d" pl011_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" pl011_read_fifo(int read_count) "FIFO read, read_count now %d" @@ -65,7 +65,7 @@ pl011_can_receive(uint32_t lcr, int read_count, int r) "LCR 0x%08x read_count %d pl011_put_fifo(uint32_t c, int read_count) "new char 0x%x read_count now %d" pl011_put_fifo_full(void) "FIFO now full, RXFF set" -# hw/char/cmsdk_apb_uart.c +# cmsdk-apb-uart.c cmsdk_apb_uart_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB UART read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_uart_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB UART write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_uart_reset(void) "CMSDK APB UART: reset" @@ -74,6 +74,6 @@ cmsdk_apb_uart_tx_pending(void) "CMSDK APB UART: character send to backend pendi cmsdk_apb_uart_tx(uint8_t c) "CMSDK APB UART: character 0x%x sent to backend" 
cmsdk_apb_uart_set_params(int speed) "CMSDK APB UART: params set to %d 8N1" -# hw/char/nrf51_uart.c +# nrf51_uart.c nrf51_uart_read(uint64_t addr, uint64_t r, unsigned int size) "addr 0x%" PRIx64 " value 0x%" PRIx64 " size %u" nrf51_uart_write(uint64_t addr, uint64_t value, unsigned int size) "addr 0x%" PRIx64 " value 0x%" PRIx64 " size %u" diff --git a/hw/core/loader-fit.c b/hw/core/loader-fit.c index 447f60857d..f27b6af942 100644 --- a/hw/core/loader-fit.c +++ b/hw/core/loader-fit.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "qapi/error.h" #include "qemu/units.h" #include "exec/memory.h" #include "hw/loader.h" @@ -33,7 +34,7 @@ #define FIT_LOADER_MAX_PATH (128) static const void *fit_load_image_alloc(const void *itb, const char *name, - int *poff, size_t *psz) + int *poff, size_t *psz, Error **errp) { const void *data; const char *comp; @@ -46,6 +47,7 @@ static const void *fit_load_image_alloc(const void *itb, const char *name, off = fdt_path_offset(itb, path); if (off < 0) { + error_setg(errp, "can't find node %s", path); return NULL; } if (poff) { @@ -54,6 +56,7 @@ static const void *fit_load_image_alloc(const void *itb, const char *name, data = fdt_getprop(itb, off, "data", &sz); if (!data) { + error_setg(errp, "can't get %s/data", path); return NULL; } @@ -73,7 +76,7 @@ static const void *fit_load_image_alloc(const void *itb, const char *name, uncomp_len = gunzip(uncomp_data, uncomp_len, (void *) data, sz); if (uncomp_len < 0) { - error_printf("unable to decompress %s image\n", name); + error_setg(errp, "unable to decompress %s image", name); g_free(uncomp_data); return NULL; } @@ -85,18 +88,19 @@ static const void *fit_load_image_alloc(const void *itb, const char *name, return data; } - error_printf("unknown compression '%s'\n", comp); + error_setg(errp, "unknown compression '%s'", comp); return NULL; } static int fit_image_addr(const void *itb, int img, const char *name, - hwaddr *addr) + hwaddr *addr, Error **errp) { const void *prop; int len; prop = fdt_getprop(itb, img, name, &len); if (!prop) { + error_setg(errp, "can't find %s address", name); return -ENOENT; } @@ -108,13 +112,14 @@ static int fit_image_addr(const void *itb, int img, const char *name, *addr = fdt64_to_cpu(*(fdt64_t *)prop); return 0; default: - error_printf("invalid %s address length %d\n", name, len); + error_setg(errp, "invalid %s address length %d", name, len); return -EINVAL; } } static int fit_load_kernel(const struct fit_loader *ldr, const void *itb, - int cfg, void *opaque, hwaddr *pend) + int cfg, void *opaque, hwaddr *pend, + Error **errp) { const char *name; const void *data; @@ -126,26 +131,26 @@ static int fit_load_kernel(const struct fit_loader *ldr, const void *itb, name = fdt_getprop(itb, cfg, "kernel", NULL); if (!name) { - error_printf("no kernel specified by FIT configuration\n"); + error_setg(errp, "no kernel specified by FIT configuration"); return -EINVAL; } - load_data = data = fit_load_image_alloc(itb, name, &img_off, &sz); + load_data = data = fit_load_image_alloc(itb, name, &img_off, &sz, errp); if (!data) { - error_printf("unable to load kernel image from FIT\n"); + error_prepend(errp, "unable to load kernel image from FIT: "); return -EINVAL; } - err = fit_image_addr(itb, img_off, "load", &load_addr); + err = fit_image_addr(itb, img_off, "load", &load_addr, errp); if (err) { - error_printf("unable to read kernel load address from FIT\n"); + error_prepend(errp, "unable to read kernel load address from FIT: "); ret = err; goto out; } - err = fit_image_addr(itb, img_off, 
"entry", &entry_addr); + err = fit_image_addr(itb, img_off, "entry", &entry_addr, errp); if (err) { - error_printf("unable to read kernel entry address from FIT\n"); + error_prepend(errp, "unable to read kernel entry address from FIT: "); ret = err; goto out; } @@ -172,7 +177,7 @@ out: static int fit_load_fdt(const struct fit_loader *ldr, const void *itb, int cfg, void *opaque, const void *match_data, - hwaddr kernel_end) + hwaddr kernel_end, Error **errp) { const char *name; const void *data; @@ -187,16 +192,18 @@ static int fit_load_fdt(const struct fit_loader *ldr, const void *itb, return 0; } - load_data = data = fit_load_image_alloc(itb, name, &img_off, &sz); + load_data = data = fit_load_image_alloc(itb, name, &img_off, &sz, errp); if (!data) { - error_printf("unable to load FDT image from FIT\n"); + error_prepend(errp, "unable to load FDT image from FIT: "); return -EINVAL; } - err = fit_image_addr(itb, img_off, "load", &load_addr); + err = fit_image_addr(itb, img_off, "load", &load_addr, errp); if (err == -ENOENT) { load_addr = ROUND_UP(kernel_end, 64 * KiB) + (10 * MiB); + error_free(*errp); } else if (err) { + error_prepend(errp, "unable to read FDT load address from FIT: "); ret = err; goto out; } @@ -229,7 +236,7 @@ static bool fit_cfg_compatible(const void *itb, int cfg, const char *compat) return false; } - fdt = fit_load_image_alloc(itb, fdt_name, NULL, NULL); + fdt = fit_load_image_alloc(itb, fdt_name, NULL, NULL, NULL); if (!fdt) { return false; } @@ -252,11 +259,12 @@ out: int load_fit(const struct fit_loader *ldr, const char *filename, void *opaque) { + Error *err = NULL; const struct fit_loader_match *match; const void *itb, *match_data = NULL; const char *def_cfg_name; char path[FIT_LOADER_MAX_PATH]; - int itb_size, configs, cfg_off, off, err; + int itb_size, configs, cfg_off, off; hwaddr kernel_end; int ret; @@ -267,6 +275,7 @@ int load_fit(const struct fit_loader *ldr, const char *filename, void *opaque) configs = fdt_path_offset(itb, "/configurations"); if (configs < 0) { + error_report("can't find node /configurations"); ret = configs; goto out; } @@ -301,20 +310,21 @@ int load_fit(const struct fit_loader *ldr, const char *filename, void *opaque) } if (cfg_off < 0) { - /* couldn't find a configuration to use */ + error_report("can't find configuration"); ret = cfg_off; goto out; } - err = fit_load_kernel(ldr, itb, cfg_off, opaque, &kernel_end); - if (err) { - ret = err; + ret = fit_load_kernel(ldr, itb, cfg_off, opaque, &kernel_end, &err); + if (ret) { + error_report_err(err); goto out; } - err = fit_load_fdt(ldr, itb, cfg_off, opaque, match_data, kernel_end); - if (err) { - ret = err; + ret = fit_load_fdt(ldr, itb, cfg_off, opaque, match_data, kernel_end, + &err); + if (ret) { + error_report_err(err); goto out; } diff --git a/hw/core/machine.c b/hw/core/machine.c index 766ca5899d..5d046a43e3 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -22,6 +22,10 @@ #include "qemu/error-report.h" #include "sysemu/qtest.h" #include "hw/pci/pci.h" +#include "hw/mem/nvdimm.h" + +GlobalProperty hw_compat_4_0[] = {}; +const size_t hw_compat_4_0_len = G_N_ELEMENTS(hw_compat_4_0); GlobalProperty hw_compat_3_1[] = { { "pcie-root-port", "x-speed", "2_5" }, @@ -481,6 +485,47 @@ static void machine_set_memory_encryption(Object *obj, const char *value, ms->memory_encryption = g_strdup(value); } +static bool machine_get_nvdimm(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return ms->nvdimms_state->is_enabled; +} + +static void machine_set_nvdimm(Object 
*obj, bool value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + ms->nvdimms_state->is_enabled = value; +} + +static char *machine_get_nvdimm_persistence(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return g_strdup(ms->nvdimms_state->persistence_string); +} + +static void machine_set_nvdimm_persistence(Object *obj, const char *value, + Error **errp) +{ + MachineState *ms = MACHINE(obj); + NVDIMMState *nvdimms_state = ms->nvdimms_state; + + if (strcmp(value, "cpu") == 0) { + nvdimms_state->persistence = 3; + } else if (strcmp(value, "mem-ctrl") == 0) { + nvdimms_state->persistence = 2; + } else { + error_setg(errp, "-machine nvdimm-persistence=%s: unsupported option", + value); + return; + } + + g_free(nvdimms_state->persistence_string); + nvdimms_state->persistence_string = g_strdup(value); +} + void machine_class_allow_dynamic_sysbus_dev(MachineClass *mc, const char *type) { strList *item = g_new0(strList, 1); @@ -791,6 +836,28 @@ static void machine_initfn(Object *obj) ms->mem_merge = true; ms->enable_graphics = true; + if (mc->nvdimm_supported) { + Object *obj = OBJECT(ms); + + ms->nvdimms_state = g_new0(NVDIMMState, 1); + object_property_add_bool(obj, "nvdimm", + machine_get_nvdimm, machine_set_nvdimm, + &error_abort); + object_property_set_description(obj, "nvdimm", + "Set on/off to enable/disable " + "NVDIMM instantiation", NULL); + + object_property_add_str(obj, "nvdimm-persistence", + machine_get_nvdimm_persistence, + machine_set_nvdimm_persistence, + &error_abort); + object_property_set_description(obj, "nvdimm-persistence", + "Set NVDIMM persistence" + "Valid values are cpu, mem-ctrl", + NULL); + } + + /* Register notifier when init is done for sysbus sanity checks */ ms->sysbus_notifier.notify = machine_init_notify; qemu_add_machine_init_done_notifier(&ms->sysbus_notifier); @@ -809,6 +876,7 @@ static void machine_finalize(Object *obj) g_free(ms->dt_compatible); g_free(ms->firmware); g_free(ms->device_memory); + g_free(ms->nvdimms_state); } bool machine_usb(MachineState *machine) diff --git a/hw/display/Kconfig b/hw/display/Kconfig index 86c1d544c5..72be57a403 100644 --- a/hw/display/Kconfig +++ b/hw/display/Kconfig @@ -100,7 +100,7 @@ config VIRTIO_GPU config VIRTIO_VGA bool - default y if PCI_DEVICES + # defaults to "N", enabled by specific boards depends on VIRTIO_PCI select VGA diff --git a/hw/display/ati.c b/hw/display/ati.c index 8322f52aff..db409be3c9 100644 --- a/hw/display/ati.c +++ b/hw/display/ati.c @@ -235,12 +235,9 @@ static uint64_t ati_mm_read(void *opaque, hwaddr addr, unsigned int size) case MM_DATA ... MM_DATA + 3: /* indexed access to regs or memory */ if (s->regs.mm_index & BIT(31)) { - if (s->regs.mm_index <= s->vga.vram_size - size) { - int i = size - 1; - while (i >= 0) { - val <<= 8; - val |= s->vga.vram_ptr[s->regs.mm_index + i--]; - } + uint32_t idx = s->regs.mm_index & ~BIT(31); + if (idx <= s->vga.vram_size - size) { + val = ldn_le_p(s->vga.vram_ptr + idx, size); } } else { val = ati_mm_read(s, s->regs.mm_index + addr - MM_DATA, size); @@ -434,12 +431,9 @@ static void ati_mm_write(void *opaque, hwaddr addr, case MM_DATA ... 
MM_DATA + 3: /* indexed access to regs or memory */ if (s->regs.mm_index & BIT(31)) { - if (s->regs.mm_index <= s->vga.vram_size - size) { - int i = 0; - while (i < size) { - s->vga.vram_ptr[s->regs.mm_index + i] = data & 0xff; - data >>= 8; - } + uint32_t idx = s->regs.mm_index & ~BIT(31); + if (idx <= s->vga.vram_size - size) { + stn_le_p(s->vga.vram_ptr + idx, size, data); } } else { ati_mm_write(s, s->regs.mm_index + addr - MM_DATA, data, size); diff --git a/hw/display/blizzard.c b/hw/display/blizzard.c index 291abe6fca..471bd0ed99 100644 --- a/hw/display/blizzard.c +++ b/hw/display/blizzard.c @@ -21,7 +21,7 @@ #include "qemu/osdep.h" #include "qemu-common.h" #include "ui/console.h" -#include "hw/devices.h" +#include "hw/display/blizzard.h" #include "ui/pixel_ops.h" typedef void (*blizzard_fn_t)(uint8_t *, const uint8_t *, unsigned int); diff --git a/hw/display/tc6393xb.c b/hw/display/tc6393xb.c index e1b1e302f2..0b7c59cde7 100644 --- a/hw/display/tc6393xb.c +++ b/hw/display/tc6393xb.c @@ -14,7 +14,7 @@ #include "qapi/error.h" #include "qemu/host-utils.h" #include "hw/hw.h" -#include "hw/devices.h" +#include "hw/display/tc6393xb.h" #include "hw/block/flash.h" #include "ui/console.h" #include "ui/pixel_ops.h" @@ -137,11 +137,6 @@ struct TC6393xbState { blanked : 1; }; -qemu_irq *tc6393xb_gpio_in_get(TC6393xbState *s) -{ - return s->gpio_in; -} - static void tc6393xb_gpio_set(void *opaque, int line, int level) { // TC6393xbState *s = opaque; @@ -154,17 +149,6 @@ static void tc6393xb_gpio_set(void *opaque, int line, int level) // FIXME: how does the chip reflect the GPIO input level change? } -void tc6393xb_gpio_out_set(TC6393xbState *s, int line, - qemu_irq handler) -{ - if (line >= TC6393XB_GPIOS) { - fprintf(stderr, "TC6393xb: no GPIO pin %d\n", line); - return; - } - - s->handler[line] = handler; -} - static void tc6393xb_gpio_handler_update(TC6393xbState *s) { uint32_t level, diff; diff --git a/hw/display/trace-events b/hw/display/trace-events index 80993cc4d9..ba7787b180 100644 --- a/hw/display/trace-events +++ b/hw/display/trace-events @@ -1,29 +1,29 @@ # See docs/devel/tracing.txt for syntax documentation. 
-# hw/display/jazz_led.c +# jazz_led.c jazz_led_read(uint64_t addr, uint8_t val) "read addr=0x%"PRIx64": 0x%x" jazz_led_write(uint64_t addr, uint8_t new) "write addr=0x%"PRIx64": 0x%x" -# hw/display/xenfb.c +# xenfb.c xenfb_mouse_event(void *opaque, int dx, int dy, int dz, int button_state, int abs_pointer_wanted) "%p x %d y %d z %d bs 0x%x abs %d" xenfb_key_event(void *opaque, int scancode, int button_state) "%p scancode %d bs 0x%x" xenfb_input_connected(void *xendev, int abs_pointer_wanted) "%p abs %d" -# hw/display/g364fb.c +# g364fb.c g364fb_read(uint64_t addr, uint32_t val) "read addr=0x%"PRIx64": 0x%x" g364fb_write(uint64_t addr, uint32_t new) "write addr=0x%"PRIx64": 0x%x" -# hw/display/milkymist-tmu2.c +# milkymist-tmu2.c milkymist_tmu2_memory_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_tmu2_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_tmu2_start(void) "Start TMU" milkymist_tmu2_pulse_irq(void) "Pulse IRQ" -# hw/display/milkymist-vgafb.c +# milkymist-vgafb.c milkymist_vgafb_memory_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_vgafb_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" -# hw/display/vmware_vga.c +# vmware_vga.c vmware_value_read(uint32_t index, uint32_t value) "index %d, value 0x%x" vmware_value_write(uint32_t index, uint32_t value) "index %d, value 0x%x" vmware_palette_read(uint32_t index, uint32_t value) "index %d, value 0x%x" @@ -32,7 +32,8 @@ vmware_scratch_read(uint32_t index, uint32_t value) "index %d, value 0x%x" vmware_scratch_write(uint32_t index, uint32_t value) "index %d, value 0x%x" vmware_setmode(uint32_t w, uint32_t h, uint32_t bpp) "%dx%d @ %d bpp" -# hw/display/virtio-gpu.c +# virtio-gpu-3d.c +# virtio-gpu.c virtio_gpu_features(bool virgl) "virgl %d" virtio_gpu_cmd_get_display_info(void) "" virtio_gpu_cmd_get_edid(uint32_t scanout) "scanout %d" @@ -55,7 +56,7 @@ virtio_gpu_update_cursor(uint32_t scanout, uint32_t x, uint32_t y, const char *t virtio_gpu_fence_ctrl(uint64_t fence, uint32_t type) "fence 0x%" PRIx64 ", type 0x%x" virtio_gpu_fence_resp(uint64_t fence) "fence 0x%" PRIx64 -# hw/display/qxl.c +# qxl.c disable qxl_interface_set_mm_time(int qid, uint32_t mm_time) "%d %d" disable qxl_io_write_vga(int qid, const char *mode, uint32_t addr, uint32_t val) "%d %s addr=%u val=%u" qxl_create_guest_primary(int qid, uint32_t width, uint32_t height, uint64_t mem, uint32_t format, uint32_t position) "%d %ux%u mem=0x%" PRIx64 " %u,%u" @@ -117,28 +118,27 @@ qxl_client_monitors_config_capped(int qid, int requested, int limit) "%d %d %d" qxl_client_monitors_config_crc(int qid, unsigned size, uint32_t crc32) "%d %u %u" qxl_set_client_capabilities_unsupported_by_revision(int qid, int revision) "%d revision=%d" -# hw/display/qxl-render.c +# qxl-render.c qxl_render_blit(int32_t stride, int32_t left, int32_t right, int32_t top, int32_t bottom) "stride=%d [%d, %d, %d, %d]" qxl_render_guest_primary_resized(int32_t width, int32_t height, int32_t stride, int32_t bytes_pp, int32_t bits_pp) "%dx%d, stride %d, bpp %d, depth %d" qxl_render_update_area_done(void *cookie) "%p" -# hw/display/vga.c +# vga.c vga_std_read_io(uint32_t addr, uint32_t val) "addr 0x%x, val 0x%x" vga_std_write_io(uint32_t addr, uint32_t val) "addr 0x%x, val 0x%x" vga_vbe_read(uint32_t index, uint32_t val) "index 0x%x, val 0x%x" vga_vbe_write(uint32_t index, uint32_t val) "index 0x%x, val 0x%x" -# hw/display/cirrus_vga.c +# cirrus_vga.c vga_cirrus_read_io(uint32_t addr, uint32_t val) "addr 
0x%x, val 0x%x" vga_cirrus_write_io(uint32_t addr, uint32_t val) "addr 0x%x, val 0x%x" -vga_cirrus_read_blt(uint32_t offset, uint32_t val) "offset 0x%x, val 0x%x" vga_cirrus_write_blt(uint32_t offset, uint32_t val) "offset 0x%x, val 0x%x" -# hw/display/sii9022.c +# sii9022.c sii9022_read_reg(uint8_t addr, uint8_t val) "addr 0x%02x, val 0x%02x" sii9022_write_reg(uint8_t addr, uint8_t val) "addr 0x%02x, val 0x%02x" sii9022_switch_mode(const char *mode) "mode: %s" -# hw/display/ati*.c -ati_mm_read(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"HWADDR_PRIx " %s -> 0x%"PRIx64 -ati_mm_write(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"HWADDR_PRIx " %s <- 0x%"PRIx64 +# ati.c +ati_mm_read(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"PRIx64 " %s -> 0x%"PRIx64 +ati_mm_write(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"PRIx64 " %s <- 0x%"PRIx64 diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 4dbf48e424..9e37e0ac96 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -1084,6 +1084,12 @@ static void virtio_gpu_gl_block(void *opaque, bool block) assert(g->renderer_blocked >= 0); if (g->renderer_blocked == 0) { +#ifdef CONFIG_VIRGL + if (g->renderer_reset) { + g->renderer_reset = false; + virtio_gpu_virgl_reset(g); + } +#endif virtio_gpu_process_cmdq(g); } } @@ -1350,6 +1356,7 @@ static void virtio_gpu_reset(VirtIODevice *vdev) { VirtIOGPU *g = VIRTIO_GPU(vdev); struct virtio_gpu_simple_resource *res, *tmp; + struct virtio_gpu_ctrl_command *cmd; int i; g->enable = 0; @@ -1366,9 +1373,26 @@ static void virtio_gpu_reset(VirtIODevice *vdev) g->scanout[i].ds = NULL; } + while (!QTAILQ_EMPTY(&g->cmdq)) { + cmd = QTAILQ_FIRST(&g->cmdq); + QTAILQ_REMOVE(&g->cmdq, cmd, next); + g_free(cmd); + } + + while (!QTAILQ_EMPTY(&g->fenceq)) { + cmd = QTAILQ_FIRST(&g->fenceq); + QTAILQ_REMOVE(&g->fenceq, cmd, next); + g->inflight--; + g_free(cmd); + } + #ifdef CONFIG_VIRGL if (g->use_virgl_renderer) { - virtio_gpu_virgl_reset(g); + if (g->renderer_blocked) { + g->renderer_reset = true; + } else { + virtio_gpu_virgl_reset(g); + } g->use_virgl_renderer = 0; } #endif diff --git a/hw/display/xlnx_dp.c b/hw/display/xlnx_dp.c index cc0f9bc9cc..11b09bd18c 100644 --- a/hw/display/xlnx_dp.c +++ b/hw/display/xlnx_dp.c @@ -1260,7 +1260,7 @@ static void xlnx_dp_realize(DeviceState *dev, Error **errp) as.freq = 44100; as.nchannels = 2; - as.fmt = AUD_FMT_S16; + as.fmt = AUDIO_FORMAT_S16; as.endianness = 0; AUD_register_card("xlnx_dp.audio", &s->aud_card); diff --git a/hw/dma/Makefile.objs b/hw/dma/Makefile.objs index 79affecc39..8b39f9c600 100644 --- a/hw/dma/Makefile.objs +++ b/hw/dma/Makefile.objs @@ -14,4 +14,4 @@ common-obj-$(CONFIG_XLNX_ZYNQMP_ARM) += xlnx-zdma.o obj-$(CONFIG_OMAP) += omap_dma.o soc_dma.o obj-$(CONFIG_PXA2XX) += pxa2xx_dma.o -obj-$(CONFIG_RASPI) += bcm2835_dma.o +common-obj-$(CONFIG_RASPI) += bcm2835_dma.o diff --git a/hw/dma/trace-events b/hw/dma/trace-events index 22f53d0ff2..e4498428c5 100644 --- a/hw/dma/trace-events +++ b/hw/dma/trace-events @@ -1,12 +1,12 @@ # See docs/devel/tracing.txt for syntax documentation. 
-# hw/dma/rc4030.c +# rc4030.c jazzio_read(uint64_t addr, uint32_t ret) "read reg[0x%"PRIx64"] = 0x%x" jazzio_write(uint64_t addr, uint32_t val) "write reg[0x%"PRIx64"] = 0x%x" rc4030_read(uint64_t addr, uint32_t ret) "read reg[0x%"PRIx64"] = 0x%x" rc4030_write(uint64_t addr, uint32_t val) "write reg[0x%"PRIx64"] = 0x%x" -# hw/dma/sparc32_dma.c +# sparc32_dma.c ledma_memory_read(uint64_t addr, int len) "DMA read addr 0x%"PRIx64 " len %d" ledma_memory_write(uint64_t addr, int len) "DMA write addr 0x%"PRIx64 " len %d" sparc32_dma_set_irq_raise(void) "Raise IRQ" @@ -18,5 +18,5 @@ sparc32_dma_mem_writel(uint64_t addr, uint32_t old, uint32_t val) "write dmareg sparc32_dma_enable_raise(void) "Raise DMA enable" sparc32_dma_enable_lower(void) "Lower DMA enable" -# hw/dma/i8257.c +# i8257.c i8257_unregistered_dma(int nchan, int dma_pos, int dma_len) "unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d" diff --git a/hw/gpio/nrf51_gpio.c b/hw/gpio/nrf51_gpio.c index 86e047d649..87a2f2a0dc 100644 --- a/hw/gpio/nrf51_gpio.c +++ b/hw/gpio/nrf51_gpio.c @@ -43,6 +43,17 @@ static bool is_connected(uint32_t config, uint32_t level) return state; } +static int pull_value(uint32_t config) +{ + int pull = extract32(config, 2, 2); + if (pull == NRF51_GPIO_PULLDOWN) { + return 0; + } else if (pull == NRF51_GPIO_PULLUP) { + return 1; + } + return -1; +} + static void update_output_irq(NRF51GPIOState *s, size_t i, bool connected, bool level) { @@ -61,43 +72,47 @@ static void update_output_irq(NRF51GPIOState *s, size_t i, static void update_state(NRF51GPIOState *s) { - uint32_t pull; + int pull; size_t i; - bool connected_out, dir, connected_in, out, input; + bool connected_out, dir, connected_in, out, in, input; for (i = 0; i < NRF51_GPIO_PINS; i++) { - pull = extract32(s->cnf[i], 2, 2); + pull = pull_value(s->cnf[i]); dir = extract32(s->cnf[i], 0, 1); connected_in = extract32(s->in_mask, i, 1); out = extract32(s->out, i, 1); + in = extract32(s->in, i, 1); input = !extract32(s->cnf[i], 1, 1); connected_out = is_connected(s->cnf[i], out) && dir; - update_output_irq(s, i, connected_out, out); - - /* Pin both driven externally and internally */ - if (connected_out && connected_in) { - qemu_log_mask(LOG_GUEST_ERROR, "GPIO pin %zu short circuited\n", i); - } - - /* - * Input buffer disconnected from internal/external drives, so - * pull-up/pull-down becomes relevant - */ - if (!input || (input && !connected_in && !connected_out)) { - if (pull == NRF51_GPIO_PULLDOWN) { - s->in = deposit32(s->in, i, 1, 0); - } else if (pull == NRF51_GPIO_PULLUP) { - s->in = deposit32(s->in, i, 1, 1); + if (!input) { + if (pull >= 0) { + /* Input buffer disconnected from external drives */ + s->in = deposit32(s->in, i, 1, pull); + } + } else { + if (connected_out && connected_in && out != in) { + /* Pin both driven externally and internally */ + qemu_log_mask(LOG_GUEST_ERROR, + "GPIO pin %zu short circuited\n", i); + } + if (!connected_in) { + /* + * Floating input: the output stimulates IN if connected, + * otherwise pull-up/pull-down resistors put a value on both + * IN and OUT. 
+ */ + if (pull >= 0 && !connected_out) { + connected_out = true; + out = pull; + } + if (connected_out) { + s->in = deposit32(s->in, i, 1, out); + } } } - - /* Self stimulation through internal output driver */ - if (connected_out && !connected_in && input) { - s->in = deposit32(s->in, i, 1, out); - } + update_output_irq(s, i, connected_out, out); } - } /* diff --git a/hw/gpio/trace-events b/hw/gpio/trace-events index 5d4dd200c2..c1271fdfb2 100644 --- a/hw/gpio/trace-events +++ b/hw/gpio/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/gpio/nrf51_gpio.c +# nrf51_gpio.c nrf51_gpio_read(uint64_t offset, uint64_t r) "offset 0x%" PRIx64 " value 0x%" PRIx64 nrf51_gpio_write(uint64_t offset, uint64_t value) "offset 0x%" PRIx64 " value 0x%" PRIx64 nrf51_gpio_set(int64_t line, int64_t value) "line %" PRIi64 " value %" PRIi64 diff --git a/hw/hppa/Kconfig b/hw/hppa/Kconfig index 2d9b072c21..6e5d74a825 100644 --- a/hw/hppa/Kconfig +++ b/hw/hppa/Kconfig @@ -1,6 +1,8 @@ config DINO bool imply PCI_DEVICES + imply E1000_PCI + imply VIRTIO_VGA select PCI select SERIAL select ISA_BUS diff --git a/hw/hppa/hppa_hardware.h b/hw/hppa/hppa_hardware.h index 2c61b1f77c..af2f5ee2bd 100644 --- a/hw/hppa/hppa_hardware.h +++ b/hw/hppa/hppa_hardware.h @@ -19,7 +19,7 @@ #define LASI_PS2KBD_HPA 0xffd08000 #define LASI_PS2MOU_HPA 0xffd08100 #define LASI_GFX_HPA 0xf8000000 -#define CPU_HPA 0xfff10000 +#define CPU_HPA 0xfffb0000 #define MEMORY_HPA 0xfffbf000 #define PCI_HPA DINO_HPA /* PCI bus */ @@ -36,5 +36,5 @@ #define PORT_SERIAL1 (DINO_UART_HPA + 0x800) #define PORT_SERIAL2 (LASI_UART_HPA + 0x800) -#define HPPA_MAX_CPUS 32 /* max. number of SMP CPUs */ +#define HPPA_MAX_CPUS 8 /* max. number of SMP CPUs */ #define CPU_CLOCK_MHZ 250 /* emulate a 250 MHz CPU */ diff --git a/hw/hppa/trace-events b/hw/hppa/trace-events index 14c67937e1..4e2acb6176 100644 --- a/hw/hppa/trace-events +++ b/hw/hppa/trace-events @@ -1,4 +1,4 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/hppa/pci.c +# pci.c hppa_pci_iack_write(void) "" diff --git a/hw/i2c/trace-events b/hw/i2c/trace-events index d339b61202..e1c810d5bd 100644 --- a/hw/i2c/trace-events +++ b/hw/i2c/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. 
-# hw/i2c/core.c +# core.c i2c_event(const char *event, uint8_t address) "%s(addr:0x%02x)" i2c_send(uint8_t address, uint8_t data) "send(addr:0x%02x) data:0x%02x" diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig index 78fd70396a..a6aed7c131 100644 --- a/hw/i386/Kconfig +++ b/hw/i386/Kconfig @@ -9,6 +9,7 @@ config PC imply ISA_IPMI_KCS imply ISA_IPMI_BT imply ISA_DEBUG + imply PARALLEL imply PCI_DEVICES imply PVPANIC imply QXL @@ -17,16 +18,15 @@ config PC imply TEST_DEVICES imply TPM_CRB imply TPM_TIS + imply VGA_PCI + imply VIRTIO_VGA select FDC select I8259 select I8254 select PCKBD select PCSPK - select I82374 select I8257 select MC146818RTC - # Needed by the board code: - select PARALLEL # For ACPI builder: select SERIAL_ISA select ACPI_VMGENID @@ -49,6 +49,7 @@ config PC_ACPI config I440FX bool + imply E1000_PCI select PC_PCI select PC_ACPI select ACPI_SMBUS @@ -74,6 +75,7 @@ config Q35 bool imply VTD imply AMD_IOMMU + imply E1000E_PCI_EXPRESS select PC_PCI select PC_ACPI select PCI_EXPRESS_Q35 diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 9ecc96dcc7..416da318ae 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -1867,7 +1867,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(scope, method); } - if (pcms->acpi_nvdimm_state.is_enabled) { + if (machine->nvdimms_state->is_enabled) { method = aml_method("_E04", 0, AML_NOTSERIALIZED); aml_append(method, aml_notify(aml_name("\\_SB.NVDR"), aml_int(0x80))); @@ -2704,9 +2704,9 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) build_dmar_q35(tables_blob, tables->linker); } } - if (pcms->acpi_nvdimm_state.is_enabled) { + if (machine->nvdimms_state->is_enabled) { nvdimm_build_acpi(table_offsets, tables_blob, tables->linker, - &pcms->acpi_nvdimm_state, machine->ram_slots); + machine->nvdimms_state, machine->ram_slots); } /* Add tables supplied by user (if any) */ diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index 6eabdf9917..4a4e2c7fd4 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -1601,6 +1601,8 @@ static void amdvi_class_init(ObjectClass *klass, void* data) dc_class->int_remap = amdvi_int_remap; /* Supported by the pc-q35-* machine types */ dc->user_creatable = true; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device"; } static const TypeInfo amdvi = { diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index ee22e754f0..44b1231157 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -37,6 +37,27 @@ #include "kvm_i386.h" #include "trace.h" +/* context entry operations */ +#define VTD_CE_GET_RID2PASID(ce) \ + ((ce)->val[1] & VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK) +#define VTD_CE_GET_PASID_DIR_TABLE(ce) \ + ((ce)->val[0] & VTD_PASID_DIR_BASE_ADDR_MASK) + +/* pe operations */ +#define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT) +#define VTD_PE_GET_LEVEL(pe) (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW)) +#define VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write) {\ + if (ret_fr) { \ + ret_fr = -ret_fr; \ + if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) { \ + trace_vtd_fault_disabled(); \ + } else { \ + vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write); \ + } \ + goto error; \ + } \ +} + static void vtd_address_space_refresh_all(IntelIOMMUState *s); static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n); @@ -141,6 +162,15 @@ static inline void vtd_iommu_unlock(IntelIOMMUState *s) qemu_mutex_unlock(&s->iommu_lock); } +static 
void vtd_update_scalable_state(IntelIOMMUState *s) +{ + uint64_t val = vtd_get_quad_raw(s, DMAR_RTADDR_REG); + + if (s->scalable_mode) { + s->root_scalable = val & VTD_RTADDR_SMT; + } +} + /* Whether the address space needs to notify new mappings */ static inline gboolean vtd_as_has_map_notifier(VTDAddressSpace *as) { @@ -512,9 +542,15 @@ static void vtd_generate_completion_event(IntelIOMMUState *s) } } -static inline bool vtd_root_entry_present(VTDRootEntry *root) +static inline bool vtd_root_entry_present(IntelIOMMUState *s, + VTDRootEntry *re, + uint8_t devfn) { - return root->val & VTD_ROOT_ENTRY_P; + if (s->root_scalable && devfn > UINT8_MAX / 2) { + return re->hi & VTD_ROOT_ENTRY_P; + } + + return re->lo & VTD_ROOT_ENTRY_P; } static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index, @@ -524,10 +560,11 @@ static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index, addr = s->root + index * sizeof(*re); if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) { - re->val = 0; + re->lo = 0; return -VTD_FR_ROOT_TABLE_INV; } - re->val = le64_to_cpu(re->val); + re->lo = le64_to_cpu(re->lo); + re->hi = le64_to_cpu(re->hi); return 0; } @@ -536,18 +573,35 @@ static inline bool vtd_ce_present(VTDContextEntry *context) return context->lo & VTD_CONTEXT_ENTRY_P; } -static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index, +static int vtd_get_context_entry_from_root(IntelIOMMUState *s, + VTDRootEntry *re, + uint8_t index, VTDContextEntry *ce) { - dma_addr_t addr; + dma_addr_t addr, ce_size; /* we have checked that root entry is present */ - addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce); - if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) { + ce_size = s->root_scalable ? VTD_CTX_ENTRY_SCALABLE_SIZE : + VTD_CTX_ENTRY_LEGACY_SIZE; + + if (s->root_scalable && index > UINT8_MAX / 2) { + index = index & (~VTD_DEVFN_CHECK_MASK); + addr = re->hi & VTD_ROOT_ENTRY_CTP; + } else { + addr = re->lo & VTD_ROOT_ENTRY_CTP; + } + + addr = addr + index * ce_size; + if (dma_memory_read(&address_space_memory, addr, ce, ce_size)) { return -VTD_FR_CONTEXT_TABLE_INV; } + ce->lo = le64_to_cpu(ce->lo); ce->hi = le64_to_cpu(ce->hi); + if (ce_size == VTD_CTX_ENTRY_SCALABLE_SIZE) { + ce->val[2] = le64_to_cpu(ce->val[2]); + ce->val[3] = le64_to_cpu(ce->val[3]); + } return 0; } @@ -600,6 +654,144 @@ static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level) (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT)); } +/* Return true if check passed, otherwise false */ +static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu, + VTDPASIDEntry *pe) +{ + switch (VTD_PE_GET_TYPE(pe)) { + case VTD_SM_PASID_ENTRY_FLT: + case VTD_SM_PASID_ENTRY_SLT: + case VTD_SM_PASID_ENTRY_NESTED: + break; + case VTD_SM_PASID_ENTRY_PT: + if (!x86_iommu->pt_supported) { + return false; + } + break; + default: + /* Unknwon type */ + return false; + } + return true; +} + +static int vtd_get_pasid_dire(dma_addr_t pasid_dir_base, + uint32_t pasid, + VTDPASIDDirEntry *pdire) +{ + uint32_t index; + dma_addr_t addr, entry_size; + + index = VTD_PASID_DIR_INDEX(pasid); + entry_size = VTD_PASID_DIR_ENTRY_SIZE; + addr = pasid_dir_base + index * entry_size; + if (dma_memory_read(&address_space_memory, addr, pdire, entry_size)) { + return -VTD_FR_PASID_TABLE_INV; + } + + return 0; +} + +static int vtd_get_pasid_entry(IntelIOMMUState *s, + uint32_t pasid, + VTDPASIDDirEntry *pdire, + VTDPASIDEntry *pe) +{ + uint32_t index; + dma_addr_t addr, entry_size; + X86IOMMUState *x86_iommu 
= X86_IOMMU_DEVICE(s); + + index = VTD_PASID_TABLE_INDEX(pasid); + entry_size = VTD_PASID_ENTRY_SIZE; + addr = pdire->val & VTD_PASID_TABLE_BASE_ADDR_MASK; + addr = addr + index * entry_size; + if (dma_memory_read(&address_space_memory, addr, pe, entry_size)) { + return -VTD_FR_PASID_TABLE_INV; + } + + /* Do translation type check */ + if (!vtd_pe_type_check(x86_iommu, pe)) { + return -VTD_FR_PASID_TABLE_INV; + } + + if (!vtd_is_level_supported(s, VTD_PE_GET_LEVEL(pe))) { + return -VTD_FR_PASID_TABLE_INV; + } + + return 0; +} + +static int vtd_get_pasid_entry_from_pasid(IntelIOMMUState *s, + dma_addr_t pasid_dir_base, + uint32_t pasid, + VTDPASIDEntry *pe) +{ + int ret; + VTDPASIDDirEntry pdire; + + ret = vtd_get_pasid_dire(pasid_dir_base, pasid, &pdire); + if (ret) { + return ret; + } + + ret = vtd_get_pasid_entry(s, pasid, &pdire, pe); + if (ret) { + return ret; + } + + return ret; +} + +static int vtd_ce_get_rid2pasid_entry(IntelIOMMUState *s, + VTDContextEntry *ce, + VTDPASIDEntry *pe) +{ + uint32_t pasid; + dma_addr_t pasid_dir_base; + int ret = 0; + + pasid = VTD_CE_GET_RID2PASID(ce); + pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce); + ret = vtd_get_pasid_entry_from_pasid(s, pasid_dir_base, pasid, pe); + + return ret; +} + +static int vtd_ce_get_pasid_fpd(IntelIOMMUState *s, + VTDContextEntry *ce, + bool *pe_fpd_set) +{ + int ret; + uint32_t pasid; + dma_addr_t pasid_dir_base; + VTDPASIDDirEntry pdire; + VTDPASIDEntry pe; + + pasid = VTD_CE_GET_RID2PASID(ce); + pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce); + + ret = vtd_get_pasid_dire(pasid_dir_base, pasid, &pdire); + if (ret) { + return ret; + } + + if (pdire.val & VTD_PASID_DIR_FPD) { + *pe_fpd_set = true; + return 0; + } + + ret = vtd_get_pasid_entry(s, pasid, &pdire, &pe); + if (ret) { + return ret; + } + + if (pe.val[0] & VTD_PASID_ENTRY_FPD) { + *pe_fpd_set = true; + } + + return 0; +} + /* Get the page-table level that hardware should use for the second-level * page-table walk from the Address Width field of context-entry. */ @@ -608,17 +800,43 @@ static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce) return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW); } +static uint32_t vtd_get_iova_level(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + if (s->root_scalable) { + vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return VTD_PE_GET_LEVEL(&pe); + } + + return vtd_ce_get_level(ce); +} + static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce) { return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9; } +static uint32_t vtd_get_iova_agaw(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + if (s->root_scalable) { + vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return 30 + ((pe.val[0] >> 2) & VTD_SM_PASID_ENTRY_AW) * 9; + } + + return vtd_ce_get_agaw(ce); +} + static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce) { return ce->lo & VTD_CONTEXT_ENTRY_TT; } -/* Return true if check passed, otherwise false */ +/* Only for Legacy Mode. 
Return true if check passed, otherwise false */ static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu, VTDContextEntry *ce) { @@ -639,7 +857,7 @@ static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu, } break; default: - /* Unknwon type */ + /* Unknown type */ error_report_once("%s: unknown ce type: %"PRIu32, __func__, vtd_ce_get_type(ce)); return false; @@ -647,21 +865,36 @@ static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu, return true; } -static inline uint64_t vtd_iova_limit(VTDContextEntry *ce, uint8_t aw) +static inline uint64_t vtd_iova_limit(IntelIOMMUState *s, + VTDContextEntry *ce, uint8_t aw) { - uint32_t ce_agaw = vtd_ce_get_agaw(ce); + uint32_t ce_agaw = vtd_get_iova_agaw(s, ce); return 1ULL << MIN(ce_agaw, aw); } /* Return true if IOVA passes range check, otherwise false. */ -static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce, +static inline bool vtd_iova_range_check(IntelIOMMUState *s, + uint64_t iova, VTDContextEntry *ce, uint8_t aw) { /* * Check if @iova is above 2^X-1, where X is the minimum of MGAW * in CAP_REG and AW in context-entry. */ - return !(iova & ~(vtd_iova_limit(ce, aw) - 1)); + return !(iova & ~(vtd_iova_limit(s, ce, aw) - 1)); +} + +static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + if (s->root_scalable) { + vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR; + } + + return vtd_ce_get_slpt_base(ce); } /* @@ -707,17 +940,18 @@ static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num) /* Given the @iova, get relevant @slptep. @slpte_level will be the last level * of the translation, can be used for deciding the size of large page. */ -static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write, +static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce, + uint64_t iova, bool is_write, uint64_t *slptep, uint32_t *slpte_level, bool *reads, bool *writes, uint8_t aw_bits) { - dma_addr_t addr = vtd_ce_get_slpt_base(ce); - uint32_t level = vtd_ce_get_level(ce); + dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce); + uint32_t level = vtd_get_iova_level(s, ce); uint32_t offset; uint64_t slpte; uint64_t access_right_check; - if (!vtd_iova_range_check(iova, ce, aw_bits)) { + if (!vtd_iova_range_check(s, iova, ce, aw_bits)) { error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ")", __func__, iova); return -VTD_FR_ADDR_BEYOND_MGAW; @@ -733,7 +967,7 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write, if (slpte == (uint64_t)-1) { error_report_once("%s: detected read error on DMAR slpte " "(iova=0x%" PRIx64 ")", __func__, iova); - if (level == vtd_ce_get_level(ce)) { + if (level == vtd_get_iova_level(s, ce)) { /* Invalid programming of context-entry */ return -VTD_FR_CONTEXT_ENTRY_INV; } else { @@ -962,29 +1196,96 @@ next: /** * vtd_page_walk - walk specific IOVA range, and call the hook * + * @s: intel iommu state * @ce: context entry to walk upon * @start: IOVA address to start the walk * @end: IOVA range end address (start <= addr < end) * @info: page walking information struct */ -static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end, +static int vtd_page_walk(IntelIOMMUState *s, VTDContextEntry *ce, + uint64_t start, uint64_t end, vtd_page_walk_info *info) { - dma_addr_t addr = vtd_ce_get_slpt_base(ce); - uint32_t level = vtd_ce_get_level(ce); + dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce); + uint32_t level = 
vtd_get_iova_level(s, ce); - if (!vtd_iova_range_check(start, ce, info->aw)) { + if (!vtd_iova_range_check(s, start, ce, info->aw)) { return -VTD_FR_ADDR_BEYOND_MGAW; } - if (!vtd_iova_range_check(end, ce, info->aw)) { + if (!vtd_iova_range_check(s, end, ce, info->aw)) { /* Fix end so that it reaches the maximum */ - end = vtd_iova_limit(ce, info->aw); + end = vtd_iova_limit(s, ce, info->aw); } return vtd_page_walk_level(addr, start, end, level, true, true, info); } +static int vtd_root_entry_rsvd_bits_check(IntelIOMMUState *s, + VTDRootEntry *re) +{ + /* Legacy Mode reserved bits check */ + if (!s->root_scalable && + (re->hi || (re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) + goto rsvd_err; + + /* Scalable Mode reserved bits check */ + if (s->root_scalable && + ((re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)) || + (re->hi & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) + goto rsvd_err; + + return 0; + +rsvd_err: + error_report_once("%s: invalid root entry: hi=0x%"PRIx64 + ", lo=0x%"PRIx64, + __func__, re->hi, re->lo); + return -VTD_FR_ROOT_ENTRY_RSVD; +} + +static inline int vtd_context_entry_rsvd_bits_check(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + if (!s->root_scalable && + (ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI || + ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) { + error_report_once("%s: invalid context entry: hi=%"PRIx64 + ", lo=%"PRIx64" (reserved nonzero)", + __func__, ce->hi, ce->lo); + return -VTD_FR_CONTEXT_ENTRY_RSVD; + } + + if (s->root_scalable && + (ce->val[0] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(s->aw_bits) || + ce->val[1] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 || + ce->val[2] || + ce->val[3])) { + error_report_once("%s: invalid context entry: val[3]=%"PRIx64 + ", val[2]=%"PRIx64 + ", val[1]=%"PRIx64 + ", val[0]=%"PRIx64" (reserved nonzero)", + __func__, ce->val[3], ce->val[2], + ce->val[1], ce->val[0]); + return -VTD_FR_CONTEXT_ENTRY_RSVD; + } + + return 0; +} + +static int vtd_ce_rid2pasid_check(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + /* + * Make sure in Scalable Mode, a present context entry + * has valid rid2pasid setting, which includes valid + * rid2pasid field and corresponding pasid entry setting + */ + return vtd_ce_get_rid2pasid_entry(s, ce, &pe); +} + /* Map a device to its corresponding domain (context-entry) */ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num, uint8_t devfn, VTDContextEntry *ce) @@ -998,20 +1299,18 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num, return ret_fr; } - if (!vtd_root_entry_present(&re)) { + if (!vtd_root_entry_present(s, &re, devfn)) { /* Not error - it's okay we don't have root entry. 
*/ trace_vtd_re_not_present(bus_num); return -VTD_FR_ROOT_ENTRY_P; } - if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD(s->aw_bits))) { - error_report_once("%s: invalid root entry: rsvd=0x%"PRIx64 - ", val=0x%"PRIx64" (reserved nonzero)", - __func__, re.rsvd, re.val); - return -VTD_FR_ROOT_ENTRY_RSVD; + ret_fr = vtd_root_entry_rsvd_bits_check(s, &re); + if (ret_fr) { + return ret_fr; } - ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce); + ret_fr = vtd_get_context_entry_from_root(s, &re, devfn, ce); if (ret_fr) { return ret_fr; } @@ -1022,26 +1321,38 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num, return -VTD_FR_CONTEXT_ENTRY_P; } - if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) || - (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) { - error_report_once("%s: invalid context entry: hi=%"PRIx64 - ", lo=%"PRIx64" (reserved nonzero)", - __func__, ce->hi, ce->lo); - return -VTD_FR_CONTEXT_ENTRY_RSVD; + ret_fr = vtd_context_entry_rsvd_bits_check(s, ce); + if (ret_fr) { + return ret_fr; } /* Check if the programming of context-entry is valid */ - if (!vtd_is_level_supported(s, vtd_ce_get_level(ce))) { + if (!s->root_scalable && + !vtd_is_level_supported(s, vtd_ce_get_level(ce))) { error_report_once("%s: invalid context entry: hi=%"PRIx64 ", lo=%"PRIx64" (level %d not supported)", - __func__, ce->hi, ce->lo, vtd_ce_get_level(ce)); + __func__, ce->hi, ce->lo, + vtd_ce_get_level(ce)); return -VTD_FR_CONTEXT_ENTRY_INV; } - /* Do translation type check */ - if (!vtd_ce_type_check(x86_iommu, ce)) { - /* Errors dumped in vtd_ce_type_check() */ - return -VTD_FR_CONTEXT_ENTRY_INV; + if (!s->root_scalable) { + /* Do translation type check */ + if (!vtd_ce_type_check(x86_iommu, ce)) { + /* Errors dumped in vtd_ce_type_check() */ + return -VTD_FR_CONTEXT_ENTRY_INV; + } + } else { + /* + * Check if the programming of context-entry.rid2pasid + * and corresponding pasid setting is valid, and thus + * avoids to check pasid entry fetching result in future + * helper function calling. + */ + ret_fr = vtd_ce_rid2pasid_check(s, ce); + if (ret_fr) { + return ret_fr; + } } return 0; @@ -1054,6 +1365,19 @@ static int vtd_sync_shadow_page_hook(IOMMUTLBEntry *entry, return 0; } +static uint16_t vtd_get_domain_id(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + if (s->root_scalable) { + vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return VTD_SM_PASID_ENTRY_DID(pe.val[1]); + } + + return VTD_CONTEXT_ENTRY_DID(ce->hi); +} + static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as, VTDContextEntry *ce, hwaddr addr, hwaddr size) @@ -1065,10 +1389,10 @@ static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as, .notify_unmap = true, .aw = s->aw_bits, .as = vtd_as, - .domain_id = VTD_CONTEXT_ENTRY_DID(ce->hi), + .domain_id = vtd_get_domain_id(s, ce), }; - return vtd_page_walk(ce, addr, addr + size, &info); + return vtd_page_walk(s, ce, addr, addr + size, &info); } static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as) @@ -1103,35 +1427,24 @@ static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as) } /* - * Fetch translation type for specific device. Returns <0 if error - * happens, otherwise return the shifted type to check against - * VTD_CONTEXT_TT_*. + * Check if specific device is configed to bypass address + * translation for DMA requests. In Scalable Mode, bypass + * 1st-level translation or 2nd-level translation, it depends + * on PGTT setting. 
*/ -static int vtd_dev_get_trans_type(VTDAddressSpace *as) +static bool vtd_dev_pt_enabled(VTDAddressSpace *as) { IntelIOMMUState *s; VTDContextEntry ce; + VTDPASIDEntry pe; int ret; - s = as->iommu_state; + assert(as); + s = as->iommu_state; ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus), as->devfn, &ce); if (ret) { - return ret; - } - - return vtd_ce_get_type(&ce); -} - -static bool vtd_dev_pt_enabled(VTDAddressSpace *as) -{ - int ret; - - assert(as); - - ret = vtd_dev_get_trans_type(as); - if (ret < 0) { /* * Possibly failed to parse the context entry for some reason * (e.g., during init, or any guest configuration errors on @@ -1141,7 +1454,17 @@ static bool vtd_dev_pt_enabled(VTDAddressSpace *as) return false; } - return ret == VTD_CONTEXT_TT_PASS_THROUGH; + if (s->root_scalable) { + ret = vtd_ce_get_rid2pasid_entry(s, &ce, &pe); + if (ret) { + error_report_once("%s: vtd_ce_get_rid2pasid_entry error: %"PRId32, + __func__, ret); + return false; + } + return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT); + } + + return (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH); } /* Return whether the device is using IOMMU translation. */ @@ -1171,11 +1494,11 @@ static bool vtd_switch_address_space(VTDAddressSpace *as) /* Turn off first then on the other */ if (use_iommu) { - memory_region_set_enabled(&as->sys_alias, false); + memory_region_set_enabled(&as->nodmar, false); memory_region_set_enabled(MEMORY_REGION(&as->iommu), true); } else { memory_region_set_enabled(MEMORY_REGION(&as->iommu), false); - memory_region_set_enabled(&as->sys_alias, true); + memory_region_set_enabled(&as->nodmar, true); } if (take_bql) { @@ -1221,6 +1544,7 @@ static const bool vtd_qualified_faults[] = { [VTD_FR_ROOT_ENTRY_RSVD] = false, [VTD_FR_PAGING_ENTRY_RSVD] = true, [VTD_FR_CONTEXT_ENTRY_TT] = true, + [VTD_FR_PASID_TABLE_INV] = false, [VTD_FR_RESERVED_ERR] = false, [VTD_FR_MAX] = false, }; @@ -1322,18 +1646,17 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, cc_entry->context_cache_gen); ce = cc_entry->context_entry; is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; + if (!is_fpd_set && s->root_scalable) { + ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set); + VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); + } } else { ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce); is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; - if (ret_fr) { - ret_fr = -ret_fr; - if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) { - trace_vtd_fault_disabled(); - } else { - vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write); - } - goto error; + if (!ret_fr && !is_fpd_set && s->root_scalable) { + ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set); } + VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); /* Update context-cache */ trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo, cc_entry->context_cache_gen, @@ -1367,21 +1690,13 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, return true; } - ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level, + ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &slpte, &level, &reads, &writes, s->aw_bits); - if (ret_fr) { - ret_fr = -ret_fr; - if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) { - trace_vtd_fault_disabled(); - } else { - vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write); - } - goto error; - } + VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); page_mask = vtd_slpt_level_page_mask(level); access_flags = IOMMU_ACCESS_FLAG(reads, 
writes); - vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte, + vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce), addr, slpte, access_flags, level); out: vtd_iommu_unlock(s); @@ -1403,10 +1718,11 @@ error: static void vtd_root_table_setup(IntelIOMMUState *s) { s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG); - s->root_extended = s->root & VTD_RTADDR_RTT; s->root &= VTD_RTADDR_ADDR_MASK(s->aw_bits); - trace_vtd_reg_dmar_root(s->root, s->root_extended); + vtd_update_scalable_state(s); + + trace_vtd_reg_dmar_root(s->root, s->root_scalable); } static void vtd_iec_notify_all(IntelIOMMUState *s, bool global, @@ -1573,7 +1889,7 @@ static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id) QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), vtd_as->devfn, &ce) && - domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) { + domain_id == vtd_get_domain_id(s, &ce)) { vtd_sync_shadow_page_table(vtd_as); } } @@ -1591,7 +1907,7 @@ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s, QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) { ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), vtd_as->devfn, &ce); - if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) { + if (!ret && domain_id == vtd_get_domain_id(s, &ce)) { if (vtd_as_has_map_notifier(vtd_as)) { /* * As long as we have MAP notifications registered in @@ -1699,7 +2015,7 @@ static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en) if (en) { s->iq = iqa_val & VTD_IQA_IQA_MASK(s->aw_bits); /* 2^(x+8) entries */ - s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8); + s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8 - (s->iq_dw ? 1 : 0)); s->qi_enabled = true; trace_vtd_inv_qi_setup(s->iq, s->iq_size); /* Ok - report back to driver */ @@ -1866,19 +2182,24 @@ static void vtd_handle_iotlb_write(IntelIOMMUState *s) } /* Fetch an Invalidation Descriptor from the Invalidation Queue */ -static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset, +static bool vtd_get_inv_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { - dma_addr_t addr = base_addr + offset * sizeof(*inv_desc); - if (dma_memory_read(&address_space_memory, addr, inv_desc, - sizeof(*inv_desc))) { - error_report_once("Read INV DESC failed"); - inv_desc->lo = 0; - inv_desc->hi = 0; + dma_addr_t base_addr = s->iq; + uint32_t offset = s->iq_head; + uint32_t dw = s->iq_dw ? 32 : 16; + dma_addr_t addr = base_addr + offset * dw; + + if (dma_memory_read(&address_space_memory, addr, inv_desc, dw)) { + error_report_once("Read INV DESC failed."); return false; } inv_desc->lo = le64_to_cpu(inv_desc->lo); inv_desc->hi = le64_to_cpu(inv_desc->hi); + if (dw == 32) { + inv_desc->val[2] = le64_to_cpu(inv_desc->val[2]); + inv_desc->val[3] = le64_to_cpu(inv_desc->val[3]); + } return true; } @@ -2084,10 +2405,11 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s) uint8_t desc_type; trace_vtd_inv_qi_head(s->iq_head); - if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) { + if (!vtd_get_inv_desc(s, &inv_desc)) { s->iq_last_desc_type = VTD_INV_DESC_NONE; return false; } + desc_type = inv_desc.lo & VTD_INV_DESC_TYPE; /* FIXME: should update at first or at last? */ s->iq_last_desc_type = desc_type; @@ -2107,6 +2429,17 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s) } break; + /* + * TODO: the entity of below two cases will be implemented in future series. 
+ * To make guest (which integrates scalable mode support patch set in + * iommu driver) work, just return true is enough so far. + */ + case VTD_INV_DESC_PC: + break; + + case VTD_INV_DESC_PIOTLB: + break; + case VTD_INV_DESC_WAIT: trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo); if (!vtd_process_wait_desc(s, &inv_desc)) { @@ -2172,7 +2505,12 @@ static void vtd_handle_iqt_write(IntelIOMMUState *s) { uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG); - s->iq_tail = VTD_IQT_QT(val); + if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) { + error_report_once("%s: RSV bit is set: val=0x%"PRIx64, + __func__, val); + return; + } + s->iq_tail = VTD_IQT_QT(s->iq_dw, val); trace_vtd_inv_qi_tail(s->iq_tail); if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) { @@ -2441,6 +2779,12 @@ static void vtd_mem_write(void *opaque, hwaddr addr, } else { vtd_set_quad(s, addr, val); } + if (s->ecap & VTD_ECAP_SMTS && + val & VTD_IQA_DW_MASK) { + s->iq_dw = true; + } else { + s->iq_dw = false; + } break; case DMAR_IQA_REG_HI: @@ -2582,7 +2926,7 @@ static void vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, IntelIOMMUState *s = vtd_as->iommu_state; if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) { - error_report("We need to set caching-mode=1 for intel-iommu to enable " + error_report("We need to set caching-mode=on for intel-iommu to enable " "device assignment with IOMMU protection."); exit(1); } @@ -2608,6 +2952,15 @@ static int vtd_post_load(void *opaque, int version_id) */ vtd_switch_address_space_all(iommu); + /* + * We don't need to migrate the root_scalable because we can + * simply do the calculation after the loading is complete. We + * can actually do similar things with root, dmar_enabled, etc. + * however since we've had them already so we'd better keep them + * for compatibility of migration. 
+ */ + vtd_update_scalable_state(iommu); + return 0; } @@ -2628,7 +2981,7 @@ static const VMStateDescription vtd_vmstate = { VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState), VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE), VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState), - VMSTATE_BOOL(root_extended, IntelIOMMUState), + VMSTATE_UNUSED(1), /* bool root_extended is obsolete by VT-d */ VMSTATE_BOOL(dmar_enabled, IntelIOMMUState), VMSTATE_BOOL(qi_enabled, IntelIOMMUState), VMSTATE_BOOL(intr_enabled, IntelIOMMUState), @@ -2659,6 +3012,7 @@ static Property vtd_properties[] = { DEFINE_PROP_UINT8("aw-bits", IntelIOMMUState, aw_bits, VTD_HOST_ADDRESS_WIDTH), DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE), + DEFINE_PROP_BOOL("x-scalable-mode", IntelIOMMUState, scalable_mode, FALSE), DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true), DEFINE_PROP_END_OF_LIST(), }; @@ -2947,7 +3301,8 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn) vtd_dev_as = vtd_bus->dev_as[devfn]; if (!vtd_dev_as) { - snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn); + snprintf(name, sizeof(name), "vtd-%02x.%x", PCI_SLOT(devfn), + PCI_FUNC(devfn)); vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace)); vtd_dev_as->bus = bus; @@ -2956,44 +3311,53 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn) vtd_dev_as->context_cache_entry.context_cache_gen = 0; vtd_dev_as->iova_tree = iova_tree_new(); + memory_region_init(&vtd_dev_as->root, OBJECT(s), name, UINT64_MAX); + address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, "vtd-root"); + /* - * Memory region relationships looks like (Address range shows - * only lower 32 bits to make it short in length...): - * - * |-----------------+-------------------+----------| - * | Name | Address range | Priority | - * |-----------------+-------------------+----------+ - * | vtd_root | 00000000-ffffffff | 0 | - * | intel_iommu | 00000000-ffffffff | 1 | - * | vtd_sys_alias | 00000000-ffffffff | 1 | - * | intel_iommu_ir | fee00000-feefffff | 64 | - * |-----------------+-------------------+----------| + * Build the DMAR-disabled container with aliases to the + * shared MRs. Note that aliasing to a shared memory region + * could help the memory API to detect same FlatViews so we + * can have devices to share the same FlatView when DMAR is + * disabled (either by not providing "intel_iommu=on" or with + * "iommu=pt"). It will greatly reduce the total number of + * FlatViews of the system hence VM runs faster. + */ + memory_region_init_alias(&vtd_dev_as->nodmar, OBJECT(s), + "vtd-nodmar", &s->mr_nodmar, 0, + memory_region_size(&s->mr_nodmar)); + + /* + * Build the per-device DMAR-enabled container. * - * We enable/disable DMAR by switching enablement for - * vtd_sys_alias and intel_iommu regions. IR region is always - * enabled. + * TODO: currently we have per-device IOMMU memory region only + * because we have per-device IOMMU notifiers for devices. If + * one day we can abstract the IOMMU notifiers out of the + * memory regions then we can also share the same memory + * region here just like what we've done above with the nodmar + * region. 
*/ + strcat(name, "-dmar"); memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu), TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s), - "intel_iommu_dmar", - UINT64_MAX); - memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s), - "vtd_sys_alias", get_system_memory(), - 0, memory_region_size(get_system_memory())); - memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s), - &vtd_mem_ir_ops, s, "intel_iommu_ir", - VTD_INTERRUPT_ADDR_SIZE); - memory_region_init(&vtd_dev_as->root, OBJECT(s), - "vtd_root", UINT64_MAX); - memory_region_add_subregion_overlap(&vtd_dev_as->root, + name, UINT64_MAX); + memory_region_init_alias(&vtd_dev_as->iommu_ir, OBJECT(s), "vtd-ir", + &s->mr_ir, 0, memory_region_size(&s->mr_ir)); + memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->iommu), VTD_INTERRUPT_ADDR_FIRST, - &vtd_dev_as->iommu_ir, 64); - address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name); - memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, - &vtd_dev_as->sys_alias, 1); + &vtd_dev_as->iommu_ir, 1); + + /* + * Hook both the containers under the root container, we + * switch between DMAR & noDMAR by enable/disable + * corresponding sub-containers + */ memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, MEMORY_REGION(&vtd_dev_as->iommu), - 1); + 0); + memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, + &vtd_dev_as->nodmar, 0); + vtd_switch_address_space(vtd_dev_as); } return vtd_dev_as; @@ -3098,9 +3462,11 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) vtd_address_space_unmap(vtd_as, n); if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) { - trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn), + trace_vtd_replay_ce_valid(s->root_scalable ? "scalable mode" : + "legacy mode", + bus_n, PCI_SLOT(vtd_as->devfn), PCI_FUNC(vtd_as->devfn), - VTD_CONTEXT_ENTRY_DID(ce.hi), + vtd_get_domain_id(s, &ce), ce.hi, ce.lo); if (vtd_as_has_map_notifier(vtd_as)) { /* This is required only for MAP typed notifiers */ @@ -3110,10 +3476,10 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) .notify_unmap = false, .aw = s->aw_bits, .as = vtd_as, - .domain_id = VTD_CONTEXT_ENTRY_DID(ce.hi), + .domain_id = vtd_get_domain_id(s, &ce), }; - vtd_page_walk(&ce, 0, ~0ULL, &info); + vtd_page_walk(s, &ce, 0, ~0ULL, &info); } } else { trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), @@ -3136,7 +3502,7 @@ static void vtd_init(IntelIOMMUState *s) memset(s->womask, 0, DMAR_REG_SIZE); s->root = 0; - s->root_extended = false; + s->root_scalable = false; s->dmar_enabled = false; s->intr_enabled = false; s->iq_head = 0; @@ -3145,6 +3511,7 @@ static void vtd_init(IntelIOMMUState *s) s->iq_size = 0; s->qi_enabled = false; s->iq_last_desc_type = VTD_INV_DESC_NONE; + s->iq_dw = false; s->next_frcd_reg = 0; s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS | @@ -3190,6 +3557,11 @@ static void vtd_init(IntelIOMMUState *s) s->cap |= VTD_CAP_CM; } + /* TODO: read cap/ecap from host to decide which cap to be exposed. 
*/ + if (s->scalable_mode) { + s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS; + } + vtd_reset_caches(s); /* Define registers with default values and bit semantics */ @@ -3199,7 +3571,7 @@ static void vtd_init(IntelIOMMUState *s) vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0); vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL); vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0); - vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0); + vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffffc00ULL, 0); vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0); vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL); @@ -3222,7 +3594,7 @@ static void vtd_init(IntelIOMMUState *s) vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0); vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0); - vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0); + vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff807ULL, 0); vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL); vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0); vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0); @@ -3301,6 +3673,11 @@ static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) return false; } + if (s->scalable_mode && !s->dma_drain) { + error_setg(errp, "Need to set dma_drain for scalable mode"); + return false; + } + return true; } @@ -3323,6 +3700,21 @@ static void vtd_realize(DeviceState *dev, Error **errp) memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num)); memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s, "intel_iommu", DMAR_REG_SIZE); + + /* Create the shared memory regions by all devices */ + memory_region_init(&s->mr_nodmar, OBJECT(s), "vtd-nodmar", + UINT64_MAX); + memory_region_init_io(&s->mr_ir, OBJECT(s), &vtd_mem_ir_ops, + s, "vtd-ir", VTD_INTERRUPT_ADDR_SIZE); + memory_region_init_alias(&s->mr_sys_alias, OBJECT(s), + "vtd-sys-alias", get_system_memory(), 0, + memory_region_size(get_system_memory())); + memory_region_add_subregion_overlap(&s->mr_nodmar, 0, + &s->mr_sys_alias, 0); + memory_region_add_subregion_overlap(&s->mr_nodmar, + VTD_INTERRUPT_ADDR_FIRST, + &s->mr_ir, 1); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem); /* No corresponding destroy */ s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal, @@ -3349,6 +3741,8 @@ static void vtd_class_init(ObjectClass *klass, void *data) x86_class->int_remap = vtd_int_remap; /* Supported by the pc-q35-* machine types */ dc->user_creatable = true; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "Intel IOMMU (VT-d) DMA Remapping device"; } static const TypeInfo vtd_info = { diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h index 00e9edbc66..c1235a7063 100644 --- a/hw/i386/intel_iommu_internal.h +++ b/hw/i386/intel_iommu_internal.h @@ -171,7 +171,7 @@ #define VTD_CCMD_FM(val) (((val) >> 32) & 3ULL) /* RTADDR_REG */ -#define VTD_RTADDR_RTT (1ULL << 11) +#define VTD_RTADDR_SMT (1ULL << 10) #define VTD_RTADDR_ADDR_MASK(aw) (VTD_HAW_MASK(aw) ^ 0xfffULL) /* IRTA_REG */ @@ -189,6 +189,9 @@ #define VTD_ECAP_EIM (1ULL << 4) #define VTD_ECAP_PT (1ULL << 6) #define VTD_ECAP_MHMV (15ULL << 20) +#define VTD_ECAP_SRS (1ULL << 31) +#define VTD_ECAP_SMTS (1ULL << 43) +#define VTD_ECAP_SLTS (1ULL << 46) /* CAP_REG */ /* (offset >> 4) << 24 */ @@ -217,11 +220,14 @@ #define VTD_CAP_SAGAW_48bit (0x4ULL << VTD_CAP_SAGAW_SHIFT) /* IQT_REG */ -#define VTD_IQT_QT(val) (((val) >> 4) & 0x7fffULL) +#define VTD_IQT_QT(dw_bit, val) (dw_bit ? 
(((val) >> 5) & 0x3fffULL) : \ + (((val) >> 4) & 0x7fffULL)) +#define VTD_IQT_QT_256_RSV_BIT 0x10 /* IQA_REG */ #define VTD_IQA_IQA_MASK(aw) (VTD_HAW_MASK(aw) ^ 0xfffULL) #define VTD_IQA_QS 0x7ULL +#define VTD_IQA_DW_MASK 0x800 /* IQH_REG */ #define VTD_IQH_QH_SHIFT 4 @@ -294,6 +300,8 @@ typedef enum VTDFaultReason { * request while disabled */ VTD_FR_IR_SID_ERR = 0x26, /* Invalid Source-ID */ + VTD_FR_PASID_TABLE_INV = 0x58, /*Invalid PASID table entry */ + /* This is not a normal fault reason. We use this to indicate some faults * that are not referenced by the VT-d specification. * Fault event with such reason should not be recorded. @@ -321,6 +329,9 @@ union VTDInvDesc { uint64_t lo; uint64_t hi; }; + struct { + uint64_t val[4]; + }; union { VTDInvDescIEC iec; }; @@ -335,6 +346,8 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_IEC 0x4 /* Interrupt Entry Cache Invalidate Descriptor */ #define VTD_INV_DESC_WAIT 0x5 /* Invalidation Wait Descriptor */ +#define VTD_INV_DESC_PIOTLB 0x6 /* PASID-IOTLB Invalidate Desc */ +#define VTD_INV_DESC_PC 0x7 /* PASID-cache Invalidate Desc */ #define VTD_INV_DESC_NONE 0 /* Not an Invalidate Descriptor */ /* Masks for Invalidation Wait Descriptor*/ @@ -411,8 +424,8 @@ typedef struct VTDIOTLBPageInvInfo VTDIOTLBPageInvInfo; #define VTD_PAGE_MASK_1G (~((1ULL << VTD_PAGE_SHIFT_1G) - 1)) struct VTDRootEntry { - uint64_t val; - uint64_t rsvd; + uint64_t lo; + uint64_t hi; }; typedef struct VTDRootEntry VTDRootEntry; @@ -423,6 +436,8 @@ typedef struct VTDRootEntry VTDRootEntry; #define VTD_ROOT_ENTRY_NR (VTD_PAGE_SIZE / sizeof(VTDRootEntry)) #define VTD_ROOT_ENTRY_RSVD(aw) (0xffeULL | ~VTD_HAW_MASK(aw)) +#define VTD_DEVFN_CHECK_MASK 0x80 + /* Masks for struct VTDContextEntry */ /* lo */ #define VTD_CONTEXT_ENTRY_P (1ULL << 0) @@ -441,6 +456,38 @@ typedef struct VTDRootEntry VTDRootEntry; #define VTD_CONTEXT_ENTRY_NR (VTD_PAGE_SIZE / sizeof(VTDContextEntry)) +#define VTD_CTX_ENTRY_LEGACY_SIZE 16 +#define VTD_CTX_ENTRY_SCALABLE_SIZE 32 + +#define VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK 0xfffff +#define VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(aw) (0x1e0ULL | ~VTD_HAW_MASK(aw)) +#define VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 0xffffffffffe00000ULL + +/* PASID Table Related Definitions */ +#define VTD_PASID_DIR_BASE_ADDR_MASK (~0xfffULL) +#define VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL) +#define VTD_PASID_DIR_ENTRY_SIZE 8 +#define VTD_PASID_ENTRY_SIZE 64 +#define VTD_PASID_DIR_BITS_MASK (0x3fffULL) +#define VTD_PASID_DIR_INDEX(pasid) (((pasid) >> 6) & VTD_PASID_DIR_BITS_MASK) +#define VTD_PASID_DIR_FPD (1ULL << 1) /* Fault Processing Disable */ +#define VTD_PASID_TABLE_BITS_MASK (0x3fULL) +#define VTD_PASID_TABLE_INDEX(pasid) ((pasid) & VTD_PASID_TABLE_BITS_MASK) +#define VTD_PASID_ENTRY_FPD (1ULL << 1) /* Fault Processing Disable */ + +/* PASID Granular Translation Type Mask */ +#define VTD_SM_PASID_ENTRY_PGTT (7ULL << 6) +#define VTD_SM_PASID_ENTRY_FLT (1ULL << 6) +#define VTD_SM_PASID_ENTRY_SLT (2ULL << 6) +#define VTD_SM_PASID_ENTRY_NESTED (3ULL << 6) +#define VTD_SM_PASID_ENTRY_PT (4ULL << 6) + +#define VTD_SM_PASID_ENTRY_AW 7ULL /* Adjusted guest-address-width */ +#define VTD_SM_PASID_ENTRY_DID(val) ((val) & VTD_DOMAIN_ID_MASK) + +/* Second Level Page Translation Pointer*/ +#define VTD_SM_PASID_ENTRY_SLPTPTR (~0xfffULL) + /* Paging Structure common */ #define VTD_SL_PT_PAGE_SIZE_MASK (1ULL << 7) /* Bits to decide the offset for each level */ diff --git a/hw/i386/pc.c b/hw/i386/pc.c index d71dc28ef6..d98b737b8f 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -115,6 
+115,9 @@ struct hpet_fw_config hpet_cfg = {.count = UINT8_MAX}; /* Physical Address of PVH entry point read from kernel ELF NOTE */ static size_t pvh_start_addr; +GlobalProperty pc_compat_4_0[] = {}; +const size_t pc_compat_4_0_len = G_N_ELEMENTS(pc_compat_4_0); + GlobalProperty pc_compat_3_1[] = { { "intel-iommu", "dma-drain", "off" }, { "Opteron_G3" "-" TYPE_X86_CPU, "rdtscp", "off" }, @@ -2075,8 +2078,10 @@ static void pc_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, { const PCMachineState *pcms = PC_MACHINE(hotplug_dev); const PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + const MachineState *ms = MACHINE(hotplug_dev); const bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); const uint64_t legacy_align = TARGET_PAGE_SIZE; + Error *local_err = NULL; /* * When -no-acpi is used with Q35 machine type, no ACPI is built, @@ -2089,11 +2094,17 @@ static void pc_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, return; } - if (is_nvdimm && !pcms->acpi_nvdimm_state.is_enabled) { + if (is_nvdimm && !ms->nvdimms_state->is_enabled) { error_setg(errp, "nvdimm is not enabled: missing 'nvdimm' in '-M'"); return; } + hotplug_handler_pre_plug(pcms->acpi_dev, dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + pc_dimm_pre_plug(PC_DIMM(dev), MACHINE(hotplug_dev), pcmc->enforce_aligned_dimm ? NULL : &legacy_align, errp); } @@ -2103,6 +2114,7 @@ static void pc_memory_plug(HotplugHandler *hotplug_dev, { Error *local_err = NULL; PCMachineState *pcms = PC_MACHINE(hotplug_dev); + MachineState *ms = MACHINE(hotplug_dev); bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); pc_dimm_plug(PC_DIMM(dev), MACHINE(pcms), &local_err); @@ -2111,7 +2123,7 @@ static void pc_memory_plug(HotplugHandler *hotplug_dev, } if (is_nvdimm) { - nvdimm_plug(&pcms->acpi_nvdimm_state); + nvdimm_plug(ms->nvdimms_state); } hotplug_handler_plug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &error_abort); @@ -2552,47 +2564,6 @@ static void pc_machine_set_smm(Object *obj, Visitor *v, const char *name, visit_type_OnOffAuto(v, name, &pcms->smm, errp); } -static bool pc_machine_get_nvdimm(Object *obj, Error **errp) -{ - PCMachineState *pcms = PC_MACHINE(obj); - - return pcms->acpi_nvdimm_state.is_enabled; -} - -static void pc_machine_set_nvdimm(Object *obj, bool value, Error **errp) -{ - PCMachineState *pcms = PC_MACHINE(obj); - - pcms->acpi_nvdimm_state.is_enabled = value; -} - -static char *pc_machine_get_nvdimm_persistence(Object *obj, Error **errp) -{ - PCMachineState *pcms = PC_MACHINE(obj); - - return g_strdup(pcms->acpi_nvdimm_state.persistence_string); -} - -static void pc_machine_set_nvdimm_persistence(Object *obj, const char *value, - Error **errp) -{ - PCMachineState *pcms = PC_MACHINE(obj); - AcpiNVDIMMState *nvdimm_state = &pcms->acpi_nvdimm_state; - - if (strcmp(value, "cpu") == 0) - nvdimm_state->persistence = 3; - else if (strcmp(value, "mem-ctrl") == 0) - nvdimm_state->persistence = 2; - else { - error_setg(errp, "-machine nvdimm-persistence=%s: unsupported option", - value); - return; - } - - g_free(nvdimm_state->persistence_string); - nvdimm_state->persistence_string = g_strdup(value); -} - static bool pc_machine_get_smbus(Object *obj, Error **errp) { PCMachineState *pcms = PC_MACHINE(obj); @@ -2642,8 +2613,6 @@ static void pc_machine_initfn(Object *obj) pcms->max_ram_below_4g = 0; /* use default */ pcms->smm = ON_OFF_AUTO_AUTO; pcms->vmport = ON_OFF_AUTO_AUTO; - /* nvdimm is disabled on default. 
*/ - pcms->acpi_nvdimm_state.is_enabled = false; /* acpi build is enabled by default if machine supports it */ pcms->acpi_build_enabled = PC_MACHINE_GET_CLASS(pcms)->has_acpi_build; pcms->smbus_enabled = true; @@ -2782,6 +2751,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) hc->unplug = pc_machine_device_unplug_cb; nc->nmi_monitor_handler = x86_nmi; mc->default_cpu_type = TARGET_DEFAULT_CPU_TYPE; + mc->nvdimm_supported = true; object_class_property_add(oc, PC_MACHINE_DEVMEM_REGION_SIZE, "int", pc_machine_get_device_memory_region_size, NULL, @@ -2806,13 +2776,6 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) object_class_property_set_description(oc, PC_MACHINE_VMPORT, "Enable vmport (pc & q35)", &error_abort); - object_class_property_add_bool(oc, PC_MACHINE_NVDIMM, - pc_machine_get_nvdimm, pc_machine_set_nvdimm, &error_abort); - - object_class_property_add_str(oc, PC_MACHINE_NVDIMM_PERSIST, - pc_machine_get_nvdimm_persistence, - pc_machine_set_nvdimm_persistence, &error_abort); - object_class_property_add_bool(oc, PC_MACHINE_SMBUS, pc_machine_get_smbus, pc_machine_set_smbus, &error_abort); diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index 8770ecada9..c07c4a5b38 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -297,8 +297,8 @@ static void pc_init1(MachineState *machine, PC_MACHINE_ACPI_DEVICE_PROP, &error_abort); } - if (pcms->acpi_nvdimm_state.is_enabled) { - nvdimm_init_acpi_state(&pcms->acpi_nvdimm_state, system_io, + if (machine->nvdimms_state->is_enabled) { + nvdimm_init_acpi_state(machine->nvdimms_state, system_io, pcms->fw_cfg, OBJECT(pcms)); } } @@ -428,13 +428,25 @@ static void pc_i440fx_machine_options(MachineClass *m) machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE); } -static void pc_i440fx_4_0_machine_options(MachineClass *m) +static void pc_i440fx_4_1_machine_options(MachineClass *m) { pc_i440fx_machine_options(m); m->alias = "pc"; m->is_default = 1; } +DEFINE_I440FX_MACHINE(v4_1, "pc-i440fx-4.1", NULL, + pc_i440fx_4_1_machine_options); + +static void pc_i440fx_4_0_machine_options(MachineClass *m) +{ + pc_i440fx_4_1_machine_options(m); + m->alias = NULL; + m->is_default = 0; + compat_props_add(m->compat_props, hw_compat_4_0, hw_compat_4_0_len); + compat_props_add(m->compat_props, pc_compat_4_0, pc_compat_4_0_len); +} + DEFINE_I440FX_MACHINE(v4_0, "pc-i440fx-4.0", NULL, pc_i440fx_4_0_machine_options); @@ -911,6 +923,7 @@ static void isa_bridge_class_init(ObjectClass *klass, void *data) PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); dc->desc = "ISA bridge faked to support IGD PT"; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); k->vendor_id = PCI_VENDOR_ID_INTEL; k->class_id = PCI_CLASS_BRIDGE_ISA; }; diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index cfb9043e12..37dd350511 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -329,8 +329,8 @@ static void pc_q35_init(MachineState *machine) pc_vga_init(isa_bus, host_bus); pc_nic_init(pcmc, isa_bus, host_bus); - if (pcms->acpi_nvdimm_state.is_enabled) { - nvdimm_init_acpi_state(&pcms->acpi_nvdimm_state, system_io, + if (machine->nvdimms_state->is_enabled) { + nvdimm_init_acpi_state(machine->nvdimms_state, system_io, pcms->fw_cfg, OBJECT(pcms)); } } @@ -365,12 +365,23 @@ static void pc_q35_machine_options(MachineClass *m) m->max_cpus = 288; } -static void pc_q35_4_0_machine_options(MachineClass *m) +static void pc_q35_4_1_machine_options(MachineClass *m) { pc_q35_machine_options(m); m->alias = "q35"; } +DEFINE_Q35_MACHINE(v4_1, "pc-q35-4.1", NULL, + 
pc_q35_4_1_machine_options); + +static void pc_q35_4_0_machine_options(MachineClass *m) +{ + pc_q35_4_1_machine_options(m); + m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_4_0, hw_compat_4_0_len); + compat_props_add(m->compat_props, pc_compat_4_0, pc_compat_4_0_len); +} + DEFINE_Q35_MACHINE(v4_0, "pc-q35-4.0", NULL, pc_q35_4_0_machine_options); diff --git a/hw/i386/trace-events b/hw/i386/trace-events index 77244fc384..c8bc464bc5 100644 --- a/hw/i386/trace-events +++ b/hw/i386/trace-events @@ -1,9 +1,9 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/i386/x86-iommu.c +# x86-iommu.c x86_iommu_iec_notify(bool global, uint32_t index, uint32_t mask) "Notify IEC invalidation: global=%d index=%" PRIu32 " mask=%" PRIu32 -# hw/i386/intel_iommu.c +# intel_iommu.c vtd_inv_desc(const char *type, uint64_t hi, uint64_t lo) "invalidate desc type %s high 0x%"PRIx64" low 0x%"PRIx64 vtd_inv_desc_cc_domain(uint16_t domain) "context invalidate domain 0x%"PRIx16 vtd_inv_desc_cc_global(void) "context invalidate globally" @@ -30,7 +30,7 @@ vtd_iotlb_cc_hit(uint8_t bus, uint8_t devfn, uint64_t high, uint64_t low, uint32 vtd_iotlb_cc_update(uint8_t bus, uint8_t devfn, uint64_t high, uint64_t low, uint32_t gen1, uint32_t gen2) "IOTLB context update bus 0x%"PRIx8" devfn 0x%"PRIx8" high 0x%"PRIx64" low 0x%"PRIx64" gen %"PRIu32" -> gen %"PRIu32 vtd_iotlb_reset(const char *reason) "IOTLB reset (reason: %s)" vtd_fault_disabled(void) "Fault processing disabled for context entry" -vtd_replay_ce_valid(uint8_t bus, uint8_t dev, uint8_t fn, uint16_t domain, uint64_t hi, uint64_t lo) "replay valid context device %02"PRIx8":%02"PRIx8".%02"PRIx8" domain 0x%"PRIx16" hi 0x%"PRIx64" lo 0x%"PRIx64 +vtd_replay_ce_valid(const char *mode, uint8_t bus, uint8_t dev, uint8_t fn, uint16_t domain, uint64_t hi, uint64_t lo) "%s: replay valid context device %02"PRIx8":%02"PRIx8".%02"PRIx8" domain 0x%"PRIx16" hi 0x%"PRIx64" lo 0x%"PRIx64 vtd_replay_ce_invalid(uint8_t bus, uint8_t dev, uint8_t fn) "replay invalid context device %02"PRIx8":%02"PRIx8".%02"PRIx8 vtd_page_walk_level(uint64_t addr, uint32_t level, uint64_t start, uint64_t end) "walk (base=0x%"PRIx64", level=%"PRIu32") iova range 0x%"PRIx64" - 0x%"PRIx64 vtd_page_walk_one(uint16_t domain, uint64_t iova, uint64_t gpa, uint64_t mask, int perm) "domain 0x%"PRIu16" iova 0x%"PRIx64" -> gpa 0x%"PRIx64" mask 0x%"PRIx64" perm %d" @@ -45,7 +45,7 @@ vtd_pt_enable_fast_path(uint16_t sid, bool success) "sid 0x%"PRIu16" %d" vtd_irq_generate(uint64_t addr, uint64_t data) "addr 0x%"PRIx64" data 0x%"PRIx64 vtd_reg_read(uint64_t addr, uint64_t size) "addr 0x%"PRIx64" size 0x%"PRIx64 vtd_reg_write(uint64_t addr, uint64_t size, uint64_t val) "addr 0x%"PRIx64" size 0x%"PRIx64" value 0x%"PRIx64 -vtd_reg_dmar_root(uint64_t addr, bool extended) "addr 0x%"PRIx64" extended %d" +vtd_reg_dmar_root(uint64_t addr, bool scalable) "addr 0x%"PRIx64" scalable %d" vtd_reg_ir_root(uint64_t addr, uint32_t size) "addr 0x%"PRIx64" size 0x%"PRIx32 vtd_reg_write_gcmd(uint32_t status, uint32_t val) "status 0x%"PRIx32" value 0x%"PRIx32 vtd_reg_write_fectl(uint32_t value) "value 0x%"PRIx32 @@ -67,7 +67,7 @@ vtd_warn_invalid_qi_tail(uint16_t tail) "tail 0x%"PRIx16 vtd_warn_ir_vector(uint16_t sid, int index, int vec, int target) "sid 0x%"PRIx16" index %d vec %d (should be: %d)" vtd_warn_ir_trigger(uint16_t sid, int index, int trig, int target) "sid 0x%"PRIx16" index %d trigger %d (should be: %d)" -# hw/i386/amd_iommu.c +# amd_iommu.c amdvi_evntlog_fail(uint64_t addr, uint32_t head) "error: fail 
to write at addr 0x%"PRIx64" + offset 0x%"PRIx32 amdvi_cache_update(uint16_t domid, uint8_t bus, uint8_t slot, uint8_t func, uint64_t gpa, uint64_t txaddr) " update iotlb domid 0x%"PRIx16" devid: %02x:%02x.%x gpa 0x%"PRIx64" hpa 0x%"PRIx64 amdvi_completion_wait_fail(uint64_t addr) "error: fail to write at address 0x%"PRIx64 @@ -106,10 +106,8 @@ amdvi_ir_err(const char *str) "%s" amdvi_ir_intctl(uint8_t val) "int_ctl 0x%"PRIx8 amdvi_ir_target_abort(const char *str) "%s" amdvi_ir_delivery_mode(const char *str) "%s" -amdvi_ir_generate_msi_message(uint8_t vector, uint8_t delivery_mode, uint8_t dest_mode, uint8_t dest, uint8_t rh) "vector %d delivery-mode %d dest-mode %d dest-id %d rh %d" -amdvi_ir_irte_ga(uint64_t addr, uint64_t data) "addr 0x%"PRIx64" offset 0x%"PRIx64 amdvi_ir_irte_ga_val(uint64_t hi, uint64_t lo) "hi 0x%"PRIx64" lo 0x%"PRIx64 -# hw/i386/vmport.c +# vmport.c vmport_register(unsigned char command, void *func, void *opaque) "command: 0x%02x func: %p opaque: %p" vmport_command(unsigned char command) "command: 0x%02x" diff --git a/hw/i386/xen/trace-events b/hw/i386/xen/trace-events index 8a9077cd4e..ca3a4948ba 100644 --- a/hw/i386/xen/trace-events +++ b/hw/i386/xen/trace-events @@ -1,7 +1,9 @@ -# hw/i386/xen/xen_platform.c +# See docs/devel/tracing.txt for syntax documentation. + +# xen_platform.c xen_platform_log(char *s) "xen platform: %s" -# hw/i386/xen/xen_pvdevice.c +# xen_pvdevice.c xen_pv_mmio_read(uint64_t addr) "WARNING: read from Xen PV Device MMIO space (address 0x%"PRIx64")" xen_pv_mmio_write(uint64_t addr) "WARNING: write to Xen PV Device MMIO space (address 0x%"PRIx64")" diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c index 349f72d00c..254759f776 100644 --- a/hw/i386/xen/xen-mapcache.c +++ b/hw/i386/xen/xen-mapcache.c @@ -184,9 +184,14 @@ static void xen_remap_bucket(MapCacheEntry *entry, pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i; } + /* + * If the caller has requested the mapping at a specific address use + * MAP_FIXED to make sure it's honored. + */ if (!dummy) { vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr, - PROT_READ | PROT_WRITE, 0, + PROT_READ | PROT_WRITE, + vaddr ? MAP_FIXED : 0, nb_pfn, pfns, err); if (vaddr_base == NULL) { perror("xenforeignmemory_map2"); @@ -198,7 +203,8 @@ static void xen_remap_bucket(MapCacheEntry *entry, * mapping immediately due to certain circumstances (i.e. on resume now) */ vaddr_base = mmap(vaddr, size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_SHARED, -1, 0); + MAP_ANON | MAP_SHARED | (vaddr ? MAP_FIXED : 0), + -1, 0); if (vaddr_base == MAP_FAILED) { perror("mmap"); exit(-1); diff --git a/hw/ide/trace-events b/hw/ide/trace-events index 65d6f9034d..2e4162629f 100644 --- a/hw/ide/trace-events +++ b/hw/ide/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. 
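Note on the xen-mapcache.c hunk above: the remap path now passes MAP_FIXED only when the caller supplied a target virtual address, so a rebuilt bucket (e.g. on resume) lands exactly where the cache entry already points instead of wherever the kernel chooses. The following is a minimal standalone sketch of that "vaddr ? MAP_FIXED : 0" pattern; it is not QEMU code, and the file name, helper name and sizes are invented for the demo.

    /* map_fixed_demo.c - conditional MAP_FIXED use (hypothetical demo).
     * Build: cc -o map_fixed_demo map_fixed_demo.c */
    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    /* Map 'size' bytes of anonymous memory.  If 'want' is non-NULL the
     * caller insists on that exact address, so add MAP_FIXED; otherwise
     * let the kernel pick the placement. */
    static void *map_at(void *want, size_t size)
    {
        int flags = MAP_ANONYMOUS | MAP_PRIVATE | (want ? MAP_FIXED : 0);
        void *p = mmap(want, size, PROT_READ | PROT_WRITE, flags, -1, 0);

        return p == MAP_FAILED ? NULL : p;
    }

    int main(void)
    {
        size_t size = 2 * 1024 * 1024;
        void *first = map_at(NULL, size);   /* kernel chooses placement */
        void *again;

        if (!first) {
            perror("mmap");
            return 1;
        }
        /* Re-establish the mapping at the same address; MAP_FIXED
         * atomically replaces the old pages in that range, which is the
         * behaviour a cache relying on stable addresses needs. */
        again = map_at(first, size);
        printf("first=%p again=%p\n", first, again);
        return again == first ? 0 : 1;
    }

Without MAP_FIXED the second mmap() would treat the requested address only as a hint and could return a different range, which is exactly the failure mode the patch guards against.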
-# hw/ide/core.c +# core.c # portio ide_ioport_read(uint32_t addr, const char *reg, uint32_t val, void *bus, void *s) "IDE PIO rd @ 0x%"PRIx32" (%s); val 0x%02"PRIx32"; bus %p IDEState %p" ide_ioport_write(uint32_t addr, const char *reg, uint32_t val, void *bus, void *s) "IDE PIO wr @ 0x%"PRIx32" (%s); val 0x%02"PRIx32"; bus %p IDEState %p" @@ -23,30 +23,30 @@ ide_dma_cb(void *s, int64_t sector_num, int n, const char *dma) "IDEState %p; se # BMDMA HBAs: -# hw/ide/cmd646.c +# cmd646.c bmdma_read_cmd646(uint64_t addr, uint32_t val) "bmdma: readb 0x%"PRIx64" : 0x%02x" bmdma_write_cmd646(uint64_t addr, uint64_t val) "bmdma: writeb 0x%"PRIx64" : 0x%02"PRIx64 -# hw/ide/pci.c +# pci.c bmdma_reset(void) "" bmdma_cmd_writeb(uint32_t val) "val: 0x%08x" bmdma_addr_read(uint64_t data) "data: 0x%016"PRIx64 bmdma_addr_write(uint64_t data) "data: 0x%016"PRIx64 -# hw/ide/piix.c +# piix.c bmdma_read(uint64_t addr, uint8_t val) "bmdma: readb 0x%"PRIx64" : 0x%02x" bmdma_write(uint64_t addr, uint64_t val) "bmdma: writeb 0x%"PRIx64" : 0x%02"PRIx64 -# hw/ide/sii3112.c +# sii3112.c sii3112_read(int size, uint64_t addr, uint64_t val) "bmdma: read (size %d) 0x%"PRIx64" : 0x%02"PRIx64 sii3112_write(int size, uint64_t addr, uint64_t val) "bmdma: write (size %d) 0x%"PRIx64" : 0x%02"PRIx64 sii3112_set_irq(int channel, int level) "channel %d level %d" -# hw/ide/via.c +# via.c bmdma_read_via(uint64_t addr, uint32_t val) "bmdma: readb 0x%"PRIx64" : 0x%02x" bmdma_write_via(uint64_t addr, uint64_t val) "bmdma: writeb 0x%"PRIx64" : 0x%02"PRIx64 -# hw/ide/atapi.c +# atapi.c cd_read_sector_sync(int lba) "lba=%d" cd_read_sector_cb(int lba, int ret) "lba=%d ret=%d" cd_read_sector(int lba) "lba=%d" @@ -62,7 +62,7 @@ ide_atapi_cmd_read_dma_cb_aio(void *s, int lba, int n) "IDEState: %p; aio read: # Warning: Verbose ide_atapi_cmd_packet(void *s, uint16_t limit, const char *packet) "IDEState: %p; limit=0x%x packet: %s" -# hw/ide/ahci.c +# ahci.c ahci_port_read(void *s, int port, const char *reg, int offset, uint32_t ret) "ahci(%p)[%d]: port read [reg:%s] @ 0x%x: 0x%08x" ahci_port_read_default(void *s, int port, const char *reg, int offset) "ahci(%p)[%d]: unimplemented port read [reg:%s] @ 0x%x" ahci_irq_raise(void *s) "ahci(%p): raise irq" @@ -91,7 +91,6 @@ ahci_populate_sglist_short_map(void *s, int port) "ahci(%p)[%d]: mapped less tha ahci_populate_sglist_bad_offset(void *s, int port, int off_idx, int64_t off_pos) "ahci(%p)[%d]: Incorrect offset! 
off_idx: %d, off_pos: %"PRId64 ncq_finish(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: NCQ transfer finished" execute_ncq_command_read(void *s, int port, uint8_t tag, int count, int64_t lba) "ahci(%p)[%d][tag:%d]: NCQ reading %d sectors from LBA %"PRId64 -execute_ncq_command_write(void *s, int port, uint8_t tag, int count, int64_t lba) "ahci(%p)[%d][tag:%d]: NCQ writing %d sectors to LBA %"PRId64 execute_ncq_command_unsup(void *s, int port, uint8_t tag, uint8_t cmd) "ahci(%p)[%d][tag:%d]: error: unsupported NCQ command (0x%02x) received" process_ncq_command_mismatch(void *s, int port, uint8_t tag, uint8_t slot) "ahci(%p)[%d][tag:%d]: Warning: NCQ slot (%d) did not match the given tag" process_ncq_command_aux(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: Warn: Attempt to use NCQ auxiliary fields" @@ -115,9 +114,11 @@ ahci_dma_prepare_buf_fail(void *s, int port) "ahci(%p)[%d]: sglist population fa ahci_dma_rw_buf(void *s, int port, int l) "ahci(%p)[%d] len=0x%x" ahci_cmd_done(void *s, int port) "ahci(%p)[%d]: cmd done" ahci_reset(void *s) "ahci(%p): HBA reset" -allwinner_ahci_mem_read(void *s, void *a, uint64_t addr, uint64_t val, unsigned size) "ahci(%p): read a=%p addr=0x%"PRIx64" val=0x%"PRIx64", size=%d" -allwinner_ahci_mem_write(void *s, void *a, uint64_t addr, uint64_t val, unsigned size) "ahci(%p): write a=%p addr=0x%"PRIx64" val=0x%"PRIx64", size=%d" # Warning: Verbose handle_reg_h2d_fis_dump(void *s, int port, const char *fis) "ahci(%p)[%d]: %s" handle_cmd_fis_dump(void *s, int port, const char *fis) "ahci(%p)[%d]: %s" + +# ahci-allwinner.c +allwinner_ahci_mem_read(void *s, void *a, uint64_t addr, uint64_t val, unsigned size) "ahci(%p): read a=%p addr=0x%"PRIx64" val=0x%"PRIx64", size=%d" +allwinner_ahci_mem_write(void *s, void *a, uint64_t addr, uint64_t val, unsigned size) "ahci(%p): write a=%p addr=0x%"PRIx64" val=0x%"PRIx64", size=%d" diff --git a/hw/input/stellaris_input.c b/hw/input/stellaris_input.c index 99168bfeef..20c87d86f4 100644 --- a/hw/input/stellaris_input.c +++ b/hw/input/stellaris_input.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" #include "hw/hw.h" -#include "hw/devices.h" +#include "hw/input/gamepad.h" #include "ui/console.h" typedef struct { diff --git a/hw/input/trace-events b/hw/input/trace-events index 8e53ae5bbf..cf072fa2f8 100644 --- a/hw/input/trace-events +++ b/hw/input/trace-events @@ -1,27 +1,27 @@ # See docs/devel/tracing.txt for syntax documentation. 
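The trace-events hunks in this series (here and in the other hw/ directories) only shorten the per-file group headers from "# hw/<dir>/<file>.c" to "# <file>.c"; the event declarations themselves are unchanged. For readers not familiar with the mechanism: each declaration line turns into a trace_<name>() helper that the matching C file calls, generated by QEMU's tracetool at build time. The standalone mock below only imitates the shape of that contract; the event name is invented and plain fprintf stands in for the generated tracepoint.

    /* trace_mock.c - rough imitation of what a trace-events line such as
     *   foo_reg_write(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" val 0x%"PRIx64
     * expands to.  Real QEMU generates this via tracetool; this is only a
     * demo of the calling convention. */
    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool trace_foo_reg_write_enabled = true;  /* backend decides this */

    static void trace_foo_reg_write(uint64_t addr, uint64_t val)
    {
        if (trace_foo_reg_write_enabled) {
            fprintf(stderr, "foo_reg_write addr 0x%"PRIx64" val 0x%"PRIx64"\n",
                    addr, val);
        }
    }

    int main(void)
    {
        /* A device model would call this from its MMIO write handler. */
        trace_foo_reg_write(0x1000, 0xdeadbeef);
        return 0;
    }

Because the group header is informational only, renaming it has no effect on the generated helpers, which is why these hunks are purely mechanical.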
-# hw/input/adb-kbd.c +# adb-kbd.c adb_kbd_no_key(void) "Ignoring NO_KEY" adb_kbd_writereg(int reg, uint8_t val) "reg %d val 0x%2.2x" adb_kbd_readreg(int reg, uint8_t val0, uint8_t val1) "reg %d obuf[0] 0x%2.2x obuf[1] 0x%2.2x" adb_kbd_request_change_addr(int devaddr) "change addr to 0x%x" adb_kbd_request_change_addr_and_handler(int devaddr, int handler) "change addr and handler to 0x%x, 0x%x" -# hw/input/adb-mouse.c +# adb-mouse.c adb_mouse_flush(void) "flush" adb_mouse_writereg(int reg, uint8_t val) "reg %d val 0x%2.2x" adb_mouse_readreg(int reg, uint8_t val0, uint8_t val1) "reg %d obuf[0] 0x%2.2x obuf[1] 0x%2.2x" adb_mouse_request_change_addr(int devaddr) "change addr to 0x%x" adb_mouse_request_change_addr_and_handler(int devaddr, int handler) "change addr and handler to 0x%x, 0x%x" -# hw/input/pckbd.c +# pckbd.c pckbd_kbd_read_data(uint32_t val) "0x%02x" pckbd_kbd_read_status(int status) "0x%02x" pckbd_outport_write(uint32_t val) "0x%02x" pckbd_kbd_write_command(uint64_t val) "0x%02"PRIx64 pckbd_kbd_write_data(uint64_t val) "0x%02"PRIx64 -# hw/input/ps2.c +# ps2.c ps2_put_keycode(void *opaque, int keycode) "%p keycode 0x%02x" ps2_keyboard_event(void *opaque, int qcode, int down, unsigned int modifier, unsigned int modifiers) "%p qcode %d down %d modifier 0x%x modifiers 0x%x" ps2_read_data(void *opaque) "%p" @@ -37,19 +37,19 @@ ps2_mouse_reset(void *opaque) "%p" ps2_kbd_init(void *s) "%p" ps2_mouse_init(void *s) "%p" -# hw/input/milkymist-softusb.c +# milkymist-softusb.c milkymist_softusb_memory_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_softusb_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_softusb_mevt(uint8_t m) "m %d" milkymist_softusb_kevt(uint8_t m) "m %d" milkymist_softusb_pulse_irq(void) "Pulse IRQ" -# hw/input/hid.c +# hid.c hid_kbd_queue_full(void) "queue full" hid_kbd_queue_empty(void) "queue empty" -# hw/input/tsc2005.c +# tsc2005.c tsc2005_sense(const char *state) "touchscreen sense %s" -# hw/input/virtio +# virtio-input.c virtio_input_queue_full(void) "queue full" diff --git a/hw/input/tsc2005.c b/hw/input/tsc2005.c index 2b9108a193..f82771e7a7 100644 --- a/hw/input/tsc2005.c +++ b/hw/input/tsc2005.c @@ -23,7 +23,7 @@ #include "hw/hw.h" #include "qemu/timer.h" #include "ui/console.h" -#include "hw/devices.h" +#include "hw/input/tsc2xxx.h" #include "trace.h" #define TSC_CUT_RESOLUTION(value, p) ((value) >> (16 - (p ? 
12 : 10))) diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c index 2eb3cb9518..f94cb4683b 100644 --- a/hw/input/tsc210x.c +++ b/hw/input/tsc210x.c @@ -24,8 +24,8 @@ #include "audio/audio.h" #include "qemu/timer.h" #include "ui/console.h" -#include "hw/arm/omap.h" /* For I2SCodec and uWireSlave */ -#include "hw/devices.h" +#include "hw/arm/omap.h" /* For I2SCodec */ +#include "hw/input/tsc2xxx.h" #define TSC_DATA_REGISTERS_PAGE 0x0 #define TSC_CONTROL_REGISTERS_PAGE 0x1 @@ -318,7 +318,7 @@ static void tsc2102_audio_output_update(TSC210xState *s) fmt.endianness = 0; fmt.nchannels = 2; fmt.freq = s->codec.tx_rate; - fmt.fmt = AUD_FMT_S16; + fmt.fmt = AUDIO_FORMAT_S16; s->dac_voice[0] = AUD_open_out(&s->card, s->dac_voice[0], "tsc2102.sink", s, (void *) tsc210x_audio_out_cb, &fmt); diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig index de10a6bcbf..5347f8412c 100644 --- a/hw/intc/Kconfig +++ b/hw/intc/Kconfig @@ -12,12 +12,15 @@ config IOAPIC config ARM_GIC bool + select MSI_NONBROKEN config OPENPIC bool + select MSI_NONBROKEN config APIC bool + select MSI_NONBROKEN config ARM_GIC_KVM bool diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index ab822f4251..fff6e694e6 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -655,6 +655,102 @@ void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure) do_armv7m_nvic_set_pending(opaque, irq, secure, true); } +void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure) +{ + /* + * Pend an exception during lazy FP stacking. This differs + * from the usual exception pending because the logic for + * whether we should escalate depends on the saved context + * in the FPCCR register, not on the current state of the CPU/NVIC. + */ + NVICState *s = (NVICState *)opaque; + bool banked = exc_is_banked(irq); + VecInfo *vec; + bool targets_secure; + bool escalate = false; + /* + * We will only look at bits in fpccr if this is a banked exception + * (in which case 'secure' tells us whether it is the S or NS version). + * All the bits for the non-banked exceptions are in fpccr_s. + */ + uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S]; + uint32_t fpccr = s->cpu->env.v7m.fpccr[secure]; + + assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); + assert(!secure || banked); + + vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq]; + + targets_secure = banked ? secure : exc_targets_secure(s, irq); + + switch (irq) { + case ARMV7M_EXCP_DEBUG: + if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) { + /* Ignore DebugMonitor exception */ + return; + } + break; + case ARMV7M_EXCP_MEM: + escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK); + break; + case ARMV7M_EXCP_USAGE: + escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK); + break; + case ARMV7M_EXCP_BUS: + escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK); + break; + case ARMV7M_EXCP_SECURE: + escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK); + break; + default: + g_assert_not_reached(); + } + + if (escalate) { + /* + * Escalate to HardFault: faults that initially targeted Secure + * continue to do so, even if HF normally targets NonSecure. 
+ */ + irq = ARMV7M_EXCP_HARD; + if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) && + (targets_secure || + !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) { + vec = &s->sec_vectors[irq]; + } else { + vec = &s->vectors[irq]; + } + } + + if (!vec->enabled || + nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) { + if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) { + /* + * We want to escalate to HardFault but the context the + * FP state belongs to prevents the exception pre-empting. + */ + cpu_abort(&s->cpu->parent_obj, + "Lockup: can't escalate to HardFault during " + "lazy FP register stacking\n"); + } + } + + if (escalate) { + s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK; + } + if (!vec->pending) { + vec->pending = 1; + /* + * We do not call nvic_irq_update(), because we know our caller + * is going to handle causing us to take the exception by + * raising EXCP_LAZYFP, so raising the IRQ line would be + * pointless extra work. We just need to recompute the + * priorities so that armv7m_nvic_can_take_pending_exception() + * returns the right answer. + */ + nvic_recompute_state(s); + } +} + /* Make pending IRQ active. */ void armv7m_nvic_acknowledge_irq(void *opaque) { @@ -746,6 +842,40 @@ int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure) return ret; } +bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure) +{ + /* + * Return whether an exception is "ready", i.e. it is enabled and is + * configured at a priority which would allow it to interrupt the + * current execution priority. + * + * irq and secure have the same semantics as for armv7m_nvic_set_pending(): + * for non-banked exceptions secure is always false; for banked exceptions + * it indicates which of the exceptions is required. + */ + NVICState *s = (NVICState *)opaque; + bool banked = exc_is_banked(irq); + VecInfo *vec; + int running = nvic_exec_prio(s); + + assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); + assert(!secure || banked); + + /* + * HardFault is an odd special case: we always check against -1, + * even if we're secure and HardFault has priority -3; we never + * need to check for enabled state. + */ + if (irq == ARMV7M_EXCP_HARD) { + return running > -1; + } + + vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq]; + + return vec->enabled && + exc_group_prio(s, vec->prio, secure) < running; +} + /* callback when external interrupt line is changed */ static void set_irq_level(void *opaque, int n, int level) { @@ -1077,6 +1207,16 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) } case 0xd84: /* CSSELR */ return cpu->env.v7m.csselr[attrs.secure]; + case 0xd88: /* CPACR */ + if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) { + return 0; + } + return cpu->env.v7m.cpacr[attrs.secure]; + case 0xd8c: /* NSACR */ + if (!attrs.secure || !arm_feature(&cpu->env, ARM_FEATURE_VFP)) { + return 0; + } + return cpu->env.v7m.nsacr; /* TODO: Implement debug registers. */ case 0xd90: /* MPU_TYPE */ /* Unified MPU; if the MPU is not present this value is zero */ @@ -1222,6 +1362,49 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) return 0; } return cpu->env.v7m.sfar; + case 0xf34: /* FPCCR */ + if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) { + return 0; + } + if (attrs.secure) { + return cpu->env.v7m.fpccr[M_REG_S]; + } else { + /* + * NS can read LSPEN, CLRONRET and MONRDY. It can read + * BFRDY and HFRDY if AIRCR.BFHFNMINS != 0; + * other non-banked bits RAZ. + * TODO: MONRDY should RAZ/WI if DEMCR.SDME is set. 
+ */ + uint32_t value = cpu->env.v7m.fpccr[M_REG_S]; + uint32_t mask = R_V7M_FPCCR_LSPEN_MASK | + R_V7M_FPCCR_CLRONRET_MASK | + R_V7M_FPCCR_MONRDY_MASK; + + if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { + mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK; + } + + value &= mask; + + value |= cpu->env.v7m.fpccr[M_REG_NS]; + return value; + } + case 0xf38: /* FPCAR */ + if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) { + return 0; + } + return cpu->env.v7m.fpcar[attrs.secure]; + case 0xf3c: /* FPDSCR */ + if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) { + return 0; + } + return cpu->env.v7m.fpdscr[attrs.secure]; + case 0xf40: /* MVFR0 */ + return cpu->isar.mvfr0; + case 0xf44: /* MVFR1 */ + return cpu->isar.mvfr1; + case 0xf48: /* MVFR2 */ + return cpu->isar.mvfr2; default: bad_offset: qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset); @@ -1469,6 +1652,18 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK; } break; + case 0xd88: /* CPACR */ + if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) { + /* We implement only the Floating Point extension's CP10/CP11 */ + cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20); + } + break; + case 0xd8c: /* NSACR */ + if (attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_VFP)) { + /* We implement only the Floating Point extension's CP10/CP11 */ + cpu->env.v7m.nsacr = value & (3 << 10); + } + break; case 0xd90: /* MPU_TYPE */ return; /* RO */ case 0xd94: /* MPU_CTRL */ @@ -1697,6 +1892,72 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, } break; } + case 0xf34: /* FPCCR */ + if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) { + /* Not all bits here are banked. */ + uint32_t fpccr_s; + + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + /* Don't allow setting of bits not present in v7M */ + value &= (R_V7M_FPCCR_LSPACT_MASK | + R_V7M_FPCCR_USER_MASK | + R_V7M_FPCCR_THREAD_MASK | + R_V7M_FPCCR_HFRDY_MASK | + R_V7M_FPCCR_MMRDY_MASK | + R_V7M_FPCCR_BFRDY_MASK | + R_V7M_FPCCR_MONRDY_MASK | + R_V7M_FPCCR_LSPEN_MASK | + R_V7M_FPCCR_ASPEN_MASK); + } + value &= ~R_V7M_FPCCR_RES0_MASK; + + if (!attrs.secure) { + /* Some non-banked bits are configurably writable by NS */ + fpccr_s = cpu->env.v7m.fpccr[M_REG_S]; + if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) { + uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN); + fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen); + } + if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) { + uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET); + fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor); + } + if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { + uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY); + uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY); + fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy); + fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy); + } + /* TODO MONRDY should RAZ/WI if DEMCR.SDME is set */ + { + uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY); + fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy); + } + + /* + * All other non-banked bits are RAZ/WI from NS; write + * just the banked bits to fpccr[M_REG_NS]. 
+ */ + value &= R_V7M_FPCCR_BANKED_MASK; + cpu->env.v7m.fpccr[M_REG_NS] = value; + } else { + fpccr_s = value; + } + cpu->env.v7m.fpccr[M_REG_S] = fpccr_s; + } + break; + case 0xf38: /* FPCAR */ + if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) { + value &= ~7; + cpu->env.v7m.fpcar[attrs.secure] = value; + } + break; + case 0xf3c: /* FPDSCR */ + if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) { + value &= 0x07c00000; + cpu->env.v7m.fpdscr[attrs.secure] = value; + } + break; case 0xf50: /* ICIALLU */ case 0xf58: /* ICIMVAU */ case 0xf5c: /* DCIMVAC */ diff --git a/hw/intc/bcm2836_control.c b/hw/intc/bcm2836_control.c index cfa5bc7365..421469f2ef 100644 --- a/hw/intc/bcm2836_control.c +++ b/hw/intc/bcm2836_control.c @@ -7,7 +7,9 @@ * This code is licensed under the GNU GPLv2 and later. * * At present, only implements interrupt routing, and mailboxes (i.e., - * not local timer, PMU interrupt, or AXI counters). + * not PMU interrupt, or AXI counters). + * + * ARM Local Timer IRQ Copyright (c) 2019. Zoltán Baldaszti * * Ref: * https://www.raspberrypi.org/documentation/hardware/raspberrypi/bcm2836/QA7_rev3.4.pdf @@ -18,6 +20,9 @@ #include "qemu/log.h" #define REG_GPU_ROUTE 0x0c +#define REG_LOCALTIMERROUTING 0x24 +#define REG_LOCALTIMERCONTROL 0x34 +#define REG_LOCALTIMERACK 0x38 #define REG_TIMERCONTROL 0x40 #define REG_MBOXCONTROL 0x50 #define REG_IRQSRC 0x60 @@ -43,6 +48,13 @@ #define IRQ_TIMER 11 #define IRQ_MAX IRQ_TIMER +#define LOCALTIMER_FREQ 38400000 +#define LOCALTIMER_INTFLAG (1 << 31) +#define LOCALTIMER_RELOAD (1 << 30) +#define LOCALTIMER_INTENABLE (1 << 29) +#define LOCALTIMER_ENABLE (1 << 28) +#define LOCALTIMER_VALUE(x) ((x) & 0xfffffff) + static void deliver_local(BCM2836ControlState *s, uint8_t core, uint8_t irq, uint32_t controlreg, uint8_t controlidx) { @@ -78,6 +90,20 @@ static void bcm2836_control_update(BCM2836ControlState *s) s->fiqsrc[s->route_gpu_fiq] |= (uint32_t)1 << IRQ_GPU; } + /* + * handle the control module 'local timer' interrupt for one of the + * cores' IRQ/FIQ; this is distinct from the per-CPU timer + * interrupts handled below. 
+ */ + if ((s->local_timer_control & LOCALTIMER_INTENABLE) && + (s->local_timer_control & LOCALTIMER_INTFLAG)) { + if (s->route_localtimer & 4) { + s->fiqsrc[(s->route_localtimer & 3)] |= (uint32_t)1 << IRQ_TIMER; + } else { + s->irqsrc[(s->route_localtimer & 3)] |= (uint32_t)1 << IRQ_TIMER; + } + } + for (i = 0; i < BCM2836_NCORES; i++) { /* handle local timer interrupts for this core */ if (s->timerirqs[i]) { @@ -162,6 +188,54 @@ static void bcm2836_control_set_gpu_fiq(void *opaque, int irq, int level) bcm2836_control_update(s); } +static void bcm2836_control_local_timer_set_next(void *opaque) +{ + BCM2836ControlState *s = opaque; + uint64_t next_event; + + assert(LOCALTIMER_VALUE(s->local_timer_control) > 0); + + next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + muldiv64(LOCALTIMER_VALUE(s->local_timer_control), + NANOSECONDS_PER_SECOND, LOCALTIMER_FREQ); + timer_mod(&s->timer, next_event); +} + +static void bcm2836_control_local_timer_tick(void *opaque) +{ + BCM2836ControlState *s = opaque; + + bcm2836_control_local_timer_set_next(s); + + s->local_timer_control |= LOCALTIMER_INTFLAG; + bcm2836_control_update(s); +} + +static void bcm2836_control_local_timer_control(void *opaque, uint32_t val) +{ + BCM2836ControlState *s = opaque; + + s->local_timer_control = val; + if (val & LOCALTIMER_ENABLE) { + bcm2836_control_local_timer_set_next(s); + } else { + timer_del(&s->timer); + } +} + +static void bcm2836_control_local_timer_ack(void *opaque, uint32_t val) +{ + BCM2836ControlState *s = opaque; + + if (val & LOCALTIMER_INTFLAG) { + s->local_timer_control &= ~LOCALTIMER_INTFLAG; + } + if ((val & LOCALTIMER_RELOAD) && + (s->local_timer_control & LOCALTIMER_ENABLE)) { + bcm2836_control_local_timer_set_next(s); + } +} + static uint64_t bcm2836_control_read(void *opaque, hwaddr offset, unsigned size) { BCM2836ControlState *s = opaque; @@ -170,6 +244,12 @@ static uint64_t bcm2836_control_read(void *opaque, hwaddr offset, unsigned size) assert(s->route_gpu_fiq < BCM2836_NCORES && s->route_gpu_irq < BCM2836_NCORES); return ((uint32_t)s->route_gpu_fiq << 2) | s->route_gpu_irq; + } else if (offset == REG_LOCALTIMERROUTING) { + return s->route_localtimer; + } else if (offset == REG_LOCALTIMERCONTROL) { + return s->local_timer_control; + } else if (offset == REG_LOCALTIMERACK) { + return 0; } else if (offset >= REG_TIMERCONTROL && offset < REG_MBOXCONTROL) { return s->timercontrol[(offset - REG_TIMERCONTROL) >> 2]; } else if (offset >= REG_MBOXCONTROL && offset < REG_IRQSRC) { @@ -195,6 +275,12 @@ static void bcm2836_control_write(void *opaque, hwaddr offset, if (offset == REG_GPU_ROUTE) { s->route_gpu_irq = val & 0x3; s->route_gpu_fiq = (val >> 2) & 0x3; + } else if (offset == REG_LOCALTIMERROUTING) { + s->route_localtimer = val & 7; + } else if (offset == REG_LOCALTIMERCONTROL) { + bcm2836_control_local_timer_control(s, val); + } else if (offset == REG_LOCALTIMERACK) { + bcm2836_control_local_timer_ack(s, val); } else if (offset >= REG_TIMERCONTROL && offset < REG_MBOXCONTROL) { s->timercontrol[(offset - REG_TIMERCONTROL) >> 2] = val & 0xff; } else if (offset >= REG_MBOXCONTROL && offset < REG_IRQSRC) { @@ -227,6 +313,10 @@ static void bcm2836_control_reset(DeviceState *d) s->route_gpu_irq = s->route_gpu_fiq = 0; + timer_del(&s->timer); + s->route_localtimer = 0; + s->local_timer_control = 0; + for (i = 0; i < BCM2836_NCORES; i++) { s->timercontrol[i] = 0; s->mailboxcontrol[i] = 0; @@ -263,11 +353,15 @@ static void bcm2836_control_init(Object *obj) /* outputs to CPU cores */ 
qdev_init_gpio_out_named(dev, s->irq, "irq", BCM2836_NCORES); qdev_init_gpio_out_named(dev, s->fiq, "fiq", BCM2836_NCORES); + + /* create a qemu virtual timer */ + timer_init_ns(&s->timer, QEMU_CLOCK_VIRTUAL, + bcm2836_control_local_timer_tick, s); } static const VMStateDescription vmstate_bcm2836_control = { .name = TYPE_BCM2836_CONTROL, - .version_id = 1, + .version_id = 2, .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_UINT32_ARRAY(mailboxes, BCM2836ControlState, @@ -277,6 +371,9 @@ static const VMStateDescription vmstate_bcm2836_control = { VMSTATE_UINT32_ARRAY(timercontrol, BCM2836ControlState, BCM2836_NCORES), VMSTATE_UINT32_ARRAY(mailboxcontrol, BCM2836ControlState, BCM2836_NCORES), + VMSTATE_TIMER_V(timer, BCM2836ControlState, 2), + VMSTATE_UINT32_V(local_timer_control, BCM2836ControlState, 2), + VMSTATE_UINT8_V(route_localtimer, BCM2836ControlState, 2), VMSTATE_END_OF_LIST() } }; diff --git a/hw/intc/trace-events b/hw/intc/trace-events index 7769869a13..a28bdce925 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -1,13 +1,13 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/intc/i8259.c +# i8259.c pic_update_irq(bool master, uint8_t imr, uint8_t irr, uint8_t padd) "master %d imr %"PRIu8" irr %"PRIu8" padd %"PRIu8 pic_set_irq(bool master, int irq, int level) "master %d irq %d level %d" pic_interrupt(int irq, int intno) "irq %d intno %d" pic_ioport_write(bool master, uint64_t addr, uint64_t val) "master %d addr 0x%"PRIx64" val 0x%"PRIx64 pic_ioport_read(bool master, uint64_t addr, int val) "master %d addr 0x%"PRIx64" val 0x%x" -# hw/intc/apic_common.c +# apic_common.c cpu_set_apic_base(uint64_t val) "0x%016"PRIx64 cpu_get_apic_base(uint64_t val) "0x%016"PRIx64 # coalescing @@ -15,13 +15,13 @@ apic_report_irq_delivered(int apic_irq_delivered) "coalescing %d" apic_reset_irq_delivered(int apic_irq_delivered) "old coalescing %d" apic_get_irq_delivered(int apic_irq_delivered) "returning coalescing %d" -# hw/intc/apic.c +# apic.c apic_local_deliver(int vector, uint32_t lvt) "vector %d delivery mode %d" apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, uint8_t vector_num, uint8_t trigger_mode) "dest %d dest_mode %d delivery_mode %d vector %d trigger_mode %d" apic_mem_readl(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x" apic_mem_writel(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x" -# hw/intc/ioapic.c +# ioapic.c ioapic_set_remote_irr(int n) "set remote irr for pin %d" ioapic_clear_remote_irr(int n, int vector) "clear remote irr for pin %d vector %d" ioapic_eoi_broadcast(int vector) "EOI broadcast for vector %d" @@ -29,7 +29,7 @@ ioapic_mem_read(uint8_t addr, uint8_t regsel, uint8_t size, uint32_t val) "ioapi ioapic_mem_write(uint8_t addr, uint8_t regsel, uint8_t size, uint32_t val) "ioapic mem write addr 0x%"PRIx8" regsel: 0x%"PRIx8" size 0x%"PRIx8" val 0x%"PRIx32 ioapic_set_irq(int vector, int level) "vector: %d level: %d" -# hw/intc/slavio_intctl.c +# slavio_intctl.c slavio_intctl_mem_readl(uint32_t cpu, uint64_t addr, uint32_t ret) "read cpu %d reg 0x%"PRIx64" = 0x%x" slavio_intctl_mem_writel(uint32_t cpu, uint64_t addr, uint32_t val) "write cpu %d reg 0x%"PRIx64" = 0x%x" slavio_intctl_mem_writel_clear(uint32_t cpu, uint32_t val, uint32_t intreg_pending) "Cleared cpu %d irq mask 0x%x, curmask 0x%x" @@ -43,14 +43,14 @@ slavio_check_interrupts(uint32_t pending, uint32_t intregm_disabled) "pending 0x slavio_set_irq(uint32_t target_cpu, int irq, uint32_t pil, int level) "Set cpu %d irq %d -> pil %d level %d" 
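Stepping back to the bcm2836_control local timer added above: the control register's 28-bit reload field counts ticks of a 38.4 MHz reference, and the device converts that count to a QEMU_CLOCK_VIRTUAL deadline with muldiv64() before calling timer_mod(). The sketch below reproduces only that interval arithmetic; the constants and the LOCALTIMER_VALUE mask come from the patch, everything else (file name, helper, example register value) is invented, and with these bounds a plain 64-bit expression is already exact, so QEMU's overflow-safe muldiv64() is not needed here.

    /* localtimer_period.c - interval computation used by the bcm2836
     * control block's local timer (illustrative, not QEMU code). */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LOCALTIMER_FREQ        38400000u          /* 38.4 MHz reference */
    #define LOCALTIMER_ENABLE      (1u << 28)
    #define LOCALTIMER_VALUE(x)    ((x) & 0xfffffff)  /* 28-bit reload field */
    #define NANOSECONDS_PER_SECOND 1000000000ull

    /* Same result as muldiv64(value, NANOSECONDS_PER_SECOND, LOCALTIMER_FREQ):
     * value is at most 2^28 - 1, so value * 1e9 (< 2.7e17) still fits in a
     * uint64_t and the plain expression does not overflow. */
    static uint64_t reload_to_ns(uint32_t control)
    {
        uint64_t ticks = LOCALTIMER_VALUE(control);

        return ticks * NANOSECONDS_PER_SECOND / LOCALTIMER_FREQ;
    }

    int main(void)
    {
        /* Enable bit plus a reload of 38400000 ticks, i.e. one second. */
        uint32_t control = LOCALTIMER_ENABLE | 38400000u;

        /* The device adds this interval to the current virtual clock and
         * re-arms the timer; here we just print the interval. */
        printf("local timer fires every %" PRIu64 " ns\n",
               reload_to_ns(control));
        return 0;
    }

The INTFLAG/RELOAD handling in the ack register then decides whether the interrupt latch is cleared and whether the timer is re-armed, mirroring the bcm2836_control_local_timer_ack() logic above.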
slavio_set_timer_irq_cpu(int cpu, int level) "Set cpu %d local timer level %d" -# hw/intc/grlib_irqmp.c +# grlib_irqmp.c grlib_irqmp_check_irqs(uint32_t pend, uint32_t force, uint32_t mask, uint32_t lvl1, uint32_t lvl2) "pend:0x%04x force:0x%04x mask:0x%04x lvl1:0x%04x lvl0:0x%04x" grlib_irqmp_ack(int intno) "interrupt:%d" grlib_irqmp_set_irq(int irq) "Raise CPU IRQ %d" grlib_irqmp_readl_unknown(uint64_t addr) "addr 0x%"PRIx64 grlib_irqmp_writel_unknown(uint64_t addr, uint32_t value) "addr 0x%"PRIx64" value 0x%x" -# hw/intc/lm32_pic.c +# lm32_pic.c lm32_pic_raise_irq(void) "Raise CPU interrupt" lm32_pic_lower_irq(void) "Lower CPU interrupt" lm32_pic_interrupt(int irq, int level) "Set IRQ%d %d" @@ -59,7 +59,7 @@ lm32_pic_set_ip(uint32_t ip) "ip 0x%08x" lm32_pic_get_im(uint32_t im) "im 0x%08x" lm32_pic_get_ip(uint32_t ip) "ip 0x%08x" -# hw/intc/xics.c +# xics.c xics_icp_check_ipi(int server, uint8_t mfrr) "CPU %d can take IPI mfrr=0x%x" xics_icp_accept(uint32_t old_xirr, uint32_t new_xirr) "icp_accept: XIRR 0x%"PRIx32"->0x%"PRIx32 xics_icp_eoi(int server, uint32_t xirr, uint32_t new_xirr) "icp_eoi: server %d given XIRR 0x%"PRIx32" new XIRR 0x%"PRIx32 @@ -72,23 +72,23 @@ xics_ics_simple_write_xive(int nr, int srcno, int server, uint8_t priority) "ics xics_ics_simple_reject(int nr, int srcno) "reject irq 0x%x [src %d]" xics_ics_simple_eoi(int nr) "ics_eoi: irq 0x%x" -# hw/intc/s390_flic_kvm.c +# s390_flic_kvm.c flic_create_device(int err) "flic: create device failed %d" flic_no_device_api(int err) "flic: no Device Contral API support %d" flic_reset_failed(int err) "flic: reset failed %d" -# hw/intc/s390_flic.c +# s390_flic.c qemu_s390_airq_suppressed(uint8_t type, uint8_t isc) "flic: adapter I/O interrupt suppressed (type 0x%x isc 0x%x)" qemu_s390_suppress_airq(uint8_t isc, const char *from, const char *to) "flic: for isc 0x%x, suppress airq by modifying ais mode from %s to %s" -# hw/intc/aspeed_vic.c +# aspeed_vic.c aspeed_vic_set_irq(int irq, int level) "Enabling IRQ %d: %d" aspeed_vic_update_fiq(int flags) "Raising FIQ: %d" aspeed_vic_update_irq(int flags) "Raising IRQ: %d" aspeed_vic_read(uint64_t offset, unsigned size, uint32_t value) "From 0x%" PRIx64 " of size %u: 0x%" PRIx32 aspeed_vic_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 -# hw/intc/arm_gic.c +# arm_gic.c gic_enable_irq(int irq) "irq %d enabled" gic_disable_irq(int irq) "irq %d disabled" gic_set_irq(int irq, int level, int cpumask, int target) "irq %d level %d cpumask 0x%x target 0x%x" @@ -104,7 +104,7 @@ gic_dist_write(int addr, unsigned int size, uint32_t val) "dist write at 0x%08x gic_lr_entry(int cpu, int entry, uint32_t val) "cpu %d: new lr entry %d: 0x%08" PRIx32 gic_update_maintenance_irq(int cpu, int val) "cpu %d: maintenance = %d" -# hw/intc/arm_gicv3_cpuif.c +# arm_gicv3_cpuif.c gicv3_icc_pmr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_PMR read cpu 0x%x value 0x%" PRIx64 gicv3_icc_pmr_write(uint32_t cpu, uint64_t val) "GICv3 ICC_PMR write cpu 0x%x value 0x%" PRIx64 gicv3_icc_bpr_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_BPR%d read cpu 0x%x value 0x%" PRIx64 @@ -163,14 +163,14 @@ gicv3_icv_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_EOIR%d writ gicv3_cpuif_virt_update(uint32_t cpuid, int idx) "GICv3 CPU i/f 0x%x virt HPPI update LR index %d" gicv3_cpuif_virt_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel, int maintlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting FIQ %d IRQ %d maintenance-irq %d" -# hw/intc/arm_gicv3_dist.c +# 
arm_gicv3_dist.c gicv3_dist_read(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d" gicv3_dist_badread(uint64_t offset, unsigned size, bool secure) "GICv3 distributor read: offset 0x%" PRIx64 " size %u secure %d: error" gicv3_dist_write(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d" gicv3_dist_badwrite(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d: error" gicv3_dist_set_irq(int irq, int level) "GICv3 distributor interrupt %d level changed to %d" -# hw/intc/arm_gicv3_redist.c +# arm_gicv3_redist.c gicv3_redist_read(uint32_t cpu, uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 redistributor 0x%x read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d" gicv3_redist_badread(uint32_t cpu, uint64_t offset, unsigned size, bool secure) "GICv3 redistributor 0x%x read: offset 0x%" PRIx64 " size %u secure %d: error" gicv3_redist_write(uint32_t cpu, uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 redistributor 0x%x write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d" @@ -178,7 +178,7 @@ gicv3_redist_badwrite(uint32_t cpu, uint64_t offset, uint64_t data, unsigned siz gicv3_redist_set_irq(uint32_t cpu, int irq, int level) "GICv3 redistributor 0x%x interrupt %d level changed to %d" gicv3_redist_send_sgi(uint32_t cpu, int irq) "GICv3 redistributor 0x%x pending SGI %d" -# hw/intc/armv7m_nvic.c +# armv7m_nvic.c nvic_recompute_state(int vectpending, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d vectpending_prio %d exception_prio %d" nvic_recompute_state_secure(int vectpending, bool vectpending_is_s_banked, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d is_s_banked %d vectpending_prio %d exception_prio %d" nvic_set_prio(int irq, bool secure, uint8_t prio) "NVIC set irq %d secure-bank %d priority %d" @@ -187,7 +187,6 @@ nvic_escalate_prio(int irq, int irqprio, int runprio) "NVIC escalating irq %d to nvic_escalate_disabled(int irq) "NVIC escalating irq %d to HardFault: disabled" nvic_set_pending(int irq, bool secure, bool targets_secure, bool derived, int en, int prio) "NVIC set pending irq %d secure-bank %d targets_secure %d derived %d (enabled: %d priority %d)" nvic_clear_pending(int irq, bool secure, int en, int prio) "NVIC clear pending irq %d secure-bank %d (enabled: %d priority %d)" -nvic_set_pending_level(int irq) "NVIC set pending: irq %d higher prio than vectpending: setting irq line to 1" nvic_acknowledge_irq(int irq, int prio) "NVIC acknowledge IRQ: %d now active (prio %d)" nvic_get_pending_irq_info(int irq, bool secure) "NVIC next IRQ %d: targets_secure: %d" nvic_complete_irq(int irq, bool secure) "NVIC complete IRQ %d (secure %d)" @@ -196,7 +195,7 @@ nvic_set_nmi_level(int level) "NVIC external NMI level set to %d" nvic_sysreg_read(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" nvic_sysreg_write(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" -# hw/intc/heathrow_pic.c +# heathrow_pic.c heathrow_write(uint64_t addr, unsigned int n, uint64_t value) "0x%"PRIx64" %u: 0x%"PRIx64 heathrow_read(uint64_t addr, unsigned int n, uint64_t value) "0x%"PRIx64" %u: 0x%"PRIx64 
heathrow_set_irq(int num, int level) "set_irq: num=0x%02x level=%d" diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c index 607e1c167b..9d2b8adef7 100644 --- a/hw/intc/xics_spapr.c +++ b/hw/intc/xics_spapr.c @@ -95,8 +95,15 @@ static target_ulong h_eoi(PowerPCCPU *cpu, SpaprMachineState *spapr, static target_ulong h_ipoll(PowerPCCPU *cpu, SpaprMachineState *spapr, target_ulong opcode, target_ulong *args) { + ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), args[0]); uint32_t mfrr; - uint32_t xirr = icp_ipoll(spapr_cpu_state(cpu)->icp, &mfrr); + uint32_t xirr; + + if (!icp) { + return H_PARAMETER; + } + + xirr = icp_ipoll(icp, &mfrr); args[0] = xirr; args[1] = mfrr; diff --git a/hw/isa/Kconfig b/hw/isa/Kconfig index 57e09a0cb8..6db0d7970c 100644 --- a/hw/isa/Kconfig +++ b/hw/isa/Kconfig @@ -11,6 +11,7 @@ config I82378 select I8254 select I82374 select MC146818RTC + select PCSPK config PC87312 bool @@ -29,6 +30,7 @@ config PIIX4 # For historical reasons, SuperIO devices are created in the board # for PIIX4. select ISA_BUS + select USB_UHCI config VT82C686 bool diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c index e692b9fdc1..ac44aa53be 100644 --- a/hw/isa/lpc_ich9.c +++ b/hw/isa/lpc_ich9.c @@ -805,6 +805,7 @@ static void ich9_lpc_class_init(ObjectClass *klass, void *data) * pc_q35_init() */ dc->user_creatable = false; + hc->pre_plug = ich9_pm_device_pre_plug_cb; hc->plug = ich9_pm_device_plug_cb; hc->unplug_request = ich9_pm_device_unplug_request_cb; hc->unplug = ich9_pm_device_unplug_cb; diff --git a/hw/isa/trace-events b/hw/isa/trace-events index 80ac6175d6..202f8938e7 100644 --- a/hw/isa/trace-events +++ b/hw/isa/trace-events @@ -1,11 +1,11 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/isa/isa-superio.c +# isa-superio.c superio_create_parallel(int id, uint16_t base, unsigned int irq) "id=%d, base 0x%03x, irq %u" superio_create_serial(int id, uint16_t base, unsigned int irq) "id=%d, base 0x%03x, irq %u" superio_create_floppy(int id, uint16_t base, unsigned int irq) "id=%d, base 0x%03x, irq %u" superio_create_ide(int id, uint16_t base, unsigned int irq) "id=%d, base 0x%03x, irq %u" -# hw/isa/pc87312.c +# pc87312.c pc87312_io_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" pc87312_io_write(uint32_t addr, uint32_t val) "write addr=0x%x val=0x%x" diff --git a/hw/mem/trace-events b/hw/mem/trace-events index 0f2f278ff2..9f6b52acd7 100644 --- a/hw/mem/trace-events +++ b/hw/mem/trace-events @@ -1,8 +1,8 @@ # See docs/devel/tracing.txt for syntax documentation. 
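The xics_spapr hunk above hardens h_ipoll: xics_icp_get() returns NULL when the guest passes a server number with no interrupt presenter behind it, so the hypercall now fails with H_PARAMETER instead of dereferencing a NULL ICPState. A sketch of the same guard as it would look in any server-addressed hcall (the function name h_example is hypothetical):

static target_ulong h_example(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), args[0]);

    if (!icp) {
        /* No presenter for that server number: reject the argument. */
        return H_PARAMETER;
    }

    /* ... only touch icp after the NULL check ... */
    return H_SUCCESS;
}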
-# hw/mem/pc-dimm.c +# pc-dimm.c mhp_pc_dimm_assigned_slot(int slot) "%d" -# hw/mem/memory-device.c +# memory-device.c memory_device_pre_plug(const char *id, uint64_t addr) "id=%s addr=0x%"PRIx64 memory_device_plug(const char *id, uint64_t addr) "id=%s addr=0x%"PRIx64 memory_device_unplug(const char *id, uint64_t addr) "id=%s addr=0x%"PRIx64 diff --git a/hw/mips/boston.c b/hw/mips/boston.c index e5bab3cadc..a8b29f62f5 100644 --- a/hw/mips/boston.c +++ b/hw/mips/boston.c @@ -528,21 +528,21 @@ static void boston_mach_init(MachineState *machine) fw_size = load_image_targphys(machine->firmware, 0x1fc00000, 4 * MiB); if (fw_size == -1) { - error_printf("unable to load firmware image '%s'\n", + error_report("unable to load firmware image '%s'", machine->firmware); exit(1); } } else if (machine->kernel_filename) { fit_err = load_fit(&boston_fit_loader, machine->kernel_filename, s); if (fit_err) { - error_printf("unable to load FIT image\n"); + error_report("unable to load FIT image"); exit(1); } gen_firmware(memory_region_get_ram_ptr(flash) + 0x7c00000, s->kernel_entry, s->fdt_base, is_64b); } else if (!qtest_enabled()) { - error_printf("Please provide either a -kernel or -bios argument\n"); + error_report("Please provide either a -kernel or -bios argument"); exit(1); } } diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig index 2c60be99bc..5f67d0d6d9 100644 --- a/hw/misc/Kconfig +++ b/hw/misc/Kconfig @@ -34,7 +34,7 @@ config PCI_TESTDEV config EDU bool default y if TEST_DEVICES - depends on PCI + depends on PCI && MSI_NONBROKEN config PCA9552 bool @@ -67,7 +67,7 @@ config MACIO config IVSHMEM_DEVICE bool default y if PCI_DEVICES - depends on PCI && LINUX && IVSHMEM + depends on PCI && LINUX && IVSHMEM && MSI_NONBROKEN config ECCMEMCTL bool diff --git a/hw/misc/cbus.c b/hw/misc/cbus.c index 25e337ea77..16ee704bca 100644 --- a/hw/misc/cbus.c +++ b/hw/misc/cbus.c @@ -23,7 +23,7 @@ #include "qemu/osdep.h" #include "hw/hw.h" #include "hw/irq.h" -#include "hw/devices.h" +#include "hw/misc/cbus.h" #include "sysemu/sysemu.h" //#define DEBUG diff --git a/hw/misc/macio/trace-events b/hw/misc/macio/trace-events index 05019262fa..e4a1cc0d24 100644 --- a/hw/misc/macio/trace-events +++ b/hw/misc/macio/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/misc/macio/cuda.c +# cuda.c cuda_delay_set_sr_int(void) "" cuda_data_send(uint8_t data) "send: 0x%02x" cuda_data_recv(uint8_t data) "recv: 0x%02x" @@ -10,18 +10,17 @@ cuda_packet_receive_data(int i, const uint8_t data) "[%d] 0x%02x" cuda_packet_send(int len) "length %d" cuda_packet_send_data(int i, const uint8_t data) "[%d] 0x%02x" -# hw/misc/macio/macio.c +# macio.c macio_timer_write(uint64_t addr, unsigned len, uint64_t val) "write addr 0x%"PRIx64 " len %d val 0x%"PRIx64 macio_timer_read(uint64_t addr, unsigned len, uint32_t val) "read addr 0x%"PRIx64 " len %d val 0x%"PRIx32 -# hw/misc/macio/gpio.c +# gpio.c macio_set_gpio(int gpio, bool state) "setting GPIO %d to %d" macio_gpio_irq_assert(int gpio) "asserting GPIO %d" macio_gpio_irq_deassert(int gpio) "deasserting GPIO %d" macio_gpio_write(uint64_t addr, uint64_t val) "addr: 0x%"PRIx64" value: 0x%"PRIx64 -macio_gpio_read(uint64_t addr, uint64_t val) "addr: 0x%"PRIx64" value: 0x%"PRIx64 -# hw/misc/macio/pmu.c +# pmu.c pmu_adb_poll(int olen) "ADB autopoll, olen=%d" pmu_one_sec_timer(void) "PMU one sec..." 
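The boston.c hunk above (and the pci.c one later in this diff) converts fatal messages from error_printf() to error_report(). The practical difference is small but easy to get wrong: error_report() prefixes the message with the program name/location and appends the newline itself, so the explicit "\n" is dropped. A before/after sketch:

#include "qemu/error-report.h"

/* Before: plain print, the caller supplies the trailing newline. */
error_printf("unable to load FIT image\n");

/* After: location prefix and newline are added by error_report(). */
error_report("unable to load FIT image");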
pmu_cmd_set_int_mask(int intmask) "Setting PMU int mask to 0x%02x" diff --git a/hw/misc/trace-events b/hw/misc/trace-events index c1795bb54b..47e1bccf71 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/misc/eccmemctl.c +# eccmemctl.c ecc_mem_writel_mer(uint32_t val) "Write memory enable 0x%08x" ecc_mem_writel_mdr(uint32_t val) "Write memory delay 0x%08x" ecc_mem_writel_mfsr(uint32_t val) "Write memory fault status 0x%08x" @@ -20,7 +20,7 @@ ecc_mem_readl_ecr1(uint32_t ret) "Read event count 2 0x%08x" ecc_diag_mem_writeb(uint64_t addr, uint32_t val) "Write diagnostic %"PRId64" = 0x%02x" ecc_diag_mem_readb(uint64_t addr, uint32_t ret) "Read diagnostic %"PRId64"= 0x%02x" -# hw/misc/slavio_misc.c +# slavio_misc.c slavio_misc_update_irq_raise(void) "Raise IRQ" slavio_misc_update_irq_lower(void) "Lower IRQ" slavio_set_power_fail(int power_failing, uint8_t config) "Power fail: %d, config: %d" @@ -41,20 +41,20 @@ slavio_sysctrl_mem_readl(uint32_t ret) "Read system control 0x%08x" slavio_led_mem_writew(uint32_t val) "Write diagnostic LED 0x%04x" slavio_led_mem_readw(uint32_t ret) "Read diagnostic LED 0x%04x" -# hw/misc/milkymist-hpdmc.c +# milkymist-hpdmc.c milkymist_hpdmc_memory_read(uint32_t addr, uint32_t value) "addr=0x%08x value=0x%08x" milkymist_hpdmc_memory_write(uint32_t addr, uint32_t value) "addr=0x%08x value=0x%08x" -# hw/misc/milkymist-pfpu.c +# milkymist-pfpu.c milkymist_pfpu_memory_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_pfpu_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_pfpu_vectout(uint32_t a, uint32_t b, uint32_t dma_ptr) "a 0x%08x b 0x%08x dma_ptr 0x%08x" milkymist_pfpu_pulse_irq(void) "Pulse IRQ" -# hw/misc/aspeed_scu.c +# aspeed_scu.c aspeed_scu_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 -# hw/misc/mps2_scc.c +# mps2-scc.c mps2_scc_read(uint64_t offset, uint64_t data, unsigned size) "MPS2 SCC read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" mps2_scc_write(uint64_t offset, uint64_t data, unsigned size) "MPS2 SCC write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" mps2_scc_reset(void) "MPS2 SCC: reset" @@ -62,29 +62,29 @@ mps2_scc_leds(char led7, char led6, char led5, char led4, char led3, char led2, mps2_scc_cfg_write(unsigned function, unsigned device, uint32_t value) "MPS2 SCC config write: function %d device %d data 0x%" PRIx32 mps2_scc_cfg_read(unsigned function, unsigned device, uint32_t value) "MPS2 SCC config read: function %d device %d data 0x%" PRIx32 -# hw/misc/mps2_fpgaio.c +# mps2-fpgaio.c mps2_fpgaio_read(uint64_t offset, uint64_t data, unsigned size) "MPS2 FPGAIO read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" mps2_fpgaio_write(uint64_t offset, uint64_t data, unsigned size) "MPS2 FPGAIO write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" mps2_fpgaio_reset(void) "MPS2 FPGAIO: reset" mps2_fpgaio_leds(char led1, char led0) "MPS2 FPGAIO LEDs: %c%c" -# hw/misc/msf2-sysreg.c +# msf2-sysreg.c msf2_sysreg_write(uint64_t offset, uint32_t val, uint32_t prev) "msf2-sysreg write: addr 0x%08" PRIx64 " data 0x%" PRIx32 " prev 0x%" PRIx32 msf2_sysreg_read(uint64_t offset, uint32_t val) "msf2-sysreg read: addr 0x%08" PRIx64 " data 0x%08" PRIx32 msf2_sysreg_write_pll_status(void) "Invalid write to read only PLL status register" -#hw/misc/imx7_gpr.c +# imx7_gpr.c imx7_gpr_read(uint64_t offset) "addr 0x%08" PRIx64 imx7_gpr_write(uint64_t offset, uint64_t 
value) "addr 0x%08" PRIx64 "value 0x%08" PRIx64 -# hw/misc/mos6522.c +# mos6522.c mos6522_set_counter(int index, unsigned int val) "T%d.counter=%d" mos6522_get_next_irq_time(uint16_t latch, int64_t d, int64_t delta) "latch=%d counter=0x%"PRId64 " delta_next=0x%"PRId64 mos6522_set_sr_int(void) "set sr_int" mos6522_write(uint64_t addr, uint64_t val) "reg=0x%"PRIx64 " val=0x%"PRIx64 mos6522_read(uint64_t addr, unsigned val) "reg=0x%"PRIx64 " val=0x%x" -# hw/misc/tz-mpc.c +# tz-mpc.c tz_mpc_reg_read(uint32_t offset, uint64_t data, unsigned size) "TZ MPC regs read: offset 0x%x data 0x%" PRIx64 " size %u" tz_mpc_reg_write(uint32_t offset, uint64_t data, unsigned size) "TZ MPC regs write: offset 0x%x data 0x%" PRIx64 " size %u" tz_mpc_mem_blocked_read(uint64_t addr, unsigned size, bool secure) "TZ MPC blocked read: offset 0x%" PRIx64 " size %u secure %d" @@ -92,16 +92,15 @@ tz_mpc_mem_blocked_write(uint64_t addr, uint64_t data, unsigned size, bool secur tz_mpc_translate(uint64_t addr, int flags, const char *idx, const char *res) "TZ MPC translate: addr 0x%" PRIx64 " flags 0x%x iommu_idx %s: %s" tz_mpc_iommu_notify(uint64_t addr) "TZ MPC iommu: notifying UNMAP/MAP for 0x%" PRIx64 -# hw/misc/tz-msc.c +# tz-msc.c tz_msc_reset(void) "TZ MSC: reset" tz_msc_cfg_nonsec(int level) "TZ MSC: cfg_nonsec = %d" tz_msc_cfg_sec_resp(int level) "TZ MSC: cfg_sec_resp = %d" -tz_msc_irq_enable(int level) "TZ MSC: int_enable = %d" tz_msc_irq_clear(int level) "TZ MSC: int_clear = %d" tz_msc_update_irq(int level) "TZ MSC: setting irq line to %d" tz_msc_access_blocked(uint64_t offset) "TZ MSC: offset 0x%" PRIx64 " access blocked" -# hw/misc/tz-ppc.c +# tz-ppc.c tz_ppc_reset(void) "TZ PPC: reset" tz_ppc_cfg_nonsec(int n, int level) "TZ PPC: cfg_nonsec[%d] = %d" tz_ppc_cfg_ap(int n, int level) "TZ PPC: cfg_ap[%d] = %d" @@ -112,31 +111,32 @@ tz_ppc_update_irq(int level) "TZ PPC: setting irq line to %d" tz_ppc_read_blocked(int n, uint64_t offset, bool secure, bool user) "TZ PPC: port %d offset 0x%" PRIx64 " read (secure %d user %d) blocked" tz_ppc_write_blocked(int n, uint64_t offset, bool secure, bool user) "TZ PPC: port %d offset 0x%" PRIx64 " write (secure %d user %d) blocked" -# hw/misc/iotkit-secctl.c +# iotkit-secctl.c iotkit_secctl_s_read(uint32_t offset, uint64_t data, unsigned size) "IoTKit SecCtl S regs read: offset 0x%x data 0x%" PRIx64 " size %u" iotkit_secctl_s_write(uint32_t offset, uint64_t data, unsigned size) "IoTKit SecCtl S regs write: offset 0x%x data 0x%" PRIx64 " size %u" iotkit_secctl_ns_read(uint32_t offset, uint64_t data, unsigned size) "IoTKit SecCtl NS regs read: offset 0x%x data 0x%" PRIx64 " size %u" iotkit_secctl_ns_write(uint32_t offset, uint64_t data, unsigned size) "IoTKit SecCtl NS regs write: offset 0x%x data 0x%" PRIx64 " size %u" -iotkit_secctl_reset(void) "IoTKit SecCtl: reset" -# hw/misc/imx6ul_ccm.c +# imx6ul_ccm.c ccm_entry(void) "\n" ccm_freq(uint32_t freq) "freq = %d\n" ccm_clock_freq(uint32_t clock, uint32_t freq) "(Clock = %d) = %d\n" ccm_read_reg(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32 "\n" ccm_write_reg(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32 "\n" -# hw/misc/iotkit-sysctl.c +# iotkit-sysinfo.c iotkit_sysinfo_read(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysInfo read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" iotkit_sysinfo_write(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysInfo write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" + +# iotkit-sysctl.c iotkit_sysctl_read(uint64_t offset, uint64_t 
data, unsigned size) "IoTKit SysCtl read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" iotkit_sysctl_write(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysCtl write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" iotkit_sysctl_reset(void) "IoTKit SysCtl: reset" -# hw/misc/armsse-cpuid.c +# armsse-cpuid.c armsse_cpuid_read(uint64_t offset, uint64_t data, unsigned size) "SSE-200 CPU_IDENTITY read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" armsse_cpuid_write(uint64_t offset, uint64_t data, unsigned size) "SSE-200 CPU_IDENTITY write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" -# hw/misc/armsse-mhu.c +# armsse-mhu.c armsse_mhu_read(uint64_t offset, uint64_t data, unsigned size) "SSE-200 MHU read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" armsse_mhu_write(uint64_t offset, uint64_t data, unsigned size) "SSE-200 MHU write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" diff --git a/hw/net/Kconfig b/hw/net/Kconfig index c00ec03cd1..7d7bbc5d7c 100644 --- a/hw/net/Kconfig +++ b/hw/net/Kconfig @@ -28,7 +28,7 @@ config E1000_PCI config E1000E_PCI_EXPRESS bool default y if PCI_DEVICES - depends on PCI_EXPRESS + depends on PCI_EXPRESS && MSI_NONBROKEN config RTL8139_PCI bool @@ -107,7 +107,7 @@ config ETSEC config ROCKER bool default y if PCI_DEVICES - depends on PCI + depends on PCI && MSI_NONBROKEN config CAN_BUS bool diff --git a/hw/net/e1000.c b/hw/net/e1000.c index 5e144cb4e4..9b39bccfb2 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -120,6 +120,8 @@ typedef struct E1000State_st { bool mit_irq_level; /* Tracks interrupt pin level. */ uint32_t mit_ide; /* Tracks E1000_TXD_CMD_IDE bit. */ + QEMUTimer *flush_queue_timer; + /* Compatibility flags for migration to/from qemu 1.3.0 and older */ #define E1000_FLAG_AUTONEG_BIT 0 #define E1000_FLAG_MIT_BIT 1 @@ -366,6 +368,7 @@ static void e1000_reset(void *opaque) timer_del(d->autoneg_timer); timer_del(d->mit_timer); + timer_del(d->flush_queue_timer); d->mit_timer_on = 0; d->mit_irq_level = 0; d->mit_ide = 0; @@ -392,6 +395,14 @@ set_ctrl(E1000State *s, int index, uint32_t val) } static void +e1000_flush_queue_timer(void *opaque) +{ + E1000State *s = opaque; + + qemu_flush_queued_packets(qemu_get_queue(s->nic)); +} + +static void set_rx_control(E1000State *s, int index, uint32_t val) { s->mac_reg[RCTL] = val; @@ -399,7 +410,8 @@ set_rx_control(E1000State *s, int index, uint32_t val) s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1; DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT], s->mac_reg[RCTL]); - qemu_flush_queued_packets(qemu_get_queue(s->nic)); + timer_mod(s->flush_queue_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); } static void @@ -837,7 +849,7 @@ e1000_can_receive(NetClientState *nc) E1000State *s = qemu_get_nic_opaque(nc); return e1000x_rx_ready(&s->parent_obj, s->mac_reg) && - e1000_has_rxbufs(s, 1); + e1000_has_rxbufs(s, 1) && !timer_pending(s->flush_queue_timer); } static uint64_t rx_desc_base(E1000State *s) @@ -881,6 +893,10 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) return -1; } + if (timer_pending(s->flush_queue_timer)) { + return 0; + } + /* Pad to minimum Ethernet frame length */ if (size < sizeof(min_buf)) { iov_to_buf(iov, iovcnt, 0, min_buf, size); @@ -1637,6 +1653,8 @@ pci_e1000_uninit(PCIDevice *dev) timer_free(d->autoneg_timer); timer_del(d->mit_timer); timer_free(d->mit_timer); + timer_del(d->flush_queue_timer); + timer_free(d->flush_queue_timer); qemu_del_nic(d->nic); } @@ -1700,6 +1718,8 @@ static void 
pci_e1000_realize(PCIDevice *pci_dev, Error **errp) d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d); d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d); + d->flush_queue_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, + e1000_flush_queue_timer, d); } static void qdev_e1000_reset(DeviceState *dev) diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c index a6269d9463..b29e3fee49 100644 --- a/hw/net/lan9118.c +++ b/hw/net/lan9118.c @@ -14,7 +14,7 @@ #include "hw/sysbus.h" #include "net/net.h" #include "net/eth.h" -#include "hw/devices.h" +#include "hw/net/lan9118.h" #include "sysemu/sysemu.h" #include "hw/ptimer.h" #include "qemu/log.h" @@ -175,7 +175,6 @@ static const VMStateDescription vmstate_lan9118_packet = { } }; -#define TYPE_LAN9118 "lan9118" #define LAN9118(obj) OBJECT_CHECK(lan9118_state, (obj), TYPE_LAN9118) typedef struct { diff --git a/hw/net/smc91c111.c b/hw/net/smc91c111.c index 99da2d9297..d19ea0750d 100644 --- a/hw/net/smc91c111.c +++ b/hw/net/smc91c111.c @@ -10,7 +10,7 @@ #include "qemu/osdep.h" #include "hw/sysbus.h" #include "net/net.h" -#include "hw/devices.h" +#include "hw/net/smc91c111.h" #include "qemu/log.h" /* For crc32 */ #include <zlib.h> diff --git a/hw/net/trace-events b/hw/net/trace-events index 3a86004154..3cd9e122df 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -1,15 +1,15 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/net/etraxfs_eth.c +# etraxfs_eth.c mdio_phy_read(int regnum, uint16_t value) "read phy_reg:%d value:0x%04x" mdio_phy_write(int regnum, uint16_t value) "write phy_reg:%d value:0x%04x" mdio_bitbang(bool mdc, bool mdio, int state, uint16_t cnt, unsigned int drive) "bitbang mdc=%u mdio=%u state=%d cnt=%u drv=%d" -# hw/net/lance.c +# lance.c lance_mem_readw(uint64_t addr, uint32_t ret) "addr=0x%"PRIx64"val=0x%04x" lance_mem_writew(uint64_t addr, uint32_t val) "addr=0x%"PRIx64"val=0x%04x" -# hw/net/milkymist-minimac2.c +# milkymist-minimac2.c milkymist_minimac2_memory_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_minimac2_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_minimac2_mdio_write(uint8_t phy_addr, uint8_t addr, uint16_t value) "phy_addr 0x%02x addr 0x%02x value 0x%04x" @@ -21,20 +21,20 @@ milkymist_minimac2_raise_irq_rx(void) "Raise IRQ RX" milkymist_minimac2_lower_irq_rx(void) "Lower IRQ RX" milkymist_minimac2_pulse_irq_tx(void) "Pulse IRQ TX" -# hw/net/mipsnet.c +# mipsnet.c mipsnet_send(uint32_t size) "sending len=%u" mipsnet_receive(uint32_t size) "receiving len=%u" mipsnet_read(uint64_t addr, uint32_t val) "read addr=0x%" PRIx64 " val=0x%x" mipsnet_write(uint64_t addr, uint64_t val) "write addr=0x%" PRIx64 " val=0x%" PRIx64 mipsnet_irq(uint32_t isr, uint32_t intctl) "set irq to %d (0x%02x)" -# hw/net/ne2000.c +# ne2000.c ne2000_read(uint64_t addr, uint64_t val) "read addr=0x%" PRIx64 " val=0x%" PRIx64 ne2000_write(uint64_t addr, uint64_t val) "write addr=0x%" PRIx64 " val=0x%" PRIx64 ne2000_ioport_read(uint64_t addr, uint64_t val) "io read addr=0x%02" PRIx64 " val=0x%02" PRIx64 ne2000_ioport_write(uint64_t addr, uint64_t val) "io write addr=0x%02" PRIx64 " val=0x%02" PRIx64 -# hw/net/opencores_eth.c +# opencores_eth.c open_eth_mii_write(unsigned idx, uint16_t v) "MII[0x%02x] <- 0x%04x" open_eth_mii_read(unsigned idx, uint16_t v) "MII[0x%02x] -> 0x%04x" open_eth_update_irq(uint32_t v) "IRQ <- 0x%x" @@ -48,7 +48,7 @@ open_eth_reg_write(uint32_t addr, uint32_t v) "MAC[0x%02x] <- 0x%08x" open_eth_desc_read(uint32_t 
addr, uint32_t v) "DESC[0x%04x] -> 0x%08x" open_eth_desc_write(uint32_t addr, uint32_t v) "DESC[0x%04x] <- 0x%08x" -# hw/net/pcnet.c +# pcnet.c pcnet_s_reset(void *s) "s=%p" pcnet_user_int(void *s) "s=%p" pcnet_isr_change(void *s, uint32_t isr, uint32_t isr_old) "s=%p INTA=%d<=%d" @@ -56,13 +56,13 @@ pcnet_init(void *s, uint64_t init_addr) "s=%p init_addr=0x%"PRIx64 pcnet_rlen_tlen(void *s, uint32_t rlen, uint32_t tlen) "s=%p rlen=%d tlen=%d" pcnet_ss32_rdra_tdra(void *s, uint32_t ss32, uint32_t rdra, uint32_t rcvrl, uint32_t tdra, uint32_t xmtrl) "s=%p ss32=%d rdra=0x%08x[%d] tdra=0x%08x[%d]" -# hw/net/pcnet-pci.c +# pcnet-pci.c pcnet_aprom_writeb(void *opaque, uint32_t addr, uint32_t val) "opaque=%p addr=0x%08x val=0x%02x" pcnet_aprom_readb(void *opaque, uint32_t addr, uint32_t val) "opaque=%p addr=0x%08x val=0x%02x" pcnet_ioport_read(void *opaque, uint64_t addr, unsigned size) "opaque=%p addr=0x%"PRIx64" size=%d" pcnet_ioport_write(void *opaque, uint64_t addr, uint64_t data, unsigned size) "opaque=%p addr=0x%"PRIx64" data=0x%"PRIx64" size=%d" -# hw/net/net_rx_pkt.c +# net_rx_pkt.c net_rx_pkt_parsed(bool ip4, bool ip6, bool udp, bool tcp, size_t l3o, size_t l4o, size_t l5o) "RX packet parsed: ip4: %d, ip6: %d, udp: %d, tcp: %d, l3 offset: %zu, l4 offset: %zu, l5 offset: %zu" net_rx_pkt_l4_csum_validate_entry(void) "Starting L4 checksum validation" net_rx_pkt_l4_csum_validate_not_xxp(void) "Not a TCP/UDP packet" @@ -98,10 +98,10 @@ net_rx_pkt_rss_ip6_ex(void) "Calculating IPv6/EX RSS hash" net_rx_pkt_rss_hash(size_t rss_length, uint32_t rss_hash) "RSS hash for %zu bytes: 0x%X" net_rx_pkt_rss_add_chunk(void* ptr, size_t size, size_t input_offset) "Add RSS chunk %p, %zu bytes, RSS input offset %zu bytes" -# hw/net/e1000.c +# e1000.c e1000_receiver_overrun(size_t s, uint32_t rdh, uint32_t rdt) "Receiver overrun: dropped packet of %zu bytes, RDH=%u, RDT=%u" -# hw/net/e1000x_common.c +# e1000x_common.c e1000x_rx_can_recv_disabled(bool link_up, bool rx_enabled, bool pci_master) "link_up: %d, rx_enabled %d, pci_master %d" e1000x_vlan_is_vlan_pkt(bool is_vlan_pkt, uint16_t eth_proto, uint16_t vet) "Is VLAN packet: %d, ETH proto: 0x%X, VET: 0x%X" e1000x_rx_flt_ucast_match(uint32_t idx, uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x" @@ -114,7 +114,7 @@ e1000x_mac_indicate(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, e1000x_link_negotiation_start(void) "Start link auto negotiation" e1000x_link_negotiation_done(void) "Auto negotiation is completed" -# hw/net/e1000e_core.c +# e1000e_core.c e1000e_core_write(uint64_t index, uint32_t size, uint64_t val) "Write to register 0x%"PRIx64", %d byte(s), value: 0x%"PRIx64 e1000e_core_read(uint64_t index, uint32_t size, uint64_t val) "Read from register 0x%"PRIx64", %d byte(s), value: 0x%"PRIx64 e1000e_core_mdic_read(uint8_t page, uint32_t addr, uint32_t data) "MDIC READ: PHY[%u][%u] = 0x%x" @@ -242,10 +242,12 @@ e1000e_irq_msix_pending_clearing(uint32_t cause, uint32_t int_cfg, uint32_t vec) e1000e_wrn_msix_vec_wrong(uint32_t cause, uint32_t cfg) "Invalid configuration for cause 0x%x: 0x%x" e1000e_wrn_msix_invalid(uint32_t cause, uint32_t cfg) "Invalid entry for cause 0x%x: 0x%x" -e1000e_mac_set_permanent(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "Set permanent MAC: %02x:%02x:%02x:%02x:%02x:%02x" e1000e_mac_set_sw(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "Set SW MAC: %02x:%02x:%02x:%02x:%02x:%02x" -# 
hw/net/e1000e.c +e1000e_vm_state_running(void) "VM state is running" +e1000e_vm_state_stopped(void) "VM state is stopped" + +# e1000e.c e1000e_cb_pci_realize(void) "E1000E PCI realize entry" e1000e_cb_pci_uninit(void) "E1000E PCI unit entry" e1000e_cb_qdev_reset(void) "E1000E qdev reset entry" @@ -266,12 +268,10 @@ e1000e_msi_init_fail(int32_t res) "Failed to initialize MSI, error %d" e1000e_msix_init_fail(int32_t res) "Failed to initialize MSI-X, error %d" e1000e_msix_use_vector_fail(uint32_t vec, int32_t res) "Failed to use MSI-X vector %d, error %d" +e1000e_mac_set_permanent(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "Set permanent MAC: %02x:%02x:%02x:%02x:%02x:%02x" e1000e_cfg_support_virtio(bool support) "Virtio header supported: %d" -e1000e_vm_state_running(void) "VM state is running" -e1000e_vm_state_stopped(void) "VM state is stopped" - -# hw/net/spapr_llan.c +# spapr_llan.c spapr_vlan_get_rx_bd_from_pool_found(int pool, int32_t count, uint32_t rx_bufs) "pool=%d count=%"PRId32" rxbufs=%"PRIu32 spapr_vlan_get_rx_bd_from_page(int buf_ptr, uint64_t bd) "use_buf_ptr=%d bd=0x%016"PRIx64 spapr_vlan_get_rx_bd_from_page_found(uint32_t use_buf_ptr, uint32_t rx_bufs) "ptr=%"PRIu32" rxbufs=%"PRIu32 @@ -287,7 +287,7 @@ spapr_vlan_h_send_logical_lan_rxbufs(uint32_t rx_bufs) "rxbufs = %"PRIu32 spapr_vlan_h_send_logical_lan_buf_desc(uint64_t buf) " buf desc: 0x%"PRIx64 spapr_vlan_h_send_logical_lan_total(int nbufs, unsigned total_len) "%d buffers, total length 0x%x" -# hw/net/sungem.c +# sungem.c sungem_tx_checksum(uint16_t start, uint16_t off) "TX checksumming from byte %d, inserting at %d" sungem_tx_checksum_oob(void) "TX checksum out of packet bounds" sungem_tx_unfinished(void) "TX packet started without finishing the previous one" @@ -331,7 +331,7 @@ sungem_mmio_mif_read(uint64_t addr, uint64_t val) "MMIO mif read from 0x%"PRIx64 sungem_mmio_pcs_write(uint64_t addr, uint64_t val) "MMIO pcs write to 0x%"PRIx64" val=0x%"PRIx64 sungem_mmio_pcs_read(uint64_t addr, uint64_t val) "MMIO pcs read from 0x%"PRIx64" val=0x%"PRIx64 -# hw/net/sunhme.c +# sunhme.c sunhme_seb_write(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 sunhme_seb_read(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 sunhme_etx_write(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 @@ -360,7 +360,7 @@ sunhme_rx_filter_accept(void) "accepting incoming frame" sunhme_rx_desc(uint32_t addr, int offset, uint32_t status, int len, int cr, int nr) "addr 0x%"PRIx32"(+0x%x) status 0x%"PRIx32 " len %d (ring %d/%d)" sunhme_rx_xsum_calc(uint16_t xsum) "calculated incoming xsum as 0x%x" -# hw/net/virtio-net.c +# virtio-net.c virtio_net_announce_notify(void) "" virtio_net_announce_timer(int round) "%d" virtio_net_handle_announce(int round) "%d" diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 7e2c2a6f6a..ffe0872fff 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -2281,7 +2281,7 @@ static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues) /* * We always need to remove and add ctrl vq if * old_num_queues != new_num_queues. Remove ctrl_vq first, - * and then we only enter one of the following too loops. + * and then we only enter one of the following two loops. 
*/ virtio_del_queue(vdev, old_num_queues - 1); diff --git a/hw/nios2/Kconfig b/hw/nios2/Kconfig index ab953e0077..b10ea640da 100644 --- a/hw/nios2/Kconfig +++ b/hw/nios2/Kconfig @@ -4,5 +4,9 @@ config NIOS2_10M50 select SERIAL select ALTERA_TIMER +config NIOS2_GENERIC_NOMMU + bool + select NIOS2 + config NIOS2 bool diff --git a/hw/nios2/Makefile.objs b/hw/nios2/Makefile.objs index 89a419a9f5..3e017981ba 100644 --- a/hw/nios2/Makefile.objs +++ b/hw/nios2/Makefile.objs @@ -1,2 +1,3 @@ obj-y = boot.o cpu_pic.o obj-$(CONFIG_NIOS2_10M50) += 10m50_devboard.o +obj-$(CONFIG_NIOS2_GENERIC_NOMMU) += generic_nommu.o diff --git a/hw/nios2/boot.c b/hw/nios2/boot.c index 5f0ab2fbb9..276068c842 100644 --- a/hw/nios2/boot.c +++ b/hw/nios2/boot.c @@ -138,7 +138,6 @@ void nios2_load_kernel(Nios2CPU *cpu, hwaddr ddr_base, if (kernel_filename) { int kernel_size, fdt_size; uint64_t entry, low, high; - uint32_t base32; int big_endian = 0; #ifdef TARGET_WORDS_BIGENDIAN @@ -149,17 +148,24 @@ void nios2_load_kernel(Nios2CPU *cpu, hwaddr ddr_base, kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &entry, &low, &high, big_endian, EM_ALTERA_NIOS2, 0, 0); - base32 = entry; - if (base32 == 0xc0000000) { + if ((uint32_t)entry == 0xc0000000) { + /* + * The Nios II processor reference guide documents that the + * kernel is placed at virtual memory address 0xc0000000, + * and we've got something that points there. Reload it + * and adjust the entry to get the address in physical RAM. + */ kernel_size = load_elf(kernel_filename, NULL, translate_kernel_address, NULL, &entry, NULL, NULL, big_endian, EM_ALTERA_NIOS2, 0, 0); + boot_info.bootstrap_pc = ddr_base + 0xc0000000 + + (entry & 0x07ffffff); + } else { + /* Use the entry point in the ELF image. */ + boot_info.bootstrap_pc = (uint32_t)entry; } - /* Always boot into physical ram. */ - boot_info.bootstrap_pc = ddr_base + 0xc0000000 + (entry & 0x07ffffff); - /* If it wasn't an ELF image, try an u-boot image. */ if (kernel_size < 0) { hwaddr uentry, loadaddr = LOAD_UIMAGE_LOADADDR_INVALID; diff --git a/hw/nios2/generic_nommu.c b/hw/nios2/generic_nommu.c new file mode 100644 index 0000000000..1788ffa7a4 --- /dev/null +++ b/hw/nios2/generic_nommu.c @@ -0,0 +1,105 @@ +/* + * Generic simulator target with no MMU or devices. This emulation is + * compatible with the libgloss qemu-hosted.ld linker script for using + * QEMU as an instruction set simulator. + * + * Copyright (c) 2018-2019 Mentor Graphics + * + * Copyright (c) 2016 Marek Vasut <marek.vasut@gmail.com> + * + * Based on LabX device code + * + * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "cpu.h" + +#include "hw/sysbus.h" +#include "hw/hw.h" +#include "hw/char/serial.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" +#include "qemu/config-file.h" + +#include "boot.h" + +#define BINARY_DEVICE_TREE_FILE "generic-nommu.dtb" + +static void nios2_generic_nommu_init(MachineState *machine) +{ + Nios2CPU *cpu; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *phys_tcm = g_new(MemoryRegion, 1); + MemoryRegion *phys_tcm_alias = g_new(MemoryRegion, 1); + MemoryRegion *phys_ram = g_new(MemoryRegion, 1); + MemoryRegion *phys_ram_alias = g_new(MemoryRegion, 1); + ram_addr_t tcm_base = 0x0; + ram_addr_t tcm_size = 0x1000; /* 1 kiB, but QEMU limit is 4 kiB */ + ram_addr_t ram_base = 0x10000000; + ram_addr_t ram_size = 0x08000000; + + /* Physical TCM (tb_ram_1k) with alias at 0xc0000000 */ + memory_region_init_ram(phys_tcm, NULL, "nios2.tcm", tcm_size, + &error_abort); + memory_region_init_alias(phys_tcm_alias, NULL, "nios2.tcm.alias", + phys_tcm, 0, tcm_size); + memory_region_add_subregion(address_space_mem, tcm_base, phys_tcm); + memory_region_add_subregion(address_space_mem, 0xc0000000 + tcm_base, + phys_tcm_alias); + + /* Physical DRAM with alias at 0xc0000000 */ + memory_region_init_ram(phys_ram, NULL, "nios2.ram", ram_size, + &error_abort); + memory_region_init_alias(phys_ram_alias, NULL, "nios2.ram.alias", + phys_ram, 0, ram_size); + memory_region_add_subregion(address_space_mem, ram_base, phys_ram); + memory_region_add_subregion(address_space_mem, 0xc0000000 + ram_base, + phys_ram_alias); + + cpu = NIOS2_CPU(cpu_create(TYPE_NIOS2_CPU)); + + /* Remove MMU */ + cpu->mmu_present = false; + + /* Reset vector is the first 32 bytes of RAM. */ + cpu->reset_addr = ram_base; + + /* The interrupt vector comes right after reset. */ + cpu->exception_addr = ram_base + 0x20; + + /* + * The linker script does have a TLB miss memory region declared, + * but this should never be used with no MMU. + */ + cpu->fast_tlb_miss_addr = 0x7fff400; + + nios2_load_kernel(cpu, ram_base, ram_size, machine->initrd_filename, + BINARY_DEVICE_TREE_FILE, NULL); +} + +static void nios2_generic_nommu_machine_init(struct MachineClass *mc) +{ + mc->desc = "Generic NOMMU Nios II design"; + mc->init = nios2_generic_nommu_init; +} + +DEFINE_MACHINE("nios2-generic-nommu", nios2_generic_nommu_machine_init); diff --git a/hw/nvram/trace-events b/hw/nvram/trace-events index 6b55ba7a09..e191991e2a 100644 --- a/hw/nvram/trace-events +++ b/hw/nvram/trace-events @@ -1,10 +1,10 @@ # See docs/devel/tracing.txt for syntax documentation. 
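The new generic_nommu board maps both the TCM and the DRAM twice, once at their physical bases and once behind a 0xc0000000 alias, which is what lets the boot.c change above keep executing a kernel linked at the 0xc0000000 kernel region. A sketch of the entry-point adjustment that pairing relies on (the helper name is illustrative; the constants are the ones used in boot.c):

/*
 * A kernel linked at 0xc0000000 is loaded into RAM at ddr_base, so only
 * its low 27 bits select the offset; execution then continues in the
 * 0xc0000000 alias window, which the board maps onto the same RAM.
 */
static inline uint32_t nios2_adjust_kernel_entry(uint32_t entry,
                                                 uint32_t ddr_base)
{
    return ddr_base + 0xc0000000 + (entry & 0x07ffffff);
}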
-# hw/nvram/ds1225y.c +# ds1225y.c nvram_read(uint32_t addr, uint32_t ret) "read addr %d: 0x%02x" nvram_write(uint32_t addr, uint32_t old, uint32_t val) "write addr %d: 0x%02x -> 0x%02x" -# hw/nvram/fw_cfg.c +# fw_cfg.c fw_cfg_select(void *s, uint16_t key, int ret) "%p key %d = %d" fw_cfg_read(void *s, uint64_t ret) "%p = 0x%"PRIx64 fw_cfg_add_file(void *s, int index, char *name, size_t len) "%p #%d: %s (%zd bytes)" diff --git a/hw/pci-bridge/Kconfig b/hw/pci-bridge/Kconfig index b167b98497..a51ec716f5 100644 --- a/hw/pci-bridge/Kconfig +++ b/hw/pci-bridge/Kconfig @@ -1,7 +1,7 @@ config PCIE_PORT bool default y if PCI_DEVICES - depends on PCI_EXPRESS + depends on PCI_EXPRESS && MSI_NONBROKEN config PXB bool @@ -10,12 +10,12 @@ config PXB config XIO3130 bool default y if PCI_DEVICES - depends on PCI_EXPRESS + depends on PCI_EXPRESS && MSI_NONBROKEN config IOH3420 bool default y if PCI_DEVICES - depends on PCI_EXPRESS + depends on PCI_EXPRESS && MSI_NONBROKEN config I82801B11 bool diff --git a/hw/pci-bridge/gen_pcie_root_port.c b/hw/pci-bridge/gen_pcie_root_port.c index 9766edb445..26bda73eae 100644 --- a/hw/pci-bridge/gen_pcie_root_port.c +++ b/hw/pci-bridge/gen_pcie_root_port.c @@ -20,6 +20,9 @@ OBJECT_CHECK(GenPCIERootPort, (obj), TYPE_GEN_PCIE_ROOT_PORT) #define GEN_PCIE_ROOT_PORT_AER_OFFSET 0x100 +#define GEN_PCIE_ROOT_PORT_ACS_OFFSET \ + (GEN_PCIE_ROOT_PORT_AER_OFFSET + PCI_ERR_SIZEOF) + #define GEN_PCIE_ROOT_PORT_MSIX_NR_VECTOR 1 typedef struct GenPCIERootPort { @@ -149,6 +152,7 @@ static void gen_rp_dev_class_init(ObjectClass *klass, void *data) rpc->interrupts_init = gen_rp_interrupts_init; rpc->interrupts_uninit = gen_rp_interrupts_uninit; rpc->aer_offset = GEN_PCIE_ROOT_PORT_AER_OFFSET; + rpc->acs_offset = GEN_PCIE_ROOT_PORT_ACS_OFFSET; } static const TypeInfo gen_rp_dev_info = { diff --git a/hw/pci-bridge/pcie_root_port.c b/hw/pci-bridge/pcie_root_port.c index 34ad76743c..e94d918b6d 100644 --- a/hw/pci-bridge/pcie_root_port.c +++ b/hw/pci-bridge/pcie_root_port.c @@ -47,6 +47,7 @@ static void rp_reset(DeviceState *qdev) pcie_cap_deverr_reset(d); pcie_cap_slot_reset(d); pcie_cap_arifwd_reset(d); + pcie_acs_reset(d); pcie_aer_root_reset(d); pci_bridge_reset(qdev); pci_bridge_disable_base_limit(d); @@ -106,6 +107,9 @@ static void rp_realize(PCIDevice *d, Error **errp) pcie_aer_root_init(d); rp_aer_vector_update(d); + if (rpc->acs_offset) { + pcie_acs_init(d, rpc->acs_offset); + } return; err: diff --git a/hw/pci-host/Kconfig b/hw/pci-host/Kconfig index b39ea297ba..8c16d96b3f 100644 --- a/hw/pci-host/Kconfig +++ b/hw/pci-host/Kconfig @@ -49,3 +49,4 @@ config PCI_EXPRESS_XILINX config PCI_EXPRESS_DESIGNWARE bool select PCI_EXPRESS + select MSI_NONBROKEN diff --git a/hw/pci-host/trace-events b/hw/pci-host/trace-events index dd7a398e96..d19ca9aef6 100644 --- a/hw/pci-host/trace-events +++ b/hw/pci-host/trace-events @@ -1,9 +1,9 @@ # See docs/devel/tracing.txt for syntax documentation. 
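The root-port changes above make ACS opt-in per subclass: the generic PCIe root port computes an offset right behind its AER block and stores it in the class, and rp_realize()/rp_reset() only touch the capability when that offset is non-zero. A sketch of how another root-port variant might opt in (the MY_RP_ names are invented; PCI_ERR_SIZEOF is the existing AER capability size):

#define MY_RP_AER_OFFSET  0x100
#define MY_RP_ACS_OFFSET  (MY_RP_AER_OFFSET + PCI_ERR_SIZEOF)

static void my_rp_class_init(ObjectClass *klass, void *data)
{
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass);

    rpc->aer_offset = MY_RP_AER_OFFSET;
    rpc->acs_offset = MY_RP_ACS_OFFSET; /* leave at 0 to skip ACS entirely */
}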
-# hw/pci-host/grackle.c +# grackle.c grackle_set_irq(int irq_num, int level) "set_irq num %d level %d" -# hw/pci-host/sabre.c +# sabre.c sabre_set_request(int irq_num) "request irq %d" sabre_clear_request(int irq_num) "clear request irq %d" sabre_config_write(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" val 0x%"PRIx64 @@ -13,7 +13,7 @@ sabre_pci_config_read(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" val 0x%"PRI sabre_pci_set_irq(int irq_num, int level) "set irq_in %d level %d" sabre_pci_set_obio_irq(int irq_num, int level) "set irq %d level %d" -# hw/pci-host/uninorth.c +# uninorth.c unin_set_irq(int irq_num, int level) "setting INT %d = %d" unin_get_config_reg(uint32_t reg, uint32_t addr, uint32_t retval) "converted config space accessor 0x%"PRIx32 "/0x%"PRIx32 " -> 0x%"PRIx32 unin_data_write(uint64_t addr, unsigned len, uint64_t val) "write addr 0x%"PRIx64 " len %d val 0x%"PRIx64 diff --git a/hw/pci/Kconfig b/hw/pci/Kconfig index 3b8638b51d..77f8b005ff 100644 --- a/hw/pci/Kconfig +++ b/hw/pci/Kconfig @@ -7,3 +7,9 @@ config PCI_EXPRESS config PCI_DEVICES bool + +config MSI_NONBROKEN + # selected by interrupt controllers that do not support MSI, + # or support it and have a good implementation. See commit + # 47d2b0f33c664533b8dbd5cb17faa8e6a01afe1f. + bool diff --git a/hw/pci/pci.c b/hw/pci/pci.c index 35451c1e99..a78023f669 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -147,6 +147,11 @@ static uint16_t pcibus_numa_node(PCIBus *bus) return NUMA_NODE_UNASSIGNED; } +static bool pcibus_allows_extended_config_space(PCIBus *bus) +{ + return false; +} + static void pci_bus_class_init(ObjectClass *klass, void *data) { BusClass *k = BUS_CLASS(klass); @@ -162,6 +167,7 @@ static void pci_bus_class_init(ObjectClass *klass, void *data) pbc->is_root = pcibus_is_root; pbc->bus_num = pcibus_num; pbc->numa_node = pcibus_numa_node; + pbc->allows_extended_config_space = pcibus_allows_extended_config_space; } static const TypeInfo pci_bus_info = { @@ -182,9 +188,22 @@ static const TypeInfo conventional_pci_interface_info = { .parent = TYPE_INTERFACE, }; +static bool pciebus_allows_extended_config_space(PCIBus *bus) +{ + return true; +} + +static void pcie_bus_class_init(ObjectClass *klass, void *data) +{ + PCIBusClass *pbc = PCI_BUS_CLASS(klass); + + pbc->allows_extended_config_space = pciebus_allows_extended_config_space; +} + static const TypeInfo pcie_bus_info = { .name = TYPE_PCIE_BUS, .parent = TYPE_PCI_BUS, + .class_init = pcie_bus_class_init, }; static PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num); @@ -401,6 +420,11 @@ bool pci_bus_is_root(PCIBus *bus) return PCI_BUS_GET_CLASS(bus)->is_root(bus); } +bool pci_bus_allows_extended_config_space(PCIBus *bus) +{ + return PCI_BUS_GET_CLASS(bus)->allows_extended_config_space(bus); +} + void pci_root_bus_new_inplace(PCIBus *bus, size_t bus_size, DeviceState *parent, const char *name, MemoryRegion *address_space_mem, @@ -927,7 +951,7 @@ static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache) result = PCI_BUILD_BDF(bus_n, 0); break; default: - error_printf("Invalid PCI requester ID cache type: %d\n", + error_report("Invalid PCI requester ID cache type: %d", cache->type); exit(1); break; @@ -1532,7 +1556,7 @@ void pci_device_set_intx_routing_notifier(PCIDevice *dev, */ int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin) { - return (pin + PCI_SLOT(pci_dev->devfn)) % PCI_NUM_PINS; + return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin); } /***********************************************************/ diff --git a/hw/pci/pci_host.c 
b/hw/pci/pci_host.c index 5f5345dbac..9d64b2e12f 100644 --- a/hw/pci/pci_host.c +++ b/hw/pci/pci_host.c @@ -54,7 +54,7 @@ static inline PCIDevice *pci_dev_find_by_addr(PCIBus *bus, uint32_t addr) static void pci_adjust_config_limit(PCIBus *bus, uint32_t *limit) { if (*limit > PCI_CONFIG_SPACE_SIZE) { - if (!pci_bus_is_express(bus)) { + if (!pci_bus_allows_extended_config_space(bus)) { *limit = PCI_CONFIG_SPACE_SIZE; return; } diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index 640f678773..cf1ca30f93 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -914,3 +914,41 @@ void pcie_ats_init(PCIDevice *dev, uint16_t offset) pci_set_word(dev->wmask + dev->exp.ats_cap + PCI_ATS_CTRL, 0x800f); } + +/* ACS (Access Control Services) */ +void pcie_acs_init(PCIDevice *dev, uint16_t offset) +{ + bool is_downstream = pci_is_express_downstream_port(dev); + uint16_t cap_bits = 0; + + /* For endpoints, only multifunction devs may have an ACS capability: */ + assert(is_downstream || + (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) || + PCI_FUNC(dev->devfn)); + + pcie_add_capability(dev, PCI_EXT_CAP_ID_ACS, PCI_ACS_VER, offset, + PCI_ACS_SIZEOF); + dev->exp.acs_cap = offset; + + if (is_downstream) { + /* + * Downstream ports must implement SV, TB, RR, CR, UF, and DT (with + * caveats on the latter four that we ignore for simplicity). + * Endpoints may also implement a subset of ACS capabilities, + * but these are optional if the endpoint does not support + * peer-to-peer between functions and thus omitted here. + */ + cap_bits = PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | + PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT; + } + + pci_set_word(dev->config + offset + PCI_ACS_CAP, cap_bits); + pci_set_word(dev->wmask + offset + PCI_ACS_CTRL, cap_bits); +} + +void pcie_acs_reset(PCIDevice *dev) +{ + if (dev->exp.acs_cap) { + pci_set_word(dev->config + dev->exp.acs_cap + PCI_ACS_CTRL, 0); + } +} diff --git a/hw/pci/trace-events b/hw/pci/trace-events index f68c178afc..def4b3926d 100644 --- a/hw/pci/trace-events +++ b/hw/pci/trace-events @@ -1,12 +1,12 @@ # See docs/devel/tracing.txt for syntax documentation. 
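pcie_acs_init() above writes the capability bits into both config space (the read-only ACS Capability register) and wmask (the guest-writable ACS Control register), and pcie_acs_reset() clears the control register again. A device that wants the capability therefore needs both a realize-time init and a reset hook; a sketch for a hypothetical multifunction endpoint (the 0x148 offset is an arbitrary free slot chosen for illustration):

static void my_ep_realize(PCIDevice *dev, Error **errp)
{
    /* ... pcie_endpoint_cap_init(), AER, etc. ... */
    pcie_acs_init(dev, 0x148);   /* asserts unless the device is a
                                    downstream port or multifunction */
}

static void my_ep_reset(DeviceState *qdev)
{
    PCIDevice *dev = PCI_DEVICE(qdev);

    pcie_acs_reset(dev);         /* drop guest-written control bits */
}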
-# hw/pci/pci.c +# pci.c pci_update_mappings_del(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 pci_update_mappings_add(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 -# hw/pci/pci_host.c +# pci_host.c pci_cfg_read(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x -> 0x%x" pci_cfg_write(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x <- 0x%x" -# hw/pci/msix.c +# msix.c msix_write_config(char *name, bool enabled, bool masked) "dev %s enabled %d masked %d" diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig index 2b83637511..a3465155f0 100644 --- a/hw/ppc/Kconfig +++ b/hw/ppc/Kconfig @@ -2,12 +2,14 @@ config PSERIES bool imply PCI_DEVICES imply TEST_DEVICES + imply VIRTIO_VGA select DIMM select PCI select SPAPR_VSCSI select VFIO if LINUX # needed by spapr_pci_vfio.c select XICS_SPAPR select XIVE_SPAPR + select MSI_NONBROKEN config SPAPR_RNG bool @@ -36,6 +38,7 @@ config PPC440 bool imply PCI_DEVICES imply TEST_DEVICES + imply E1000_PCI select PCI_EXPRESS select PPC4XX select SERIAL @@ -63,7 +66,6 @@ config PREP imply TEST_DEVICES select CS4231A select PREP_PCI - select I82374 select I82378 select LSI_SCSI_PCI select M48T59 @@ -97,6 +99,7 @@ config MAC_NEWWORLD config E500 bool imply AT24C + imply VIRTIO_PCI select ETSEC select OPENPIC select PLATFORM_BUS diff --git a/hw/ppc/Makefile.objs b/hw/ppc/Makefile.objs index 1111b218a0..636e717f20 100644 --- a/hw/ppc/Makefile.objs +++ b/hw/ppc/Makefile.objs @@ -9,7 +9,7 @@ obj-$(CONFIG_SPAPR_RNG) += spapr_rng.o # IBM PowerNV obj-$(CONFIG_POWERNV) += pnv.o pnv_xscom.o pnv_core.o pnv_lpc.o pnv_psi.o pnv_occ.o pnv_bmc.o ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES)$(CONFIG_LINUX), yyy) -obj-y += spapr_pci_vfio.o +obj-y += spapr_pci_vfio.o spapr_pci_nvlink2.o endif obj-$(CONFIG_PSERIES) += spapr_rtas_ddw.o # PowerPC 4xx boards diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 8be4d4cbf7..dfb4ea5742 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -755,7 +755,7 @@ static void pnv_chip_power9_intc_create(PnvChip *chip, PowerPCCPU *cpu, * controller object is initialized afterwards. Hopefully, it's * only used at runtime. 
*/ - obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(&chip9->xive), errp); + obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(&chip9->xive), &local_err); if (local_err) { error_propagate(errp, local_err); return; diff --git a/hw/ppc/pnv_psi.c b/hw/ppc/pnv_psi.c index 5a923e4151..5345c8389e 100644 --- a/hw/ppc/pnv_psi.c +++ b/hw/ppc/pnv_psi.c @@ -786,7 +786,7 @@ static const MemoryRegionOps pnv_psi_p9_xscom_ops = { static void pnv_psi_power9_irq_set(PnvPsi *psi, int irq, bool state) { - uint32_t irq_method = psi->regs[PSIHB_REG(PSIHB9_INTERRUPT_CONTROL)]; + uint64_t irq_method = psi->regs[PSIHB_REG(PSIHB9_INTERRUPT_CONTROL)]; if (irq > PSIHB9_NUM_IRQS) { qemu_log_mask(LOG_GUEST_ERROR, "PSI: Unsupported irq %d\n", irq); diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index 49d57469fb..ad20584f26 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -1101,7 +1101,7 @@ clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq) tb_env = g_malloc0(sizeof(ppc_tb_t)); env->tb_env = tb_env; tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED; - if (env->insns_flags & PPC_SEGMENT_64B) { + if (is_book3s_arch2x(env)) { /* All Book3S 64bit CPUs implement level based DEC logic */ tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL; } diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index 847d320465..b7f459d475 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -40,7 +40,6 @@ #include "hw/ide.h" #include "hw/loader.h" #include "hw/timer/mc146818rtc.h" -#include "hw/input/i8042.h" #include "hw/isa/pc87312.h" #include "hw/net/ne2000-isa.h" #include "sysemu/arch_init.h" diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 6c16d6cfaf..2ef3ce4362 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1034,12 +1034,13 @@ static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt) 0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE), cpu_to_be32(max_cpus / smp_threads), }; + uint32_t maxdomain = cpu_to_be32(spapr->gpu_numa_id > 1 ? 1 : 0); uint32_t maxdomains[] = { cpu_to_be32(4), - cpu_to_be32(0), - cpu_to_be32(0), - cpu_to_be32(0), - cpu_to_be32(nb_numa_nodes ? 
nb_numa_nodes : 1), + maxdomain, + maxdomain, + maxdomain, + cpu_to_be32(spapr->gpu_numa_id), }; _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas")); @@ -1252,38 +1253,8 @@ static void *spapr_build_fdt(SpaprMachineState *spapr) _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)")); _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries")); - /* - * Add info to guest to indentify which host is it being run on - * and what is the uuid of the guest - */ - if (spapr->host_model && !g_str_equal(spapr->host_model, "none")) { - if (g_str_equal(spapr->host_model, "passthrough")) { - /* -M host-model=passthrough */ - if (kvmppc_get_host_model(&buf)) { - _FDT(fdt_setprop_string(fdt, 0, "host-model", buf)); - g_free(buf); - } - } else { - /* -M host-model=<user-string> */ - _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model)); - } - } - - if (spapr->host_serial && !g_str_equal(spapr->host_serial, "none")) { - if (g_str_equal(spapr->host_serial, "passthrough")) { - /* -M host-serial=passthrough */ - if (kvmppc_get_host_serial(&buf)) { - _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf)); - g_free(buf); - } - } else { - /* -M host-serial=<user-string> */ - _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial)); - } - } - + /* Guest UUID & Name*/ buf = qemu_uuid_unparse_strdup(&qemu_uuid); - _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf)); if (qemu_uuid_set) { _FDT(fdt_setprop_string(fdt, 0, "system-id", buf)); @@ -1295,6 +1266,21 @@ static void *spapr_build_fdt(SpaprMachineState *spapr) qemu_get_vm_name())); } + /* Host Model & Serial Number */ + if (spapr->host_model) { + _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model)); + } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) { + _FDT(fdt_setprop_string(fdt, 0, "host-model", buf)); + g_free(buf); + } + + if (spapr->host_serial) { + _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial)); + } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) { + _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf)); + g_free(buf); + } + _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2)); _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2)); @@ -1534,10 +1520,10 @@ static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp, /* Nothing to do for qemu managed HPT */ } -static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex, - uint64_t pte0, uint64_t pte1) +void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex, + uint64_t pte0, uint64_t pte1) { - SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); + SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp); hwaddr offset = ptex * HASH_PTE_SIZE_64; if (!spapr->htab) { @@ -1565,6 +1551,38 @@ static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex, } } +static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex, + uint64_t pte1) +{ + hwaddr offset = ptex * HASH_PTE_SIZE_64 + 15; + SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); + + if (!spapr->htab) { + /* There should always be a hash table when this is called */ + error_report("spapr_hpte_set_c called with no hash table !"); + return; + } + + /* The HW performs a non-atomic byte update */ + stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80); +} + +static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex, + uint64_t pte1) +{ + hwaddr offset = ptex * HASH_PTE_SIZE_64 + 14; + SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); + + if (!spapr->htab) { + /* There should always be a hash table when this is called */ + 
error_report("spapr_hpte_set_r called with no hash table !"); + return; + } + + /* The HW performs a non-atomic byte update */ + stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01); +} + int spapr_hpt_shift_for_ramsize(uint64_t ramsize) { int shift; @@ -1713,6 +1731,16 @@ static void spapr_machine_reset(void) spapr_irq_msi_reset(spapr); } + /* + * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node. + * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is + * called from vPHB reset handler so we initialize the counter here. + * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM + * must be equally distant from any other node. + * The final value of spapr->gpu_numa_id is going to be written to + * max-associativity-domains in spapr_build_fdt(). + */ + spapr->gpu_numa_id = MAX(1, nb_numa_nodes); qemu_devices_reset(); /* @@ -2795,13 +2823,7 @@ static void spapr_machine_init(MachineState *machine) /* advertise XIVE on POWER9 machines */ if (spapr->irq->ov5 & (SPAPR_OV5_XIVE_EXPLOIT | SPAPR_OV5_XIVE_BOTH)) { - if (ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, - 0, spapr->max_compat_pvr)) { - spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT); - } else if (spapr->irq->ov5 & SPAPR_OV5_XIVE_EXPLOIT) { - error_report("XIVE-only machines require a POWER9 CPU"); - exit(1); - } + spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT); } /* init CPUs */ @@ -3352,12 +3374,12 @@ static void spapr_instance_init(Object *obj) spapr_get_host_model, spapr_set_host_model, &error_abort); object_property_set_description(obj, "host-model", - "Set host's model-id to use - none|passthrough|string", &error_abort); + "Host model to advertise in guest device tree", &error_abort); object_property_add_str(obj, "host-serial", spapr_get_host_serial, spapr_set_host_serial, &error_abort); object_property_set_description(obj, "host-serial", - "Set host's system-id to use - none|passthrough|string", &error_abort); + "Host serial number to advertise in guest device tree", &error_abort); } static void spapr_machine_finalizefn(Object *obj) @@ -3928,7 +3950,9 @@ static void spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, smc->phb_placement(spapr, sphb->index, &sphb->buid, &sphb->io_win_addr, &sphb->mem_win_addr, &sphb->mem64_win_addr, - windows_supported, sphb->dma_liobn, errp); + windows_supported, sphb->dma_liobn, + &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr, + errp); } static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev, @@ -4129,7 +4153,8 @@ static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine) static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index, uint64_t *buid, hwaddr *pio, hwaddr *mmio32, hwaddr *mmio64, - unsigned n_dma, uint32_t *liobns, Error **errp) + unsigned n_dma, uint32_t *liobns, + hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) { /* * New-style PHB window placement. 
@@ -4174,6 +4199,9 @@ static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index, *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE; *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE; *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE; + + *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE; + *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE; } static ICSState *spapr_ics_get(XICSFabric *dev, int irq) @@ -4295,7 +4323,8 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) vhc->hpt_mask = spapr_hpt_mask; vhc->map_hptes = spapr_map_hptes; vhc->unmap_hptes = spapr_unmap_hptes; - vhc->store_hpte = spapr_store_hpte; + vhc->hpte_set_c = spapr_hpte_set_c; + vhc->hpte_set_r = spapr_hpte_set_r; vhc->get_pate = spapr_get_pate; vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr; xic->ics_get = spapr_ics_get; @@ -4366,37 +4395,57 @@ static const TypeInfo spapr_machine_info = { type_init(spapr_machine_register_##suffix) /* + * pseries-4.1 + */ +static void spapr_machine_4_1_class_options(MachineClass *mc) +{ + /* Defaults for the latest behaviour inherited from the base class */ +} + +DEFINE_SPAPR_MACHINE(4_1, "4.1", true); + +/* * pseries-4.0 */ static void spapr_machine_4_0_class_options(MachineClass *mc) { - /* Defaults for the latest behaviour inherited from the base class */ + spapr_machine_4_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); } -DEFINE_SPAPR_MACHINE(4_0, "4.0", true); +DEFINE_SPAPR_MACHINE(4_0, "4.0", false); /* * pseries-3.1 */ +static void phb_placement_3_1(SpaprMachineState *spapr, uint32_t index, + uint64_t *buid, hwaddr *pio, + hwaddr *mmio32, hwaddr *mmio64, + unsigned n_dma, uint32_t *liobns, + hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) +{ + spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma, liobns, + nv2gpa, nv2atsd, errp); + *nv2gpa = 0; + *nv2atsd = 0; +} + static void spapr_machine_3_1_class_options(MachineClass *mc) { SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - static GlobalProperty compat[] = { - { TYPE_SPAPR_MACHINE, "host-model", "passthrough" }, - { TYPE_SPAPR_MACHINE, "host-serial", "passthrough" }, - }; spapr_machine_4_0_class_options(mc); compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0"); smc->update_dt_enabled = false; smc->dr_phb_enabled = false; + smc->broken_host_serial_model = true; smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN; smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN; smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN; smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF; + smc->phb_placement = phb_placement_3_1; } DEFINE_SPAPR_MACHINE(3_1, "3.1", false); @@ -4528,7 +4577,8 @@ DEFINE_SPAPR_MACHINE(2_8, "2.8", false); static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index, uint64_t *buid, hwaddr *pio, hwaddr *mmio32, hwaddr *mmio64, - unsigned n_dma, uint32_t *liobns, Error **errp) + unsigned n_dma, uint32_t *liobns, + hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) { /* Legacy PHB placement for pseries-2.7 and earlier machine types */ const uint64_t base_buid = 0x800000020000000ULL; @@ -4572,6 +4622,9 @@ static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index, * fallback behaviour of automatically splitting a large "32-bit" * window into 
contiguous 32-bit and 64-bit windows */ + + *nv2gpa = 0; + *nv2atsd = 0; } static void spapr_machine_2_7_class_options(MachineClass *mc) diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c index edc5ed0e0c..9b1c10baa6 100644 --- a/hw/ppc/spapr_caps.c +++ b/hw/ppc/spapr_caps.c @@ -347,7 +347,7 @@ static void cap_hpt_maxpagesize_apply(SpaprMachineState *spapr, warn_report("Many guests require at least 64kiB hpt-max-page-size"); } - spapr_check_pagesize(spapr, qemu_getrampagesize(), errp); + spapr_check_pagesize(spapr, qemu_minrampagesize(), errp); } static bool spapr_pagesize_cb(void *opaque, uint32_t seg_pshift, @@ -609,7 +609,7 @@ static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr, uint8_t mps; if (kvmppc_hpt_needs_host_contiguous_pages()) { - mps = ctz64(qemu_getrampagesize()); + mps = ctz64(qemu_minrampagesize()); } else { mps = 34; /* allow everything up to 16GiB, i.e. everything */ } diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 0761e10142..6c16d2b120 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -118,7 +118,7 @@ static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr, ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1); } - ppc_hash64_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel); + spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel); args[0] = ptex + slot; return H_SUCCESS; @@ -131,7 +131,8 @@ typedef enum { REMOVE_HW = 3, } RemoveResult; -static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex, +static RemoveResult remove_hpte(PowerPCCPU *cpu + , target_ulong ptex, target_ulong avpn, target_ulong flags, target_ulong *vp, target_ulong *rp) @@ -155,7 +156,7 @@ static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex, } *vp = v; *rp = r; - ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0); + spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0); ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r); return REMOVE_SUCCESS; } @@ -289,13 +290,13 @@ static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr, r |= (flags << 55) & HPTE64_R_PP0; r |= (flags << 48) & HPTE64_R_KEY_HI; r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO); - ppc_hash64_store_hpte(cpu, ptex, - (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0); + spapr_store_hpte(cpu, ptex, + (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0); ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r); /* Flush the tlb */ check_tlb_flush(env, true); /* Don't need a memory barrier, due to qemu's global lock */ - ppc_hash64_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r); + spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r); return H_SUCCESS; } @@ -304,8 +305,8 @@ static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr, { target_ulong flags = args[0]; target_ulong ptex = args[1]; - uint8_t *hpte; int i, ridx, n_entries = 1; + const ppc_hash_pte64_t *hptes; if (!valid_ptex(cpu, ptex)) { return H_PARAMETER; @@ -317,13 +318,12 @@ static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr, n_entries = 4; } - hpte = spapr->htab + (ptex * HASH_PTE_SIZE_64); - + hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries); for (i = 0, ridx = 0; i < n_entries; i++) { - args[ridx++] = ldq_p(hpte); - args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2)); - hpte += HASH_PTE_SIZE_64; + args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i); + args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i); } + ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries); return H_SUCCESS; } @@ -1400,7 +1400,8 @@ static target_ulong 
h_register_process_table(PowerPCCPU *cpu, else if (flags & FLAG_HASH_PROC_TBL) /* Hash with process tables */ update_lpcr |= LPCR_UPRT; if (flags & FLAG_GTSE) /* Guest translation shootdown enable */ - update_lpcr |= FLAG_GTSE; + update_lpcr |= LPCR_GTSE; + spapr_set_all_lpcrs(update_lpcr, LPCR_UPRT | LPCR_HR | LPCR_GTSE); if (kvm_enabled()) { diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index 253e4de7fd..b1f79ea9de 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -16,6 +16,7 @@ #include "hw/ppc/spapr_xive.h" #include "hw/ppc/xics.h" #include "hw/ppc/xics_spapr.h" +#include "cpu-models.h" #include "sysemu/kvm.h" #include "trace.h" @@ -66,36 +67,11 @@ void spapr_irq_msi_reset(SpaprMachineState *spapr) * XICS IRQ backend. */ -static ICSState *spapr_ics_create(SpaprMachineState *spapr, - int nr_irqs, Error **errp) -{ - Error *local_err = NULL; - Object *obj; - - obj = object_new(TYPE_ICS_SIMPLE); - object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort); - object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr), - &error_abort); - object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err); - if (local_err) { - goto error; - } - object_property_set_bool(obj, true, "realized", &local_err); - if (local_err) { - goto error; - } - - return ICS_BASE(obj); - -error: - error_propagate(errp, local_err); - return NULL; -} - static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs, Error **errp) { MachineState *machine = MACHINE(spapr); + Object *obj; Error *local_err = NULL; bool xics_kvm = false; @@ -107,7 +83,8 @@ static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs, if (machine_kernel_irqchip_required(machine) && !xics_kvm) { error_prepend(&local_err, "kernel_irqchip requested but unavailable: "); - goto error; + error_propagate(errp, local_err); + return; } error_free(local_err); local_err = NULL; @@ -117,10 +94,18 @@ static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs, xics_spapr_init(spapr); } - spapr->ics = spapr_ics_create(spapr, nr_irqs, &local_err); + obj = object_new(TYPE_ICS_SIMPLE); + object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort); + object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr), + &error_fatal); + object_property_set_int(obj, nr_irqs, "nr-irqs", &error_fatal); + object_property_set_bool(obj, true, "realized", &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } -error: - error_propagate(errp, local_err); + spapr->ics = ICS_BASE(obj); } #define ICS_IRQ_FREE(ics, srcno) \ @@ -582,12 +567,55 @@ SpaprIrq spapr_irq_dual = { .get_nodename = spapr_irq_get_nodename_dual, }; + +static void spapr_irq_check(SpaprMachineState *spapr, Error **errp) +{ + MachineState *machine = MACHINE(spapr); + + /* + * Sanity checks on non-P9 machines. On these, XIVE is not + * advertised, see spapr_dt_ov5_platform_support() + */ + if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, + 0, spapr->max_compat_pvr)) { + /* + * If the 'dual' interrupt mode is selected, force XICS as CAS + * negotiation is useless. + */ + if (spapr->irq == &spapr_irq_dual) { + spapr->irq = &spapr_irq_xics; + return; + } + + /* + * Non-P9 machines using only XIVE is a bogus setup. We have two + * scenarios to take into account because of the compat mode: + * + * 1. POWER7/8 machines should fail to init later on when creating + * the XIVE interrupt presenters because a POWER9 exception + * model is required. + + * 2. 
POWER9 machines using the POWER8 compat mode won't fail and + * will let the OS boot with a partial XIVE setup : DT + * properties but no hcalls. + * + * To cover both and not confuse the OS, add an early failure in + * QEMU. + */ + if (spapr->irq == &spapr_irq_xive) { + error_setg(errp, "XIVE-only machines require a POWER9 CPU"); + return; + } + } +} + /* * sPAPR IRQ frontend routines for devices */ void spapr_irq_init(SpaprMachineState *spapr, Error **errp) { MachineState *machine = MACHINE(spapr); + Error *local_err = NULL; if (machine_kernel_irqchip_split(machine)) { error_setg(errp, "kernel_irqchip split mode not supported on pseries"); @@ -600,6 +628,12 @@ void spapr_irq_init(SpaprMachineState *spapr, Error **errp) return; } + spapr_irq_check(spapr, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + /* Initialize the MSI IRQ allocator. */ if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { spapr_irq_msi_init(spapr, spapr->irq->nr_msis); diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 20915d2b3c..97961b0128 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -719,26 +719,10 @@ param_error_exit: rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); } -static int pci_spapr_swizzle(int slot, int pin) -{ - return (slot + pin) % PCI_NUM_PINS; -} - -static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num) -{ - /* - * Here we need to convert pci_dev + irq_num to some unique value - * which is less than number of IRQs on the specific bus (4). We - * use standard PCI swizzling, that is (slot number + pin number) - * % 4. - */ - return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num); -} - static void pci_spapr_set_irq(void *opaque, int irq_num, int level) { /* - * Here we use the number returned by pci_spapr_map_irq to find a + * Here we use the number returned by pci_swizzle_map_irq_fn to find a * corresponding qemu_irq. 
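For reference, the interrupt routing that pci_spapr_set_irq() now relies on is the generic swizzle helper rather than the local copy removed above. A minimal sketch of that standard swizzle, equivalent to the deleted pci_spapr_swizzle()/pci_spapr_map_irq() pair (the function names below are illustrative, assuming the generic pci_swizzle()/pci_swizzle_map_irq_fn() helpers behave the same way):

    /* Editorial sketch: standard PCI interrupt swizzling. */
    static inline int swizzle_example(int slot, int pin)
    {
        /* (slot + pin) mod 4 maps every device/pin pair onto one of the
         * PCI_NUM_PINS inputs of the host bridge. */
        return (slot + pin) % PCI_NUM_PINS;
    }

    static int map_irq_example(PCIDevice *pci_dev, int irq_num)
    {
        return swizzle_example(PCI_SLOT(pci_dev->devfn), irq_num);
    }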
*/ SpaprPhbState *phb = opaque; @@ -1355,6 +1339,8 @@ static void spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset, if (sphb->pcie_ecs && pci_is_express(dev)) { _FDT(fdt_setprop_cell(fdt, offset, "ibm,pci-config-space-type", 0x1)); } + + spapr_phb_nvgpu_populate_pcidev_dt(dev, fdt, offset, sphb); } /* create OF node for pci device and required OF DT properties */ @@ -1488,9 +1474,7 @@ static void spapr_pci_plug(HotplugHandler *plug_handler, } out: - if (local_err) { - error_propagate(errp, local_err); - } + error_propagate(errp, local_err); } static void spapr_pci_unplug(HotplugHandler *plug_handler, @@ -1589,6 +1573,8 @@ static void spapr_phb_unrealize(DeviceState *dev, Error **errp) int i; const unsigned windows_supported = spapr_phb_windows_supported(sphb); + spapr_phb_nvgpu_free(sphb); + if (sphb->msi) { g_hash_table_unref(sphb->msi); sphb->msi = NULL; @@ -1640,6 +1626,28 @@ static void spapr_phb_unrealize(DeviceState *dev, Error **errp) memory_region_del_subregion(get_system_memory(), &sphb->mem32window); } +static bool spapr_phb_allows_extended_config_space(PCIBus *bus) +{ + SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(BUS(bus)->parent); + + return sphb->pcie_ecs; +} + +static void spapr_phb_root_bus_class_init(ObjectClass *klass, void *data) +{ + PCIBusClass *pbc = PCI_BUS_CLASS(klass); + + pbc->allows_extended_config_space = spapr_phb_allows_extended_config_space; +} + +#define TYPE_SPAPR_PHB_ROOT_BUS "pci" + +static const TypeInfo spapr_phb_root_bus_info = { + .name = TYPE_SPAPR_PHB_ROOT_BUS, + .parent = TYPE_PCI_BUS, + .class_init = spapr_phb_root_bus_class_init, +}; + static void spapr_phb_realize(DeviceState *dev, Error **errp) { /* We don't use SPAPR_MACHINE() in order to exit gracefully if the user @@ -1742,9 +1750,10 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) &sphb->iowindow); bus = pci_register_root_bus(dev, NULL, - pci_spapr_set_irq, pci_spapr_map_irq, sphb, + pci_spapr_set_irq, pci_swizzle_map_irq_fn, sphb, &sphb->memspace, &sphb->iospace, - PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS); + PCI_DEVFN(0, 0), PCI_NUM_PINS, + TYPE_SPAPR_PHB_ROOT_BUS); phb->bus = bus; qbus_set_hotplug_handler(BUS(phb->bus), OBJECT(sphb), NULL); @@ -1877,8 +1886,14 @@ void spapr_phb_dma_reset(SpaprPhbState *sphb) static void spapr_phb_reset(DeviceState *qdev) { SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(qdev); + Error *errp = NULL; spapr_phb_dma_reset(sphb); + spapr_phb_nvgpu_free(sphb); + spapr_phb_nvgpu_setup(sphb, &errp); + if (errp) { + error_report_err(errp); + } /* Reset the IOMMU state */ object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL); @@ -1911,6 +1926,8 @@ static Property spapr_phb_properties[] = { pre_2_8_migration, false), DEFINE_PROP_BOOL("pcie-extended-configuration-space", SpaprPhbState, pcie_ecs, true), + DEFINE_PROP_UINT64("gpa", SpaprPhbState, nv2_gpa_win_addr, 0), + DEFINE_PROP_UINT64("atsd", SpaprPhbState, nv2_atsd_win_addr, 0), DEFINE_PROP_END_OF_LIST(), }; @@ -2143,7 +2160,6 @@ int spapr_populate_pci_dt(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt, uint32_t nr_msis, int *node_offset) { int bus_off, i, j, ret; - gchar *nodename; uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) }; struct { uint32_t hi; @@ -2191,11 +2207,10 @@ int spapr_populate_pci_dt(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt, PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus; SpaprFdt s_fdt; SpaprDrc *drc; + Error *errp = NULL; /* Start populating the FDT */ - nodename = g_strdup_printf("pci@%" PRIx64, phb->buid); - _FDT(bus_off = 
fdt_add_subnode(fdt, 0, nodename)); - g_free(nodename); + _FDT(bus_off = fdt_add_subnode(fdt, 0, phb->dtbusname)); if (node_offset) { *node_offset = bus_off; } @@ -2228,14 +2243,14 @@ int spapr_populate_pci_dt(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt, } /* Build the interrupt-map, this must matches what is done - * in pci_spapr_map_irq + * in pci_swizzle_map_irq_fn */ _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask", &interrupt_map_mask, sizeof(interrupt_map_mask))); for (i = 0; i < PCI_SLOT_MAX; i++) { for (j = 0; j < PCI_NUM_PINS; j++) { uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j]; - int lsi_num = pci_spapr_swizzle(i, j); + int lsi_num = pci_swizzle(i, j); irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0)); irqmap[1] = 0; @@ -2283,6 +2298,12 @@ int spapr_populate_pci_dt(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt, return ret; } + spapr_phb_nvgpu_populate_dt(phb, fdt, bus_off, &errp); + if (errp) { + error_report_err(errp); + } + spapr_phb_nvgpu_ram_populate_dt(phb, fdt); + return 0; } @@ -2327,6 +2348,7 @@ void spapr_pci_rtas_init(void) static void spapr_pci_register_types(void) { type_register_static(&spapr_phb_info); + type_register_static(&spapr_phb_root_bus_info); } type_init(spapr_pci_register_types) diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c new file mode 100644 index 0000000000..eda8c752aa --- /dev/null +++ b/hw/ppc/spapr_pci_nvlink2.c @@ -0,0 +1,450 @@ +/* + * QEMU sPAPR PCI for NVLink2 pass through + * + * Copyright (c) 2019 Alexey Kardashevskiy, IBM Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "hw/pci/pci.h" +#include "hw/pci-host/spapr.h" +#include "qemu/error-report.h" +#include "hw/ppc/fdt.h" +#include "hw/pci/pci_bridge.h" + +#define PHANDLE_PCIDEV(phb, pdev) (0x12000000 | \ + (((phb)->index) << 16) | ((pdev)->devfn)) +#define PHANDLE_GPURAM(phb, n) (0x110000FF | ((n) << 8) | \ + (((phb)->index) << 16)) +#define PHANDLE_NVLINK(phb, gn, nn) (0x00130000 | (((phb)->index) << 8) | \ + ((gn) << 4) | (nn)) + +#define SPAPR_GPU_NUMA_ID (cpu_to_be32(1)) + +struct spapr_phb_pci_nvgpu_config { + uint64_t nv2_ram_current; + uint64_t nv2_atsd_current; + int num; /* number of non empty (i.e. 
tgt!=0) entries in slots[] */ + struct spapr_phb_pci_nvgpu_slot { + uint64_t tgt; + uint64_t gpa; + unsigned numa_id; + PCIDevice *gpdev; + int linknum; + struct { + uint64_t atsd_gpa; + PCIDevice *npdev; + uint32_t link_speed; + } links[NVGPU_MAX_LINKS]; + } slots[NVGPU_MAX_NUM]; + Error *errp; +}; + +static struct spapr_phb_pci_nvgpu_slot * +spapr_nvgpu_get_slot(struct spapr_phb_pci_nvgpu_config *nvgpus, uint64_t tgt) +{ + int i; + + /* Search for partially collected "slot" */ + for (i = 0; i < nvgpus->num; ++i) { + if (nvgpus->slots[i].tgt == tgt) { + return &nvgpus->slots[i]; + } + } + + if (nvgpus->num == ARRAY_SIZE(nvgpus->slots)) { + return NULL; + } + + i = nvgpus->num; + nvgpus->slots[i].tgt = tgt; + ++nvgpus->num; + + return &nvgpus->slots[i]; +} + +static void spapr_pci_collect_nvgpu(struct spapr_phb_pci_nvgpu_config *nvgpus, + PCIDevice *pdev, uint64_t tgt, + MemoryRegion *mr, Error **errp) +{ + MachineState *machine = MACHINE(qdev_get_machine()); + SpaprMachineState *spapr = SPAPR_MACHINE(machine); + struct spapr_phb_pci_nvgpu_slot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt); + + if (!nvslot) { + error_setg(errp, "Found too many GPUs per vPHB"); + return; + } + g_assert(!nvslot->gpdev); + nvslot->gpdev = pdev; + + nvslot->gpa = nvgpus->nv2_ram_current; + nvgpus->nv2_ram_current += memory_region_size(mr); + nvslot->numa_id = spapr->gpu_numa_id; + ++spapr->gpu_numa_id; +} + +static void spapr_pci_collect_nvnpu(struct spapr_phb_pci_nvgpu_config *nvgpus, + PCIDevice *pdev, uint64_t tgt, + MemoryRegion *mr, Error **errp) +{ + struct spapr_phb_pci_nvgpu_slot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt); + int j; + + if (!nvslot) { + error_setg(errp, "Found too many NVLink bridges per vPHB"); + return; + } + + j = nvslot->linknum; + if (j == ARRAY_SIZE(nvslot->links)) { + error_setg(errp, "Found too many NVLink bridges per GPU"); + return; + } + ++nvslot->linknum; + + g_assert(!nvslot->links[j].npdev); + nvslot->links[j].npdev = pdev; + nvslot->links[j].atsd_gpa = nvgpus->nv2_atsd_current; + nvgpus->nv2_atsd_current += memory_region_size(mr); + nvslot->links[j].link_speed = + object_property_get_uint(OBJECT(pdev), "nvlink2-link-speed", NULL); +} + +static void spapr_phb_pci_collect_nvgpu(PCIBus *bus, PCIDevice *pdev, + void *opaque) +{ + PCIBus *sec_bus; + Object *po = OBJECT(pdev); + uint64_t tgt = object_property_get_uint(po, "nvlink2-tgt", NULL); + + if (tgt) { + Error *local_err = NULL; + struct spapr_phb_pci_nvgpu_config *nvgpus = opaque; + Object *mr_gpu = object_property_get_link(po, "nvlink2-mr[0]", NULL); + Object *mr_npu = object_property_get_link(po, "nvlink2-atsd-mr[0]", + NULL); + + g_assert(mr_gpu || mr_npu); + if (mr_gpu) { + spapr_pci_collect_nvgpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_gpu), + &local_err); + } else { + spapr_pci_collect_nvnpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_npu), + &local_err); + } + error_propagate(&nvgpus->errp, local_err); + } + if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) != + PCI_HEADER_TYPE_BRIDGE)) { + return; + } + + sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev)); + if (!sec_bus) { + return; + } + + pci_for_each_device(sec_bus, pci_bus_num(sec_bus), + spapr_phb_pci_collect_nvgpu, opaque); +} + +void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp) +{ + int i, j, valid_gpu_num; + PCIBus *bus; + + /* Search for GPUs and NPUs */ + if (!sphb->nv2_gpa_win_addr || !sphb->nv2_atsd_win_addr) { + return; + } + + sphb->nvgpus = g_new0(struct spapr_phb_pci_nvgpu_config, 1); + sphb->nvgpus->nv2_ram_current = sphb->nv2_gpa_win_addr; 
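A short sketch of the address bookkeeping performed by the collect callbacks that run right after this point: each vPHB receives fixed GPA and ATSD windows from phb_placement (base + index * window size, as in the spapr.c hunk earlier), and spapr_pci_collect_nvgpu()/spapr_pci_collect_nvnpu() carve those windows up by advancing the nv2_ram_current/nv2_atsd_current cursors by the size of each memory region. The helper below is an illustration of that cursor logic only, not code from the patch.

    /* Editorial sketch: consecutive GPUs (or NVLink bridges) on one vPHB get
     * back-to-back slices of that vPHB's window. */
    static uint64_t claim_window_slice(uint64_t *cursor, MemoryRegion *mr)
    {
        uint64_t base = *cursor;              /* address handed to this device */
        *cursor += memory_region_size(mr);    /* next device starts after it   */
        return base;
    }

    /* e.g.  nvslot->gpa       = claim_window_slice(&nvgpus->nv2_ram_current,  mr_gpu);
     *       links[j].atsd_gpa = claim_window_slice(&nvgpus->nv2_atsd_current, mr_npu); */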
+ sphb->nvgpus->nv2_atsd_current = sphb->nv2_atsd_win_addr; + + bus = PCI_HOST_BRIDGE(sphb)->bus; + pci_for_each_device(bus, pci_bus_num(bus), + spapr_phb_pci_collect_nvgpu, sphb->nvgpus); + + if (sphb->nvgpus->errp) { + error_propagate(errp, sphb->nvgpus->errp); + sphb->nvgpus->errp = NULL; + goto cleanup_exit; + } + + /* Add found GPU RAM and ATSD MRs if found */ + for (i = 0, valid_gpu_num = 0; i < sphb->nvgpus->num; ++i) { + Object *nvmrobj; + struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i]; + + if (!nvslot->gpdev) { + continue; + } + nvmrobj = object_property_get_link(OBJECT(nvslot->gpdev), + "nvlink2-mr[0]", NULL); + /* ATSD is pointless without GPU RAM MR so skip those */ + if (!nvmrobj) { + continue; + } + + ++valid_gpu_num; + memory_region_add_subregion(get_system_memory(), nvslot->gpa, + MEMORY_REGION(nvmrobj)); + + for (j = 0; j < nvslot->linknum; ++j) { + Object *atsdmrobj; + + atsdmrobj = object_property_get_link(OBJECT(nvslot->links[j].npdev), + "nvlink2-atsd-mr[0]", NULL); + if (!atsdmrobj) { + continue; + } + memory_region_add_subregion(get_system_memory(), + nvslot->links[j].atsd_gpa, + MEMORY_REGION(atsdmrobj)); + } + } + + if (valid_gpu_num) { + return; + } + /* We did not find any interesting GPU */ +cleanup_exit: + g_free(sphb->nvgpus); + sphb->nvgpus = NULL; +} + +void spapr_phb_nvgpu_free(SpaprPhbState *sphb) +{ + int i, j; + + if (!sphb->nvgpus) { + return; + } + + for (i = 0; i < sphb->nvgpus->num; ++i) { + struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i]; + Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev), + "nvlink2-mr[0]", NULL); + + if (nv_mrobj) { + memory_region_del_subregion(get_system_memory(), + MEMORY_REGION(nv_mrobj)); + } + for (j = 0; j < nvslot->linknum; ++j) { + PCIDevice *npdev = nvslot->links[j].npdev; + Object *atsd_mrobj; + atsd_mrobj = object_property_get_link(OBJECT(npdev), + "nvlink2-atsd-mr[0]", NULL); + if (atsd_mrobj) { + memory_region_del_subregion(get_system_memory(), + MEMORY_REGION(atsd_mrobj)); + } + } + } + g_free(sphb->nvgpus); + sphb->nvgpus = NULL; +} + +void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off, + Error **errp) +{ + int i, j, atsdnum = 0; + uint64_t atsd[8]; /* The existing limitation of known guests */ + + if (!sphb->nvgpus) { + return; + } + + for (i = 0; (i < sphb->nvgpus->num) && (atsdnum < ARRAY_SIZE(atsd)); ++i) { + struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i]; + + if (!nvslot->gpdev) { + continue; + } + for (j = 0; j < nvslot->linknum; ++j) { + if (!nvslot->links[j].atsd_gpa) { + continue; + } + + if (atsdnum == ARRAY_SIZE(atsd)) { + error_report("Only %"PRIuPTR" ATSD registers supported", + ARRAY_SIZE(atsd)); + break; + } + atsd[atsdnum] = cpu_to_be64(nvslot->links[j].atsd_gpa); + ++atsdnum; + } + } + + if (!atsdnum) { + error_setg(errp, "No ATSD registers found"); + return; + } + + if (!spapr_phb_eeh_available(sphb)) { + /* + * ibm,mmio-atsd contains ATSD registers; these belong to an NPU PHB + * which we do not emulate as a separate device. Instead we put + * ibm,mmio-atsd to the vPHB with GPU and make sure that we do not + * put GPUs from different IOMMU groups to the same vPHB to ensure + * that the guest will use ATSDs from the corresponding NPU. 
+ */ + error_setg(errp, "ATSD requires separate vPHB per GPU IOMMU group"); + return; + } + + _FDT((fdt_setprop(fdt, bus_off, "ibm,mmio-atsd", atsd, + atsdnum * sizeof(atsd[0])))); +} + +void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt) +{ + int i, j, linkidx, npuoff; + char *npuname; + + if (!sphb->nvgpus) { + return; + } + + npuname = g_strdup_printf("npuphb%d", sphb->index); + npuoff = fdt_add_subnode(fdt, 0, npuname); + _FDT(npuoff); + _FDT(fdt_setprop_cell(fdt, npuoff, "#address-cells", 1)); + _FDT(fdt_setprop_cell(fdt, npuoff, "#size-cells", 0)); + /* Advertise NPU as POWER9 so the guest can enable NPU2 contexts */ + _FDT((fdt_setprop_string(fdt, npuoff, "compatible", "ibm,power9-npu"))); + g_free(npuname); + + for (i = 0, linkidx = 0; i < sphb->nvgpus->num; ++i) { + for (j = 0; j < sphb->nvgpus->slots[i].linknum; ++j) { + char *linkname = g_strdup_printf("link@%d", linkidx); + int off = fdt_add_subnode(fdt, npuoff, linkname); + + _FDT(off); + /* _FDT((fdt_setprop_cell(fdt, off, "reg", linkidx))); */ + _FDT((fdt_setprop_string(fdt, off, "compatible", + "ibm,npu-link"))); + _FDT((fdt_setprop_cell(fdt, off, "phandle", + PHANDLE_NVLINK(sphb, i, j)))); + _FDT((fdt_setprop_cell(fdt, off, "ibm,npu-link-index", linkidx))); + g_free(linkname); + ++linkidx; + } + } + + /* Add memory nodes for GPU RAM and mark them unusable */ + for (i = 0; i < sphb->nvgpus->num; ++i) { + struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i]; + Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev), + "nvlink2-mr[0]", NULL); + uint32_t associativity[] = { + cpu_to_be32(0x4), + SPAPR_GPU_NUMA_ID, + SPAPR_GPU_NUMA_ID, + SPAPR_GPU_NUMA_ID, + cpu_to_be32(nvslot->numa_id) + }; + uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL); + uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) }; + char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa); + int off = fdt_add_subnode(fdt, 0, mem_name); + + _FDT(off); + _FDT((fdt_setprop_string(fdt, off, "device_type", "memory"))); + _FDT((fdt_setprop(fdt, off, "reg", mem_reg, sizeof(mem_reg)))); + _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity, + sizeof(associativity)))); + + _FDT((fdt_setprop_string(fdt, off, "compatible", + "ibm,coherent-device-memory"))); + + mem_reg[1] = cpu_to_be64(0); + _FDT((fdt_setprop(fdt, off, "linux,usable-memory", mem_reg, + sizeof(mem_reg)))); + _FDT((fdt_setprop_cell(fdt, off, "phandle", + PHANDLE_GPURAM(sphb, i)))); + g_free(mem_name); + } + +} + +void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset, + SpaprPhbState *sphb) +{ + int i, j; + + if (!sphb->nvgpus) { + return; + } + + for (i = 0; i < sphb->nvgpus->num; ++i) { + struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i]; + + /* Skip "slot" without attached GPU */ + if (!nvslot->gpdev) { + continue; + } + if (dev == nvslot->gpdev) { + uint32_t npus[nvslot->linknum]; + + for (j = 0; j < nvslot->linknum; ++j) { + PCIDevice *npdev = nvslot->links[j].npdev; + + npus[j] = cpu_to_be32(PHANDLE_PCIDEV(sphb, npdev)); + } + _FDT(fdt_setprop(fdt, offset, "ibm,npu", npus, + j * sizeof(npus[0]))); + _FDT((fdt_setprop_cell(fdt, offset, "phandle", + PHANDLE_PCIDEV(sphb, dev)))); + continue; + } + + for (j = 0; j < nvslot->linknum; ++j) { + if (dev != nvslot->links[j].npdev) { + continue; + } + + _FDT((fdt_setprop_cell(fdt, offset, "phandle", + PHANDLE_PCIDEV(sphb, dev)))); + _FDT(fdt_setprop_cell(fdt, offset, "ibm,gpu", + PHANDLE_PCIDEV(sphb, nvslot->gpdev))); + 
_FDT((fdt_setprop_cell(fdt, offset, "ibm,nvlink", + PHANDLE_NVLINK(sphb, i, j)))); + /* + * If we ever want to emulate GPU RAM at the same location as on + * the host - here is the encoding GPA->TGT: + * + * gta = ((sphb->nv2_gpa >> 42) & 0x1) << 42; + * gta |= ((sphb->nv2_gpa >> 45) & 0x3) << 43; + * gta |= ((sphb->nv2_gpa >> 49) & 0x3) << 45; + * gta |= sphb->nv2_gpa & ((1UL << 43) - 1); + */ + _FDT(fdt_setprop_cell(fdt, offset, "memory-region", + PHANDLE_GPURAM(sphb, i))); + _FDT(fdt_setprop_u64(fdt, offset, "ibm,device-tgt-addr", + nvslot->tgt)); + _FDT(fdt_setprop_cell(fdt, offset, "ibm,nvlink-speed", + nvslot->links[j].link_speed)); + } + } +} diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c index 24c45b12d4..ee24212765 100644 --- a/hw/ppc/spapr_rtas.c +++ b/hw/ppc/spapr_rtas.c @@ -404,7 +404,7 @@ void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn) token -= RTAS_TOKEN_BASE; - assert(!rtas_table[token].name); + assert(!name || !rtas_table[token].name); rtas_table[token].name = name; rtas_table[token].fn = fn; diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events index 0af155ed32..f76448f532 100644 --- a/hw/ppc/trace-events +++ b/hw/ppc/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/ppc/spapr_pci.c +# spapr_pci.c spapr_pci_msi(const char *msg, uint32_t ca) "%s (cfg=0x%x)" spapr_pci_msi_setup(const char *name, unsigned vector, uint64_t addr) "dev\"%s\" vector %u, addr=0x%"PRIx64 spapr_pci_rtas_ibm_change_msi(unsigned cfg, unsigned func, unsigned req, unsigned first) "cfgaddr 0x%x func %u, requested %u, first irq %u" @@ -9,16 +9,15 @@ spapr_pci_msi_write(uint64_t addr, uint64_t data, uint32_t dt_irq) "@0x%"PRIx64" spapr_pci_lsi_set(const char *busname, int pin, uint32_t irq) "%s PIN%d IRQ %u" spapr_pci_msi_retry(unsigned config_addr, unsigned req_num, unsigned max_irqs) "Guest device at 0x%x asked %u, have only %u" -# hw/ppc/spapr.c +# spapr.c spapr_cas_failed(unsigned long n) "DT diff buffer is too small: %ld bytes" spapr_cas_continue(unsigned long n) "Copy changes to the guest: %ld bytes" -spapr_irq_alloc(int irq) "irq %d" -spapr_irq_alloc_block(int first, int num, bool lsi, int align) "first irq %d, %d irqs, lsi=%d, alignnum %d" + +# spapr_irq.c spapr_irq_free(int src, int irq, int num) "Source#%d, first irq %d, %d irqs" spapr_irq_free_warn(int src, int irq) "Source#%d, irq %d is already free" -# hw/ppc/spapr_hcall.c -spapr_cas_pvr_try(uint32_t pvr) "0x%x" +# spapr_hcall.c spapr_cas_pvr(uint32_t cur_pvr, bool explicit_match, uint32_t new_pvr) "current=0x%x, explicit_match=%u, new=0x%x" spapr_h_resize_hpt_prepare(uint64_t flags, uint64_t shift) "flags=0x%"PRIx64", shift=%"PRIu64 spapr_h_resize_hpt_commit(uint64_t flags, uint64_t shift) "flags=0x%"PRIx64", shift=%"PRIu64 @@ -26,7 +25,7 @@ spapr_update_dt(unsigned cb) "New blob %u bytes" spapr_update_dt_failed_size(unsigned cbold, unsigned cbnew, unsigned magic) "Old blob %u bytes, new blob %u bytes, magic 0x%x" spapr_update_dt_failed_check(unsigned cbold, unsigned cbnew, unsigned magic) "Old blob %u bytes, new blob %u bytes, magic 0x%x" -# hw/ppc/spapr_iommu.c +# spapr_iommu.c spapr_iommu_put(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t ret) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" tce=0x%"PRIx64" ret=%"PRId64 spapr_iommu_get(uint64_t liobn, uint64_t ioba, uint64_t ret, uint64_t tce) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" ret=%"PRId64" tce=0x%"PRIx64 spapr_iommu_indirect(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t iobaN, uint64_t tceN, uint64_t ret) 
"liobn=0x%"PRIx64" ioba=0x%"PRIx64" tcelist=0x%"PRIx64" iobaN=0x%"PRIx64" tceN=0x%"PRIx64" ret=%"PRId64 @@ -39,70 +38,67 @@ spapr_iommu_xlate(uint64_t liobn, uint64_t ioba, uint64_t tce, unsigned perm, un spapr_iommu_new_table(uint64_t liobn, void *table, int fd) "liobn=0x%"PRIx64" table=%p fd=%d" spapr_iommu_pre_save(uint64_t liobn, uint32_t nb, uint64_t offs, uint32_t ps) "liobn=%"PRIx64" %"PRIx32" bus_offset=0x%"PRIx64" ps=%"PRIu32 spapr_iommu_post_load(uint64_t liobn, uint32_t pre_nb, uint32_t post_nb, uint64_t offs, uint32_t ps) "liobn=%"PRIx64" %"PRIx32" => 0x%"PRIx32" bus_offset=0x%"PRIx64" ps=%"PRIu32 + +# spapr_rtas_ddw.c spapr_iommu_ddw_query(uint64_t buid, uint32_t cfgaddr, unsigned wa, uint64_t win_size, uint32_t pgmask) "buid=0x%"PRIx64" addr=0x%"PRIx32", %u windows available, max window size=0x%"PRIx64", mask=0x%"PRIx32 spapr_iommu_ddw_create(uint64_t buid, uint32_t cfgaddr, uint64_t pg_size, uint64_t req_size, uint64_t start, uint32_t liobn) "buid=0x%"PRIx64" addr=0x%"PRIx32", page size=0x%"PRIx64", requested=0x%"PRIx64", start addr=0x%"PRIx64", liobn=0x%"PRIx32 spapr_iommu_ddw_remove(uint32_t liobn) "liobn=0x%"PRIx32 spapr_iommu_ddw_reset(uint64_t buid, uint32_t cfgaddr) "buid=0x%"PRIx64" addr=0x%"PRIx32 -# hw/ppc/spapr_drc.c +# spapr_drc.c spapr_drc_set_isolation_state(uint32_t index, int state) "drc: 0x%"PRIx32", state: 0x%"PRIx32 spapr_drc_set_isolation_state_finalizing(uint32_t index) "drc: 0x%"PRIx32 -spapr_drc_set_isolation_state_deferring(uint32_t index) "drc: 0x%"PRIx32 spapr_drc_set_dr_indicator(uint32_t index, int state) "drc: 0x%"PRIx32", state: 0x%x" spapr_drc_set_allocation_state(uint32_t index, int state) "drc: 0x%"PRIx32", state: 0x%x" spapr_drc_set_allocation_state_finalizing(uint32_t index) "drc: 0x%"PRIx32 spapr_drc_set_configured(uint32_t index) "drc: 0x%"PRIx32 -spapr_drc_set_configured_skipping(uint32_t index) "drc: 0x%"PRIx32", isolated device" spapr_drc_attach(uint32_t index) "drc: 0x%"PRIx32 spapr_drc_detach(uint32_t index) "drc: 0x%"PRIx32 spapr_drc_awaiting_quiesce(uint32_t index) "drc: 0x%"PRIx32 -spapr_drc_awaiting_allocation(uint32_t index) "drc: 0x%"PRIx32 spapr_drc_reset(uint32_t index) "drc: 0x%"PRIx32 spapr_drc_realize(uint32_t index) "drc: 0x%"PRIx32 spapr_drc_realize_child(uint32_t index, char *childname) "drc: 0x%"PRIx32", child name: %s" spapr_drc_realize_complete(uint32_t index) "drc: 0x%"PRIx32 spapr_drc_unrealize(uint32_t index) "drc: 0x%"PRIx32 -# hw/ppc/spapr_ovec.c +# spapr_ovec.c spapr_ovec_parse_vector(int vector, int byte, uint16_t vec_len, uint8_t entry) "read guest vector %2d, byte %3d / %3d: 0x%.2x" spapr_ovec_populate_dt(int byte, uint16_t vec_len, uint8_t entry) "encoding guest vector byte %3d / %3d: 0x%.2x" -# hw/ppc/spapr_rtas.c +# spapr_drc.c spapr_rtas_get_sensor_state_not_supported(uint32_t index, uint32_t type) "sensor index: 0x%"PRIx32", type: %"PRIu32 spapr_rtas_get_sensor_state_invalid(uint32_t index) "sensor index: 0x%"PRIx32 spapr_rtas_ibm_configure_connector_invalid(uint32_t index) "DRC index: 0x%"PRIx32 -spapr_rtas_ibm_configure_connector_missing_fdt(uint32_t index) "DRC index: 0x%"PRIx32 -# hw/ppc/spapr_vio.c +# spapr_vio.c spapr_vio_h_reg_crq(uint64_t reg, uint64_t queue_addr, uint64_t queue_len) "CRQ for dev 0x%" PRIx64 " registered at 0x%" PRIx64 "/0x%" PRIx64 spapr_vio_free_crq(uint32_t reg) "CRQ for dev 0x%" PRIx32 " freed" -# hw/ppc/ppc.c +# ppc.c ppc_tb_adjust(uint64_t offs1, uint64_t offs2, int64_t diff, int64_t seconds) "adjusted from 0x%"PRIx64" to 0x%"PRIx64", diff %"PRId64" (%"PRId64"s)" -# 
hw/ppc/prep.c +# prep.c prep_io_800_writeb(uint32_t addr, uint32_t val) "0x%08" PRIx32 " => 0x%02" PRIx32 prep_io_800_readb(uint32_t addr, uint32_t retval) "0x%08" PRIx32 " <= 0x%02" PRIx32 -# hw/ppc/prep_systemio.c +# prep_systemio.c prep_systemio_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" prep_systemio_write(uint32_t addr, uint32_t val) "write addr=0x%x val=0x%x" -# hw/ppc/rs6000_mc.c +# rs6000_mc.c rs6000mc_id_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" rs6000mc_presence_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" rs6000mc_size_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" rs6000mc_size_write(uint32_t addr, uint32_t val) "write addr=0x%x val=0x%x" rs6000mc_parity_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" -# hw/ppc/ppc4xx_pci.c +# ppc4xx_pci.c ppc4xx_pci_map_irq(int32_t devfn, int irq_num, int slot) "devfn 0x%x irq %d -> %d" ppc4xx_pci_set_irq(int irq_num) "PCI irq %d" -# hw/ppc/ppc440_pcix.c +# ppc440_pcix.c ppc440_pcix_map_irq(int32_t devfn, int irq_num, int slot) "devfn 0x%x irq %d -> %d" ppc440_pcix_set_irq(int irq_num) "PCI irq %d" ppc440_pcix_update_pim(int idx, uint64_t size, uint64_t la) "Added window %d of size=0x%" PRIx64 " to CPU=0x%" PRIx64 ppc440_pcix_update_pom(int idx, uint32_t size, uint64_t la, uint64_t pcia) "Added window %d of size=0x%x from CPU=0x%" PRIx64 " to PCI=0x%" PRIx64 ppc440_pcix_reg_read(uint64_t addr, uint32_t val) "addr 0x%" PRIx64 " = 0x%" PRIx32 -ppc440_pcix_reg_write(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " = 0x%" PRIx64 diff --git a/hw/rdma/Kconfig b/hw/rdma/Kconfig new file mode 100644 index 0000000000..8e2211288f --- /dev/null +++ b/hw/rdma/Kconfig @@ -0,0 +1,3 @@ +config VMW_PVRDMA + default y if PCI_DEVICES + depends on PVRDMA && PCI && MSI_NONBROKEN diff --git a/hw/rdma/Makefile.objs b/hw/rdma/Makefile.objs index bd36cbf51c..819bb12a35 100644 --- a/hw/rdma/Makefile.objs +++ b/hw/rdma/Makefile.objs @@ -1,5 +1,3 @@ -ifeq ($(CONFIG_PVRDMA),y) -obj-$(CONFIG_PCI) += rdma_utils.o rdma_backend.o rdma_rm.o -obj-$(CONFIG_PCI) += vmw/pvrdma_dev_ring.o vmw/pvrdma_cmd.o \ +obj-$(CONFIG_VMW_PVRDMA) += rdma_utils.o rdma_backend.o rdma_rm.o rdma.o +obj-$(CONFIG_VMW_PVRDMA) += vmw/pvrdma_dev_ring.o vmw/pvrdma_cmd.o \ vmw/pvrdma_qp_ops.o vmw/pvrdma_main.o -endif diff --git a/hw/rdma/rdma.c b/hw/rdma/rdma.c new file mode 100644 index 0000000000..7bec0d0d2c --- /dev/null +++ b/hw/rdma/rdma.c @@ -0,0 +1,30 @@ +/* + * RDMA device interface + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia <yuval.shaia@oracle.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "hw/rdma/rdma.h" +#include "qemu/module.h" + +static const TypeInfo rdma_hmp_info = { + .name = INTERFACE_RDMA_PROVIDER, + .parent = TYPE_INTERFACE, + .class_size = sizeof(RdmaProviderClass), +}; + +static void rdma_register_types(void) +{ + type_register_static(&rdma_hmp_info); +} + +type_init(rdma_register_types) diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c index fd571f21e5..d1660b6474 100644 --- a/hw/rdma/rdma_backend.c +++ b/hw/rdma/rdma_backend.c @@ -14,7 +14,6 @@ */ #include "qemu/osdep.h" -#include "qemu/error-report.h" #include "sysemu/sysemu.h" #include "qapi/error.h" #include "qapi/qmp/qlist.h" @@ -39,8 +38,8 @@ typedef struct BackendCtx { void *up_ctx; - bool is_tx_req; struct ibv_sge sge; /* Used to save MAD recv buffer */ + RdmaBackendQP *backend_qp; /* To maintain recv buffers */ } BackendCtx; struct backend_umad { @@ -52,13 +51,13 @@ static void (*comp_handler)(void *ctx, struct ibv_wc *wc); static void dummy_comp_handler(void *ctx, struct ibv_wc *wc) { - pr_err("No completion handler is registered\n"); + rdma_error_report("No completion handler is registered"); } static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err, void *ctx) { - struct ibv_wc wc = {0}; + struct ibv_wc wc = {}; wc.status = status; wc.vendor_err = vendor_err; @@ -66,40 +65,74 @@ static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err, comp_handler(ctx, &wc); } -static void poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq) +static void free_cqe_ctx(gpointer data, gpointer user_data) { - int i, ne; + BackendCtx *bctx; + RdmaDeviceResources *rdma_dev_res = user_data; + unsigned long cqe_ctx_id = GPOINTER_TO_INT(data); + + bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id); + if (bctx) { + rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id); + atomic_dec(&rdma_dev_res->stats.missing_cqe); + } + g_free(bctx); +} + +static void clean_recv_mads(RdmaBackendDev *backend_dev) +{ + unsigned long cqe_ctx_id; + + do { + cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev-> + recv_mads_list); + if (cqe_ctx_id != -ENOENT) { + atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); + free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id), + backend_dev->rdma_dev_res); + } + } while (cqe_ctx_id != -ENOENT); +} + +static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq) +{ + int i, ne, total_ne = 0; BackendCtx *bctx; struct ibv_wc wc[2]; - pr_dbg("Entering poll_cq loop on cq %p\n", ibcq); + qemu_mutex_lock(&rdma_dev_res->lock); do { ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc); - pr_dbg("Got %d completion(s) from cq %p\n", ne, ibcq); + trace_rdma_poll_cq(ne, ibcq); for (i = 0; i < ne; i++) { - pr_dbg("wr_id=0x%" PRIx64 "\n", wc[i].wr_id); - pr_dbg("status=%d\n", wc[i].status); - bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id); if (unlikely(!bctx)) { - pr_dbg("Error: Failed to find ctx for req %" PRId64 "\n", - wc[i].wr_id); + rdma_error_report("No matching ctx for req %"PRId64, + wc[i].wr_id); continue; } - pr_dbg("Processing %s CQE\n", bctx->is_tx_req ? 
"send" : "recv"); comp_handler(bctx->up_ctx, &wc[i]); + rdma_protected_gslist_remove_int32(&bctx->backend_qp->cqe_ctx_list, + wc[i].wr_id); rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id); g_free(bctx); } + total_ne += ne; } while (ne > 0); + atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne); + qemu_mutex_unlock(&rdma_dev_res->lock); if (ne < 0) { - pr_dbg("Got error %d from ibv_poll_cq\n", ne); + rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno); } + + rdma_dev_res->stats.completions += total_ne; + + return total_ne; } static void *comp_handler_thread(void *arg) @@ -115,12 +148,10 @@ static void *comp_handler_thread(void *arg) flags = fcntl(backend_dev->channel->fd, F_GETFL); rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK); if (rc < 0) { - pr_dbg("Fail to change to non-blocking mode\n"); + rdma_error_report("Failed to change backend channel FD to non-blocking"); return NULL; } - pr_dbg("Starting\n"); - pfds[0].fd = backend_dev->channel->fd; pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR; @@ -129,32 +160,32 @@ static void *comp_handler_thread(void *arg) while (backend_dev->comp_thread.run) { do { rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS); + if (!rc) { + backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++; + } } while (!rc && backend_dev->comp_thread.run); if (backend_dev->comp_thread.run) { - pr_dbg("Waiting for completion on channel %p\n", backend_dev->channel); rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx); - pr_dbg("ibv_get_cq_event=%d\n", rc); if (unlikely(rc)) { - pr_dbg("---> ibv_get_cq_event (%d)\n", rc); + rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc, + errno); continue; } rc = ibv_req_notify_cq(ev_cq, 0); if (unlikely(rc)) { - pr_dbg("Error %d from ibv_req_notify_cq\n", rc); + rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, + errno); } - poll_cq(backend_dev->rdma_dev_res, ev_cq); + backend_dev->rdma_dev_res->stats.poll_cq_from_bk++; + rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq); ibv_ack_cq_events(ev_cq, 1); } } - pr_dbg("Going down\n"); - - /* TODO: Post cqe for all remaining buffs that were posted */ - backend_dev->comp_thread.is_running = false; qemu_thread_exit(0); @@ -177,55 +208,54 @@ static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev) return atomic_read(&backend_dev->rdmacm_mux.can_receive); } -static int check_mux_op_status(CharBackend *mad_chr_be) +static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be) { RdmaCmMuxMsg msg = {}; int ret; - pr_dbg("Reading response\n"); ret = qemu_chr_fe_read_all(mad_chr_be, (uint8_t *)&msg, sizeof(msg)); if (ret != sizeof(msg)) { - pr_dbg("Invalid message size %d, expecting %ld\n", ret, sizeof(msg)); + rdma_error_report("Got invalid message from mux: size %d, expecting %d", + ret, (int)sizeof(msg)); return -EIO; } - pr_dbg("msg_type=%d\n", msg.hdr.msg_type); - pr_dbg("op_code=%d\n", msg.hdr.op_code); - pr_dbg("err_code=%d\n", msg.hdr.err_code); + trace_rdmacm_mux_check_op_status(msg.hdr.msg_type, msg.hdr.op_code, + msg.hdr.err_code); if (msg.hdr.msg_type != RDMACM_MUX_MSG_TYPE_RESP) { - pr_dbg("Invalid message type %d\n", msg.hdr.msg_type); + rdma_error_report("Got invalid message type %d", msg.hdr.msg_type); return -EIO; } if (msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK) { - pr_dbg("Operation failed in mux, error code %d\n", msg.hdr.err_code); + rdma_error_report("Operation failed in mux, error code %d", + msg.hdr.err_code); return -EIO; } return 0; } -static int exec_rdmacm_mux_req(RdmaBackendDev 
*backend_dev, RdmaCmMuxMsg *msg) +static int rdmacm_mux_send(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg) { int rc = 0; - pr_dbg("Executing request %d\n", msg->hdr.op_code); - msg->hdr.msg_type = RDMACM_MUX_MSG_TYPE_REQ; + trace_rdmacm_mux("send", msg->hdr.msg_type, msg->hdr.op_code); disable_rdmacm_mux_async(backend_dev); rc = qemu_chr_fe_write(backend_dev->rdmacm_mux.chr_be, (const uint8_t *)msg, sizeof(*msg)); if (rc != sizeof(*msg)) { enable_rdmacm_mux_async(backend_dev); - pr_dbg("Fail to send request to rdmacm_mux (rc=%d)\n", rc); + rdma_error_report("Failed to send request to rdmacm_mux (rc=%d)", rc); return -EIO; } - rc = check_mux_op_status(backend_dev->rdmacm_mux.chr_be); + rc = rdmacm_mux_check_op_status(backend_dev->rdmacm_mux.chr_be); if (rc) { - pr_dbg("Fail to execute rdmacm_mux request %d (rc=%d)\n", - msg->hdr.op_code, rc); + rdma_error_report("Failed to execute rdmacm_mux request %d (rc=%d)", + msg->hdr.op_code, rc); } enable_rdmacm_mux_async(backend_dev); @@ -237,14 +267,13 @@ static void stop_backend_thread(RdmaBackendThread *thread) { thread->run = false; while (thread->is_running) { - pr_dbg("Waiting for thread to complete\n"); sleep(THR_POLL_TO / SCALE_US / 2); } } static void start_comp_thread(RdmaBackendDev *backend_dev) { - char thread_name[THR_NAME_LEN] = {0}; + char thread_name[THR_NAME_LEN] = {}; stop_backend_thread(&backend_dev->comp_thread); @@ -273,7 +302,7 @@ int rdma_backend_query_port(RdmaBackendDev *backend_dev, rc = ibv_query_port(backend_dev->context, backend_dev->port_num, port_attr); if (rc) { - pr_dbg("Error %d from ibv_query_port\n", rc); + rdma_error_report("ibv_query_port fail, rc=%d, errno=%d", rc, errno); return -EIO; } @@ -282,7 +311,13 @@ int rdma_backend_query_port(RdmaBackendDev *backend_dev, void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq) { - poll_cq(rdma_dev_res, cq->ibcq); + int polled; + + rdma_dev_res->stats.poll_cq_from_guest++; + polled = rdma_poll_cq(rdma_dev_res, cq->ibcq); + if (!polled) { + rdma_dev_res->stats.poll_cq_from_guest_empty++; + } } static GHashTable *ah_hash; @@ -294,8 +329,8 @@ static struct ibv_ah *create_ah(RdmaBackendDev *backend_dev, struct ibv_pd *pd, struct ibv_ah *ah = g_hash_table_lookup(ah_hash, ah_key); if (ah) { - trace_create_ah_cache_hit(be64_to_cpu(dgid->global.subnet_prefix), - be64_to_cpu(dgid->global.interface_id)); + trace_rdma_create_ah_cache_hit(be64_to_cpu(dgid->global.subnet_prefix), + be64_to_cpu(dgid->global.interface_id)); g_bytes_unref(ah_key); } else { struct ibv_ah_attr ah_attr = { @@ -312,13 +347,13 @@ static struct ibv_ah *create_ah(RdmaBackendDev *backend_dev, struct ibv_pd *pd, g_hash_table_insert(ah_hash, ah_key, ah); } else { g_bytes_unref(ah_key); - pr_dbg("Fail to create AH for gid <0x%" PRIx64 ", 0x%" PRIx64 ">\n", - be64_to_cpu(dgid->global.subnet_prefix), - be64_to_cpu(dgid->global.interface_id)); + rdma_error_report("Failed to create AH for gid <0x%" PRIx64", 0x%"PRIx64">", + be64_to_cpu(dgid->global.subnet_prefix), + be64_to_cpu(dgid->global.interface_id)); } - trace_create_ah_cache_miss(be64_to_cpu(dgid->global.subnet_prefix), - be64_to_cpu(dgid->global.interface_id)); + trace_rdma_create_ah_cache_miss(be64_to_cpu(dgid->global.subnet_prefix), + be64_to_cpu(dgid->global.interface_id)); } return ah; @@ -344,17 +379,15 @@ static void ah_cache_init(void) static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res, struct ibv_sge *dsge, struct ibv_sge *ssge, - uint8_t num_sge) + uint8_t num_sge, uint64_t *total_length) { RdmaRmMR *mr; int 
ssge_idx; - pr_dbg("num_sge=%d\n", num_sge); - for (ssge_idx = 0; ssge_idx < num_sge; ssge_idx++) { mr = rdma_rm_get_mr(rdma_dev_res, ssge[ssge_idx].lkey); if (unlikely(!mr)) { - pr_dbg("Invalid lkey 0x%x\n", ssge[ssge_idx].lkey); + rdma_error_report("Invalid lkey 0x%x", ssge[ssge_idx].lkey); return VENDOR_ERR_INVLKEY | ssge[ssge_idx].lkey; } @@ -362,10 +395,7 @@ static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res, dsge->length = ssge[ssge_idx].length; dsge->lkey = rdma_backend_mr_lkey(&mr->backend_mr); - pr_dbg("ssge->addr=0x%" PRIx64 "\n", ssge[ssge_idx].addr); - pr_dbg("dsge->addr=0x%" PRIx64 "\n", dsge->addr); - pr_dbg("dsge->length=%d\n", dsge->length); - pr_dbg("dsge->lkey=0x%x\n", dsge->lkey); + *total_length += dsge->length; dsge++; } @@ -373,6 +403,22 @@ static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res, return 0; } +static void trace_mad_message(const char *title, char *buf, int len) +{ + int i; + char *b = g_malloc0(len * 3 + 1); + char b1[4]; + + for (i = 0; i < len; i++) { + sprintf(b1, "%.2X ", buf[i] & 0x000000FF); + strcat(b, b1); + } + + trace_rdma_mad_message(title, len, b); + + g_free(b); +} + static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx, union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge) { @@ -380,8 +426,6 @@ static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx, char *hdr, *data; int ret; - pr_dbg("num_sge=%d\n", num_sge); - if (num_sge != 2) { return -EINVAL; } @@ -390,7 +434,6 @@ static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx, memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid)); msg.umad_len = sge[0].length + sge[1].length; - pr_dbg("umad_len=%d\n", msg.umad_len); if (msg.umad_len > sizeof(msg.umad.mad)) { return -ENOMEM; @@ -398,36 +441,31 @@ static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx, msg.umad.hdr.addr.qpn = htobe32(1); msg.umad.hdr.addr.grh_present = 1; - pr_dbg("sgid_idx=%d\n", sgid_idx); - pr_dbg("sgid=0x%llx\n", sgid->global.interface_id); msg.umad.hdr.addr.gid_index = sgid_idx; memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid)); msg.umad.hdr.addr.hop_limit = 0xFF; hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length); if (!hdr) { - pr_dbg("Fail to map to sge[0]\n"); return -ENOMEM; } data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length); if (!data) { - pr_dbg("Fail to map to sge[1]\n"); rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length); return -ENOMEM; } - pr_dbg_buf("mad_hdr", hdr, sge[0].length); - pr_dbg_buf("mad_data", data, sge[1].length); - memcpy(&msg.umad.mad[0], hdr, sge[0].length); memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length); rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length); rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length); - ret = exec_rdmacm_mux_req(backend_dev, &msg); + trace_mad_message("send", msg.umad.mad, msg.umad_len); + + ret = rdmacm_mux_send(backend_dev, &msg); if (ret) { - pr_dbg("Fail to send MAD to rdma_umadmux (%d)\n", ret); + rdma_error_report("Failed to send MAD to rdma_umadmux (%d)", ret); return -EIO; } @@ -445,49 +483,49 @@ void rdma_backend_post_send(RdmaBackendDev *backend_dev, struct ibv_sge new_sge[MAX_SGE]; uint32_t bctx_id; int rc; - struct ibv_send_wr wr = {0}, *bad_wr; + struct ibv_send_wr wr = {}, *bad_wr; - if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */ + if (!qp->ibqp) { /* This field is not initialized for QP0 and QP1 */ if (qp_type == IBV_QPT_SMI) { - pr_dbg("QP0 unsupported\n"); + 
rdma_error_report("Got QP0 request"); complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx); } else if (qp_type == IBV_QPT_GSI) { - pr_dbg("QP1\n"); rc = mad_send(backend_dev, sgid_idx, sgid, sge, num_sge); if (rc) { complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx); + backend_dev->rdma_dev_res->stats.mad_tx_err++; } else { complete_work(IBV_WC_SUCCESS, 0, ctx); + backend_dev->rdma_dev_res->stats.mad_tx++; } } return; } - pr_dbg("num_sge=%d\n", num_sge); - bctx = g_malloc0(sizeof(*bctx)); bctx->up_ctx = ctx; - bctx->is_tx_req = 1; + bctx->backend_qp = qp; rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx); if (unlikely(rc)) { - pr_dbg("Failed to allocate cqe_ctx\n"); complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx); - goto out_free_bctx; + goto err_free_bctx; } - rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge); + rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id); + + rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge, + &backend_dev->rdma_dev_res->stats.tx_len); if (rc) { - pr_dbg("Error: Failed to build host SGE array\n"); complete_work(IBV_WC_GENERAL_ERR, rc, ctx); - goto out_dealloc_cqe_ctx; + goto err_dealloc_cqe_ctx; } if (qp_type == IBV_QPT_UD) { wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd, sgid_idx, dgid); if (!wr.wr.ud.ah) { complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx); - goto out_dealloc_cqe_ctx; + goto err_dealloc_cqe_ctx; } wr.wr.ud.remote_qpn = dqpn; wr.wr.ud.remote_qkey = dqkey; @@ -500,20 +538,23 @@ void rdma_backend_post_send(RdmaBackendDev *backend_dev, wr.wr_id = bctx_id; rc = ibv_post_send(qp->ibqp, &wr, &bad_wr); - pr_dbg("ibv_post_send=%d\n", rc); if (rc) { - pr_dbg("Fail (%d, %d) to post send WQE to qpn %d\n", rc, errno, - qp->ibqp->qp_num); + rdma_error_report("ibv_post_send fail, qpn=0x%x, rc=%d, errno=%d", + qp->ibqp->qp_num, rc, errno); complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx); - goto out_dealloc_cqe_ctx; + goto err_dealloc_cqe_ctx; } + atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); + backend_dev->rdma_dev_res->stats.tx++; + return; -out_dealloc_cqe_ctx: +err_dealloc_cqe_ctx: + backend_dev->rdma_dev_res->stats.tx_err++; rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id); -out_free_bctx: +err_free_bctx: g_free(bctx); } @@ -526,41 +567,32 @@ static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev, uint32_t bctx_id; if (num_sge != 1) { - pr_dbg("Invalid num_sge (%d), expecting 1\n", num_sge); + rdma_error_report("Invalid num_sge (%d), expecting 1", num_sge); return VENDOR_ERR_INV_NUM_SGE; } if (sge[0].length < RDMA_MAX_PRIVATE_DATA + sizeof(struct ibv_grh)) { - pr_dbg("Too small buffer for MAD\n"); + rdma_error_report("Too small buffer for MAD"); return VENDOR_ERR_INV_MAD_BUFF; } - pr_dbg("addr=0x%" PRIx64"\n", sge[0].addr); - pr_dbg("length=%d\n", sge[0].length); - pr_dbg("lkey=%d\n", sge[0].lkey); - bctx = g_malloc0(sizeof(*bctx)); rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx); if (unlikely(rc)) { g_free(bctx); - pr_dbg("Fail to allocate cqe_ctx\n"); return VENDOR_ERR_NOMEM; } - pr_dbg("bctx_id %d, bctx %p, ctx %p\n", bctx_id, bctx, ctx); bctx->up_ctx = ctx; bctx->sge = *sge; - qemu_mutex_lock(&backend_dev->recv_mads_list.lock); - qlist_append_int(backend_dev->recv_mads_list.list, bctx_id); - qemu_mutex_unlock(&backend_dev->recv_mads_list.lock); + rdma_protected_qlist_append_int64(&backend_dev->recv_mads_list, bctx_id); return 0; } void 
rdma_backend_post_recv(RdmaBackendDev *backend_dev, - RdmaDeviceResources *rdma_dev_res, RdmaBackendQP *qp, uint8_t qp_type, struct ibv_sge *sge, uint32_t num_sge, void *ctx) { @@ -568,61 +600,65 @@ void rdma_backend_post_recv(RdmaBackendDev *backend_dev, struct ibv_sge new_sge[MAX_SGE]; uint32_t bctx_id; int rc; - struct ibv_recv_wr wr = {0}, *bad_wr; + struct ibv_recv_wr wr = {}, *bad_wr; if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */ if (qp_type == IBV_QPT_SMI) { - pr_dbg("QP0 unsupported\n"); + rdma_error_report("Got QP0 request"); complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx); } if (qp_type == IBV_QPT_GSI) { - pr_dbg("QP1\n"); rc = save_mad_recv_buffer(backend_dev, sge, num_sge, ctx); if (rc) { complete_work(IBV_WC_GENERAL_ERR, rc, ctx); + backend_dev->rdma_dev_res->stats.mad_rx_bufs_err++; + } else { + backend_dev->rdma_dev_res->stats.mad_rx_bufs++; } } return; } - pr_dbg("num_sge=%d\n", num_sge); - bctx = g_malloc0(sizeof(*bctx)); bctx->up_ctx = ctx; - bctx->is_tx_req = 0; + bctx->backend_qp = qp; - rc = rdma_rm_alloc_cqe_ctx(rdma_dev_res, &bctx_id, bctx); + rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx); if (unlikely(rc)) { - pr_dbg("Failed to allocate cqe_ctx\n"); complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx); - goto out_free_bctx; + goto err_free_bctx; } - rc = build_host_sge_array(rdma_dev_res, new_sge, sge, num_sge); + rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id); + + rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge, + &backend_dev->rdma_dev_res->stats.rx_bufs_len); if (rc) { - pr_dbg("Error: Failed to build host SGE array\n"); complete_work(IBV_WC_GENERAL_ERR, rc, ctx); - goto out_dealloc_cqe_ctx; + goto err_dealloc_cqe_ctx; } wr.num_sge = num_sge; wr.sg_list = new_sge; wr.wr_id = bctx_id; rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr); - pr_dbg("ibv_post_recv=%d\n", rc); if (rc) { - pr_dbg("Fail (%d, %d) to post recv WQE to qpn %d\n", rc, errno, - qp->ibqp->qp_num); + rdma_error_report("ibv_post_recv fail, qpn=0x%x, rc=%d, errno=%d", + qp->ibqp->qp_num, rc, errno); complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx); - goto out_dealloc_cqe_ctx; + goto err_dealloc_cqe_ctx; } + atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); + backend_dev->rdma_dev_res->stats.rx_bufs++; + return; -out_dealloc_cqe_ctx: - rdma_rm_dealloc_cqe_ctx(rdma_dev_res, bctx_id); +err_dealloc_cqe_ctx: + backend_dev->rdma_dev_res->stats.rx_bufs_err++; + rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id); -out_free_bctx: +err_free_bctx: g_free(bctx); } @@ -630,7 +666,12 @@ int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd) { pd->ibpd = ibv_alloc_pd(backend_dev->context); - return pd->ibpd ? 0 : -EIO; + if (!pd->ibpd) { + rdma_error_report("ibv_alloc_pd fail, errno=%d", errno); + return -EIO; + } + + return 0; } void rdma_backend_destroy_pd(RdmaBackendPD *pd) @@ -643,16 +684,15 @@ void rdma_backend_destroy_pd(RdmaBackendPD *pd) int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr, size_t length, int access) { - pr_dbg("addr=0x%p\n", addr); - pr_dbg("len=%zu\n", length); mr->ibmr = ibv_reg_mr(pd->ibpd, addr, length, access); - if (mr->ibmr) { - pr_dbg("lkey=0x%x\n", mr->ibmr->lkey); - pr_dbg("rkey=0x%x\n", mr->ibmr->rkey); - mr->ibpd = pd->ibpd; + if (!mr->ibmr) { + rdma_error_report("ibv_reg_mr fail, errno=%d", errno); + return -EIO; } - return mr->ibmr ? 
0 : -EIO; + mr->ibpd = pd->ibpd; + + return 0; } void rdma_backend_destroy_mr(RdmaBackendMR *mr) @@ -667,21 +707,21 @@ int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq, { int rc; - pr_dbg("cqe=%d\n", cqe); - - pr_dbg("dev->channel=%p\n", backend_dev->channel); cq->ibcq = ibv_create_cq(backend_dev->context, cqe + 1, NULL, backend_dev->channel, 0); + if (!cq->ibcq) { + rdma_error_report("ibv_create_cq fail, errno=%d", errno); + return -EIO; + } - if (cq->ibcq) { - rc = ibv_req_notify_cq(cq->ibcq, 0); - if (rc) { - pr_dbg("Error %d from ibv_req_notify_cq\n", rc); - } - cq->backend_dev = backend_dev; + rc = ibv_req_notify_cq(cq->ibcq, 0); + if (rc) { + rdma_warn_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, errno); } - return cq->ibcq ? 0 : -EIO; + cq->backend_dev = backend_dev; + + return 0; } void rdma_backend_destroy_cq(RdmaBackendCQ *cq) @@ -697,10 +737,9 @@ int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type, uint32_t max_recv_wr, uint32_t max_send_sge, uint32_t max_recv_sge) { - struct ibv_qp_init_attr attr = {0}; + struct ibv_qp_init_attr attr = {}; qp->ibqp = 0; - pr_dbg("qp_type=%d\n", qp_type); switch (qp_type) { case IBV_QPT_GSI: @@ -713,7 +752,7 @@ int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type, break; default: - pr_dbg("Unsupported QP type %d\n", qp_type); + rdma_error_report("Unsupported QP type %d", qp_type); return -EIO; } @@ -725,35 +764,27 @@ int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type, attr.cap.max_send_sge = max_send_sge; attr.cap.max_recv_sge = max_recv_sge; - pr_dbg("max_send_wr=%d\n", max_send_wr); - pr_dbg("max_recv_wr=%d\n", max_recv_wr); - pr_dbg("max_send_sge=%d\n", max_send_sge); - pr_dbg("max_recv_sge=%d\n", max_recv_sge); - qp->ibqp = ibv_create_qp(pd->ibpd, &attr); - if (likely(!qp->ibqp)) { - pr_dbg("Error from ibv_create_qp\n"); + if (!qp->ibqp) { + rdma_error_report("ibv_create_qp fail, errno=%d", errno); return -EIO; } + rdma_protected_gslist_init(&qp->cqe_ctx_list); + qp->ibpd = pd->ibpd; /* TODO: Query QP to get max_inline_data and save it to be used in send */ - pr_dbg("qpn=0x%x\n", qp->ibqp->qp_num); - return 0; } int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, uint8_t qp_type, uint32_t qkey) { - struct ibv_qp_attr attr = {0}; + struct ibv_qp_attr attr = {}; int rc, attr_mask; - pr_dbg("qpn=0x%x\n", qp->ibqp->qp_num); - pr_dbg("sport_num=%d\n", backend_dev->port_num); - attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT; attr.qp_state = IBV_QPS_INIT; attr.pkey_index = 0; @@ -762,21 +793,23 @@ int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, switch (qp_type) { case IBV_QPT_RC: attr_mask |= IBV_QP_ACCESS_FLAGS; + trace_rdma_backend_rc_qp_state_init(qp->ibqp->qp_num); break; case IBV_QPT_UD: attr.qkey = qkey; attr_mask |= IBV_QP_QKEY; + trace_rdma_backend_ud_qp_state_init(qp->ibqp->qp_num, qkey); break; default: - pr_dbg("Unsupported QP type %d\n", qp_type); + rdma_error_report("Unsupported QP type %d", qp_type); return -EIO; } rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask); if (rc) { - pr_dbg("Error %d from ibv_modify_qp\n", rc); + rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno); return -EIO; } @@ -788,7 +821,7 @@ int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, union ibv_gid *dgid, uint32_t dqpn, uint32_t rq_psn, uint32_t qkey, bool use_qkey) { - struct ibv_qp_attr attr = {0}; + struct ibv_qp_attr attr = {}; union ibv_gid ibv_gid = { .global.interface_id = 
dgid->global.interface_id, .global.subnet_prefix = dgid->global.subnet_prefix @@ -802,14 +835,6 @@ int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, switch (qp_type) { case IBV_QPT_RC: - pr_dbg("dgid=0x%" PRIx64 ",%" PRIx64 "\n", - be64_to_cpu(ibv_gid.global.subnet_prefix), - be64_to_cpu(ibv_gid.global.interface_id)); - pr_dbg("dqpn=0x%x\n", dqpn); - pr_dbg("sgid_idx=%d\n", qp->sgid_idx); - pr_dbg("sport_num=%d\n", backend_dev->port_num); - pr_dbg("rq_psn=0x%x\n", rq_psn); - attr.path_mtu = IBV_MTU_1024; attr.dest_qp_num = dqpn; attr.max_dest_rd_atomic = 1; @@ -824,20 +849,28 @@ int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER; + + trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num, + be64_to_cpu(ibv_gid.global. + subnet_prefix), + be64_to_cpu(ibv_gid.global. + interface_id), + qp->sgid_idx, dqpn, rq_psn); break; case IBV_QPT_UD: - pr_dbg("qkey=0x%x\n", qkey); if (use_qkey) { attr.qkey = qkey; attr_mask |= IBV_QP_QKEY; } + trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num, use_qkey ? qkey : + 0); break; } rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask); if (rc) { - pr_dbg("Error %d from ibv_modify_qp\n", rc); + rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno); return -EIO; } @@ -847,12 +880,9 @@ int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type, uint32_t sq_psn, uint32_t qkey, bool use_qkey) { - struct ibv_qp_attr attr = {0}; + struct ibv_qp_attr attr = {}; int rc, attr_mask; - pr_dbg("qpn=0x%x\n", qp->ibqp->qp_num); - pr_dbg("sq_psn=0x%x\n", sq_psn); - attr.qp_state = IBV_QPS_RTS; attr.sq_psn = sq_psn; attr_mask = IBV_QP_STATE | IBV_QP_SQ_PSN; @@ -866,20 +896,22 @@ int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type, attr_mask |= IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_MAX_QP_RD_ATOMIC; + trace_rdma_backend_rc_qp_state_rts(qp->ibqp->qp_num, sq_psn); break; case IBV_QPT_UD: if (use_qkey) { - pr_dbg("qkey=0x%x\n", qkey); attr.qkey = qkey; attr_mask |= IBV_QP_QKEY; } + trace_rdma_backend_ud_qp_state_rts(qp->ibqp->qp_num, sq_psn, + use_qkey ? 
qkey : 0); break; } rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask); if (rc) { - pr_dbg("Error %d from ibv_modify_qp\n", rc); + rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno); return -EIO; } @@ -890,7 +922,6 @@ int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr, int attr_mask, struct ibv_qp_init_attr *init_attr) { if (!qp->ibqp) { - pr_dbg("QP1\n"); attr->qp_state = IBV_QPS_RTS; return 0; } @@ -898,28 +929,33 @@ int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr, return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr); } -void rdma_backend_destroy_qp(RdmaBackendQP *qp) +void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res) { if (qp->ibqp) { ibv_destroy_qp(qp->ibqp); } + g_slist_foreach(qp->cqe_ctx_list.list, free_cqe_ctx, dev_res); + rdma_protected_gslist_destroy(&qp->cqe_ctx_list); } #define CHK_ATTR(req, dev, member, fmt) ({ \ - pr_dbg("%s="fmt","fmt"\n", #member, dev.member, req->member); \ + trace_rdma_check_dev_attr(#member, dev.member, req->member); \ if (req->member > dev.member) { \ - warn_report("%s = "fmt" is higher than host device capability "fmt, \ - #member, req->member, dev.member); \ + rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \ + #member, req->member, dev.member); \ req->member = dev.member; \ } \ - pr_dbg("%s="fmt"\n", #member, req->member); }) +}) static int init_device_caps(RdmaBackendDev *backend_dev, struct ibv_device_attr *dev_attr) { struct ibv_device_attr bk_dev_attr; + int rc; - if (ibv_query_device(backend_dev->context, &bk_dev_attr)) { + rc = ibv_query_device(backend_dev->context, &bk_dev_attr); + if (rc) { + rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno); return -EIO; } @@ -928,9 +964,7 @@ static int init_device_caps(RdmaBackendDev *backend_dev, CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64); CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d"); - CHK_ATTR(dev_attr, bk_dev_attr, max_qp_wr, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d"); - CHK_ATTR(dev_attr, bk_dev_attr, max_cqe, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d"); @@ -946,56 +980,39 @@ static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid, grh->paylen = htons(paylen); grh->sgid = *sgid; grh->dgid = *my_gid; - - pr_dbg("paylen=%d (net=0x%x)\n", paylen, grh->paylen); - pr_dbg("dgid=0x%llx\n", my_gid->global.interface_id); - pr_dbg("sgid=0x%llx\n", sgid->global.interface_id); } static void process_incoming_mad_req(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg) { - QObject *o_ctx_id; unsigned long cqe_ctx_id; BackendCtx *bctx; char *mad; - pr_dbg("umad_len=%d\n", msg->umad_len); - -#ifdef PVRDMA_DEBUG - struct umad_hdr *hdr = (struct umad_hdr *)&msg->umad.mad; - pr_dbg("bv %x cls %x cv %x mtd %x st %d tid %" PRIx64 " at %x atm %x\n", - hdr->base_version, hdr->mgmt_class, hdr->class_version, - hdr->method, hdr->status, be64toh(hdr->tid), - hdr->attr_id, hdr->attr_mod); -#endif - - qemu_mutex_lock(&backend_dev->recv_mads_list.lock); - o_ctx_id = qlist_pop(backend_dev->recv_mads_list.list); - qemu_mutex_unlock(&backend_dev->recv_mads_list.lock); - if (!o_ctx_id) { - pr_dbg("No more free MADs buffers, waiting for a while\n"); + trace_mad_message("recv", msg->umad.mad, msg->umad_len); + + cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->recv_mads_list); + if (cqe_ctx_id == 
-ENOENT) { + rdma_warn_report("No more free MADs buffers, waiting for a while"); sleep(THR_POLL_TO); return; } - cqe_ctx_id = qnum_get_uint(qobject_to(QNum, o_ctx_id)); bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id); if (unlikely(!bctx)) { - pr_dbg("Error: Fail to find ctx for %ld\n", cqe_ctx_id); + rdma_error_report("No matching ctx for req %ld", cqe_ctx_id); + backend_dev->rdma_dev_res->stats.mad_rx_err++; return; } - pr_dbg("id %ld, bctx %p, ctx %p\n", cqe_ctx_id, bctx, bctx->up_ctx); - mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr, bctx->sge.length); if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) { + backend_dev->rdma_dev_res->stats.mad_rx_err++; complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF, bctx->up_ctx); } else { - struct ibv_wc wc = {0}; - pr_dbg_buf("mad", msg->umad.mad, msg->umad_len); + struct ibv_wc wc = {}; memset(mad, 0, bctx->sge.length); build_mad_hdr((struct ibv_grh *)mad, (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid, @@ -1006,6 +1023,7 @@ static void process_incoming_mad_req(RdmaBackendDev *backend_dev, wc.byte_len = msg->umad_len; wc.status = IBV_WC_SUCCESS; wc.wc_flags = IBV_WC_GRH; + backend_dev->rdma_dev_res->stats.mad_rx++; comp_handler(bctx->up_ctx, &wc); } @@ -1025,13 +1043,11 @@ static void rdmacm_mux_read(void *opaque, const uint8_t *buf, int size) RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque; RdmaCmMuxMsg *msg = (RdmaCmMuxMsg *)buf; - pr_dbg("Got %d bytes\n", size); - pr_dbg("msg_type=%d\n", msg->hdr.msg_type); - pr_dbg("op_code=%d\n", msg->hdr.op_code); + trace_rdmacm_mux("read", msg->hdr.msg_type, msg->hdr.op_code); if (msg->hdr.msg_type != RDMACM_MUX_MSG_TYPE_REQ && msg->hdr.op_code != RDMACM_MUX_OP_CODE_MAD) { - pr_dbg("Error: Not a MAD request, skipping\n"); + rdma_error_report("Error: Not a MAD request, skipping"); return; } process_incoming_mad_req(backend_dev, msg); @@ -1045,12 +1061,11 @@ static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be) ret = qemu_chr_fe_backend_connected(backend_dev->rdmacm_mux.chr_be); if (!ret) { - pr_dbg("Missing chardev for MAD multiplexer\n"); + rdma_error_report("Missing chardev for MAD multiplexer"); return -EIO; } - qemu_mutex_init(&backend_dev->recv_mads_list.lock); - backend_dev->recv_mads_list.list = qlist_new(); + rdma_protected_qlist_init(&backend_dev->recv_mads_list); enable_rdmacm_mux_async(backend_dev); @@ -1061,15 +1076,16 @@ static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be) return 0; } +static void mad_stop(RdmaBackendDev *backend_dev) +{ + clean_recv_mads(backend_dev); +} + static void mad_fini(RdmaBackendDev *backend_dev) { - pr_dbg("Stopping MAD\n"); disable_rdmacm_mux_async(backend_dev); qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be); - if (backend_dev->recv_mads_list.list) { - qlist_destroy_obj(QOBJECT(backend_dev->recv_mads_list.list)); - qemu_mutex_destroy(&backend_dev->recv_mads_list.lock); - } + rdma_protected_qlist_destroy(&backend_dev->recv_mads_list); } int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev, @@ -1079,17 +1095,15 @@ int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev, int ret; int i = 0; - pr_dbg("0x%llx, 0x%llx\n", - (long long unsigned int)be64_to_cpu(gid->global.subnet_prefix), - (long long unsigned int)be64_to_cpu(gid->global.interface_id)); - do { ret = ibv_query_gid(backend_dev->context, backend_dev->port_num, i, &sgid); i++; } while (!ret && (memcmp(&sgid, gid, sizeof(*gid)))); - pr_dbg("gid_index=%d\n", i - 1); + 
trace_rdma_backend_get_gid_index(be64_to_cpu(gid->global.subnet_prefix), + be64_to_cpu(gid->global.interface_id), + i - 1); return ret ? ret : i - 1; } @@ -1100,16 +1114,15 @@ int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname, RdmaCmMuxMsg msg = {}; int ret; - pr_dbg("0x%llx, 0x%llx\n", - (long long unsigned int)be64_to_cpu(gid->global.subnet_prefix), - (long long unsigned int)be64_to_cpu(gid->global.interface_id)); + trace_rdma_backend_gid_change("add", be64_to_cpu(gid->global.subnet_prefix), + be64_to_cpu(gid->global.interface_id)); msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG; memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid)); - ret = exec_rdmacm_mux_req(backend_dev, &msg); + ret = rdmacm_mux_send(backend_dev, &msg); if (ret) { - pr_dbg("Fail to register GID to rdma_umadmux (%d)\n", ret); + rdma_error_report("Failed to register GID to rdma_umadmux (%d)", ret); return -EIO; } @@ -1126,16 +1139,16 @@ int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname, RdmaCmMuxMsg msg = {}; int ret; - pr_dbg("0x%llx, 0x%llx\n", - (long long unsigned int)be64_to_cpu(gid->global.subnet_prefix), - (long long unsigned int)be64_to_cpu(gid->global.interface_id)); + trace_rdma_backend_gid_change("del", be64_to_cpu(gid->global.subnet_prefix), + be64_to_cpu(gid->global.interface_id)); msg.hdr.op_code = RDMACM_MUX_OP_CODE_UNREG; memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid)); - ret = exec_rdmacm_mux_req(backend_dev, &msg); + ret = rdmacm_mux_send(backend_dev, &msg); if (ret) { - pr_dbg("Fail to unregister GID from rdma_umadmux (%d)\n", ret); + rdma_error_report("Failed to unregister GID from rdma_umadmux (%d)", + ret); return -EIO; } @@ -1149,8 +1162,7 @@ int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname, int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev, RdmaDeviceResources *rdma_dev_res, const char *backend_device_name, uint8_t port_num, - struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be, - Error **errp) + struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be) { int i; int ret = 0; @@ -1167,12 +1179,12 @@ int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev, dev_list = ibv_get_device_list(&num_ibv_devices); if (!dev_list) { - error_setg(errp, "Failed to get IB devices list"); + rdma_error_report("Failed to get IB devices list"); return -EIO; } if (num_ibv_devices == 0) { - error_setg(errp, "No IB devices were found"); + rdma_error_report("No IB devices were found"); ret = -ENXIO; goto out_free_dev_list; } @@ -1187,8 +1199,8 @@ int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev, backend_dev->ib_dev = dev_list[i]; if (!backend_dev->ib_dev) { - error_setg(errp, "Failed to find IB device %s", - backend_device_name); + rdma_error_report("Failed to find IB device %s", + backend_device_name); ret = -EIO; goto out_free_dev_list; } @@ -1196,28 +1208,26 @@ int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev, backend_dev->ib_dev = *dev_list; } - pr_dbg("Using backend device %s, port %d\n", - ibv_get_device_name(backend_dev->ib_dev), backend_dev->port_num); - pr_dbg("uverb device %s\n", backend_dev->ib_dev->dev_name); + rdma_info_report("uverb device %s", backend_dev->ib_dev->dev_name); backend_dev->context = ibv_open_device(backend_dev->ib_dev); if (!backend_dev->context) { - error_setg(errp, "Failed to open IB device"); + rdma_error_report("Failed to open IB device %s", + ibv_get_device_name(backend_dev->ib_dev)); ret = -EIO; goto out; } backend_dev->channel = 
ibv_create_comp_channel(backend_dev->context); if (!backend_dev->channel) { - error_setg(errp, "Failed to create IB communication channel"); + rdma_error_report("Failed to create IB communication channel"); ret = -EIO; goto out_close_device; } - pr_dbg("dev->backend_dev.channel=%p\n", backend_dev->channel); ret = init_device_caps(backend_dev, dev_attr); if (ret) { - error_setg(errp, "Failed to initialize device capabilities"); + rdma_error_report("Failed to initialize device capabilities"); ret = -EIO; goto out_destroy_comm_channel; } @@ -1225,7 +1235,7 @@ int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev, ret = mad_init(backend_dev, mad_chr_be); if (ret) { - error_setg(errp, "Fail to initialize mad"); + rdma_error_report("Failed to initialize mad"); ret = -EIO; goto out_destroy_comm_channel; } @@ -1253,19 +1263,17 @@ out: void rdma_backend_start(RdmaBackendDev *backend_dev) { - pr_dbg("Starting rdma_backend\n"); start_comp_thread(backend_dev); } void rdma_backend_stop(RdmaBackendDev *backend_dev) { - pr_dbg("Stopping rdma_backend\n"); + mad_stop(backend_dev); stop_backend_thread(&backend_dev->comp_thread); } void rdma_backend_fini(RdmaBackendDev *backend_dev) { - rdma_backend_stop(backend_dev); mad_fini(backend_dev); g_hash_table_destroy(ah_hash); ibv_destroy_comp_channel(backend_dev->channel); diff --git a/hw/rdma/rdma_backend.h b/hw/rdma/rdma_backend.h index 5114c90e67..38056d97c7 100644 --- a/hw/rdma/rdma_backend.h +++ b/hw/rdma/rdma_backend.h @@ -58,8 +58,8 @@ static inline uint32_t rdma_backend_mr_rkey(const RdmaBackendMR *mr) int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev, RdmaDeviceResources *rdma_dev_res, const char *backend_device_name, uint8_t port_num, - struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be, - Error **errp); + struct ibv_device_attr *dev_attr, + CharBackend *mad_chr_be); void rdma_backend_fini(RdmaBackendDev *backend_dev); int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname, union ibv_gid *gid); @@ -102,7 +102,7 @@ int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type, uint32_t sq_psn, uint32_t qkey, bool use_qkey); int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr, int attr_mask, struct ibv_qp_init_attr *init_attr); -void rdma_backend_destroy_qp(RdmaBackendQP *qp); +void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res); void rdma_backend_post_send(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, uint8_t qp_type, @@ -111,7 +111,6 @@ void rdma_backend_post_send(RdmaBackendDev *backend_dev, union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey, void *ctx); void rdma_backend_post_recv(RdmaBackendDev *backend_dev, - RdmaDeviceResources *rdma_dev_res, RdmaBackendQP *qp, uint8_t qp_type, struct ibv_sge *sge, uint32_t num_sge, void *ctx); diff --git a/hw/rdma/rdma_backend_defs.h b/hw/rdma/rdma_backend_defs.h index 15ae8b970e..817153dc8c 100644 --- a/hw/rdma/rdma_backend_defs.h +++ b/hw/rdma/rdma_backend_defs.h @@ -20,21 +20,16 @@ #include "chardev/char-fe.h" #include <infiniband/verbs.h> #include "contrib/rdmacm-mux/rdmacm-mux.h" +#include "rdma_utils.h" typedef struct RdmaDeviceResources RdmaDeviceResources; typedef struct RdmaBackendThread { QemuThread thread; - QemuMutex mutex; bool run; /* Set by thread manager to let thread know it should exit */ bool is_running; /* Set by the thread to report its status */ } RdmaBackendThread; -typedef struct RecvMadList { - QemuMutex lock; - QList *list; -} RecvMadList; - typedef struct RdmaCmMux { CharBackend 
*chr_be; int can_receive; @@ -48,7 +43,7 @@ typedef struct RdmaBackendDev { struct ibv_context *context; struct ibv_comp_channel *channel; uint8_t port_num; - RecvMadList recv_mads_list; + RdmaProtectedQList recv_mads_list; RdmaCmMux rdmacm_mux; } RdmaBackendDev; @@ -70,6 +65,7 @@ typedef struct RdmaBackendQP { struct ibv_pd *ibpd; struct ibv_qp *ibqp; uint8_t sgid_idx; + RdmaProtectedGSList cqe_ctx_list; } RdmaBackendQP; #endif diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c index 268ff633a4..bac3b2f4a6 100644 --- a/hw/rdma/rdma_rm.c +++ b/hw/rdma/rdma_rm.c @@ -16,7 +16,9 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "cpu.h" +#include "monitor/monitor.h" +#include "trace.h" #include "rdma_utils.h" #include "rdma_backend.h" #include "rdma_rm.h" @@ -25,6 +27,58 @@ #define PG_DIR_SZ { TARGET_PAGE_SIZE / sizeof(__u64) } #define PG_TBL_SZ { TARGET_PAGE_SIZE / sizeof(__u64) } +void rdma_dump_device_counters(Monitor *mon, RdmaDeviceResources *dev_res) +{ + monitor_printf(mon, "\ttx : %" PRId64 "\n", + dev_res->stats.tx); + monitor_printf(mon, "\ttx_len : %" PRId64 "\n", + dev_res->stats.tx_len); + monitor_printf(mon, "\ttx_err : %" PRId64 "\n", + dev_res->stats.tx_err); + monitor_printf(mon, "\trx_bufs : %" PRId64 "\n", + dev_res->stats.rx_bufs); + monitor_printf(mon, "\trx_bufs_len : %" PRId64 "\n", + dev_res->stats.rx_bufs_len); + monitor_printf(mon, "\trx_bufs_err : %" PRId64 "\n", + dev_res->stats.rx_bufs_err); + monitor_printf(mon, "\tcomps : %" PRId64 "\n", + dev_res->stats.completions); + monitor_printf(mon, "\tmissing_comps : %" PRId32 "\n", + dev_res->stats.missing_cqe); + monitor_printf(mon, "\tpoll_cq (bk) : %" PRId64 "\n", + dev_res->stats.poll_cq_from_bk); + monitor_printf(mon, "\tpoll_cq_ppoll_to : %" PRId64 "\n", + dev_res->stats.poll_cq_ppoll_to); + monitor_printf(mon, "\tpoll_cq (fe) : %" PRId64 "\n", + dev_res->stats.poll_cq_from_guest); + monitor_printf(mon, "\tpoll_cq_empty : %" PRId64 "\n", + dev_res->stats.poll_cq_from_guest_empty); + monitor_printf(mon, "\tmad_tx : %" PRId64 "\n", + dev_res->stats.mad_tx); + monitor_printf(mon, "\tmad_tx_err : %" PRId64 "\n", + dev_res->stats.mad_tx_err); + monitor_printf(mon, "\tmad_rx : %" PRId64 "\n", + dev_res->stats.mad_rx); + monitor_printf(mon, "\tmad_rx_err : %" PRId64 "\n", + dev_res->stats.mad_rx_err); + monitor_printf(mon, "\tmad_rx_bufs : %" PRId64 "\n", + dev_res->stats.mad_rx_bufs); + monitor_printf(mon, "\tmad_rx_bufs_err : %" PRId64 "\n", + dev_res->stats.mad_rx_bufs_err); + monitor_printf(mon, "\tPDs : %" PRId32 "\n", + dev_res->pd_tbl.used); + monitor_printf(mon, "\tMRs : %" PRId32 "\n", + dev_res->mr_tbl.used); + monitor_printf(mon, "\tUCs : %" PRId32 "\n", + dev_res->uc_tbl.used); + monitor_printf(mon, "\tQPs : %" PRId32 "\n", + dev_res->qp_tbl.used); + monitor_printf(mon, "\tCQs : %" PRId32 "\n", + dev_res->cq_tbl.used); + monitor_printf(mon, "\tCEQ_CTXs : %" PRId32 "\n", + dev_res->cqe_ctx_tbl.used); +} + static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl, uint32_t tbl_sz, uint32_t res_sz) { @@ -36,6 +90,7 @@ static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl, tbl->bitmap = bitmap_new(tbl_sz); tbl->tbl_sz = tbl_sz; tbl->res_sz = res_sz; + tbl->used = 0; qemu_mutex_init(&tbl->lock); } @@ -49,48 +104,52 @@ static inline void res_tbl_free(RdmaRmResTbl *tbl) g_free(tbl->bitmap); } -static inline void *res_tbl_get(RdmaRmResTbl *tbl, uint32_t handle) +static inline void *rdma_res_tbl_get(RdmaRmResTbl *tbl, uint32_t handle) { - pr_dbg("%s, handle=%d\n", tbl->name, handle); + 
trace_rdma_res_tbl_get(tbl->name, handle); if ((handle < tbl->tbl_sz) && (test_bit(handle, tbl->bitmap))) { return tbl->tbl + handle * tbl->res_sz; } else { - pr_dbg("Invalid handle %d\n", handle); + rdma_error_report("Table %s, invalid handle %d", tbl->name, handle); return NULL; } } -static inline void *res_tbl_alloc(RdmaRmResTbl *tbl, uint32_t *handle) +static inline void *rdma_res_tbl_alloc(RdmaRmResTbl *tbl, uint32_t *handle) { qemu_mutex_lock(&tbl->lock); *handle = find_first_zero_bit(tbl->bitmap, tbl->tbl_sz); if (*handle > tbl->tbl_sz) { - pr_dbg("Failed to alloc, bitmap is full\n"); + rdma_error_report("Table %s, failed to allocate, bitmap is full", + tbl->name); qemu_mutex_unlock(&tbl->lock); return NULL; } set_bit(*handle, tbl->bitmap); + tbl->used++; + qemu_mutex_unlock(&tbl->lock); memset(tbl->tbl + *handle * tbl->res_sz, 0, tbl->res_sz); - pr_dbg("%s, handle=%d\n", tbl->name, *handle); + trace_rdma_res_tbl_alloc(tbl->name, *handle); return tbl->tbl + *handle * tbl->res_sz; } -static inline void res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle) +static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle) { - pr_dbg("%s, handle=%d\n", tbl->name, handle); + trace_rdma_res_tbl_dealloc(tbl->name, handle); qemu_mutex_lock(&tbl->lock); if (handle < tbl->tbl_sz) { clear_bit(handle, tbl->bitmap); + tbl->used--; } qemu_mutex_unlock(&tbl->lock); @@ -102,7 +161,7 @@ int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, RdmaRmPD *pd; int ret = -ENOMEM; - pd = res_tbl_alloc(&dev_res->pd_tbl, pd_handle); + pd = rdma_res_tbl_alloc(&dev_res->pd_tbl, pd_handle); if (!pd) { goto out; } @@ -118,7 +177,7 @@ int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, return 0; out_tbl_dealloc: - res_tbl_dealloc(&dev_res->pd_tbl, *pd_handle); + rdma_res_tbl_dealloc(&dev_res->pd_tbl, *pd_handle); out: return ret; @@ -126,7 +185,7 @@ out: RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle) { - return res_tbl_get(&dev_res->pd_tbl, pd_handle); + return rdma_res_tbl_get(&dev_res->pd_tbl, pd_handle); } void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle) @@ -135,14 +194,14 @@ void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle) if (pd) { rdma_backend_destroy_pd(&pd->backend_pd); - res_tbl_dealloc(&dev_res->pd_tbl, pd_handle); + rdma_res_tbl_dealloc(&dev_res->pd_tbl, pd_handle); } } int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle, - uint64_t guest_start, size_t guest_length, void *host_virt, - int access_flags, uint32_t *mr_handle, uint32_t *lkey, - uint32_t *rkey) + uint64_t guest_start, uint64_t guest_length, + void *host_virt, int access_flags, uint32_t *mr_handle, + uint32_t *lkey, uint32_t *rkey) { RdmaRmMR *mr; int ret = 0; @@ -150,20 +209,15 @@ int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle, pd = rdma_rm_get_pd(dev_res, pd_handle); if (!pd) { - pr_dbg("Invalid PD\n"); return -EINVAL; } - mr = res_tbl_alloc(&dev_res->mr_tbl, mr_handle); + mr = rdma_res_tbl_alloc(&dev_res->mr_tbl, mr_handle); if (!mr) { - pr_dbg("Failed to allocate obj in table\n"); return -ENOMEM; } - pr_dbg("mr_handle=%d\n", *mr_handle); - - pr_dbg("host_virt=0x%p\n", host_virt); - pr_dbg("guest_start=0x%" PRIx64 "\n", guest_start); - pr_dbg("length=%zu\n", guest_length); + trace_rdma_rm_alloc_mr(*mr_handle, host_virt, guest_start, guest_length, + access_flags); if (host_virt) { mr->virt = host_virt; @@ -174,7 +228,6 @@ int rdma_rm_alloc_mr(RdmaDeviceResources 
*dev_res, uint32_t pd_handle, ret = rdma_backend_create_mr(&mr->backend_mr, &pd->backend_pd, mr->virt, mr->length, access_flags); if (ret) { - pr_dbg("Fail in rdma_backend_create_mr, err=%d\n", ret); ret = -EIO; goto out_dealloc_mr; } @@ -189,14 +242,14 @@ int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle, return 0; out_dealloc_mr: - res_tbl_dealloc(&dev_res->mr_tbl, *mr_handle); + rdma_res_tbl_dealloc(&dev_res->mr_tbl, *mr_handle); return ret; } RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle) { - return res_tbl_get(&dev_res->mr_tbl, mr_handle); + return rdma_res_tbl_get(&dev_res->mr_tbl, mr_handle); } void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle) @@ -205,12 +258,12 @@ void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle) if (mr) { rdma_backend_destroy_mr(&mr->backend_mr); - pr_dbg("start=0x%" PRIx64 "\n", mr->start); + trace_rdma_rm_dealloc_mr(mr_handle, mr->start); if (mr->start) { mr->virt -= (mr->start & (TARGET_PAGE_SIZE - 1)); munmap(mr->virt, mr->length); } - res_tbl_dealloc(&dev_res->mr_tbl, mr_handle); + rdma_res_tbl_dealloc(&dev_res->mr_tbl, mr_handle); } } @@ -222,12 +275,13 @@ int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, uint32_t pfn, /* TODO: Need to make sure pfn is between bar start address and * bsd+RDMA_BAR2_UAR_SIZE if (pfn > RDMA_BAR2_UAR_SIZE) { - pr_err("pfn out of range (%d > %d)\n", pfn, RDMA_BAR2_UAR_SIZE); + rdma_error_report("pfn out of range (%d > %d)", pfn, + RDMA_BAR2_UAR_SIZE); return -ENOMEM; } */ - uc = res_tbl_alloc(&dev_res->uc_tbl, uc_handle); + uc = rdma_res_tbl_alloc(&dev_res->uc_tbl, uc_handle); if (!uc) { return -ENOMEM; } @@ -237,7 +291,7 @@ int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, uint32_t pfn, RdmaRmUC *rdma_rm_get_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle) { - return res_tbl_get(&dev_res->uc_tbl, uc_handle); + return rdma_res_tbl_get(&dev_res->uc_tbl, uc_handle); } void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle) @@ -245,13 +299,13 @@ void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle) RdmaRmUC *uc = rdma_rm_get_uc(dev_res, uc_handle); if (uc) { - res_tbl_dealloc(&dev_res->uc_tbl, uc_handle); + rdma_res_tbl_dealloc(&dev_res->uc_tbl, uc_handle); } } RdmaRmCQ *rdma_rm_get_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle) { - return res_tbl_get(&dev_res->cq_tbl, cq_handle); + return rdma_res_tbl_get(&dev_res->cq_tbl, cq_handle); } int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, @@ -260,7 +314,7 @@ int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, int rc; RdmaRmCQ *cq; - cq = res_tbl_alloc(&dev_res->cq_tbl, cq_handle); + cq = rdma_res_tbl_alloc(&dev_res->cq_tbl, cq_handle); if (!cq) { return -ENOMEM; } @@ -287,8 +341,6 @@ void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle, { RdmaRmCQ *cq; - pr_dbg("cq_handle=%d, notify=0x%x\n", cq_handle, notify); - cq = rdma_rm_get_cq(dev_res, cq_handle); if (!cq) { return; @@ -297,8 +349,6 @@ void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle, if (cq->notify != CNT_SET) { cq->notify = notify ? 
CNT_ARM : CNT_CLEAR; } - - pr_dbg("notify=%d\n", cq->notify); } void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle) @@ -312,7 +362,7 @@ void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle) rdma_backend_destroy_cq(&cq->backend_cq); - res_tbl_dealloc(&dev_res->cq_tbl, cq_handle); + rdma_res_tbl_dealloc(&dev_res->cq_tbl, cq_handle); } RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn) @@ -323,6 +373,10 @@ RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn) g_bytes_unref(key); + if (!qp) { + rdma_error_report("Invalid QP handle %d", qpn); + } + return qp; } @@ -338,11 +392,8 @@ int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle, RdmaRmPD *pd; uint32_t rm_qpn; - pr_dbg("qp_type=%d\n", qp_type); - pd = rdma_rm_get_pd(dev_res, pd_handle); if (!pd) { - pr_err("Invalid pd handle (%d)\n", pd_handle); return -EINVAL; } @@ -350,8 +401,8 @@ int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle, rcq = rdma_rm_get_cq(dev_res, recv_cq_handle); if (!scq || !rcq) { - pr_err("Invalid send_cqn or recv_cqn (%d, %d)\n", - send_cq_handle, recv_cq_handle); + rdma_error_report("Invalid send_cqn or recv_cqn (%d, %d)", + send_cq_handle, recv_cq_handle); return -EINVAL; } @@ -360,11 +411,10 @@ int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle, rcq->notify = CNT_SET; } - qp = res_tbl_alloc(&dev_res->qp_tbl, &rm_qpn); + qp = rdma_res_tbl_alloc(&dev_res->qp_tbl, &rm_qpn); if (!qp) { return -ENOMEM; } - pr_dbg("rm_qpn=%d\n", rm_qpn); qp->qpn = rm_qpn; qp->qp_state = IBV_QPS_RESET; @@ -382,13 +432,13 @@ int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle, } *qpn = rdma_backend_qpn(&qp->backend_qp); - pr_dbg("rm_qpn=%d, backend_qpn=0x%x\n", rm_qpn, *qpn); + trace_rdma_rm_alloc_qp(rm_qpn, *qpn, qp_type); g_hash_table_insert(dev_res->qp_hash, g_bytes_new(qpn, sizeof(*qpn)), qp); return 0; out_dealloc_qp: - res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn); + rdma_res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn); return rc; } @@ -402,28 +452,22 @@ int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, RdmaRmQP *qp; int ret; - pr_dbg("qpn=0x%x\n", qp_handle); - pr_dbg("qkey=0x%x\n", qkey); - qp = rdma_rm_get_qp(dev_res, qp_handle); if (!qp) { return -EINVAL; } - pr_dbg("qp_type=%d\n", qp->qp_type); - pr_dbg("attr_mask=0x%x\n", attr_mask); - if (qp->qp_type == IBV_QPT_SMI) { - pr_dbg("QP0 unsupported\n"); + rdma_error_report("Got QP0 request"); return -EPERM; } else if (qp->qp_type == IBV_QPT_GSI) { - pr_dbg("QP1\n"); return 0; } + trace_rdma_rm_modify_qp(qp_handle, attr_mask, qp_state, sgid_idx); + if (attr_mask & IBV_QP_STATE) { qp->qp_state = qp_state; - pr_dbg("qp_state=%d\n", qp->qp_state); if (qp->qp_state == IBV_QPS_INIT) { ret = rdma_backend_qp_state_init(backend_dev, &qp->backend_qp, @@ -435,11 +479,11 @@ int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, if (qp->qp_state == IBV_QPS_RTR) { /* Get backend gid index */ - pr_dbg("Guest sgid_idx=%d\n", sgid_idx); sgid_idx = rdma_rm_get_backend_gid_index(dev_res, backend_dev, sgid_idx); if (sgid_idx <= 0) { /* TODO check also less than bk.max_sgid */ - pr_dbg("Fail to get bk sgid_idx for sgid_idx %d\n", sgid_idx); + rdma_error_report("Failed to get bk sgid_idx for sgid_idx %d", + sgid_idx); return -EIO; } @@ -471,15 +515,11 @@ int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, { RdmaRmQP *qp; - pr_dbg("qpn=0x%x\n", qp_handle); - qp = 
rdma_rm_get_qp(dev_res, qp_handle); if (!qp) { return -EINVAL; } - pr_dbg("qp_type=%d\n", qp->qp_type); - return rdma_backend_query_qp(&qp->backend_qp, attr, attr_mask, init_attr); } @@ -497,22 +537,20 @@ void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle) return; } - rdma_backend_destroy_qp(&qp->backend_qp); + rdma_backend_destroy_qp(&qp->backend_qp, dev_res); - res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn); + rdma_res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn); } void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id) { void **cqe_ctx; - cqe_ctx = res_tbl_get(&dev_res->cqe_ctx_tbl, cqe_ctx_id); + cqe_ctx = rdma_res_tbl_get(&dev_res->cqe_ctx_tbl, cqe_ctx_id); if (!cqe_ctx) { return NULL; } - pr_dbg("ctx=%p\n", *cqe_ctx); - return *cqe_ctx; } @@ -521,12 +559,11 @@ int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id, { void **cqe_ctx; - cqe_ctx = res_tbl_alloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id); + cqe_ctx = rdma_res_tbl_alloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id); if (!cqe_ctx) { return -ENOMEM; } - pr_dbg("ctx=%p\n", ctx); *cqe_ctx = ctx; return 0; @@ -534,7 +571,7 @@ int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id, void rdma_rm_dealloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id) { - res_tbl_dealloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id); + rdma_res_tbl_dealloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id); } int rdma_rm_add_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, @@ -544,7 +581,6 @@ int rdma_rm_add_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, rc = rdma_backend_add_gid(backend_dev, ifname, gid); if (rc) { - pr_dbg("Fail to add gid\n"); return -EINVAL; } @@ -565,7 +601,6 @@ int rdma_rm_del_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, rc = rdma_backend_del_gid(backend_dev, ifname, &dev_res->port.gid_tbl[gid_idx].gid); if (rc) { - pr_dbg("Fail to delete gid\n"); return -EINVAL; } @@ -580,7 +615,7 @@ int rdma_rm_get_backend_gid_index(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, int sgid_idx) { if (unlikely(sgid_idx < 0 || sgid_idx >= MAX_PORT_GIDS)) { - pr_dbg("Got invalid sgid_idx %d\n", sgid_idx); + rdma_error_report("Got invalid sgid_idx %d", sgid_idx); return -EINVAL; } @@ -590,9 +625,6 @@ int rdma_rm_get_backend_gid_index(RdmaDeviceResources *dev_res, &dev_res->port.gid_tbl[sgid_idx].gid); } - pr_dbg("backend_gid_index=%d\n", - dev_res->port.gid_tbl[sgid_idx].backend_gid_index); - return dev_res->port.gid_tbl[sgid_idx].backend_gid_index; } @@ -624,8 +656,7 @@ static void fini_ports(RdmaDeviceResources *dev_res, } } -int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr, - Error **errp) +int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr) { dev_res->qp_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal, destroy_qp_hash_key, NULL); @@ -643,12 +674,19 @@ int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr, init_ports(dev_res); + qemu_mutex_init(&dev_res->lock); + + memset(&dev_res->stats, 0, sizeof(dev_res->stats)); + atomic_set(&dev_res->stats.missing_cqe, 0); + return 0; } void rdma_rm_fini(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, const char *ifname) { + qemu_mutex_destroy(&dev_res->lock); + fini_ports(dev_res, backend_dev, ifname); res_tbl_free(&dev_res->uc_tbl); diff --git a/hw/rdma/rdma_rm.h b/hw/rdma/rdma_rm.h index 3c602c04c0..4f03f9b8c5 100644 --- a/hw/rdma/rdma_rm.h +++ b/hw/rdma/rdma_rm.h @@ -20,8 +20,8 @@ #include 
"rdma_backend_defs.h" #include "rdma_rm_defs.h" -int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr, - Error **errp); +int rdma_rm_init(RdmaDeviceResources *dev_res, + struct ibv_device_attr *dev_attr); void rdma_rm_fini(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, const char *ifname); @@ -31,9 +31,9 @@ RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle); void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle); int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle, - uint64_t guest_start, size_t guest_length, void *host_virt, - int access_flags, uint32_t *mr_handle, uint32_t *lkey, - uint32_t *rkey); + uint64_t guest_start, uint64_t guest_length, + void *host_virt, int access_flags, uint32_t *mr_handle, + uint32_t *lkey, uint32_t *rkey); RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle); void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle); @@ -81,5 +81,6 @@ static inline union ibv_gid *rdma_rm_get_gid(RdmaDeviceResources *dev_res, { return &dev_res->port.gid_tbl[sgid_idx].gid; } +void rdma_dump_device_counters(Monitor *mon, RdmaDeviceResources *dev_res); #endif diff --git a/hw/rdma/rdma_rm_defs.h b/hw/rdma/rdma_rm_defs.h index 0ba61d1838..c200d311de 100644 --- a/hw/rdma/rdma_rm_defs.h +++ b/hw/rdma/rdma_rm_defs.h @@ -34,7 +34,9 @@ #define MAX_QP_INIT_RD_ATOM 16 #define MAX_AH 64 -#define MAX_RM_TBL_NAME 16 +#define MAX_RM_TBL_NAME 16 +#define MAX_CONSEQ_EMPTY_POLL_CQ 4096 /* considered as error above this */ + typedef struct RdmaRmResTbl { char name[MAX_RM_TBL_NAME]; QemuMutex lock; @@ -42,6 +44,7 @@ typedef struct RdmaRmResTbl { size_t tbl_sz; size_t res_sz; void *tbl; + uint32_t used; /* number of used entries in the table */ } RdmaRmResTbl; typedef struct RdmaRmPD { @@ -96,7 +99,28 @@ typedef struct RdmaRmPort { enum ibv_port_state state; } RdmaRmPort; -typedef struct RdmaDeviceResources { +typedef struct RdmaRmStats { + uint64_t tx; + uint64_t tx_len; + uint64_t tx_err; + uint64_t rx_bufs; + uint64_t rx_bufs_len; + uint64_t rx_bufs_err; + uint64_t completions; + uint64_t mad_tx; + uint64_t mad_tx_err; + uint64_t mad_rx; + uint64_t mad_rx_err; + uint64_t mad_rx_bufs; + uint64_t mad_rx_bufs_err; + uint64_t poll_cq_from_bk; + uint64_t poll_cq_from_guest; + uint64_t poll_cq_from_guest_empty; + uint64_t poll_cq_ppoll_to; + uint32_t missing_cqe; +} RdmaRmStats; + +struct RdmaDeviceResources { RdmaRmPort port; RdmaRmResTbl pd_tbl; RdmaRmResTbl mr_tbl; @@ -105,6 +129,8 @@ typedef struct RdmaDeviceResources { RdmaRmResTbl cq_tbl; RdmaRmResTbl cqe_ctx_tbl; GHashTable *qp_hash; /* Keeps mapping between real and emulated */ -} RdmaDeviceResources; + QemuMutex lock; + RdmaRmStats stats; +}; #endif diff --git a/hw/rdma/rdma_utils.c b/hw/rdma/rdma_utils.c index 4fbea8cde2..73f279104c 100644 --- a/hw/rdma/rdma_utils.c +++ b/hw/rdma/rdma_utils.c @@ -14,26 +14,25 @@ */ #include "qemu/osdep.h" +#include "qapi/qmp/qlist.h" +#include "qapi/qmp/qnum.h" +#include "trace.h" #include "rdma_utils.h" -#ifdef PVRDMA_DEBUG -unsigned long pr_dbg_cnt; -#endif - void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen) { void *p; hwaddr len = plen; if (!addr) { - pr_dbg("addr is NULL\n"); + rdma_error_report("addr is NULL"); return NULL; } p = pci_dma_map(dev, addr, &len, DMA_DIRECTION_TO_DEVICE); if (!p) { - pr_dbg("Fail in pci_dma_map, addr=0x%" PRIx64 ", len=%" PRId64 "\n", - addr, len); + rdma_error_report("pci_dma_map fail, addr=0x%"PRIx64", 
len=%"PRId64, + addr, len); return NULL; } @@ -42,15 +41,81 @@ void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen) return NULL; } - pr_dbg("0x%" PRIx64 " -> %p (len=% " PRId64 ")\n", addr, p, len); + trace_rdma_pci_dma_map(addr, p, len); return p; } void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len) { - pr_dbg("%p\n", buffer); + trace_rdma_pci_dma_unmap(buffer); if (buffer) { pci_dma_unmap(dev, buffer, len, DMA_DIRECTION_TO_DEVICE, 0); } } + +void rdma_protected_qlist_init(RdmaProtectedQList *list) +{ + qemu_mutex_init(&list->lock); + list->list = qlist_new(); +} + +void rdma_protected_qlist_destroy(RdmaProtectedQList *list) +{ + if (list->list) { + qlist_destroy_obj(QOBJECT(list->list)); + qemu_mutex_destroy(&list->lock); + list->list = NULL; + } +} + +void rdma_protected_qlist_append_int64(RdmaProtectedQList *list, int64_t value) +{ + qemu_mutex_lock(&list->lock); + qlist_append_int(list->list, value); + qemu_mutex_unlock(&list->lock); +} + +int64_t rdma_protected_qlist_pop_int64(RdmaProtectedQList *list) +{ + QObject *obj; + + qemu_mutex_lock(&list->lock); + obj = qlist_pop(list->list); + qemu_mutex_unlock(&list->lock); + + if (!obj) { + return -ENOENT; + } + + return qnum_get_uint(qobject_to(QNum, obj)); +} + +void rdma_protected_gslist_init(RdmaProtectedGSList *list) +{ + qemu_mutex_init(&list->lock); +} + +void rdma_protected_gslist_destroy(RdmaProtectedGSList *list) +{ + if (list->list) { + g_slist_free(list->list); + list->list = NULL; + } +} + +void rdma_protected_gslist_append_int32(RdmaProtectedGSList *list, + int32_t value) +{ + qemu_mutex_lock(&list->lock); + list->list = g_slist_prepend(list->list, GINT_TO_POINTER(value)); + qemu_mutex_unlock(&list->lock); +} + +void rdma_protected_gslist_remove_int32(RdmaProtectedGSList *list, + int32_t value) +{ + qemu_mutex_lock(&list->lock); + list->list = g_slist_remove(list->list, GINT_TO_POINTER(value)); + qemu_mutex_unlock(&list->lock); +} diff --git a/hw/rdma/rdma_utils.h b/hw/rdma/rdma_utils.h index 4490ea0b94..2d42249691 100644 --- a/hw/rdma/rdma_utils.h +++ b/hw/rdma/rdma_utils.h @@ -17,51 +17,40 @@ #ifndef RDMA_UTILS_H #define RDMA_UTILS_H +#include "qemu/error-report.h" #include "hw/pci/pci.h" #include "sysemu/dma.h" #include "stdio.h" -#define pr_info(fmt, ...) \ - fprintf(stdout, "%s: %-20s (%3d): " fmt, "rdma", __func__, __LINE__,\ - ## __VA_ARGS__) +#define rdma_error_report(fmt, ...) \ + error_report("%s: " fmt, "rdma", ## __VA_ARGS__) +#define rdma_warn_report(fmt, ...) \ + warn_report("%s: " fmt, "rdma", ## __VA_ARGS__) +#define rdma_info_report(fmt, ...) \ + info_report("%s: " fmt, "rdma", ## __VA_ARGS__) -#define pr_err(fmt, ...) \ - fprintf(stderr, "%s: Error at %-20s (%3d): " fmt, "rdma", __func__, \ - __LINE__, ## __VA_ARGS__) +typedef struct RdmaProtectedQList { + QemuMutex lock; + QList *list; +} RdmaProtectedQList; -#ifdef PVRDMA_DEBUG -extern unsigned long pr_dbg_cnt; - -#define init_pr_dbg(void) \ -{ \ - pr_dbg_cnt = 0; \ -} - -#define pr_dbg(fmt, ...) \ - fprintf(stdout, "%lx %ld: %-20s (%3d): " fmt, pthread_self(), pr_dbg_cnt++, \ - __func__, __LINE__, ## __VA_ARGS__) - -#define pr_dbg_buf(title, buf, len) \ -{ \ - int i; \ - char *b = g_malloc0(len * 3 + 1); \ - char b1[4]; \ - for (i = 0; i < len; i++) { \ - sprintf(b1, "%.2X ", buf[i] & 0x000000FF); \ - strcat(b, b1); \ - } \ - pr_dbg("%s (%d): %s\n", title, len, b); \ - g_free(b); \ -} - -#else -#define init_pr_dbg(void) -#define pr_dbg(fmt, ...) 
-#define pr_dbg_buf(title, buf, len) -#endif +typedef struct RdmaProtectedGSList { + QemuMutex lock; + GSList *list; +} RdmaProtectedGSList; void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen); void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len); +void rdma_protected_qlist_init(RdmaProtectedQList *list); +void rdma_protected_qlist_destroy(RdmaProtectedQList *list); +void rdma_protected_qlist_append_int64(RdmaProtectedQList *list, int64_t value); +int64_t rdma_protected_qlist_pop_int64(RdmaProtectedQList *list); +void rdma_protected_gslist_init(RdmaProtectedGSList *list); +void rdma_protected_gslist_destroy(RdmaProtectedGSList *list); +void rdma_protected_gslist_append_int32(RdmaProtectedGSList *list, + int32_t value); +void rdma_protected_gslist_remove_int32(RdmaProtectedGSList *list, + int32_t value); static inline void addrconf_addr_eui48(uint8_t *eui, const char *addr) { diff --git a/hw/rdma/trace-events b/hw/rdma/trace-events index c4c202e647..2022a820cb 100644 --- a/hw/rdma/trace-events +++ b/hw/rdma/trace-events @@ -1,5 +1,31 @@ -# See docs/tracing.txt for syntax documentation. +# See docs/devel/tracing.txt for syntax documentation. -#hw/rdma/rdma_backend.c -create_ah_cache_hit(uint64_t subnet, uint64_t net_id) "subnet = 0x%"PRIx64" net_id = 0x%"PRIx64 -create_ah_cache_miss(uint64_t subnet, uint64_t net_id) "subnet = 0x%"PRIx64" net_id = 0x%"PRIx64 +# rdma_backend.c +rdma_check_dev_attr(const char *name, int max_bk, int max_fe) "%s: be=%d, fe=%d" +rdma_create_ah_cache_hit(uint64_t subnet, uint64_t if_id) "subnet=0x%"PRIx64",if_id=0x%"PRIx64 +rdma_create_ah_cache_miss(uint64_t subnet, uint64_t if_id) "subnet=0x%"PRIx64",if_id=0x%"PRIx64 +rdma_poll_cq(int ne, void *ibcq) "Got %d completion(s) from cq %p" +rdmacm_mux(const char *title, int msg_type, int op_code) "%s: msg_type=%d, op_code=%d" +rdmacm_mux_check_op_status(int msg_type, int op_code, int err_code) "resp: msg_type=%d, op_code=%d, err_code=%d" +rdma_mad_message(const char *title, int len, char *data) "mad %s (%d): %s" +rdma_backend_rc_qp_state_init(uint32_t qpn) "RC QP 0x%x switch to INIT" +rdma_backend_ud_qp_state_init(uint32_t qpn, uint32_t qkey) "UD QP 0x%x switch to INIT, qkey=0x%x" +rdma_backend_rc_qp_state_rtr(uint32_t qpn, uint64_t subnet, uint64_t ifid, uint8_t sgid_idx, uint32_t dqpn, uint32_t rq_psn) "RC QP 0x%x switch to RTR, subnet = 0x%"PRIx64", ifid = 0x%"PRIx64 ", sgid_idx=%d, dqpn=0x%x, rq_psn=0x%x" +rdma_backend_ud_qp_state_rtr(uint32_t qpn, uint32_t qkey) "UD QP 0x%x switch to RTR, qkey=0x%x" +rdma_backend_rc_qp_state_rts(uint32_t qpn, uint32_t sq_psn) "RC QP 0x%x switch to RTS, sq_psn=0x%x, " +rdma_backend_ud_qp_state_rts(uint32_t qpn, uint32_t sq_psn, uint32_t qkey) "UD QP 0x%x switch to RTS, sq_psn=0x%x, qkey=0x%x" +rdma_backend_get_gid_index(uint64_t subnet, uint64_t ifid, int gid_idx) "subnet=0x%"PRIx64", ifid=0x%"PRIx64 ", gid_idx=%d" +rdma_backend_gid_change(const char *op, uint64_t subnet, uint64_t ifid) "%s subnet=0x%"PRIx64", ifid=0x%"PRIx64 + +# rdma_rm.c +rdma_res_tbl_get(char *name, uint32_t handle) "tbl %s, handle %d" +rdma_res_tbl_alloc(char *name, uint32_t handle) "tbl %s, handle %d" +rdma_res_tbl_dealloc(char *name, uint32_t handle) "tbl %s, handle %d" +rdma_rm_alloc_mr(uint32_t mr_handle, void *host_virt, uint64_t guest_start, uint64_t guest_length, int access_flags) "mr_handle=%d, host_virt=%p, guest_start=0x%"PRIx64", length=%" PRId64", access_flags=0x%x" +rdma_rm_dealloc_mr(uint32_t mr_handle, uint64_t guest_start) "mr_handle=%d, 
guest_start=0x%"PRIx64 +rdma_rm_alloc_qp(uint32_t rm_qpn, uint32_t backend_qpn, uint8_t qp_type) "rm_qpn=%d, backend_qpn=0x%x, qp_type=%d" +rdma_rm_modify_qp(uint32_t qpn, uint32_t attr_mask, int qp_state, uint8_t sgid_idx) "qpn=0x%x, attr_mask=0x%x, qp_state=%d, sgid_idx=%d" + +# rdma_utils.c +rdma_pci_dma_map(uint64_t addr, void *vaddr, uint64_t len) "0x%"PRIx64" -> %p (len=%" PRId64")" +rdma_pci_dma_unmap(void *vaddr) "%p" diff --git a/hw/rdma/vmw/pvrdma.h b/hw/rdma/vmw/pvrdma.h index ffae36986e..a8a04a253c 100644 --- a/hw/rdma/vmw/pvrdma.h +++ b/hw/rdma/vmw/pvrdma.h @@ -70,6 +70,14 @@ typedef struct DSRInfo { PvrdmaRing cq; } DSRInfo; +typedef struct PVRDMADevStats { + uint64_t commands; + uint64_t regs_reads; + uint64_t regs_writes; + uint64_t uar_writes; + uint64_t interrupts; +} PVRDMADevStats; + typedef struct PVRDMADev { PCIDevice parent_obj; MemoryRegion msix; @@ -89,6 +97,7 @@ typedef struct PVRDMADev { CharBackend mad_chr; VMXNET3State *func0; Notifier shutdown_notifier; + PVRDMADevStats stats; } PVRDMADev; #define PVRDMA_DEV(dev) OBJECT_CHECK(PVRDMADev, (dev), PVRDMA_HW_NAME) @@ -123,10 +132,11 @@ static inline void post_interrupt(PVRDMADev *dev, unsigned vector) PCIDevice *pci_dev = PCI_DEVICE(dev); if (likely(!dev->interrupt_mask)) { + dev->stats.interrupts++; msix_notify(pci_dev, vector); } } -int execute_command(PVRDMADev *dev); +int pvrdma_exec_cmd(PVRDMADev *dev); #endif diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c index 89920887bf..4afcd2037d 100644 --- a/hw/rdma/vmw/pvrdma_cmd.c +++ b/hw/rdma/vmw/pvrdma_cmd.c @@ -14,7 +14,6 @@ */ #include "qemu/osdep.h" -#include "qemu/error-report.h" #include "cpu.h" #include "hw/hw.h" #include "hw/pci/pci.h" @@ -24,6 +23,7 @@ #include "../rdma_rm.h" #include "../rdma_utils.h" +#include "trace.h" #include "pvrdma.h" #include "standard-headers/rdma/vmw_pvrdma-abi.h" @@ -35,40 +35,38 @@ static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma, void *host_virt = NULL, *curr_page; if (!nchunks) { - pr_dbg("nchunks=0\n"); + rdma_error_report("Got nchunks=0"); return NULL; } dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE); if (!dir) { - error_report("PVRDMA: Failed to map to page directory"); + rdma_error_report("Failed to map to page directory"); return NULL; } tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE); if (!tbl) { - error_report("PVRDMA: Failed to map to page table 0"); + rdma_error_report("Failed to map to page table 0"); goto out_unmap_dir; } curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE); if (!curr_page) { - error_report("PVRDMA: Failed to map the first page"); + rdma_error_report("Failed to map the page 0"); goto out_unmap_tbl; } host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE); - pr_dbg("mremap %p -> %p\n", curr_page, host_virt); if (host_virt == MAP_FAILED) { host_virt = NULL; - error_report("PVRDMA: Failed to remap memory for host_virt"); + rdma_error_report("Failed to remap memory for host_virt"); goto out_unmap_tbl; } + trace_pvrdma_map_to_pdir_host_virt(curr_page, host_virt); rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE); - pr_dbg("host_virt=%p\n", host_virt); - dir_idx = 0; tbl_idx = 1; addr_idx = 1; @@ -76,28 +74,28 @@ static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma, if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) { tbl_idx = 0; dir_idx++; - pr_dbg("Mapping to table %d\n", dir_idx); rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE); tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE); if (!tbl) { - 
error_report("PVRDMA: Failed to map to page table %d", dir_idx); + rdma_error_report("Failed to map to page table %d", dir_idx); goto out_unmap_host_virt; } } - pr_dbg("guest_dma[%d]=0x%" PRIx64 "\n", addr_idx, tbl[tbl_idx]); - curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx], TARGET_PAGE_SIZE); if (!curr_page) { - error_report("PVRDMA: Failed to map to page %d, dir %d", tbl_idx, - dir_idx); + rdma_error_report("Failed to map to page %d, dir %d", tbl_idx, + dir_idx); goto out_unmap_host_virt; } mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED, host_virt + TARGET_PAGE_SIZE * addr_idx); + trace_pvrdma_map_to_pdir_next_page(addr_idx, curr_page, host_virt + + TARGET_PAGE_SIZE * addr_idx); + rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE); addr_idx++; @@ -125,9 +123,8 @@ static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req, { struct pvrdma_cmd_query_port *cmd = &req->query_port; struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp; - struct pvrdma_port_attr attrs = {0}; + struct pvrdma_port_attr attrs = {}; - pr_dbg("port=%d\n", cmd->port_num); if (cmd->port_num > MAX_PORTS) { return -EINVAL; } @@ -159,12 +156,10 @@ static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req, struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey; struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp; - pr_dbg("port=%d\n", cmd->port_num); if (cmd->port_num > MAX_PORTS) { return -EINVAL; } - pr_dbg("index=%d\n", cmd->index); if (cmd->index > MAX_PKEYS) { return -EINVAL; } @@ -172,7 +167,6 @@ static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req, memset(resp, 0, sizeof(*resp)); resp->pkey = PVRDMA_PKEY; - pr_dbg("pkey=0x%x\n", resp->pkey); return 0; } @@ -184,8 +178,6 @@ static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req, struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp; int rc; - pr_dbg("context=0x%x\n", cmd->ctx_handle ? 
cmd->ctx_handle : 0); - memset(resp, 0, sizeof(*resp)); rc = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev, &resp->pd_handle, cmd->ctx_handle); @@ -198,8 +190,6 @@ static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req, { struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd; - pr_dbg("pd_handle=%d\n", cmd->pd_handle); - rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle); return 0; @@ -216,15 +206,11 @@ static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req, memset(resp, 0, sizeof(*resp)); - pr_dbg("pd_handle=%d\n", cmd->pd_handle); - pr_dbg("access_flags=0x%x\n", cmd->access_flags); - pr_dbg("flags=0x%x\n", cmd->flags); - if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) { host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks, cmd->length); if (!host_virt) { - pr_dbg("Failed to map to pdir\n"); + rdma_error_report("Failed to map to pdir"); return -EINVAL; } } @@ -244,8 +230,6 @@ static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req, { struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr; - pr_dbg("mr_handle=%d\n", cmd->mr_handle); - rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle); return 0; @@ -260,20 +244,19 @@ static int create_cq_ring(PCIDevice *pci_dev , PvrdmaRing **ring, char ring_name[MAX_RING_NAME_SZ]; if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) { - pr_dbg("invalid nchunks: %d\n", nchunks); + rdma_error_report("Got invalid nchunks: %d", nchunks); return rc; } - pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma); dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE); if (!dir) { - pr_dbg("Failed to map to CQ page directory\n"); + rdma_error_report("Failed to map to CQ page directory"); goto out; } tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE); if (!tbl) { - pr_dbg("Failed to map to CQ page table\n"); + rdma_error_report("Failed to map to CQ page table"); goto out; } @@ -284,7 +267,7 @@ static int create_cq_ring(PCIDevice *pci_dev , PvrdmaRing **ring, rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); if (!r->ring_state) { - pr_dbg("Failed to map to CQ ring state\n"); + rdma_error_report("Failed to map to CQ ring state"); goto out_free_ring; } @@ -339,8 +322,6 @@ static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req, return rc; } - pr_dbg("ring=%p\n", ring); - rc = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev, cmd->cqe, &resp->cq_handle, ring); if (rc) { @@ -359,11 +340,9 @@ static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req, RdmaRmCQ *cq; PvrdmaRing *ring; - pr_dbg("cq_handle=%d\n", cmd->cq_handle); - cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle); if (!cq) { - pr_dbg("Invalid CQ handle\n"); + rdma_error_report("Got invalid CQ handle"); return -EINVAL; } @@ -388,42 +367,33 @@ static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma, if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES || !rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES) { - pr_dbg("invalid pages: %d, %d\n", spages, rpages); + rdma_error_report("Got invalid page count for QP ring: %d, %d", spages, + rpages); return rc; } - pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma); dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE); if (!dir) { - pr_dbg("Failed to map to CQ page directory\n"); + rdma_error_report("Failed to map to QP page directory"); goto out; } tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE); if (!tbl) { - pr_dbg("Failed to map to CQ page table\n"); + rdma_error_report("Failed to map to QP page table"); goto out; } sr = g_malloc(2 * 
sizeof(*rr)); rr = &sr[1]; - pr_dbg("sring=%p\n", sr); - pr_dbg("rring=%p\n", rr); *rings = sr; - pr_dbg("scqe=%d\n", scqe); - pr_dbg("smax_sge=%d\n", smax_sge); - pr_dbg("spages=%d\n", spages); - pr_dbg("rcqe=%d\n", rcqe); - pr_dbg("rmax_sge=%d\n", rmax_sge); - pr_dbg("rpages=%d\n", rpages); - /* Create send ring */ sr->ring_state = (struct pvrdma_ring *) rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); if (!sr->ring_state) { - pr_dbg("Failed to map to CQ ring state\n"); + rdma_error_report("Failed to map to QP ring state"); goto out_free_sr_mem; } @@ -468,9 +438,7 @@ out: static void destroy_qp_rings(PvrdmaRing *ring) { - pr_dbg("sring=%p\n", &ring[0]); pvrdma_ring_free(&ring[0]); - pr_dbg("rring=%p\n", &ring[1]); pvrdma_ring_free(&ring[1]); rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE); @@ -487,9 +455,6 @@ static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, memset(resp, 0, sizeof(*resp)); - pr_dbg("total_chunks=%d\n", cmd->total_chunks); - pr_dbg("send_chunks=%d\n", cmd->send_chunks); - rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings, cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks, cmd->max_recv_wr, cmd->max_recv_sge, @@ -498,8 +463,6 @@ static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, return rc; } - pr_dbg("rings=%p\n", rings); - rc = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle, cmd->qp_type, cmd->max_send_wr, cmd->max_send_sge, cmd->send_cq_handle, cmd->max_recv_wr, @@ -525,10 +488,6 @@ static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp; int rc; - pr_dbg("qp_handle=%d\n", cmd->qp_handle); - - memset(rsp, 0, sizeof(*rsp)); - /* No need to verify sgid_index since it is u8 */ rc = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev, @@ -551,10 +510,7 @@ static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, struct ibv_qp_init_attr init_attr; int rc; - pr_dbg("qp_handle=%d\n", cmd->qp_handle); - pr_dbg("attr_mask=0x%x\n", cmd->attr_mask); - - memset(rsp, 0, sizeof(*rsp)); + memset(resp, 0, sizeof(*resp)); rc = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle, (struct ibv_qp_attr *)&resp->attrs, cmd->attr_mask, @@ -572,7 +528,6 @@ static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle); if (!qp) { - pr_dbg("Invalid QP handle\n"); return -EINVAL; } @@ -591,16 +546,10 @@ static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req, int rc; union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid; - pr_dbg("index=%d\n", cmd->index); - if (cmd->index >= MAX_PORT_GIDS) { return -EINVAL; } - pr_dbg("gid[%d]=0x%llx,0x%llx\n", cmd->index, - (long long unsigned int)be64_to_cpu(gid->global.subnet_prefix), - (long long unsigned int)be64_to_cpu(gid->global.interface_id)); - rc = rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev, dev->backend_eth_device_name, gid, cmd->index); @@ -614,8 +563,6 @@ static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req, struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind; - pr_dbg("index=%d\n", cmd->index); - if (cmd->index >= MAX_PORT_GIDS) { return -EINVAL; } @@ -633,8 +580,6 @@ static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req, struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp; int rc; - pr_dbg("pfn=%d\n", cmd->pfn); - memset(resp, 0, sizeof(*resp)); rc = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle); @@ -646,8 +591,6 @@ static int destroy_uc(PVRDMADev *dev, union 
pvrdma_cmd_req *req, { struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc; - pr_dbg("ctx_handle=%d\n", cmd->ctx_handle); - rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle); return 0; @@ -680,22 +623,21 @@ static struct cmd_handler cmd_handlers[] = { {PVRDMA_CMD_DESTROY_BIND, PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, destroy_bind}, }; -int execute_command(PVRDMADev *dev) +int pvrdma_exec_cmd(PVRDMADev *dev) { int err = 0xFFFF; DSRInfo *dsr_info; dsr_info = &dev->dsr_info; - pr_dbg("cmd=%d\n", dsr_info->req->hdr.cmd); if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) / sizeof(struct cmd_handler)) { - pr_dbg("Unsupported command\n"); + rdma_error_report("Unsupported command"); goto out; } if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) { - pr_dbg("Unsupported command (not implemented yet)\n"); + rdma_error_report("Unsupported command (not implemented yet)"); goto out; } @@ -704,7 +646,10 @@ int execute_command(PVRDMADev *dev) dsr_info->rsp->hdr.response = dsr_info->req->hdr.response; dsr_info->rsp->hdr.ack = cmd_handlers[dsr_info->req->hdr.cmd].ack; dsr_info->rsp->hdr.err = err < 0 ? -err : 0; - pr_dbg("rsp->hdr.err=%d\n", dsr_info->rsp->hdr.err); + + trace_pvrdma_exec_cmd(dsr_info->req->hdr.cmd, dsr_info->rsp->hdr.err); + + dev->stats.commands++; out: set_reg_val(dev, PVRDMA_REG_ERR, err); diff --git a/hw/rdma/vmw/pvrdma_dev_ring.c b/hw/rdma/vmw/pvrdma_dev_ring.c index e8e5b502f6..d7bc7f5ccc 100644 --- a/hw/rdma/vmw/pvrdma_dev_ring.c +++ b/hw/rdma/vmw/pvrdma_dev_ring.c @@ -17,6 +17,8 @@ #include "hw/pci/pci.h" #include "cpu.h" +#include "trace.h" + #include "../rdma_utils.h" #include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h" #include "pvrdma_dev_ring.h" @@ -30,13 +32,10 @@ int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev, strncpy(ring->name, name, MAX_RING_NAME_SZ); ring->name[MAX_RING_NAME_SZ - 1] = 0; - pr_dbg("Initializing %s ring\n", ring->name); ring->dev = dev; ring->ring_state = ring_state; ring->max_elems = max_elems; ring->elem_sz = elem_sz; - pr_dbg("ring->elem_sz=%zu\n", ring->elem_sz); - pr_dbg("npages=%d\n", npages); /* TODO: Give a moment to think if we want to redo driver settings atomic_set(&ring->ring_state->prod_tail, 0); atomic_set(&ring->ring_state->cons_head, 0); @@ -46,14 +45,14 @@ int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev, for (i = 0; i < npages; i++) { if (!tbl[i]) { - pr_err("npages=%ld but tbl[%d] is NULL\n", (long)npages, i); + rdma_error_report("npages=%d but tbl[%d] is NULL", npages, i); continue; } ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE); if (!ring->pages[i]) { rc = -ENOMEM; - pr_dbg("Failed to map to page %d\n", i); + rdma_error_report("Failed to map to page %d in ring %s", i, name); goto out_free; } memset(ring->pages[i], 0, TARGET_PAGE_SIZE); @@ -78,7 +77,7 @@ void *pvrdma_ring_next_elem_read(PvrdmaRing *ring) e = pvrdma_idx_ring_has_data(ring->ring_state, ring->max_elems, &idx); if (e <= 0) { - pr_dbg("No more data in ring\n"); + trace_pvrdma_ring_next_elem_read_no_data(ring->name); return NULL; } @@ -89,11 +88,6 @@ void *pvrdma_ring_next_elem_read(PvrdmaRing *ring) void pvrdma_ring_read_inc(PvrdmaRing *ring) { pvrdma_idx_ring_inc(&ring->ring_state->cons_head, ring->max_elems); - /* - pr_dbg("%s: t=%d, h=%d, m=%ld\n", ring->name, - ring->ring_state->prod_tail, ring->ring_state->cons_head, - ring->max_elems); - */ } void *pvrdma_ring_next_elem_write(PvrdmaRing *ring) @@ -103,13 +97,13 @@ void *pvrdma_ring_next_elem_write(PvrdmaRing *ring) idx = 
pvrdma_idx_ring_has_space(ring->ring_state, ring->max_elems, &tail); if (idx <= 0) { - pr_dbg("CQ is full\n"); + rdma_error_report("CQ is full"); return NULL; } idx = pvrdma_idx(&ring->ring_state->prod_tail, ring->max_elems); if (idx < 0 || tail != idx) { - pr_dbg("invalid idx\n"); + rdma_error_report("Invalid idx %d", idx); return NULL; } @@ -120,11 +114,6 @@ void *pvrdma_ring_next_elem_write(PvrdmaRing *ring) void pvrdma_ring_write_inc(PvrdmaRing *ring) { pvrdma_idx_ring_inc(&ring->ring_state->prod_tail, ring->max_elems); - /* - pr_dbg("%s: t=%d, h=%d, m=%ld\n", ring->name, - ring->ring_state->prod_tail, ring->ring_state->cons_head, - ring->max_elems); - */ } void pvrdma_ring_free(PvrdmaRing *ring) @@ -137,7 +126,6 @@ void pvrdma_ring_free(PvrdmaRing *ring) return; } - pr_dbg("ring->npages=%d\n", ring->npages); while (ring->npages--) { rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages], TARGET_PAGE_SIZE); diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c index d2bdb5ba8c..0b46561bad 100644 --- a/hw/rdma/vmw/pvrdma_main.c +++ b/hw/rdma/vmw/pvrdma_main.c @@ -25,6 +25,8 @@ #include "cpu.h" #include "trace.h" #include "sysemu/sysemu.h" +#include "monitor/monitor.h" +#include "hw/rdma/rdma.h" #include "../rdma_rm.h" #include "../rdma_backend.h" @@ -55,6 +57,26 @@ static Property pvrdma_dev_properties[] = { DEFINE_PROP_END_OF_LIST(), }; +static void pvrdma_print_statistics(Monitor *mon, RdmaProvider *obj) +{ + PVRDMADev *dev = PVRDMA_DEV(obj); + PCIDevice *pdev = PCI_DEVICE(dev); + + monitor_printf(mon, "%s, %x.%x\n", pdev->name, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + monitor_printf(mon, "\tcommands : %" PRId64 "\n", + dev->stats.commands); + monitor_printf(mon, "\tregs_reads : %" PRId64 "\n", + dev->stats.regs_reads); + monitor_printf(mon, "\tregs_writes : %" PRId64 "\n", + dev->stats.regs_writes); + monitor_printf(mon, "\tuar_writes : %" PRId64 "\n", + dev->stats.uar_writes); + monitor_printf(mon, "\tinterrupts : %" PRId64 "\n", + dev->stats.interrupts); + rdma_dump_device_counters(mon, &dev->rdma_dev_res); +} + static void free_dev_ring(PCIDevice *pci_dev, PvrdmaRing *ring, void *ring_state) { @@ -69,25 +91,22 @@ static int init_dev_ring(PvrdmaRing *ring, struct pvrdma_ring **ring_state, uint64_t *dir, *tbl; int rc = 0; - pr_dbg("Initializing device ring %s\n", name); - pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)dir_addr); - pr_dbg("num_pages=%d\n", num_pages); dir = rdma_pci_dma_map(pci_dev, dir_addr, TARGET_PAGE_SIZE); if (!dir) { - pr_err("Failed to map to page directory\n"); + rdma_error_report("Failed to map to page directory (ring %s)", name); rc = -ENOMEM; goto out; } tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE); if (!tbl) { - pr_err("Failed to map to page table\n"); + rdma_error_report("Failed to map to page table (ring %s)", name); rc = -ENOMEM; goto out_free_dir; } *ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); if (!*ring_state) { - pr_err("Failed to map to ring state\n"); + rdma_error_report("Failed to map to ring state (ring %s)", name); rc = -ENOMEM; goto out_free_tbl; } @@ -100,7 +119,6 @@ static int init_dev_ring(PvrdmaRing *ring, struct pvrdma_ring **ring_state, sizeof(struct pvrdma_cqne), (dma_addr_t *)&tbl[1], (dma_addr_t)num_pages - 1); if (rc) { - pr_err("Failed to initialize ring\n"); rc = -ENOMEM; goto out_free_ring_state; } @@ -155,11 +173,10 @@ static int load_dsr(PVRDMADev *dev) free_dsr(dev); /* Map to DSR */ - pr_dbg("dsr_dma=0x%llx\n", (long long unsigned int)dev->dsr_info.dma); 
dev->dsr_info.dsr = rdma_pci_dma_map(pci_dev, dev->dsr_info.dma, sizeof(struct pvrdma_device_shared_region)); if (!dev->dsr_info.dsr) { - pr_err("Failed to map to DSR\n"); + rdma_error_report("Failed to map to DSR"); rc = -ENOMEM; goto out; } @@ -169,21 +186,19 @@ static int load_dsr(PVRDMADev *dev) dsr = dsr_info->dsr; /* Map to command slot */ - pr_dbg("cmd_dma=0x%llx\n", (long long unsigned int)dsr->cmd_slot_dma); dsr_info->req = rdma_pci_dma_map(pci_dev, dsr->cmd_slot_dma, sizeof(union pvrdma_cmd_req)); if (!dsr_info->req) { - pr_err("Failed to map to command slot address\n"); + rdma_error_report("Failed to map to command slot address"); rc = -ENOMEM; goto out_free_dsr; } /* Map to response slot */ - pr_dbg("rsp_dma=0x%llx\n", (long long unsigned int)dsr->resp_slot_dma); dsr_info->rsp = rdma_pci_dma_map(pci_dev, dsr->resp_slot_dma, sizeof(union pvrdma_cmd_resp)); if (!dsr_info->rsp) { - pr_err("Failed to map to response slot address\n"); + rdma_error_report("Failed to map to response slot address"); rc = -ENOMEM; goto out_free_req; } @@ -193,7 +208,6 @@ static int load_dsr(PVRDMADev *dev) pci_dev, dsr->cq_ring_pages.pdir_dma, dsr->cq_ring_pages.num_pages); if (rc) { - pr_err("Failed to map to initialize CQ ring\n"); rc = -ENOMEM; goto out_free_rsp; } @@ -203,7 +217,6 @@ static int load_dsr(PVRDMADev *dev) "dev_async", pci_dev, dsr->async_ring_pages.pdir_dma, dsr->async_ring_pages.num_pages); if (rc) { - pr_err("Failed to map to initialize event ring\n"); rc = -ENOMEM; goto out_free_rsp; } @@ -230,24 +243,15 @@ static void init_dsr_dev_caps(PVRDMADev *dev) struct pvrdma_device_shared_region *dsr; if (dev->dsr_info.dsr == NULL) { - pr_err("Can't initialized DSR\n"); + rdma_error_report("Can't initialized DSR"); return; } dsr = dev->dsr_info.dsr; - dsr->caps.fw_ver = PVRDMA_FW_VERSION; - pr_dbg("fw_ver=0x%" PRIx64 "\n", dsr->caps.fw_ver); - dsr->caps.mode = PVRDMA_DEVICE_MODE_ROCE; - pr_dbg("mode=%d\n", dsr->caps.mode); - dsr->caps.gid_types |= PVRDMA_GID_TYPE_FLAG_ROCE_V1; - pr_dbg("gid_types=0x%x\n", dsr->caps.gid_types); - dsr->caps.max_uar = RDMA_BAR2_UAR_SIZE; - pr_dbg("max_uar=%d\n", dsr->caps.max_uar); - dsr->caps.max_mr_size = dev->dev_attr.max_mr_size; dsr->caps.max_qp = dev->dev_attr.max_qp; dsr->caps.max_qp_wr = dev->dev_attr.max_qp_wr; @@ -257,23 +261,11 @@ static void init_dsr_dev_caps(PVRDMADev *dev) dsr->caps.max_mr = dev->dev_attr.max_mr; dsr->caps.max_pd = dev->dev_attr.max_pd; dsr->caps.max_ah = dev->dev_attr.max_ah; - dsr->caps.gid_tbl_len = MAX_GIDS; - pr_dbg("gid_tbl_len=%d\n", dsr->caps.gid_tbl_len); - dsr->caps.sys_image_guid = 0; - pr_dbg("sys_image_guid=%" PRIx64 "\n", dsr->caps.sys_image_guid); - dsr->caps.node_guid = dev->node_guid; - pr_dbg("node_guid=%" PRIx64 "\n", be64_to_cpu(dsr->caps.node_guid)); - dsr->caps.phys_port_cnt = MAX_PORTS; - pr_dbg("phys_port_cnt=%d\n", dsr->caps.phys_port_cnt); - dsr->caps.max_pkeys = MAX_PKEYS; - pr_dbg("max_pkeys=%d\n", dsr->caps.max_pkeys); - - pr_dbg("Initialized\n"); } static void uninit_msix(PCIDevice *pdev, int used_vectors) @@ -288,7 +280,7 @@ static void uninit_msix(PCIDevice *pdev, int used_vectors) msix_uninit(pdev, &dev->msix, &dev->msix); } -static int init_msix(PCIDevice *pdev, Error **errp) +static int init_msix(PCIDevice *pdev) { PVRDMADev *dev = PVRDMA_DEV(pdev); int i; @@ -299,14 +291,14 @@ static int init_msix(PCIDevice *pdev, Error **errp) RDMA_MSIX_PBA, 0, NULL); if (rc < 0) { - error_setg(errp, "Failed to initialize MSI-X"); + rdma_error_report("Failed to initialize MSI-X"); return rc; } for (i = 0; i < 
RDMA_MAX_INTRS; i++) { rc = msix_vector_use(PCI_DEVICE(dev), i); if (rc < 0) { - error_setg(errp, "Fail mark MSI-X vector %d", i); + rdma_error_report("Fail mark MSI-X vector %d", i); uninit_msix(pdev, i); return rc; } @@ -319,11 +311,12 @@ static void pvrdma_fini(PCIDevice *pdev) { PVRDMADev *dev = PVRDMA_DEV(pdev); - pr_dbg("Closing device %s %x.%x\n", pdev->name, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn)); + notifier_remove(&dev->shutdown_notifier); pvrdma_qp_ops_fini(); + rdma_backend_stop(&dev->backend_dev); + rdma_rm_fini(&dev->rdma_dev_res, &dev->backend_dev, dev->backend_eth_device_name); @@ -335,8 +328,8 @@ static void pvrdma_fini(PCIDevice *pdev) uninit_msix(pdev, RDMA_MAX_INTRS); } - pr_dbg("Device %s %x.%x is down\n", pdev->name, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn)); + rdma_info_report("Device %s %x.%x is down", pdev->name, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); } static void pvrdma_stop(PVRDMADev *dev) @@ -353,32 +346,28 @@ static void activate_device(PVRDMADev *dev) { pvrdma_start(dev); set_reg_val(dev, PVRDMA_REG_ERR, 0); - pr_dbg("Device activated\n"); } static int unquiesce_device(PVRDMADev *dev) { - pr_dbg("Device unquiesced\n"); return 0; } static void reset_device(PVRDMADev *dev) { pvrdma_stop(dev); - - pr_dbg("Device reset complete\n"); } -static uint64_t regs_read(void *opaque, hwaddr addr, unsigned size) +static uint64_t pvrdma_regs_read(void *opaque, hwaddr addr, unsigned size) { PVRDMADev *dev = opaque; uint32_t val; - /* pr_dbg("addr=0x%lx, size=%d\n", addr, size); */ + dev->stats.regs_reads++; if (get_reg_val(dev, addr, &val)) { - pr_dbg("Error trying to read REG value from address 0x%x\n", - (uint32_t)addr); + rdma_error_report("Failed to read REG value from address 0x%x", + (uint32_t)addr); return -EINVAL; } @@ -387,25 +376,26 @@ static uint64_t regs_read(void *opaque, hwaddr addr, unsigned size) return val; } -static void regs_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +static void pvrdma_regs_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) { PVRDMADev *dev = opaque; - /* pr_dbg("addr=0x%lx, val=0x%x, size=%d\n", addr, (uint32_t)val, size); */ + dev->stats.regs_writes++; if (set_reg_val(dev, addr, val)) { - pr_err("Fail to set REG value, addr=0x%" PRIx64 ", val=0x%" PRIx64 "\n", - addr, val); + rdma_error_report("Failed to set REG value, addr=0x%"PRIx64 ", val=0x%"PRIx64, + addr, val); return; } - trace_pvrdma_regs_write(addr, val); - switch (addr) { case PVRDMA_REG_DSRLOW: + trace_pvrdma_regs_write(addr, val, "DSRLOW", ""); dev->dsr_info.dma = val; break; case PVRDMA_REG_DSRHIGH: + trace_pvrdma_regs_write(addr, val, "DSRHIGH", ""); dev->dsr_info.dma |= val << 32; load_dsr(dev); init_dsr_dev_caps(dev); @@ -413,23 +403,27 @@ static void regs_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) case PVRDMA_REG_CTL: switch (val) { case PVRDMA_DEVICE_CTL_ACTIVATE: + trace_pvrdma_regs_write(addr, val, "CTL", "ACTIVATE"); activate_device(dev); break; case PVRDMA_DEVICE_CTL_UNQUIESCE: + trace_pvrdma_regs_write(addr, val, "CTL", "UNQUIESCE"); unquiesce_device(dev); break; case PVRDMA_DEVICE_CTL_RESET: + trace_pvrdma_regs_write(addr, val, "CTL", "URESET"); reset_device(dev); break; } break; case PVRDMA_REG_IMR: - pr_dbg("Interrupt mask=0x%" PRIx64 "\n", val); + trace_pvrdma_regs_write(addr, val, "INTR_MASK", ""); dev->interrupt_mask = val; break; case PVRDMA_REG_REQUEST: if (val == 0) { - execute_command(dev); + trace_pvrdma_regs_write(addr, val, "REQUEST", ""); + pvrdma_exec_cmd(dev); } break; 
default: @@ -438,8 +432,8 @@ static void regs_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) } static const MemoryRegionOps regs_ops = { - .read = regs_read, - .write = regs_write, + .read = pvrdma_regs_read, + .write = pvrdma_regs_write, .endianness = DEVICE_LITTLE_ENDIAN, .impl = { .min_access_size = sizeof(uint32_t), @@ -447,54 +441,60 @@ static const MemoryRegionOps regs_ops = { }, }; -static uint64_t uar_read(void *opaque, hwaddr addr, unsigned size) +static uint64_t pvrdma_uar_read(void *opaque, hwaddr addr, unsigned size) { return 0xffffffff; } -static void uar_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +static void pvrdma_uar_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) { PVRDMADev *dev = opaque; - /* pr_dbg("addr=0x%lx, val=0x%x, size=%d\n", addr, (uint32_t)val, size); */ + dev->stats.uar_writes++; switch (addr & 0xFFF) { /* Mask with 0xFFF as each UC gets page */ case PVRDMA_UAR_QP_OFFSET: - pr_dbg("UAR QP command, addr=0x%" PRIx64 ", val=0x%" PRIx64 "\n", - (uint64_t)addr, val); if (val & PVRDMA_UAR_QP_SEND) { + trace_pvrdma_uar_write(addr, val, "QP", "SEND", + val & PVRDMA_UAR_HANDLE_MASK, 0); pvrdma_qp_send(dev, val & PVRDMA_UAR_HANDLE_MASK); } if (val & PVRDMA_UAR_QP_RECV) { + trace_pvrdma_uar_write(addr, val, "QP", "RECV", + val & PVRDMA_UAR_HANDLE_MASK, 0); pvrdma_qp_recv(dev, val & PVRDMA_UAR_HANDLE_MASK); } break; case PVRDMA_UAR_CQ_OFFSET: - /* pr_dbg("UAR CQ cmd, addr=0x%x, val=0x%lx\n", (uint32_t)addr, val); */ if (val & PVRDMA_UAR_CQ_ARM) { + trace_pvrdma_uar_write(addr, val, "CQ", "ARM", + val & PVRDMA_UAR_HANDLE_MASK, + !!(val & PVRDMA_UAR_CQ_ARM_SOL)); rdma_rm_req_notify_cq(&dev->rdma_dev_res, val & PVRDMA_UAR_HANDLE_MASK, !!(val & PVRDMA_UAR_CQ_ARM_SOL)); } if (val & PVRDMA_UAR_CQ_ARM_SOL) { - pr_dbg("UAR_CQ_ARM_SOL (%" PRIx64 ")\n", - val & PVRDMA_UAR_HANDLE_MASK); + trace_pvrdma_uar_write(addr, val, "CQ", "ARMSOL - not supported", 0, + 0); } if (val & PVRDMA_UAR_CQ_POLL) { - pr_dbg("UAR_CQ_POLL (%" PRIx64 ")\n", val & PVRDMA_UAR_HANDLE_MASK); + trace_pvrdma_uar_write(addr, val, "CQ", "POLL", + val & PVRDMA_UAR_HANDLE_MASK, 0); pvrdma_cq_poll(&dev->rdma_dev_res, val & PVRDMA_UAR_HANDLE_MASK); } break; default: - pr_err("Unsupported command, addr=0x%" PRIx64 ", val=0x%" PRIx64 "\n", - addr, val); + rdma_error_report("Unsupported command, addr=0x%"PRIx64", val=0x%"PRIx64, + addr, val); break; } } static const MemoryRegionOps uar_ops = { - .read = uar_read, - .write = uar_write, + .read = pvrdma_uar_read, + .write = pvrdma_uar_write, .endianness = DEVICE_LITTLE_ENDIAN, .impl = { .min_access_size = sizeof(uint32_t), @@ -551,11 +551,9 @@ static void init_dev_caps(PVRDMADev *dev) (wr_sz + sizeof(struct pvrdma_sge) * dev->dev_attr.max_sge) - TARGET_PAGE_SIZE; /* First page is ring state ^^^^ */ - pr_dbg("max_qp_wr=%d\n", dev->dev_attr.max_qp_wr); dev->dev_attr.max_cqe = pg_tbl_bytes / sizeof(struct pvrdma_cqe) - TARGET_PAGE_SIZE; /* First page is ring state */ - pr_dbg("max_cqe=%d\n", dev->dev_attr.max_cqe); } static int pvrdma_check_ram_shared(Object *obj, void *opaque) @@ -585,10 +583,8 @@ static void pvrdma_realize(PCIDevice *pdev, Error **errp) bool ram_shared = false; PCIDevice *func0; - init_pr_dbg(); - - pr_dbg("Initializing device %s %x.%x\n", pdev->name, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + rdma_info_report("Initializing device %s %x.%x", pdev->name, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); if (TARGET_PAGE_SIZE != getpagesize()) { error_setg(errp, "Target page size must be the same as 
host page size"); @@ -597,9 +593,7 @@ static void pvrdma_realize(PCIDevice *pdev, Error **errp) func0 = pci_get_function_0(pdev); /* Break if not vmxnet3 device in slot 0 */ - if (strcmp(object_get_typename(&func0->qdev.parent_obj), TYPE_VMXNET3)) { - pr_dbg("func0 type is %s\n", - object_get_typename(&func0->qdev.parent_obj)); + if (strcmp(object_get_typename(OBJECT(func0)), TYPE_VMXNET3)) { error_setg(errp, "Device on %x.0 must be %s", PCI_SLOT(pdev->devfn), TYPE_VMXNET3); return; @@ -626,21 +620,21 @@ static void pvrdma_realize(PCIDevice *pdev, Error **errp) init_regs(pdev); - rc = init_msix(pdev, errp); + rc = init_msix(pdev); if (rc) { goto out; } rc = rdma_backend_init(&dev->backend_dev, pdev, &dev->rdma_dev_res, dev->backend_device_name, dev->backend_port_num, - &dev->dev_attr, &dev->mad_chr, errp); + &dev->dev_attr, &dev->mad_chr); if (rc) { goto out; } init_dev_caps(dev); - rc = rdma_rm_init(&dev->rdma_dev_res, &dev->dev_attr, errp); + rc = rdma_rm_init(&dev->rdma_dev_res, &dev->dev_attr); if (rc) { goto out; } @@ -650,28 +644,25 @@ static void pvrdma_realize(PCIDevice *pdev, Error **errp) goto out; } + memset(&dev->stats, 0, sizeof(dev->stats)); + dev->shutdown_notifier.notify = pvrdma_shutdown_notifier; qemu_register_shutdown_notifier(&dev->shutdown_notifier); out: if (rc) { pvrdma_fini(pdev); - error_append_hint(errp, "Device fail to load\n"); + error_append_hint(errp, "Device failed to load\n"); } } -static void pvrdma_exit(PCIDevice *pdev) -{ - pvrdma_fini(pdev); -} - static void pvrdma_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + RdmaProviderClass *ir = INTERFACE_RDMA_PROVIDER_CLASS(klass); k->realize = pvrdma_realize; - k->exit = pvrdma_exit; k->vendor_id = PCI_VENDOR_ID_VMWARE; k->device_id = PCI_DEVICE_ID_VMWARE_PVRDMA; k->revision = 0x00; @@ -680,6 +671,8 @@ static void pvrdma_class_init(ObjectClass *klass, void *data) dc->desc = "RDMA Device"; dc->props = pvrdma_dev_properties; set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + + ir->print_statistics = pvrdma_print_statistics; } static const TypeInfo pvrdma_info = { @@ -689,6 +682,7 @@ static const TypeInfo pvrdma_info = { .class_init = pvrdma_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { INTERFACE_RDMA_PROVIDER }, { } } }; diff --git a/hw/rdma/vmw/pvrdma_qp_ops.c b/hw/rdma/vmw/pvrdma_qp_ops.c index ce5a60e184..5b9786efbe 100644 --- a/hw/rdma/vmw/pvrdma_qp_ops.c +++ b/hw/rdma/vmw/pvrdma_qp_ops.c @@ -19,6 +19,8 @@ #include "../rdma_rm.h" #include "../rdma_backend.h" +#include "trace.h" + #include "pvrdma.h" #include "standard-headers/rdma/vmw_pvrdma-abi.h" #include "pvrdma_qp_ops.h" @@ -55,18 +57,14 @@ static int pvrdma_post_cqe(PVRDMADev *dev, uint32_t cq_handle, RdmaRmCQ *cq = rdma_rm_get_cq(&dev->rdma_dev_res, cq_handle); if (unlikely(!cq)) { - pr_dbg("Invalid cqn %d\n", cq_handle); return -EINVAL; } ring = (PvrdmaRing *)cq->opaque; - pr_dbg("ring=%p\n", ring); /* Step #1: Put CQE on CQ ring */ - pr_dbg("Writing CQE\n"); cqe1 = pvrdma_ring_next_elem_write(ring); if (unlikely(!cqe1)) { - pr_dbg("No CQEs in ring\n"); return -EINVAL; } @@ -80,19 +78,13 @@ static int pvrdma_post_cqe(PVRDMADev *dev, uint32_t cq_handle, cqe1->wc_flags = wc->wc_flags; cqe1->vendor_err = wc->vendor_err; - pr_dbg("wr_id=%" PRIx64 "\n", cqe1->wr_id); - pr_dbg("qp=0x%lx\n", cqe1->qp); - pr_dbg("opcode=%d\n", cqe1->opcode); - pr_dbg("status=%d\n", cqe1->status); - pr_dbg("byte_len=%d\n", cqe1->byte_len); - 
pr_dbg("src_qp=%d\n", cqe1->src_qp); - pr_dbg("wc_flags=%d\n", cqe1->wc_flags); - pr_dbg("vendor_err=%d\n", cqe1->vendor_err); + trace_pvrdma_post_cqe(cq_handle, cq->notify, cqe1->wr_id, cqe1->qp, + cqe1->opcode, cqe1->status, cqe1->byte_len, + cqe1->src_qp, cqe1->wc_flags, cqe1->vendor_err); pvrdma_ring_write_inc(ring); /* Step #2: Put CQ number on dsr completion ring */ - pr_dbg("Writing CQNE\n"); cqne = pvrdma_ring_next_elem_write(&dev->dsr_info.cq); if (unlikely(!cqne)) { return -EINVAL; @@ -101,7 +93,6 @@ static int pvrdma_post_cqe(PVRDMADev *dev, uint32_t cq_handle, cqne->info = cq_handle; pvrdma_ring_write_inc(&dev->dsr_info.cq); - pr_dbg("cq->notify=%d\n", cq->notify); if (cq->notify != CNT_CLEAR) { if (cq->notify == CNT_ARM) { cq->notify = CNT_CLEAR; @@ -123,7 +114,7 @@ static void pvrdma_qp_ops_comp_handler(void *ctx, struct ibv_wc *wc) static void complete_with_error(uint32_t vendor_err, void *ctx) { - struct ibv_wc wc = {0}; + struct ibv_wc wc = {}; wc.status = IBV_WC_GENERAL_ERR; wc.vendor_err = vendor_err; @@ -151,23 +142,17 @@ void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle) int sgid_idx; union ibv_gid *sgid; - pr_dbg("qp_handle=0x%x\n", qp_handle); - qp = rdma_rm_get_qp(&dev->rdma_dev_res, qp_handle); if (unlikely(!qp)) { - pr_dbg("Invalid qpn\n"); return; } ring = (PvrdmaRing *)qp->opaque; - pr_dbg("sring=%p\n", ring); wqe = (struct PvrdmaSqWqe *)pvrdma_ring_next_elem_read(ring); while (wqe) { CompHandlerCtx *comp_ctx; - pr_dbg("wr_id=%" PRIx64 "\n", wqe->hdr.wr_id); - /* Prepare CQE */ comp_ctx = g_malloc(sizeof(CompHandlerCtx)); comp_ctx->dev = dev; @@ -178,26 +163,25 @@ void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle) sgid = rdma_rm_get_gid(&dev->rdma_dev_res, wqe->hdr.wr.ud.av.gid_index); if (!sgid) { - pr_dbg("Fail to get gid for idx %d\n", wqe->hdr.wr.ud.av.gid_index); + rdma_error_report("Failed to get gid for idx %d", + wqe->hdr.wr.ud.av.gid_index); complete_with_error(VENDOR_ERR_INV_GID_IDX, comp_ctx); continue; } - pr_dbg("sgid_id=%d, sgid=0x%llx\n", wqe->hdr.wr.ud.av.gid_index, - sgid->global.interface_id); sgid_idx = rdma_rm_get_backend_gid_index(&dev->rdma_dev_res, &dev->backend_dev, wqe->hdr.wr.ud.av.gid_index); if (sgid_idx <= 0) { - pr_dbg("Fail to get bk sgid_idx for sgid_idx %d\n", - wqe->hdr.wr.ud.av.gid_index); + rdma_error_report("Failed to get bk sgid_idx for sgid_idx %d", + wqe->hdr.wr.ud.av.gid_index); complete_with_error(VENDOR_ERR_INV_GID_IDX, comp_ctx); continue; } if (wqe->hdr.num_sge > dev->dev_attr.max_sge) { - pr_dbg("Invalid num_sge=%d (max %d)\n", wqe->hdr.num_sge, - dev->dev_attr.max_sge); + rdma_error_report("Invalid num_sge=%d (max %d)", wqe->hdr.num_sge, + dev->dev_attr.max_sge); complete_with_error(VENDOR_ERR_INV_NUM_SGE, comp_ctx); continue; } @@ -221,23 +205,17 @@ void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle) PvrdmaRqWqe *wqe; PvrdmaRing *ring; - pr_dbg("qp_handle=0x%x\n", qp_handle); - qp = rdma_rm_get_qp(&dev->rdma_dev_res, qp_handle); if (unlikely(!qp)) { - pr_dbg("Invalid qpn\n"); return; } ring = &((PvrdmaRing *)qp->opaque)[1]; - pr_dbg("rring=%p\n", ring); wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring); while (wqe) { CompHandlerCtx *comp_ctx; - pr_dbg("wr_id=%" PRIx64 "\n", wqe->hdr.wr_id); - /* Prepare CQE */ comp_ctx = g_malloc(sizeof(CompHandlerCtx)); comp_ctx->dev = dev; @@ -247,14 +225,13 @@ void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle) comp_ctx->cqe.opcode = IBV_WC_RECV; if (wqe->hdr.num_sge > dev->dev_attr.max_sge) { - pr_dbg("Invalid num_sge=%d (max %d)\n", 
wqe->hdr.num_sge, - dev->dev_attr.max_sge); + rdma_error_report("Invalid num_sge=%d (max %d)", wqe->hdr.num_sge, + dev->dev_attr.max_sge); complete_with_error(VENDOR_ERR_INV_NUM_SGE, comp_ctx); continue; } - rdma_backend_post_recv(&dev->backend_dev, &dev->rdma_dev_res, - &qp->backend_qp, qp->qp_type, + rdma_backend_post_recv(&dev->backend_dev, &qp->backend_qp, qp->qp_type, (struct ibv_sge *)&wqe->sge[0], wqe->hdr.num_sge, comp_ctx); @@ -270,7 +247,6 @@ void pvrdma_cq_poll(RdmaDeviceResources *dev_res, uint32_t cq_handle) cq = rdma_rm_get_cq(dev_res, cq_handle); if (!cq) { - pr_dbg("Invalid CQ# %d\n", cq_handle); return; } diff --git a/hw/rdma/vmw/trace-events b/hw/rdma/vmw/trace-events index b3f9e2b19f..323fca8456 100644 --- a/hw/rdma/vmw/trace-events +++ b/hw/rdma/vmw/trace-events @@ -1,5 +1,17 @@ -# See docs/tracing.txt for syntax documentation. +# See docs/devel/tracing.txt for syntax documentation. -# hw/rdma/vmw/pvrdma_main.c -pvrdma_regs_read(uint64_t addr, uint64_t val) "regs[0x%"PRIx64"] = 0x%"PRIx64 -pvrdma_regs_write(uint64_t addr, uint64_t val) "regs[0x%"PRIx64"] = 0x%"PRIx64 +# pvrdma_main.c +pvrdma_regs_read(uint64_t addr, uint64_t val) "pvrdma.regs[0x%"PRIx64"]=0x%"PRIx64 +pvrdma_regs_write(uint64_t addr, uint64_t val, const char *reg_name, const char *val_name) "pvrdma.regs[0x%"PRIx64"]=0x%"PRIx64" (%s %s)" +pvrdma_uar_write(uint64_t addr, uint64_t val, const char *reg_name, const char *val_name, int val1, int val2) "uar[0x%"PRIx64"]=0x%"PRIx64" (cls=%s, op=%s, obj=%d, val=%d)" + +# pvrdma_cmd.c +pvrdma_map_to_pdir_host_virt(void *vfirst, void *vremaped) "mremap %p -> %p" +pvrdma_map_to_pdir_next_page(int page_idx, void *vnext, void *vremaped) "mremap [%d] %p -> %p" +pvrdma_exec_cmd(int cmd, int err) "cmd=%d, err=%d" + +# pvrdma_dev_ring.c +pvrdma_ring_next_elem_read_no_data(char *ring_name) "pvrdma_ring %s is empty" + +# pvrdma_qp_ops.c +pvrdma_post_cqe(uint32_t cq_handle, int notify, uint64_t wr_id, uint64_t qpn, uint32_t op_code, uint32_t status, uint32_t byte_len, uint32_t src_qp, uint32_t wc_flags, uint32_t vendor_err) "cq_handle=%d, notify=%d, wr_id=0x%"PRIx64", qpn=0x%"PRIx64", opcode=%d, status=%d, byte_len=%d, src_qp=%d, wc_flags=%d, vendor_err=%d" diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig index 8c7fc1f31d..8674211085 100644 --- a/hw/riscv/Kconfig +++ b/hw/riscv/Kconfig @@ -6,6 +6,7 @@ config HART config SIFIVE bool + select MSI_NONBROKEN config SIFIVE_E bool diff --git a/hw/riscv/sifive_plic.c b/hw/riscv/sifive_plic.c index d12ec3fc9a..07a032d93d 100644 --- a/hw/riscv/sifive_plic.c +++ b/hw/riscv/sifive_plic.c @@ -22,7 +22,9 @@ #include "qemu/log.h" #include "qemu/error-report.h" #include "hw/sysbus.h" +#include "hw/pci/msi.h" #include "target/riscv/cpu.h" +#include "sysemu/sysemu.h" #include "hw/riscv/sifive_plic.h" #define RISCV_DEBUG_PLIC 0 @@ -205,7 +207,7 @@ static uint64_t sifive_plic_read(void *opaque, hwaddr addr, unsigned size) if (addr >= plic->priority_base && /* 4 bytes per source */ addr < plic->priority_base + (plic->num_sources << 2)) { - uint32_t irq = (addr - plic->priority_base) >> 2; + uint32_t irq = ((addr - plic->priority_base) >> 2) + 1; if (RISCV_DEBUG_PLIC) { qemu_log("plic: read priority: irq=%d priority=%d\n", irq, plic->source_priority[irq]); @@ -261,7 +263,9 @@ static uint64_t sifive_plic_read(void *opaque, hwaddr addr, unsigned size) } err: - error_report("plic: invalid register read: %08x", (uint32_t)addr); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid register read 0x%" HWADDR_PRIx "\n", + __func__, addr); return 0; } @@ 
-278,7 +282,7 @@ static void sifive_plic_write(void *opaque, hwaddr addr, uint64_t value, if (addr >= plic->priority_base && /* 4 bytes per source */ addr < plic->priority_base + (plic->num_sources << 2)) { - uint32_t irq = (addr - plic->priority_base) >> 2; + uint32_t irq = ((addr - plic->priority_base) >> 2) + 1; plic->source_priority[irq] = value & 7; if (RISCV_DEBUG_PLIC) { qemu_log("plic: write priority: irq=%d priority=%d\n", @@ -288,7 +292,9 @@ static void sifive_plic_write(void *opaque, hwaddr addr, uint64_t value, } else if (addr >= plic->pending_base && /* 1 bit per source */ addr < plic->pending_base + (plic->num_sources >> 3)) { - error_report("plic: invalid pending write: %08x", (uint32_t)addr); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid pending write: 0x%" HWADDR_PRIx "", + __func__, addr); return; } else if (addr >= plic->enable_base && /* 1 bit per source */ addr < plic->enable_base + plic->num_addrs * plic->enable_stride) @@ -338,7 +344,9 @@ static void sifive_plic_write(void *opaque, hwaddr addr, uint64_t value, } err: - error_report("plic: invalid register write: %08x", (uint32_t)addr); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid register write 0x%" HWADDR_PRIx "\n", + __func__, addr); } static const MemoryRegionOps sifive_plic_ops = { @@ -383,7 +391,7 @@ static void parse_hart_config(SiFivePLICState *plic) p = plic->hart_config; while ((c = *p++)) { if (c == ',') { - addrid += __builtin_popcount(modes); + addrid += ctpop8(modes); modes = 0; hartid++; } else { @@ -397,7 +405,7 @@ static void parse_hart_config(SiFivePLICState *plic) } } if (modes) { - addrid += __builtin_popcount(modes); + addrid += ctpop8(modes); } hartid++; @@ -431,6 +439,7 @@ static void sifive_plic_irq_request(void *opaque, int irq, int level) static void sifive_plic_realize(DeviceState *dev, Error **errp) { SiFivePLICState *plic = SIFIVE_PLIC(dev); + int i; memory_region_init_io(&plic->mmio, OBJECT(dev), &sifive_plic_ops, plic, TYPE_SIFIVE_PLIC, plic->aperture_size); @@ -443,6 +452,21 @@ static void sifive_plic_realize(DeviceState *dev, Error **errp) plic->enable = g_new0(uint32_t, plic->bitfield_words * plic->num_addrs); sysbus_init_mmio(SYS_BUS_DEVICE(dev), &plic->mmio); qdev_init_gpio_in(dev, sifive_plic_irq_request, plic->num_sources); + + /* We can't allow the supervisor to control SEIP as this would allow the + * supervisor to clear a pending external interrupt which will result in + * lost a interrupt in the case a PLIC is attached. The SEIP bit must be + * hardware controlled when a PLIC is attached. 
+ */ + for (i = 0; i < smp_cpus; i++) { + RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(i)); + if (riscv_cpu_claim_interrupts(cpu, MIP_SEIP) < 0) { + error_report("SEIP already claimed"); + exit(1); + } + } + + msi_nonbroken = true; } static void sifive_plic_class_init(ObjectClass *klass, void *data) diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c index 7bc25820fe..5ecc47cea3 100644 --- a/hw/riscv/sifive_u.c +++ b/hw/riscv/sifive_u.c @@ -244,7 +244,7 @@ static void create_fdt(SiFiveUState *s, const struct MemmapEntry *memmap, qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", SIFIVE_U_CLOCK_FREQ / 2); qemu_fdt_setprop_cells(fdt, nodename, "interrupt-parent", plic_phandle); - qemu_fdt_setprop_cells(fdt, nodename, "interrupts", 1); + qemu_fdt_setprop_cells(fdt, nodename, "interrupts", SIFIVE_U_UART0_IRQ); qemu_fdt_add_subnode(fdt, "/chosen"); qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", nodename); @@ -398,7 +398,10 @@ static void riscv_sifive_u_machine_init(MachineClass *mc) { mc->desc = "RISC-V Board compatible with SiFive U SDK"; mc->init = riscv_sifive_u_init; - mc->max_cpus = 1; + /* The real hardware has 5 CPUs, but one of them is a small embedded power + * management CPU. + */ + mc->max_cpus = 4; } DEFINE_MACHINE("sifive_u", riscv_sifive_u_machine_init) diff --git a/hw/riscv/sifive_uart.c b/hw/riscv/sifive_uart.c index 456a3d3697..3b3f94f51d 100644 --- a/hw/riscv/sifive_uart.c +++ b/hw/riscv/sifive_uart.c @@ -51,7 +51,8 @@ static uint64_t uart_ip(SiFiveUARTState *s) static void update_irq(SiFiveUARTState *s) { int cond = 0; - if ((s->ie & SIFIVE_UART_IE_RXWM) && s->rx_fifo_len) { + if ((s->ie & SIFIVE_UART_IE_TXWM) || + ((s->ie & SIFIVE_UART_IE_RXWM) && s->rx_fifo_len)) { cond = 1; } if (cond) { @@ -108,6 +109,7 @@ uart_write(void *opaque, hwaddr addr, switch (addr) { case SIFIVE_UART_TXFIFO: qemu_chr_fe_write(&s->chr, &ch, 1); + update_irq(s); return; case SIFIVE_UART_IE: s->ie = val64; diff --git a/hw/s390x/3270-ccw.c b/hw/s390x/3270-ccw.c index 2c8d16ccf7..14882242c3 100644 --- a/hw/s390x/3270-ccw.c +++ b/hw/s390x/3270-ccw.c @@ -78,13 +78,13 @@ static int emulated_ccw_3270_cb(SubchDev *sch, CCW1 ccw) if (rc == -EIO) { /* I/O error, specific devices generate specific conditions */ - SCSW *s = &sch->curr_status.scsw; + SCHIB *schib = &sch->curr_status; sch->curr_status.scsw.dstat = SCSW_DSTAT_UNIT_CHECK; sch->sense_data[0] = 0x40; /* intervention-req */ - s->ctrl &= ~SCSW_ACTL_START_PEND; - s->ctrl &= ~SCSW_CTRL_MASK_STCTL; - s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; } diff --git a/hw/s390x/Kconfig b/hw/s390x/Kconfig index a7046ea41f..5e7d8a2bae 100644 --- a/hw/s390x/Kconfig +++ b/hw/s390x/Kconfig @@ -9,3 +9,4 @@ config S390_CCW_VIRTIO select S390_FLIC select SCLPCONSOLE select VIRTIO_CCW + select MSI_NONBROKEN diff --git a/hw/s390x/css.c b/hw/s390x/css.c index f92b046cd3..8fc9e35ba5 100644 --- a/hw/s390x/css.c +++ b/hw/s390x/css.c @@ -695,35 +695,32 @@ void css_adapter_interrupt(CssIoAdapterType type, uint8_t isc) static void sch_handle_clear_func(SubchDev *sch) { - PMCW *p = &sch->curr_status.pmcw; - SCSW *s = &sch->curr_status.scsw; + SCHIB *schib = &sch->curr_status; int path; /* Path management: In our simple css, we always choose the only path. */ path = 0x80; /* Reset values prior to 'issuing the clear signal'. 
*/ - p->lpum = 0; - p->pom = 0xff; - s->flags &= ~SCSW_FLAGS_MASK_PNO; + schib->pmcw.lpum = 0; + schib->pmcw.pom = 0xff; + schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; /* We always 'attempt to issue the clear signal', and we always succeed. */ sch->channel_prog = 0x0; sch->last_cmd_valid = false; - s->ctrl &= ~SCSW_ACTL_CLEAR_PEND; - s->ctrl |= SCSW_STCTL_STATUS_PEND; + schib->scsw.ctrl &= ~SCSW_ACTL_CLEAR_PEND; + schib->scsw.ctrl |= SCSW_STCTL_STATUS_PEND; - s->dstat = 0; - s->cstat = 0; - p->lpum = path; + schib->scsw.dstat = 0; + schib->scsw.cstat = 0; + schib->pmcw.lpum = path; } static void sch_handle_halt_func(SubchDev *sch) { - - PMCW *p = &sch->curr_status.pmcw; - SCSW *s = &sch->curr_status.scsw; + SCHIB *schib = &sch->curr_status; hwaddr curr_ccw = sch->channel_prog; int path; @@ -733,20 +730,22 @@ static void sch_handle_halt_func(SubchDev *sch) /* We always 'attempt to issue the halt signal', and we always succeed. */ sch->channel_prog = 0x0; sch->last_cmd_valid = false; - s->ctrl &= ~SCSW_ACTL_HALT_PEND; - s->ctrl |= SCSW_STCTL_STATUS_PEND; + schib->scsw.ctrl &= ~SCSW_ACTL_HALT_PEND; + schib->scsw.ctrl |= SCSW_STCTL_STATUS_PEND; - if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) || - !((s->ctrl & SCSW_ACTL_START_PEND) || - (s->ctrl & SCSW_ACTL_SUSP))) { - s->dstat = SCSW_DSTAT_DEVICE_END; + if ((schib->scsw.ctrl & (SCSW_ACTL_SUBCH_ACTIVE | + SCSW_ACTL_DEVICE_ACTIVE)) || + !((schib->scsw.ctrl & SCSW_ACTL_START_PEND) || + (schib->scsw.ctrl & SCSW_ACTL_SUSP))) { + schib->scsw.dstat = SCSW_DSTAT_DEVICE_END; } - if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) || - (s->ctrl & SCSW_ACTL_SUSP)) { - s->cpa = curr_ccw + 8; + if ((schib->scsw.ctrl & (SCSW_ACTL_SUBCH_ACTIVE | + SCSW_ACTL_DEVICE_ACTIVE)) || + (schib->scsw.ctrl & SCSW_ACTL_SUSP)) { + schib->scsw.cpa = curr_ccw + 8; } - s->cstat = 0; - p->lpum = path; + schib->scsw.cstat = 0; + schib->pmcw.lpum = path; } @@ -1111,9 +1110,7 @@ static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr, static void sch_handle_start_func_virtual(SubchDev *sch) { - - PMCW *p = &sch->curr_status.pmcw; - SCSW *s = &sch->curr_status.scsw; + SCHIB *schib = &sch->curr_status; int path; int ret; bool suspend_allowed; @@ -1121,27 +1118,27 @@ static void sch_handle_start_func_virtual(SubchDev *sch) /* Path management: In our simple css, we always choose the only path. */ path = 0x80; - if (!(s->ctrl & SCSW_ACTL_SUSP)) { + if (!(schib->scsw.ctrl & SCSW_ACTL_SUSP)) { /* Start Function triggered via ssch, i.e. we have an ORB */ ORB *orb = &sch->orb; - s->cstat = 0; - s->dstat = 0; + schib->scsw.cstat = 0; + schib->scsw.dstat = 0; /* Look at the orb and try to execute the channel program. */ - p->intparm = orb->intparm; + schib->pmcw.intparm = orb->intparm; if (!(orb->lpm & path)) { /* Generate a deferred cc 3 condition. */ - s->flags |= SCSW_FLAGS_MASK_CC; - s->ctrl &= ~SCSW_CTRL_MASK_STCTL; - s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND); + schib->scsw.flags |= SCSW_FLAGS_MASK_CC; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND); return; } sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT); - s->flags |= (sch->ccw_fmt_1) ? SCSW_FLAGS_MASK_FMT : 0; + schib->scsw.flags |= (sch->ccw_fmt_1) ? 
SCSW_FLAGS_MASK_FMT : 0; sch->ccw_no_data_cnt = 0; suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND); } else { /* Start Function resumed via rsch */ - s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND); + schib->scsw.ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND); /* The channel program had been suspended before. */ suspend_allowed = true; } @@ -1154,40 +1151,40 @@ static void sch_handle_start_func_virtual(SubchDev *sch) break; case 0: /* success */ - s->ctrl &= ~SCSW_ACTL_START_PEND; - s->ctrl &= ~SCSW_CTRL_MASK_STCTL; - s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | SCSW_STCTL_STATUS_PEND; - s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END; - s->cpa = sch->channel_prog + 8; + schib->scsw.dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END; + schib->scsw.cpa = sch->channel_prog + 8; break; case -EIO: /* I/O errors, status depends on specific devices */ break; case -ENOSYS: /* unsupported command, generate unit check (command reject) */ - s->ctrl &= ~SCSW_ACTL_START_PEND; - s->dstat = SCSW_DSTAT_UNIT_CHECK; + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.dstat = SCSW_DSTAT_UNIT_CHECK; /* Set sense bit 0 in ecw0. */ sch->sense_data[0] = 0x80; - s->ctrl &= ~SCSW_CTRL_MASK_STCTL; - s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; - s->cpa = sch->channel_prog + 8; + schib->scsw.cpa = sch->channel_prog + 8; break; case -EINPROGRESS: /* channel program has been suspended */ - s->ctrl &= ~SCSW_ACTL_START_PEND; - s->ctrl |= SCSW_ACTL_SUSP; + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.ctrl |= SCSW_ACTL_SUSP; break; default: /* error, generate channel program check */ - s->ctrl &= ~SCSW_ACTL_START_PEND; - s->cstat = SCSW_CSTAT_PROG_CHECK; - s->ctrl &= ~SCSW_CTRL_MASK_STCTL; - s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; - s->cpa = sch->channel_prog + 8; + schib->scsw.cpa = sch->channel_prog + 8; break; } } while (ret == -EAGAIN); @@ -1196,14 +1193,11 @@ static void sch_handle_start_func_virtual(SubchDev *sch) static IOInstEnding sch_handle_start_func_passthrough(SubchDev *sch) { - - PMCW *p = &sch->curr_status.pmcw; - SCSW *s = &sch->curr_status.scsw; - + SCHIB *schib = &sch->curr_status; ORB *orb = &sch->orb; - if (!(s->ctrl & SCSW_ACTL_SUSP)) { + if (!(schib->scsw.ctrl & SCSW_ACTL_SUSP)) { assert(orb != NULL); - p->intparm = orb->intparm; + schib->pmcw.intparm = orb->intparm; } return s390_ccw_cmd_request(sch); } @@ -1216,14 +1210,13 @@ static IOInstEnding sch_handle_start_func_passthrough(SubchDev *sch) */ IOInstEnding do_subchannel_work_virtual(SubchDev *sch) { + SCHIB *schib = &sch->curr_status; - SCSW *s = &sch->curr_status.scsw; - - if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) { + if (schib->scsw.ctrl & SCSW_FCTL_CLEAR_FUNC) { sch_handle_clear_func(sch); - } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) { + } else if (schib->scsw.ctrl & SCSW_FCTL_HALT_FUNC) { sch_handle_halt_func(sch); - } else if (s->ctrl & SCSW_FCTL_START_FUNC) { + } else if (schib->scsw.ctrl & SCSW_FCTL_START_FUNC) { /* 
Triggered by both ssch and rsch. */ sch_handle_start_func_virtual(sch); } @@ -1234,15 +1227,15 @@ IOInstEnding do_subchannel_work_virtual(SubchDev *sch) IOInstEnding do_subchannel_work_passthrough(SubchDev *sch) { - SCSW *s = &sch->curr_status.scsw; + SCHIB *schib = &sch->curr_status; - if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) { + if (schib->scsw.ctrl & SCSW_FCTL_CLEAR_FUNC) { /* TODO: Clear handling */ sch_handle_clear_func(sch); - } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) { + } else if (schib->scsw.ctrl & SCSW_FCTL_HALT_FUNC) { /* TODO: Halt handling */ sch_handle_halt_func(sch); - } else if (s->ctrl & SCSW_FCTL_START_FUNC) { + } else if (schib->scsw.ctrl & SCSW_FCTL_START_FUNC) { return sch_handle_start_func_passthrough(sch); } return IOINST_CC_EXPECTED; @@ -1370,46 +1363,45 @@ static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src) IOInstEnding css_do_msch(SubchDev *sch, const SCHIB *orig_schib) { - SCSW *s = &sch->curr_status.scsw; - PMCW *p = &sch->curr_status.pmcw; + SCHIB *schib = &sch->curr_status; uint16_t oldflags; - SCHIB schib; + SCHIB schib_copy; - if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_DNV)) { + if (!(schib->pmcw.flags & PMCW_FLAGS_MASK_DNV)) { return IOINST_CC_EXPECTED; } - if (s->ctrl & SCSW_STCTL_STATUS_PEND) { + if (schib->scsw.ctrl & SCSW_STCTL_STATUS_PEND) { return IOINST_CC_STATUS_PRESENT; } - if (s->ctrl & + if (schib->scsw.ctrl & (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) { return IOINST_CC_BUSY; } - copy_schib_from_guest(&schib, orig_schib); + copy_schib_from_guest(&schib_copy, orig_schib); /* Only update the program-modifiable fields. */ - p->intparm = schib.pmcw.intparm; - oldflags = p->flags; - p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | + schib->pmcw.intparm = schib_copy.pmcw.intparm; + oldflags = schib->pmcw.flags; + schib->pmcw.flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME | PMCW_FLAGS_MASK_MP); - p->flags |= schib.pmcw.flags & + schib->pmcw.flags |= schib_copy.pmcw.flags & (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME | PMCW_FLAGS_MASK_MP); - p->lpm = schib.pmcw.lpm; - p->mbi = schib.pmcw.mbi; - p->pom = schib.pmcw.pom; - p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE); - p->chars |= schib.pmcw.chars & + schib->pmcw.lpm = schib_copy.pmcw.lpm; + schib->pmcw.mbi = schib_copy.pmcw.mbi; + schib->pmcw.pom = schib_copy.pmcw.pom; + schib->pmcw.chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE); + schib->pmcw.chars |= schib_copy.pmcw.chars & (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE); - sch->curr_status.mba = schib.mba; + schib->mba = schib_copy.mba; /* Has the channel been disabled? 
*/ if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0 - && (p->flags & PMCW_FLAGS_MASK_ENA) == 0) { + && (schib->pmcw.flags & PMCW_FLAGS_MASK_ENA) == 0) { sch->disable_cb(sch); } return IOINST_CC_EXPECTED; @@ -1417,82 +1409,80 @@ IOInstEnding css_do_msch(SubchDev *sch, const SCHIB *orig_schib) IOInstEnding css_do_xsch(SubchDev *sch) { - SCSW *s = &sch->curr_status.scsw; - PMCW *p = &sch->curr_status.pmcw; + SCHIB *schib = &sch->curr_status; - if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { + if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { return IOINST_CC_NOT_OPERATIONAL; } - if (s->ctrl & SCSW_CTRL_MASK_STCTL) { + if (schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL) { return IOINST_CC_STATUS_PRESENT; } - if (!(s->ctrl & SCSW_CTRL_MASK_FCTL) || - ((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) || - (!(s->ctrl & + if (!(schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL) || + ((schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) || + (!(schib->scsw.ctrl & (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) || - (s->ctrl & SCSW_ACTL_SUBCH_ACTIVE)) { + (schib->scsw.ctrl & SCSW_ACTL_SUBCH_ACTIVE)) { return IOINST_CC_BUSY; } /* Cancel the current operation. */ - s->ctrl &= ~(SCSW_FCTL_START_FUNC | + schib->scsw.ctrl &= ~(SCSW_FCTL_START_FUNC | SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP); sch->channel_prog = 0x0; sch->last_cmd_valid = false; - s->dstat = 0; - s->cstat = 0; + schib->scsw.dstat = 0; + schib->scsw.cstat = 0; return IOINST_CC_EXPECTED; } IOInstEnding css_do_csch(SubchDev *sch) { - SCSW *s = &sch->curr_status.scsw; - PMCW *p = &sch->curr_status.pmcw; + SCHIB *schib = &sch->curr_status; - if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { + if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { return IOINST_CC_NOT_OPERATIONAL; } /* Trigger the clear function. */ - s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL); - s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND; + schib->scsw.ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL); + schib->scsw.ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND; return do_subchannel_work(sch); } IOInstEnding css_do_hsch(SubchDev *sch) { - SCSW *s = &sch->curr_status.scsw; - PMCW *p = &sch->curr_status.pmcw; + SCHIB *schib = &sch->curr_status; - if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { + if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { return IOINST_CC_NOT_OPERATIONAL; } - if (((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) || - (s->ctrl & (SCSW_STCTL_PRIMARY | + if (((schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) || + (schib->scsw.ctrl & (SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | SCSW_STCTL_ALERT))) { return IOINST_CC_STATUS_PRESENT; } - if (s->ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { + if (schib->scsw.ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { return IOINST_CC_BUSY; } /* Trigger the halt function. 
*/ - s->ctrl |= SCSW_FCTL_HALT_FUNC; - s->ctrl &= ~SCSW_FCTL_START_FUNC; - if (((s->ctrl & SCSW_CTRL_MASK_ACTL) == + schib->scsw.ctrl |= SCSW_FCTL_HALT_FUNC; + schib->scsw.ctrl &= ~SCSW_FCTL_START_FUNC; + if (((schib->scsw.ctrl & SCSW_CTRL_MASK_ACTL) == (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) && - ((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_INTERMEDIATE)) { - s->ctrl &= ~SCSW_STCTL_STATUS_PEND; + ((schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL) == + SCSW_STCTL_INTERMEDIATE)) { + schib->scsw.ctrl &= ~SCSW_STCTL_STATUS_PEND; } - s->ctrl |= SCSW_ACTL_HALT_PEND; + schib->scsw.ctrl |= SCSW_ACTL_HALT_PEND; return do_subchannel_work(sch); } @@ -1534,18 +1524,17 @@ static void css_update_chnmon(SubchDev *sch) IOInstEnding css_do_ssch(SubchDev *sch, ORB *orb) { - SCSW *s = &sch->curr_status.scsw; - PMCW *p = &sch->curr_status.pmcw; + SCHIB *schib = &sch->curr_status; - if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { + if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { return IOINST_CC_NOT_OPERATIONAL; } - if (s->ctrl & SCSW_STCTL_STATUS_PEND) { + if (schib->scsw.ctrl & SCSW_STCTL_STATUS_PEND) { return IOINST_CC_STATUS_PRESENT; } - if (s->ctrl & (SCSW_FCTL_START_FUNC | + if (schib->scsw.ctrl & (SCSW_FCTL_START_FUNC | SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { return IOINST_CC_BUSY; @@ -1558,13 +1547,13 @@ IOInstEnding css_do_ssch(SubchDev *sch, ORB *orb) sch->orb = *orb; sch->channel_prog = orb->cpa; /* Trigger the start function. */ - s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND); - s->flags &= ~SCSW_FLAGS_MASK_PNO; + schib->scsw.ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND); + schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; return do_subchannel_work(sch); } -static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw, +static void copy_irb_to_guest(IRB *dest, const IRB *src, const PMCW *pmcw, int *irb_len) { int i; @@ -1603,24 +1592,24 @@ static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw, int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len) { - SCSW *s = &sch->curr_status.scsw; - PMCW *p = &sch->curr_status.pmcw; + SCHIB *schib = &sch->curr_status; + PMCW p; uint16_t stctl; IRB irb; - if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { + if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { return 3; } - stctl = s->ctrl & SCSW_CTRL_MASK_STCTL; + stctl = schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL; /* Prepare the irb for the guest. */ memset(&irb, 0, sizeof(IRB)); /* Copy scsw from current status. */ - memcpy(&irb.scsw, s, sizeof(SCSW)); + irb.scsw = schib->scsw; if (stctl & SCSW_STCTL_STATUS_PEND) { - if (s->cstat & (SCSW_CSTAT_DATA_CHECK | + if (schib->scsw.cstat & (SCSW_CSTAT_DATA_CHECK | SCSW_CSTAT_CHN_CTRL_CHK | SCSW_CSTAT_INTF_CTRL_CHK)) { irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF; @@ -1629,8 +1618,8 @@ int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len) irb.esw[0] = 0x00800000; } /* If a unit check is pending, copy sense data. */ - if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) && - (p->chars & PMCW_CHARS_MASK_CSENSE)) { + if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) && + (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) { int i; irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL; @@ -1643,34 +1632,34 @@ int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len) } } /* Store the irb to the guest. 
*/ - copy_irb_to_guest(target_irb, &irb, p, irb_len); + p = schib->pmcw; + copy_irb_to_guest(target_irb, &irb, &p, irb_len); return ((stctl & SCSW_STCTL_STATUS_PEND) == 0); } void css_do_tsch_update_subch(SubchDev *sch) { - SCSW *s = &sch->curr_status.scsw; - PMCW *p = &sch->curr_status.pmcw; + SCHIB *schib = &sch->curr_status; uint16_t stctl; uint16_t fctl; uint16_t actl; - stctl = s->ctrl & SCSW_CTRL_MASK_STCTL; - fctl = s->ctrl & SCSW_CTRL_MASK_FCTL; - actl = s->ctrl & SCSW_CTRL_MASK_ACTL; + stctl = schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL; + fctl = schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL; + actl = schib->scsw.ctrl & SCSW_CTRL_MASK_ACTL; /* Clear conditions on subchannel, if applicable. */ if (stctl & SCSW_STCTL_STATUS_PEND) { - s->ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) || ((fctl & SCSW_FCTL_HALT_FUNC) && (actl & SCSW_ACTL_SUSP))) { - s->ctrl &= ~SCSW_CTRL_MASK_FCTL; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_FCTL; } if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) { - s->flags &= ~SCSW_FLAGS_MASK_PNO; - s->ctrl &= ~(SCSW_ACTL_RESUME_PEND | + schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; + schib->scsw.ctrl &= ~(SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_HALT_PEND | SCSW_ACTL_CLEAR_PEND | @@ -1678,20 +1667,20 @@ void css_do_tsch_update_subch(SubchDev *sch) } else { if ((actl & SCSW_ACTL_SUSP) && (fctl & SCSW_FCTL_START_FUNC)) { - s->flags &= ~SCSW_FLAGS_MASK_PNO; + schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; if (fctl & SCSW_FCTL_HALT_FUNC) { - s->ctrl &= ~(SCSW_ACTL_RESUME_PEND | + schib->scsw.ctrl &= ~(SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_HALT_PEND | SCSW_ACTL_CLEAR_PEND | SCSW_ACTL_SUSP); } else { - s->ctrl &= ~SCSW_ACTL_RESUME_PEND; + schib->scsw.ctrl &= ~SCSW_ACTL_RESUME_PEND; } } } /* Clear pending sense data. 
*/ - if (p->chars & PMCW_CHARS_MASK_CSENSE) { + if (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE) { memset(sch->sense_data, 0 , sizeof(sch->sense_data)); } } @@ -1804,20 +1793,19 @@ void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo) IOInstEnding css_do_rsch(SubchDev *sch) { - SCSW *s = &sch->curr_status.scsw; - PMCW *p = &sch->curr_status.pmcw; + SCHIB *schib = &sch->curr_status; - if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { + if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { return IOINST_CC_NOT_OPERATIONAL; } - if (s->ctrl & SCSW_STCTL_STATUS_PEND) { + if (schib->scsw.ctrl & SCSW_STCTL_STATUS_PEND) { return IOINST_CC_STATUS_PRESENT; } - if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) || - (s->ctrl & SCSW_ACTL_RESUME_PEND) || - (!(s->ctrl & SCSW_ACTL_SUSP))) { + if (((schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) || + (schib->scsw.ctrl & SCSW_ACTL_RESUME_PEND) || + (!(schib->scsw.ctrl & SCSW_ACTL_SUSP))) { return IOINST_CC_BUSY; } @@ -1826,7 +1814,7 @@ IOInstEnding css_do_rsch(SubchDev *sch) css_update_chnmon(sch); } - s->ctrl |= SCSW_ACTL_RESUME_PEND; + schib->scsw.ctrl |= SCSW_ACTL_RESUME_PEND; return do_subchannel_work(sch); } @@ -1927,28 +1915,27 @@ static int css_add_chpid(uint8_t cssid, uint8_t chpid, uint8_t type, void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type) { - PMCW *p = &sch->curr_status.pmcw; - SCSW *s = &sch->curr_status.scsw; + SCHIB *schib = &sch->curr_status; int i; CssImage *css = channel_subsys.css[sch->cssid]; assert(css != NULL); - memset(p, 0, sizeof(PMCW)); - p->flags |= PMCW_FLAGS_MASK_DNV; - p->devno = sch->devno; + memset(&schib->pmcw, 0, sizeof(PMCW)); + schib->pmcw.flags |= PMCW_FLAGS_MASK_DNV; + schib->pmcw.devno = sch->devno; /* single path */ - p->pim = 0x80; - p->pom = 0xff; - p->pam = 0x80; - p->chpid[0] = chpid; + schib->pmcw.pim = 0x80; + schib->pmcw.pom = 0xff; + schib->pmcw.pam = 0x80; + schib->pmcw.chpid[0] = chpid; if (!css->chpids[chpid].in_use) { css_add_chpid(sch->cssid, chpid, type, true); } - memset(s, 0, sizeof(SCSW)); - sch->curr_status.mba = 0; - for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) { - sch->curr_status.mda[i] = 0; + memset(&schib->scsw, 0, sizeof(SCSW)); + schib->mba = 0; + for (i = 0; i < ARRAY_SIZE(schib->mda); i++) { + schib->mda[i] = 0; } } @@ -2246,30 +2233,30 @@ int css_enable_mss(void) void css_reset_sch(SubchDev *sch) { - PMCW *p = &sch->curr_status.pmcw; + SCHIB *schib = &sch->curr_status; - if ((p->flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) { + if ((schib->pmcw.flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) { sch->disable_cb(sch); } - p->intparm = 0; - p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | + schib->pmcw.intparm = 0; + schib->pmcw.flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME | PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF); - p->flags |= PMCW_FLAGS_MASK_DNV; - p->devno = sch->devno; - p->pim = 0x80; - p->lpm = p->pim; - p->pnom = 0; - p->lpum = 0; - p->mbi = 0; - p->pom = 0xff; - p->pam = 0x80; - p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME | + schib->pmcw.flags |= PMCW_FLAGS_MASK_DNV; + schib->pmcw.devno = sch->devno; + schib->pmcw.pim = 0x80; + schib->pmcw.lpm = schib->pmcw.pim; + schib->pmcw.pnom = 0; + schib->pmcw.lpum = 0; + schib->pmcw.mbi = 0; + schib->pmcw.pom = 0xff; + schib->pmcw.pam = 0x80; + schib->pmcw.chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME | PMCW_CHARS_MASK_CSENSE); - 
memset(&sch->curr_status.scsw, 0, sizeof(sch->curr_status.scsw)); - sch->curr_status.mba = 0; + memset(&schib->scsw, 0, sizeof(schib->scsw)); + schib->mba = 0; sch->channel_prog = 0x0; sch->last_cmd_valid = false; @@ -2433,7 +2420,7 @@ static int css_sch_get_chpids(SubchDev *sch, CssDevId *dev_id) FILE *fd; uint32_t chpid[8]; int i; - PMCW *p = &sch->curr_status.pmcw; + SCHIB *schib = &sch->curr_status; fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/chpids", dev_id->cssid, dev_id->ssid, dev_id->devid); @@ -2452,8 +2439,8 @@ static int css_sch_get_chpids(SubchDev *sch, CssDevId *dev_id) return -EINVAL; } - for (i = 0; i < ARRAY_SIZE(p->chpid); i++) { - p->chpid[i] = chpid[i]; + for (i = 0; i < ARRAY_SIZE(schib->pmcw.chpid); i++) { + schib->pmcw.chpid[i] = chpid[i]; } fclose(fd); @@ -2467,7 +2454,7 @@ static int css_sch_get_path_masks(SubchDev *sch, CssDevId *dev_id) char *fid_path; FILE *fd; uint32_t pim, pam, pom; - PMCW *p = &sch->curr_status.pmcw; + SCHIB *schib = &sch->curr_status; fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/pimpampom", dev_id->cssid, dev_id->ssid, dev_id->devid); @@ -2484,9 +2471,9 @@ static int css_sch_get_path_masks(SubchDev *sch, CssDevId *dev_id) return -EINVAL; } - p->pim = pim; - p->pam = pam; - p->pom = pom; + schib->pmcw.pim = pim; + schib->pmcw.pam = pam; + schib->pmcw.pom = pom; fclose(fd); g_free(fid_path); @@ -2528,16 +2515,15 @@ static int css_sch_get_chpid_type(uint8_t chpid, uint32_t *type, int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id) { CssImage *css = channel_subsys.css[sch->cssid]; - PMCW *p = &sch->curr_status.pmcw; - SCSW *s = &sch->curr_status.scsw; + SCHIB *schib = &sch->curr_status; uint32_t type; int i, ret; assert(css != NULL); - memset(p, 0, sizeof(PMCW)); - p->flags |= PMCW_FLAGS_MASK_DNV; + memset(&schib->pmcw, 0, sizeof(PMCW)); + schib->pmcw.flags |= PMCW_FLAGS_MASK_DNV; /* We are dealing with I/O subchannels only. */ - p->devno = sch->devno; + schib->pmcw.devno = sch->devno; /* Grab path mask from sysfs. */ ret = css_sch_get_path_masks(sch, dev_id); @@ -2552,20 +2538,20 @@ int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id) } /* Build chpid type. 
*/ - for (i = 0; i < ARRAY_SIZE(p->chpid); i++) { - if (p->chpid[i] && !css->chpids[p->chpid[i]].in_use) { - ret = css_sch_get_chpid_type(p->chpid[i], &type, dev_id); + for (i = 0; i < ARRAY_SIZE(schib->pmcw.chpid); i++) { + if (schib->pmcw.chpid[i] && !css->chpids[schib->pmcw.chpid[i]].in_use) { + ret = css_sch_get_chpid_type(schib->pmcw.chpid[i], &type, dev_id); if (ret) { return ret; } - css_add_chpid(sch->cssid, p->chpid[i], type, false); + css_add_chpid(sch->cssid, schib->pmcw.chpid[i], type, false); } } - memset(s, 0, sizeof(SCSW)); - sch->curr_status.mba = 0; - for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) { - sch->curr_status.mda[i] = 0; + memset(&schib->scsw, 0, sizeof(SCSW)); + schib->mba = 0; + for (i = 0; i < ARRAY_SIZE(schib->mda); i++) { + schib->mda[i] = 0; } return 0; diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c index 896888bf8f..d0cc06a05f 100644 --- a/hw/s390x/ipl.c +++ b/hw/s390x/ipl.c @@ -19,6 +19,7 @@ #include "hw/loader.h" #include "hw/boards.h" #include "hw/s390x/virtio-ccw.h" +#include "hw/s390x/vfio-ccw.h" #include "hw/s390x/css.h" #include "hw/s390x/ebcdic.h" #include "ipl.h" @@ -252,8 +253,6 @@ static void s390_ipl_set_boot_menu(S390IPLState *ipl) { QemuOptsList *plist = qemu_find_opts("boot-opts"); QemuOpts *opts = QTAILQ_FIRST(&plist->head); - uint8_t *flags = &ipl->qipl.qipl_flags; - uint32_t *timeout = &ipl->qipl.boot_menu_timeout; const char *tmp; unsigned long splash_time = 0; @@ -269,7 +268,7 @@ static void s390_ipl_set_boot_menu(S390IPLState *ipl) case S390_IPL_TYPE_CCW: /* In the absence of -boot menu, use zipl parameters */ if (!qemu_opt_get(opts, "menu")) { - *flags |= QIPL_FLAG_BM_OPTS_ZIPL; + ipl->qipl.qipl_flags |= QIPL_FLAG_BM_OPTS_ZIPL; return; } break; @@ -286,35 +285,55 @@ static void s390_ipl_set_boot_menu(S390IPLState *ipl) return; } - *flags |= QIPL_FLAG_BM_OPTS_CMD; + ipl->qipl.qipl_flags |= QIPL_FLAG_BM_OPTS_CMD; tmp = qemu_opt_get(opts, "splash-time"); if (tmp && qemu_strtoul(tmp, NULL, 10, &splash_time)) { error_report("splash-time is invalid, forcing it to 0"); - *timeout = 0; + ipl->qipl.boot_menu_timeout = 0; return; } if (splash_time > 0xffffffff) { error_report("splash-time is too large, forcing it to max value"); - *timeout = 0xffffffff; + ipl->qipl.boot_menu_timeout = 0xffffffff; return; } - *timeout = cpu_to_be32(splash_time); + ipl->qipl.boot_menu_timeout = cpu_to_be32(splash_time); } -static CcwDevice *s390_get_ccw_device(DeviceState *dev_st) +#define CCW_DEVTYPE_NONE 0x00 +#define CCW_DEVTYPE_VIRTIO 0x01 +#define CCW_DEVTYPE_VIRTIO_NET 0x02 +#define CCW_DEVTYPE_SCSI 0x03 +#define CCW_DEVTYPE_VFIO 0x04 + +static CcwDevice *s390_get_ccw_device(DeviceState *dev_st, int *devtype) { CcwDevice *ccw_dev = NULL; + int tmp_dt = CCW_DEVTYPE_NONE; if (dev_st) { + VirtIONet *virtio_net_dev = (VirtIONet *) + object_dynamic_cast(OBJECT(dev_st), TYPE_VIRTIO_NET); VirtioCcwDevice *virtio_ccw_dev = (VirtioCcwDevice *) object_dynamic_cast(OBJECT(qdev_get_parent_bus(dev_st)->parent), TYPE_VIRTIO_CCW_DEVICE); + VFIOCCWDevice *vfio_ccw_dev = (VFIOCCWDevice *) + object_dynamic_cast(OBJECT(dev_st), TYPE_VFIO_CCW); + if (virtio_ccw_dev) { ccw_dev = CCW_DEVICE(virtio_ccw_dev); + if (virtio_net_dev) { + tmp_dt = CCW_DEVTYPE_VIRTIO_NET; + } else { + tmp_dt = CCW_DEVTYPE_VIRTIO; + } + } else if (vfio_ccw_dev) { + ccw_dev = CCW_DEVICE(vfio_ccw_dev); + tmp_dt = CCW_DEVTYPE_VFIO; } else { SCSIDevice *sd = (SCSIDevice *) object_dynamic_cast(OBJECT(dev_st), @@ -327,9 +346,13 @@ static CcwDevice *s390_get_ccw_device(DeviceState *dev_st) ccw_dev = 
(CcwDevice *)object_dynamic_cast(OBJECT(scsi_ccw), TYPE_CCW_DEVICE); + tmp_dt = CCW_DEVTYPE_SCSI; } } } + if (devtype) { + *devtype = tmp_dt; + } return ccw_dev; } @@ -337,20 +360,22 @@ static bool s390_gen_initial_iplb(S390IPLState *ipl) { DeviceState *dev_st; CcwDevice *ccw_dev = NULL; + SCSIDevice *sd; + int devtype; dev_st = get_boot_device(0); if (dev_st) { - ccw_dev = s390_get_ccw_device(dev_st); + ccw_dev = s390_get_ccw_device(dev_st, &devtype); } /* * Currently allow IPL only from CCW devices. */ if (ccw_dev) { - SCSIDevice *sd = (SCSIDevice *) object_dynamic_cast(OBJECT(dev_st), - TYPE_SCSI_DEVICE); - - if (sd) { + switch (devtype) { + case CCW_DEVTYPE_SCSI: + sd = (SCSIDevice *) object_dynamic_cast(OBJECT(dev_st), + TYPE_SCSI_DEVICE); ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN); ipl->iplb.blk0_len = cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN - S390_IPLB_HEADER_LEN); @@ -360,20 +385,24 @@ static bool s390_gen_initial_iplb(S390IPLState *ipl) ipl->iplb.scsi.channel = cpu_to_be16(sd->channel); ipl->iplb.scsi.devno = cpu_to_be16(ccw_dev->sch->devno); ipl->iplb.scsi.ssid = ccw_dev->sch->ssid & 3; - } else { - VirtIONet *vn = (VirtIONet *) object_dynamic_cast(OBJECT(dev_st), - TYPE_VIRTIO_NET); - + break; + case CCW_DEVTYPE_VFIO: + ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN); + ipl->iplb.pbt = S390_IPL_TYPE_CCW; + ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno); + ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3; + break; + case CCW_DEVTYPE_VIRTIO_NET: + ipl->netboot = true; + /* Fall through to CCW_DEVTYPE_VIRTIO case */ + case CCW_DEVTYPE_VIRTIO: ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN); ipl->iplb.blk0_len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN - S390_IPLB_HEADER_LEN); ipl->iplb.pbt = S390_IPL_TYPE_CCW; ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno); ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3; - - if (vn) { - ipl->netboot = true; - } + break; } if (!s390_ipl_set_loadparm(ipl->iplb.loadparm)) { @@ -532,7 +561,7 @@ void s390_ipl_reset_request(CPUState *cs, enum s390_reset reset_type) !ipl->netboot && ipl->iplb.pbt == S390_IPL_TYPE_CCW && is_virtio_scsi_device(&ipl->iplb)) { - CcwDevice *ccw_dev = s390_get_ccw_device(get_boot_device(0)); + CcwDevice *ccw_dev = s390_get_ccw_device(get_boot_device(0), NULL); if (ccw_dev && cpu_to_be16(ccw_dev->sch->devno) == ipl->iplb.ccw.devno && diff --git a/hw/s390x/s390-ccw.c b/hw/s390x/s390-ccw.c index cad91ee626..f5f025d1b6 100644 --- a/hw/s390x/s390-ccw.c +++ b/hw/s390x/s390-ccw.c @@ -124,6 +124,14 @@ static void s390_ccw_unrealize(S390CCWDevice *cdev, Error **errp) g_free(cdev->mdevid); } +static void s390_ccw_instance_init(Object *obj) +{ + S390CCWDevice *dev = S390_CCW_DEVICE(obj); + + device_add_bootindex_property(obj, &dev->bootindex, "bootindex", + "/disk@0,0", DEVICE(obj), NULL); +} + static void s390_ccw_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -137,6 +145,7 @@ static void s390_ccw_class_init(ObjectClass *klass, void *data) static const TypeInfo s390_ccw_info = { .name = TYPE_S390_CCW, .parent = TYPE_CCW_DEVICE, + .instance_init = s390_ccw_instance_init, .instance_size = sizeof(S390CCWDevice), .class_size = sizeof(S390CCWDeviceClass), .class_init = s390_ccw_class_init, diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index d11069b860..bbc6e8fa0b 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -15,6 +15,7 @@ #include "cpu.h" #include "hw/boards.h" #include "exec/address-spaces.h" +#include "exec/ram_addr.h" #include 
"hw/s390x/s390-virtio-hcall.h" #include "hw/s390x/sclp.h" #include "hw/s390x/s390_flic.h" @@ -163,6 +164,7 @@ static void s390_memory_init(ram_addr_t mem_size) MemoryRegion *sysmem = get_system_memory(); ram_addr_t chunk, offset = 0; unsigned int number = 0; + Error *local_err = NULL; gchar *name; /* allocate RAM for core */ @@ -182,6 +184,15 @@ static void s390_memory_init(ram_addr_t mem_size) } g_free(name); + /* + * Configure the maximum page size. As no memory devices were created + * yet, this is the page size of initial memory only. + */ + s390_set_max_pagesize(qemu_maxrampagesize(), &local_err); + if (local_err) { + error_report_err(local_err); + exit(EXIT_FAILURE); + } /* Initialize storage key device */ s390_skeys_init(); /* Initialize storage attributes device */ @@ -253,6 +264,7 @@ static void ccw_init(MachineState *machine) DeviceState *dev; s390_sclp_init(); + /* init memory + setup max page size. Required for the CPU model */ s390_memory_init(machine->ram_size); /* init CPUs (incl. CPU model) early so s390_has_feature() works */ @@ -646,14 +658,26 @@ bool css_migration_enabled(void) } \ type_init(ccw_machine_register_##suffix) +static void ccw_machine_4_1_instance_options(MachineState *machine) +{ +} + +static void ccw_machine_4_1_class_options(MachineClass *mc) +{ +} +DEFINE_CCW_MACHINE(4_1, "4.1", true); + static void ccw_machine_4_0_instance_options(MachineState *machine) { + ccw_machine_4_1_instance_options(machine); } static void ccw_machine_4_0_class_options(MachineClass *mc) { + ccw_machine_4_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); } -DEFINE_CCW_MACHINE(4_0, "4.0", true); +DEFINE_CCW_MACHINE(4_0, "4.0", false); static void ccw_machine_3_1_instance_options(MachineState *machine) { diff --git a/hw/s390x/trace-events b/hw/s390x/trace-events index 0d3622ec6f..0dc5b818c4 100644 --- a/hw/s390x/trace-events +++ b/hw/s390x/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/s390x/css.c +# css.c css_enable_facility(const char *facility) "CSS: enable %s" css_crw(uint8_t rsc, uint8_t erc, uint16_t rsid, const char *chained) "CSS: queueing crw: rsc=0x%x, erc=0x%x, rsid=0x%x %s" css_chpid_add(uint8_t cssid, uint8_t chpid, uint8_t type) "CSS: add chpid %x.%02x (type 0x%02x)" @@ -10,7 +10,7 @@ css_io_interrupt(int cssid, int ssid, int schid, uint32_t intparm, uint8_t isc, css_adapter_interrupt(uint8_t isc) "CSS: adapter I/O interrupt (isc 0x%x)" css_do_sic(uint16_t mode, uint8_t isc) "CSS: set interruption mode 0x%x on isc 0x%x" -# hw/s390x/virtio-ccw.c +# virtio-ccw.c virtio_ccw_interpret_ccw(int cssid, int ssid, int schid, int cmd_code) "VIRTIO-CCW: %x.%x.%04x: interpret command 0x%x" virtio_ccw_new_device(int cssid, int ssid, int schid, int devno, const char *devno_mode) "VIRTIO-CCW: add subchannel %x.%x.%04x, devno 0x%04x (%s)" virtio_ccw_set_ind(uint64_t ind_loc, uint8_t ind_old, uint8_t ind_new) "VIRTIO-CCW: indicator at %" PRIu64 ": 0x%x->0x%x" diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events index 09f3fc3086..452b5994e6 100644 --- a/hw/scsi/trace-events +++ b/hw/scsi/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. 
-# hw/scsi/scsi-bus.c +# scsi-bus.c scsi_req_alloc(int target, int lun, int tag) "target %d lun %d tag %d" scsi_req_cancel(int target, int lun, int tag) "target %d lun %d tag %d" scsi_req_data(int target, int lun, int tag, int len) "target %d lun %d tag %d len %d" @@ -18,7 +18,7 @@ scsi_inquiry(int target, int lun, int tag, int cdb1, int cdb2) "target %d lun %d scsi_test_unit_ready(int target, int lun, int tag) "target %d lun %d tag %d" scsi_request_sense(int target, int lun, int tag) "target %d lun %d tag %d" -# hw/scsi/mptsas.c +# mptsas.c mptsas_command_complete(void *dev, uint32_t ctx, uint32_t status, uint32_t resid) "dev %p context 0x%08x status 0x%x resid %d" mptsas_diag_read(void *dev, uint32_t addr, uint32_t val) "dev %p addr 0x%08x value 0x%08x" mptsas_diag_write(void *dev, uint32_t addr, uint32_t val) "dev %p addr 0x%08x value 0x%08x" @@ -36,11 +36,11 @@ mptsas_sgl_overflow(void *dev, uint32_t ctx, uint64_t req, uint64_t found) "dev mptsas_unhandled_cmd(void *dev, uint32_t ctx, uint8_t msg_cmd) "dev %p context 0x%08x: Unhandled cmd 0x%x" mptsas_unhandled_doorbell_cmd(void *dev, int cmd) "dev %p value 0x%08x" -# hw/scsi/mptconfig.c +# mptconfig.c mptsas_config_sas_device(void *dev, int address, int port, int phy_handle, int dev_handle, int page) "dev %p address %d (port %d, handles: phy %d dev %d) page %d" mptsas_config_sas_phy(void *dev, int address, int port, int phy_handle, int dev_handle, int page) "dev %p address %d (port %d, handles: phy %d dev %d) page %d" -# hw/scsi/megasas.c +# megasas.c megasas_init_firmware(uint64_t pa) "pa 0x%" PRIx64 " " megasas_init_queue(uint64_t queue_pa, int queue_len, uint64_t head, uint64_t tail, uint32_t flags) "queue at 0x%" PRIx64 " len %d head 0x%" PRIx64 " tail 0x%" PRIx64 " flags 0x%x" megasas_initq_map_failed(int frame) "scmd %d: failed to map queue" @@ -118,7 +118,7 @@ megasas_mmio_invalid_readl(unsigned long addr) "addr 0x%lx" megasas_mmio_writel(const char *reg, uint32_t val) "reg %s: 0x%x" megasas_mmio_invalid_writel(uint32_t addr, uint32_t val) "addr 0x%x: 0x%x" -# hw/scsi/vmw_pvscsi.c +# vmw_pvscsi.c pvscsi_ring_init_data(uint32_t txr_len_log2, uint32_t rxr_len_log2) "TX/RX rings logarithms set to %d/%d" pvscsi_ring_init_msg(uint32_t len_log2) "MSG ring logarithm set to %d" pvscsi_ring_flush_cmp(uint64_t filled_cmp_ptr) "new production counter of completion ring is 0x%"PRIx64 @@ -153,7 +153,7 @@ pvscsi_state(const char* state) "starting %s ..." 
pvscsi_tx_rings_ppn(const char* label, uint64_t ppn) "%s page: 0x%"PRIx64 pvscsi_tx_rings_num_pages(const char* label, uint32_t num) "Number of %s pages: %u" -# hw/scsi/esp.c +# esp.c esp_error_fifo_overrun(void) "FIFO overrun" esp_error_unhandled_command(uint32_t val) "unhandled command (0x%2.2x)" esp_error_invalid_write(uint32_t val, uint32_t addr) "invalid write of 0x%02x at [0x%x]" @@ -190,7 +190,7 @@ esp_mem_writeb_cmd_selatns(uint32_t val) "Select with ATN & stop (0x%2.2x)" esp_mem_writeb_cmd_ensel(uint32_t val) "Enable selection (0x%2.2x)" esp_mem_writeb_cmd_dissel(uint32_t val) "Disable selection (0x%2.2x)" -# hw/scsi/esp-pci.c +# esp-pci.c esp_pci_error_invalid_dma_direction(void) "invalid DMA transfer direction" esp_pci_error_invalid_read(uint32_t reg) "read access outside bounds (reg 0x%x)" esp_pci_error_invalid_write(uint32_t reg) "write access outside bounds (reg 0x%x)" @@ -204,7 +204,7 @@ esp_pci_dma_start(uint32_t val) "START (0x%.8x)" esp_pci_sbac_read(uint32_t reg) "sbac: 0x%8.8x" esp_pci_sbac_write(uint32_t reg, uint32_t val) "sbac: 0x%8.8x -> 0x%8.8x" -# hw/scsi/spapr_vscsi.c +# spapr_vscsi.c spapr_vscsi_send_rsp(uint8_t status, int32_t res_in, int32_t res_out) "status: 0x%x, res_in: %"PRId32", res_out: %"PRId32 spapr_vscsi_fetch_desc_no_data(void) "no data descriptor" spapr_vscsi_fetch_desc_direct(void) "direct segment" @@ -231,7 +231,7 @@ spapr_vscsi_queue_cmd_no_drive(uint64_t lun) "Command for lun 0x%08" PRIx64 " wi spapr_vscsi_queue_cmd(uint32_t qtag, unsigned cdb, const char *cmd, int lun, int ret) "Queued command tag 0x%"PRIx32" CMD 0x%x=%s LUN %d ret: %d" spapr_vscsi_do_crq(unsigned c0, unsigned c1) "crq: %02x %02x ..." -# hw/scsi/lsi53c895a.c +# lsi53c895a.c lsi_reset(void) "Reset" lsi_update_irq(int level, uint8_t dstat, uint8_t sist1, uint8_t sist0) "Update IRQ level %d dstat 0x%02x sist 0x%02x0x%02x" lsi_update_irq_disconnected(void) "Handled IRQs & disconnected, looking for pending processes" @@ -293,7 +293,7 @@ lsi_awoken(void) "Woken by SIGP" lsi_reg_read(const char *name, int offset, uint8_t ret) "Read reg %s 0x%x = 0x%02x" lsi_reg_write(const char *name, int offset, uint8_t val) "Write reg %s 0x%x = 0x%02x" -# hw/scsi/scsi-disk.c +# scsi-disk.c scsi_disk_check_condition(uint32_t tag, uint8_t key, uint8_t asc, uint8_t ascq) "Command complete tag=0x%x sense=%d/%d/%d" scsi_disk_read_complete(uint32_t tag, size_t size) "Data ready tag=0x%x len=%zd" scsi_disk_read_data_count(uint32_t sector_count) "Read sector_count=%d" @@ -322,7 +322,7 @@ scsi_disk_dma_command_READ(uint64_t lba, uint32_t len) "Read (sector %" PRId64 " scsi_disk_dma_command_WRITE(const char *cmd, uint64_t lba, int len) "Write %s(sector %" PRId64 ", count %u)" scsi_disk_new_request(uint32_t lun, uint32_t tag, const char *line) "Command: lun=%d tag=0x%x data=%s" -# hw/scsi/scsi-generic.c +# scsi-generic.c scsi_generic_command_complete_noio(void *req, uint32_t tag, int statuc) "Command complete %p tag=0x%x status=%d" scsi_generic_read_complete(uint32_t tag, int len) "Data ready tag=0x%x len=%d" scsi_generic_read_data(uint32_t tag) "scsi_read_data tag=0x%x" diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c index 6728878a52..8b1e6876db 100644 --- a/hw/scsi/vhost-user-scsi.c +++ b/hw/scsi/vhost-user-scsi.c @@ -69,7 +69,6 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp) VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev); VHostUserSCSI *s = VHOST_USER_SCSI(dev); VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); - VhostUserState *user; Error *err = NULL; int ret; @@ 
-86,30 +85,24 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp) return; } - user = vhost_user_init(); - if (!user) { - error_setg(errp, "vhost-user-scsi: failed to init vhost_user"); + if (!vhost_user_init(&s->vhost_user, &vs->conf.chardev, errp)) { return; } - user->chr = &vs->conf.chardev; vsc->dev.nvqs = 2 + vs->conf.num_queues; vsc->dev.vqs = g_new(struct vhost_virtqueue, vsc->dev.nvqs); vsc->dev.vq_index = 0; vsc->dev.backend_features = 0; - ret = vhost_dev_init(&vsc->dev, user, + ret = vhost_dev_init(&vsc->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0); if (ret < 0) { error_setg(errp, "vhost-user-scsi: vhost initialization failed: %s", strerror(-ret)); - vhost_user_cleanup(user); - g_free(user); + vhost_user_cleanup(&s->vhost_user); return; } - s->vhost_user = user; - /* Channel and lun both are 0 for bootable vhost-user-scsi disk */ vsc->channel = 0; vsc->lun = 0; @@ -130,12 +123,7 @@ static void vhost_user_scsi_unrealize(DeviceState *dev, Error **errp) g_free(vqs); virtio_scsi_common_unrealize(dev, errp); - - if (s->vhost_user) { - vhost_user_cleanup(s->vhost_user); - g_free(s->vhost_user); - s->vhost_user = NULL; - } + vhost_user_cleanup(&s->vhost_user); } static Property vhost_user_scsi_properties[] = { diff --git a/hw/sd/Kconfig b/hw/sd/Kconfig index 864f535011..c5e1e5581c 100644 --- a/hw/sd/Kconfig +++ b/hw/sd/Kconfig @@ -12,6 +12,10 @@ config SD config SDHCI bool + select SD + +config SDHCI_PCI + bool default y if PCI_DEVICES depends on PCI - select SD + select SDHCI diff --git a/hw/sd/Makefile.objs b/hw/sd/Makefile.objs index a99d9fbb04..06657279d1 100644 --- a/hw/sd/Makefile.objs +++ b/hw/sd/Makefile.objs @@ -2,6 +2,7 @@ common-obj-$(CONFIG_PL181) += pl181.o common-obj-$(CONFIG_SSI_SD) += ssi-sd.o common-obj-$(CONFIG_SD) += sd.o core.o sdmmc-internal.o common-obj-$(CONFIG_SDHCI) += sdhci.o +common-obj-$(CONFIG_SDHCI_PCI) += sdhci-pci.o obj-$(CONFIG_MILKYMIST) += milkymist-memcard.o obj-$(CONFIG_OMAP) += omap_mmc.o diff --git a/hw/sd/sdhci-internal.h b/hw/sd/sdhci-internal.h index 19665fd401..34141400f8 100644 --- a/hw/sd/sdhci-internal.h +++ b/hw/sd/sdhci-internal.h @@ -304,4 +304,38 @@ extern const VMStateDescription sdhci_vmstate; #define ESDHC_PRNSTS_SDSTB (1 << 3) +/* + * Default SD/MMC host controller features information, which will be + * presented in CAPABILITIES register of generic SD host controller at reset. 
+ * + * support: + * - 3.3v and 1.8v voltages + * - SDMA/ADMA1/ADMA2 + * - high-speed + * max host controller R/W buffers size: 512B + * max clock frequency for SDclock: 52 MHz + * timeout clock frequency: 52 MHz + * + * does not support: + * - 3.0v voltage + * - 64-bit system bus + * - suspend/resume + */ +#define SDHC_CAPAB_REG_DEFAULT 0x057834b4 + +#define DEFINE_SDHCI_COMMON_PROPERTIES(_state) \ + DEFINE_PROP_UINT8("sd-spec-version", _state, sd_spec_version, 2), \ + DEFINE_PROP_UINT8("uhs", _state, uhs_mode, UHS_NOT_SUPPORTED), \ + \ + /* Capabilities registers provide information on supported + * features of this specific host controller implementation */ \ + DEFINE_PROP_UINT64("capareg", _state, capareg, SDHC_CAPAB_REG_DEFAULT), \ + DEFINE_PROP_UINT64("maxcurr", _state, maxcurr, 0) + +void sdhci_initfn(SDHCIState *s); +void sdhci_uninitfn(SDHCIState *s); +void sdhci_common_realize(SDHCIState *s, Error **errp); +void sdhci_common_unrealize(SDHCIState *s, Error **errp); +void sdhci_common_class_init(ObjectClass *klass, void *data); + #endif diff --git a/hw/sd/sdhci-pci.c b/hw/sd/sdhci-pci.c new file mode 100644 index 0000000000..f884661862 --- /dev/null +++ b/hw/sd/sdhci-pci.c @@ -0,0 +1,87 @@ +/* + * SDHCI device on PCI + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/hw.h" +#include "hw/sd/sdhci.h" +#include "sdhci-internal.h" + +static Property sdhci_pci_properties[] = { + DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sdhci_pci_realize(PCIDevice *dev, Error **errp) +{ + SDHCIState *s = PCI_SDHCI(dev); + Error *local_err = NULL; + + sdhci_initfn(s); + sdhci_common_realize(s, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + dev->config[PCI_CLASS_PROG] = 0x01; /* Standard Host supported DMA */ + dev->config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */ + s->irq = pci_allocate_irq(dev); + s->dma_as = pci_get_address_space(dev); + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->iomem); +} + +static void sdhci_pci_exit(PCIDevice *dev) +{ + SDHCIState *s = PCI_SDHCI(dev); + + sdhci_common_unrealize(s, &error_abort); + sdhci_uninitfn(s); +} + +static void sdhci_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = sdhci_pci_realize; + k->exit = sdhci_pci_exit; + k->vendor_id = PCI_VENDOR_ID_REDHAT; + k->device_id = PCI_DEVICE_ID_REDHAT_SDHCI; + k->class_id = PCI_CLASS_SYSTEM_SDHCI; + dc->props = sdhci_pci_properties; + + sdhci_common_class_init(klass, data); +} + +static const TypeInfo sdhci_pci_info = { + .name = TYPE_PCI_SDHCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(SDHCIState), + .class_init = sdhci_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void sdhci_pci_register_type(void) +{ + type_register_static(&sdhci_pci_info); +} + +type_init(sdhci_pci_register_type) diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index 83f1574ffd..17ad5465a7 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -40,24 +40,6 @@ #define MASKED_WRITE(reg, mask, val) (reg = (reg & (mask)) | (val)) -/* Default SD/MMC host controller features information, which will be - * presented in CAPABILITIES register of generic SD host controller at reset. 
- * - * support: - * - 3.3v and 1.8v voltages - * - SDMA/ADMA1/ADMA2 - * - high-speed - * max host controller R/W buffers size: 512B - * max clock frequency for SDclock: 52 MHz - * timeout clock frequency: 52 MHz - * - * does not support: - * - 3.0v voltage - * - 64-bit system bus - * - suspend/resume - */ -#define SDHC_CAPAB_REG_DEFAULT 0x057834b4 - static inline unsigned int sdhci_get_fifolen(SDHCIState *s) { return 1 << (9 + FIELD_EX32(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH)); @@ -1328,16 +1310,7 @@ static void sdhci_init_readonly_registers(SDHCIState *s, Error **errp) /* --- qdev common --- */ -#define DEFINE_SDHCI_COMMON_PROPERTIES(_state) \ - DEFINE_PROP_UINT8("sd-spec-version", _state, sd_spec_version, 2), \ - DEFINE_PROP_UINT8("uhs", _state, uhs_mode, UHS_NOT_SUPPORTED), \ - \ - /* Capabilities registers provide information on supported - * features of this specific host controller implementation */ \ - DEFINE_PROP_UINT64("capareg", _state, capareg, SDHC_CAPAB_REG_DEFAULT), \ - DEFINE_PROP_UINT64("maxcurr", _state, maxcurr, 0) - -static void sdhci_initfn(SDHCIState *s) +void sdhci_initfn(SDHCIState *s) { qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), TYPE_SDHCI_BUS, DEVICE(s), "sd-bus"); @@ -1348,7 +1321,7 @@ static void sdhci_initfn(SDHCIState *s) s->io_ops = &sdhci_mmio_ops; } -static void sdhci_uninitfn(SDHCIState *s) +void sdhci_uninitfn(SDHCIState *s) { timer_del(s->insert_timer); timer_free(s->insert_timer); @@ -1359,7 +1332,7 @@ static void sdhci_uninitfn(SDHCIState *s) s->fifo_buffer = NULL; } -static void sdhci_common_realize(SDHCIState *s, Error **errp) +void sdhci_common_realize(SDHCIState *s, Error **errp) { Error *local_err = NULL; @@ -1375,7 +1348,7 @@ static void sdhci_common_realize(SDHCIState *s, Error **errp) SDHC_REGISTERS_MAP_SIZE); } -static void sdhci_common_unrealize(SDHCIState *s, Error **errp) +void sdhci_common_unrealize(SDHCIState *s, Error **errp) { /* This function is expected to be called only once for each class: * - SysBus: via DeviceClass->unrealize(), @@ -1445,7 +1418,7 @@ const VMStateDescription sdhci_vmstate = { }, }; -static void sdhci_common_class_init(ObjectClass *klass, void *data) +void sdhci_common_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1454,66 +1427,6 @@ static void sdhci_common_class_init(ObjectClass *klass, void *data) dc->reset = sdhci_poweron_reset; } -/* --- qdev PCI --- */ - -static Property sdhci_pci_properties[] = { - DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState), - DEFINE_PROP_END_OF_LIST(), -}; - -static void sdhci_pci_realize(PCIDevice *dev, Error **errp) -{ - SDHCIState *s = PCI_SDHCI(dev); - Error *local_err = NULL; - - sdhci_initfn(s); - sdhci_common_realize(s, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - - dev->config[PCI_CLASS_PROG] = 0x01; /* Standard Host supported DMA */ - dev->config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */ - s->irq = pci_allocate_irq(dev); - s->dma_as = pci_get_address_space(dev); - pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->iomem); -} - -static void sdhci_pci_exit(PCIDevice *dev) -{ - SDHCIState *s = PCI_SDHCI(dev); - - sdhci_common_unrealize(s, &error_abort); - sdhci_uninitfn(s); -} - -static void sdhci_pci_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - - k->realize = sdhci_pci_realize; - k->exit = sdhci_pci_exit; - k->vendor_id = PCI_VENDOR_ID_REDHAT; - k->device_id = PCI_DEVICE_ID_REDHAT_SDHCI; - 
k->class_id = PCI_CLASS_SYSTEM_SDHCI; - dc->props = sdhci_pci_properties; - - sdhci_common_class_init(klass, data); -} - -static const TypeInfo sdhci_pci_info = { - .name = TYPE_PCI_SDHCI, - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(SDHCIState), - .class_init = sdhci_pci_class_init, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_CONVENTIONAL_PCI_DEVICE }, - { }, - }, -}; - /* --- qdev SysBus --- */ static Property sdhci_sysbus_properties[] = { @@ -1846,7 +1759,6 @@ static const TypeInfo imx_usdhc_info = { static void sdhci_register_types(void) { - type_register_static(&sdhci_pci_info); type_register_static(&sdhci_sysbus_info); type_register_static(&sdhci_bus_info); type_register_static(&imx_usdhc_info); diff --git a/hw/sd/trace-events b/hw/sd/trace-events index fb0615cd3c..52971dc033 100644 --- a/hw/sd/trace-events +++ b/hw/sd/trace-events @@ -1,12 +1,12 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/sd/bcm2835_sdhost.c +# bcm2835_sdhost.c bcm2835_sdhost_read(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" bcm2835_sdhost_write(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" bcm2835_sdhost_edm_change(const char *why, uint32_t edm) "(%s) EDM now 0x%x" bcm2835_sdhost_update_irq(uint32_t irq) "IRQ bits 0x%x\n" -# hw/sd/core.c +# core.c sdbus_command(const char *bus_name, uint8_t cmd, uint32_t arg) "@%s CMD%02d arg 0x%08x" sdbus_read(const char *bus_name, uint8_t value) "@%s value 0x%02x" sdbus_write(const char *bus_name, uint8_t value) "@%s value 0x%02x" @@ -14,7 +14,7 @@ sdbus_set_voltage(const char *bus_name, uint16_t millivolts) "@%s %u (mV)" sdbus_get_dat_lines(const char *bus_name, uint8_t dat_lines) "@%s dat_lines: %u" sdbus_get_cmd_line(const char *bus_name, bool cmd_line) "@%s cmd_line: %u" -# hw/sd/sdhci.c +# sdhci.c sdhci_set_inserted(const char *level) "card state changed: %s" sdhci_send_command(uint8_t cmd, uint32_t arg) "CMD%02u ARG[0x%08x]" sdhci_error(const char *msg) "%s" @@ -29,13 +29,12 @@ sdhci_read_dataport(uint16_t data_count) "all %u bytes of data have been read fr sdhci_write_dataport(uint16_t data_count) "write buffer filled with %u bytes of data" sdhci_capareg(const char *desc, uint16_t val) "%s: %u" -# hw/sd/sd.c +# sd.c sdcard_normal_command(const char *proto, const char *cmd_desc, uint8_t cmd, uint32_t arg, const char *state) "%s %20s/ CMD%02d arg 0x%08x (state %s)" sdcard_app_command(const char *proto, const char *acmd_desc, uint8_t acmd, uint32_t arg, const char *state) "%s %23s/ACMD%02d arg 0x%08x (state %s)" sdcard_response(const char *rspdesc, int rsplen) "%s (sz:%d)" sdcard_powerup(void) "" sdcard_inquiry_cmd41(void) "" -sdcard_set_enable(bool current_state, bool new_state) "%u -> %u" sdcard_reset(void) "" sdcard_set_blocklen(uint16_t length) "0x%03x" sdcard_inserted(bool readonly) "read_only: %u" @@ -49,10 +48,10 @@ sdcard_write_data(const char *proto, const char *cmd_desc, uint8_t cmd, uint8_t sdcard_read_data(const char *proto, const char *cmd_desc, uint8_t cmd, int length) "%s %20s/ CMD%02d len %d" sdcard_set_voltage(uint16_t millivolts) "%u mV" -# hw/sd/milkymist-memcard.c +# milkymist-memcard.c milkymist_memcard_memory_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_memcard_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" -# hw/sd/pxa2xx_mmci.c +# pxa2xx_mmci.c pxa2xx_mmci_read(uint8_t size, uint32_t addr, uint32_t value) "size %d addr 0x%02x value 0x%08x" pxa2xx_mmci_write(uint8_t 
size, uint32_t addr, uint32_t value) "size %d addr 0x%02x value 0x%08x" diff --git a/hw/sh4/Kconfig b/hw/sh4/Kconfig index 8597613a35..593662d28a 100644 --- a/hw/sh4/Kconfig +++ b/hw/sh4/Kconfig @@ -2,6 +2,7 @@ config R2D bool imply PCI_DEVICES imply TEST_DEVICES + imply RTL8139_PCI select I82378 if TEST_DEVICES select IDE_MMIO select PFLASH_CFI02 diff --git a/hw/sparc/trace-events b/hw/sparc/trace-events index 6e7259f8f8..355b07ae05 100644 --- a/hw/sparc/trace-events +++ b/hw/sparc/trace-events @@ -1,12 +1,12 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/sparc/sun4m.c +# sun4m.c sun4m_cpu_interrupt(unsigned int level) "Set CPU IRQ %d" sun4m_cpu_reset_interrupt(unsigned int level) "Reset CPU IRQ %d" sun4m_cpu_set_irq_raise(int level) "Raise CPU IRQ %d" sun4m_cpu_set_irq_lower(int level) "Lower CPU IRQ %d" -# hw/sparc/sun4m_iommu.c +# sun4m_iommu.c sun4m_iommu_mem_readl(uint64_t addr, uint32_t ret) "read reg[0x%"PRIx64"] = 0x%x" sun4m_iommu_mem_writel(uint64_t addr, uint32_t val) "write reg[0x%"PRIx64"] = 0x%x" sun4m_iommu_mem_writel_ctrl(uint64_t iostart) "iostart = 0x%"PRIx64 @@ -16,6 +16,6 @@ sun4m_iommu_page_get_flags(uint64_t pa, uint64_t iopte, uint32_t ret) "get flags sun4m_iommu_translate_pa(uint64_t addr, uint64_t pa, uint32_t iopte) "xlate dva 0x%"PRIx64" => pa 0x%"PRIx64" iopte = 0x%x" sun4m_iommu_bad_addr(uint64_t addr) "bad addr 0x%"PRIx64 -# hw/sparc/leon3.c +# leon3.c leon3_set_irq(int intno) "Set CPU IRQ %d" leon3_reset_irq(int intno) "Reset CPU IRQ %d" diff --git a/hw/sparc64/Kconfig b/hw/sparc64/Kconfig index 4a8166ebb7..d4d76a89be 100644 --- a/hw/sparc64/Kconfig +++ b/hw/sparc64/Kconfig @@ -3,13 +3,13 @@ config SUN4U imply PCI_DEVICES imply SUNHME imply TEST_DEVICES + imply PARALLEL select M48T59 select ISA_BUS select FDC select SERIAL_ISA select PCI_SABRE select IDE_CMD646 - select PARALLEL select PCKBD select SIMBA diff --git a/hw/sparc64/trace-events b/hw/sparc64/trace-events index ce597a6e9d..a0b29987d2 100644 --- a/hw/sparc64/trace-events +++ b/hw/sparc64/trace-events @@ -1,14 +1,14 @@ # See docs/devel/tracing.txt for syntax documentation. 
-# hw/sparc64/sun4u.c +# sun4u.c ebus_isa_irq_handler(int n, int level) "Set ISA IRQ %d level %d" -# hw/sparc64/sun4u_iommu.c +# sun4u_iommu.c sun4u_iommu_mem_read(uint64_t addr, uint64_t val, int size) "addr: 0x%"PRIx64" val: 0x%"PRIx64" size: %d" sun4u_iommu_mem_write(uint64_t addr, uint64_t val, int size) "addr: 0x%"PRIx64" val: 0x%"PRIx64" size: %d" sun4u_iommu_translate(uint64_t addr, uint64_t trans_addr, uint64_t tte) "xlate 0x%"PRIx64" => pa 0x%"PRIx64" tte: 0x%"PRIx64 -# hw/sparc64/sparc64.c +# sparc64.c sparc64_cpu_check_irqs_reset_irq(int intno) "Reset CPU IRQ (current interrupt 0x%x)" sparc64_cpu_check_irqs_noset_irq(uint32_t tl, uint32_t tt, int intno) "Not setting CPU IRQ: TL=%d current 0x%x >= pending 0x%x" sparc64_cpu_check_irqs_set_irq(unsigned int i, int old, int new) "Set CPU IRQ %d old=0x%x new=0x%x" diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c index 16f88f7402..c615058cc1 100644 --- a/hw/ssi/xilinx_spips.c +++ b/hw/ssi/xilinx_spips.c @@ -429,12 +429,14 @@ static void xlnx_zynqmp_qspips_reset(DeviceState *d) static inline void stripe8(uint8_t *x, int num, bool dir) { - uint8_t r[num]; - memset(r, 0, sizeof(uint8_t) * num); + uint8_t r[MAX_NUM_BUSSES]; int idx[2] = {0, 0}; int bit[2] = {0, 7}; int d = dir; + assert(num <= MAX_NUM_BUSSES); + memset(r, 0, sizeof(uint8_t) * num); + for (idx[0] = 0; idx[0] < num; ++idx[0]) { for (bit[0] = 7; bit[0] >= 0; bit[0]--) { r[idx[!d]] |= x[idx[d]] & 1 << bit[d] ? 1 << bit[!d] : 0; diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c index d97436bc7b..41024f39fb 100644 --- a/hw/timer/hpet.c +++ b/hw/timer/hpet.c @@ -744,7 +744,7 @@ static void hpet_realize(DeviceState *dev, Error **errp) HPETTimer *timer; if (!s->intcap) { - error_printf("Hpet's intcap not initialized.\n"); + warn_report("Hpet's intcap not initialized"); } if (hpet_cfg.count == UINT8_MAX) { /* first instance */ diff --git a/hw/timer/trace-events b/hw/timer/trace-events index 12eb505fee..dcaf3d6da6 100644 --- a/hw/timer/trace-events +++ b/hw/timer/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. 
-# hw/timer/slavio_timer.c +# slavio_timer.c slavio_timer_get_out(uint64_t limit, uint32_t counthigh, uint32_t count) "limit 0x%"PRIx64" count 0x%x0x%08x" slavio_timer_irq(uint32_t counthigh, uint32_t count) "callback: count 0x%x0x%08x" slavio_timer_mem_readl_invalid(uint64_t addr) "invalid read address 0x%"PRIx64 @@ -15,7 +15,7 @@ slavio_timer_mem_writel_mode_counter(unsigned int timer_index) "processor %d cha slavio_timer_mem_writel_mode_invalid(void) "not system timer" slavio_timer_mem_writel_invalid(uint64_t addr) "invalid write address 0x%"PRIx64 -# hw/timer/grlib_gptimer.c +# grlib_gptimer.c grlib_gptimer_enable(int id, uint32_t count) "timer:%d set count 0x%x and run" grlib_gptimer_disabled(int id, uint32_t config) "timer:%d Timer disable config 0x%x" grlib_gptimer_restart(int id, uint32_t reload) "timer:%d reload val: 0x%x" @@ -24,13 +24,13 @@ grlib_gptimer_hit(int id) "timer:%d HIT" grlib_gptimer_readl(int id, uint64_t addr, uint32_t val) "timer:%d addr 0x%"PRIx64" 0x%x" grlib_gptimer_writel(int id, uint64_t addr, uint32_t val) "timer:%d addr 0x%"PRIx64" 0x%x" -# hw/timer/lm32_timer.c +# lm32_timer.c lm32_timer_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" lm32_timer_memory_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" lm32_timer_hit(void) "timer hit" lm32_timer_irq_state(int level) "irq state %d" -# hw/timer/milkymist-sysctl.c +# milkymist-sysctl.c milkymist_sysctl_memory_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_sysctl_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" milkymist_sysctl_icap_write(uint32_t value) "value 0x%08x" @@ -41,7 +41,7 @@ milkymist_sysctl_stop_timer1(void) "Stop timer1" milkymist_sysctl_pulse_irq_timer0(void) "Pulse IRQ Timer0" milkymist_sysctl_pulse_irq_timer1(void) "Pulse IRQ Timer1" -# hw/timer/aspeed_timer.c +# aspeed_timer.c aspeed_timer_ctrl_enable(uint8_t i, bool enable) "Timer %" PRIu8 ": %d" aspeed_timer_ctrl_external_clock(uint8_t i, bool enable) "Timer %" PRIu8 ": %d" aspeed_timer_ctrl_overflow_interrupt(uint8_t i, bool enable) "Timer %" PRIu8 ": %d" @@ -50,34 +50,34 @@ aspeed_timer_set_ctrl2(uint32_t value) "Value: 0x%" PRIx32 aspeed_timer_set_value(int timer, int reg, uint32_t value) "Timer %d register %d: 0x%" PRIx32 aspeed_timer_read(uint64_t offset, unsigned size, uint64_t value) "From 0x%" PRIx64 ": of size %u: 0x%" PRIx64 -# hw/timer/armv7m_systick.c +# armv7m_systick.c systick_reload(void) "systick reload" systick_timer_tick(void) "systick reload" systick_read(uint64_t addr, uint32_t value, unsigned size) "systick read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" systick_write(uint64_t addr, uint32_t value, unsigned size) "systick write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" -# hw/timer/cmsdk_apb_timer.c +# cmsdk-apb-timer.c cmsdk_apb_timer_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB timer read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_timer_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB timer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_timer_reset(void) "CMSDK APB timer: reset" -# hw/timer/cmsdk_apb_dualtimer.c +# cmsdk-apb-dualtimer.c cmsdk_apb_dualtimer_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB dualtimer read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_dualtimer_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB dualtimer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_dualtimer_reset(void) 
"CMSDK APB dualtimer: reset" -# hw/timer/sun4v-rtc.c +# sun4v-rtc.c sun4v_rtc_read(uint64_t addr, uint64_t value) "read: addr 0x%" PRIx64 " value 0x%" PRIx64 sun4v_rtc_write(uint64_t addr, uint64_t value) "write: addr 0x%" PRIx64 " value 0x%" PRIx64 -# hw/timer/xlnx-zynqmp-rtc.c +# xlnx-zynqmp-rtc.c xlnx_zynqmp_rtc_gettime(int year, int month, int day, int hour, int min, int sec) "Get time from host: %d-%d-%d %2d:%02d:%02d" -# hw/timer/nrf51_timer.c +# nrf51_timer.c nrf51_timer_read(uint64_t addr, uint32_t value, unsigned size) "read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" nrf51_timer_write(uint64_t addr, uint32_t value, unsigned size) "write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" -# hw/timer/pl031.c +# pl031.c pl031_irq_state(int level) "irq state %d" pl031_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" pl031_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" diff --git a/hw/tpm/trace-events b/hw/tpm/trace-events index f45dcd2209..0b94aa1526 100644 --- a/hw/tpm/trace-events +++ b/hw/tpm/trace-events @@ -1,21 +1,21 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/tpm/tpm_crb.c +# tpm_crb.c tpm_crb_mmio_read(uint64_t addr, unsigned size, uint32_t val) "CRB read 0x" TARGET_FMT_plx " len:%u val: 0x%" PRIx32 tpm_crb_mmio_write(uint64_t addr, unsigned size, uint32_t val) "CRB write 0x" TARGET_FMT_plx " len:%u val: 0x%" PRIx32 -# hw/tpm/tpm_passthrough.c +# tpm_passthrough.c tpm_passthrough_handle_request(void *cmd) "processing command %p" tpm_passthrough_reset(void) "reset" -# hw/tpm/tpm_util.c +# tpm_util.c tpm_util_get_buffer_size_hdr_len(uint32_t len, size_t expected) "tpm_resp->hdr.len = %u, expected = %zu" tpm_util_get_buffer_size_len(uint32_t len, size_t expected) "tpm_resp->len = %u, expected = %zu" tpm_util_get_buffer_size_hdr_len2(uint32_t len, size_t expected) "tpm2_resp->hdr.len = %u, expected = %zu" tpm_util_get_buffer_size_len2(uint32_t len, size_t expected) "tpm2_resp->len = %u, expected = %zu" tpm_util_get_buffer_size(size_t len) "buffersize of device: %zu" -# hw/tpm/tpm_emulator.c +# tpm_emulator.c tpm_emulator_set_locality(uint8_t locty) "setting locality to %d" tpm_emulator_handle_request(void) "processing TPM command" tpm_emulator_probe_caps(uint64_t caps) "capabilities: 0x%"PRIx64 @@ -35,7 +35,7 @@ tpm_emulator_set_state_blobs_done(void) "Done setting state blobs" tpm_emulator_pre_save(void) "" tpm_emulator_inst_init(void) "" -# hw/tpm/tpm_tis.c +# tpm_tis.c tpm_tis_show_buffer(const char *direction, size_t len, const char *buf) "direction: %s len: %zu\nbuf: %s" tpm_tis_raise_irq(uint32_t irqmask) "Raising IRQ for flag 0x%08x" tpm_tis_new_active_locality(uint8_t locty) "Active locality is now %d" @@ -53,5 +53,5 @@ tpm_tis_mmio_write_lowering_irq(void) "Lowering IRQ" tpm_tis_mmio_write_data2send(uint32_t value, unsigned size) "Data to send to TPM: 0x%08x (size=%d)" tpm_tis_pre_save(uint8_t locty, uint32_t rw_offset) "locty: %d, rw_offset = %u" -# hw/tpm/tpm_ppi.c +# tpm_ppi.c tpm_ppi_memset(uint8_t *ptr, size_t size) "memset: %p %zu" diff --git a/hw/usb/bus.c b/hw/usb/bus.c index 6fffab7bfa..9a74dc9560 100644 --- a/hw/usb/bus.c +++ b/hw/usb/bus.c @@ -500,6 +500,10 @@ static void usb_mask_to_str(char *dest, size_t size, speeds[i].name); } } + + if (pos == 0) { + snprintf(dest, size, "unknown"); + } } void usb_check_attach(USBDevice *dev, Error **errp) diff --git a/hw/usb/dev-audio.c b/hw/usb/dev-audio.c index 28ac7c5165..c46d5eeb79 100644 --- a/hw/usb/dev-audio.c +++ b/hw/usb/dev-audio.c @@ -650,7 +650,7 @@ static void 
usb_audio_realize(USBDevice *dev, Error **errp) s->out.vol[1] = 240; /* 0 dB */ s->out.as.freq = USBAUDIO_SAMPLE_RATE; s->out.as.nchannels = 2; - s->out.as.fmt = AUD_FMT_S16; + s->out.as.fmt = AUDIO_FORMAT_S16; s->out.as.endianness = 0; streambuf_init(&s->out.buf, s->buffer); diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c index 06e376bcd2..99548b012d 100644 --- a/hw/usb/dev-mtp.c +++ b/hw/usb/dev-mtp.c @@ -170,7 +170,7 @@ struct MTPObject { char *path; struct stat stat; /* file monitor watch id */ - int watchid; + int64_t watchid; MTPObject *parent; uint32_t nchildren; QLIST_HEAD(, MTPObject) children; @@ -498,7 +498,7 @@ static MTPObject *usb_mtp_object_lookup_name(MTPObject *parent, return NULL; } -static MTPObject *usb_mtp_object_lookup_id(MTPState *s, int id) +static MTPObject *usb_mtp_object_lookup_id(MTPState *s, int64_t id) { MTPObject *iter; @@ -511,7 +511,7 @@ static MTPObject *usb_mtp_object_lookup_id(MTPState *s, int id) return NULL; } -static void file_monitor_event(int id, +static void file_monitor_event(int64_t id, QFileMonitorEvent ev, const char *name, void *opaque) @@ -625,8 +625,8 @@ static void usb_mtp_object_readdir(MTPState *s, MTPObject *o) } if (s->file_monitor) { - int id = qemu_file_monitor_add_watch(s->file_monitor, o->path, NULL, - file_monitor_event, s, &err); + int64_t id = qemu_file_monitor_add_watch(s->file_monitor, o->path, NULL, + file_monitor_event, s, &err); if (id == -1) { error_report("usb-mtp: failed to add watch for %s: %s", o->path, error_get_pretty(err)); @@ -1135,28 +1135,25 @@ static MTPData *usb_mtp_get_object_prop_value(MTPState *s, MTPControl *c, return d; } -/* Return correct return code for a delete event */ +/* + * Return values when object @o is deleted. + * If at least one of the deletions succeeded, + * DELETE_SUCCESS is set and if at least one + * of the deletions failed, DELETE_FAILURE is + * set. Both bits being set (DELETE_PARTIAL) + * signifies a RES_PARTIAL_DELETE being sent + * back to the initiator. 
+ */ enum { - ALL_DELETE, - PARTIAL_DELETE, - READ_ONLY, + DELETE_SUCCESS = (1 << 0), + DELETE_FAILURE = (1 << 1), + DELETE_PARTIAL = (DELETE_FAILURE | DELETE_SUCCESS), }; -/* Assumes that children, if any, have been already freed */ -static void usb_mtp_object_free_one(MTPState *s, MTPObject *o) -{ - assert(o->nchildren == 0); - QTAILQ_REMOVE(&s->objects, o, next); - g_free(o->name); - g_free(o->path); - g_free(o); -} - static int usb_mtp_deletefn(MTPState *s, MTPObject *o, uint32_t trans) { MTPObject *iter, *iter2; - bool partial_delete = false; - bool success = false; + int ret = 0; /* * TODO: Add support for Protection Status @@ -1165,34 +1162,28 @@ static int usb_mtp_deletefn(MTPState *s, MTPObject *o, uint32_t trans) QLIST_FOREACH(iter, &o->children, list) { if (iter->format == FMT_ASSOCIATION) { QLIST_FOREACH(iter2, &iter->children, list) { - usb_mtp_deletefn(s, iter2, trans); + ret |= usb_mtp_deletefn(s, iter2, trans); } } } if (o->format == FMT_UNDEFINED_OBJECT) { if (remove(o->path)) { - partial_delete = true; + ret |= DELETE_FAILURE; } else { - usb_mtp_object_free_one(s, o); - success = true; + usb_mtp_object_free(s, o); + ret |= DELETE_SUCCESS; } } else if (o->format == FMT_ASSOCIATION) { if (rmdir(o->path)) { - partial_delete = true; + ret |= DELETE_FAILURE; } else { - usb_mtp_object_free_one(s, o); - success = true; + usb_mtp_object_free(s, o); + ret |= DELETE_SUCCESS; } } - if (success && partial_delete) { - return PARTIAL_DELETE; - } - if (!success && partial_delete) { - return READ_ONLY; - } - return ALL_DELETE; + return ret; } static void usb_mtp_object_delete(MTPState *s, uint32_t handle, @@ -1226,19 +1217,24 @@ static void usb_mtp_object_delete(MTPState *s, uint32_t handle, } ret = usb_mtp_deletefn(s, o, trans); - if (ret == PARTIAL_DELETE) { - usb_mtp_queue_result(s, RES_PARTIAL_DELETE, - trans, 0, 0, 0, 0); - return; - } else if (ret == READ_ONLY) { - usb_mtp_queue_result(s, RES_STORE_READ_ONLY, trans, - 0, 0, 0, 0); - return; - } else { + switch (ret) { + case DELETE_SUCCESS: usb_mtp_queue_result(s, RES_OK, trans, 0, 0, 0, 0); - return; + break; + case DELETE_FAILURE: + usb_mtp_queue_result(s, RES_PARTIAL_DELETE, + trans, 0, 0, 0, 0); + break; + case DELETE_PARTIAL: + usb_mtp_queue_result(s, RES_PARTIAL_DELETE, + trans, 0, 0, 0, 0); + break; + default: + g_assert_not_reached(); } + + return; } static void usb_mtp_command(MTPState *s, MTPControl *c) @@ -1703,12 +1699,19 @@ static void usb_mtp_write_metadata(MTPState *s, uint64_t dlen) MTPObject *o; MTPObject *p = usb_mtp_object_lookup(s, s->dataset.parent_handle); uint32_t next_handle = s->next_handle; + size_t filename_chars = dlen - offsetof(ObjectInfo, filename); + + /* + * filename is utf-16. We're intentionally doing + * integer division to truncate if malicious guest + * sent an odd number of bytes. 
+ */ + filename_chars /= 2; assert(!s->write_pending); assert(p != NULL); - filename = utf16_to_str(MIN(dataset->length, - dlen - offsetof(ObjectInfo, filename)), + filename = utf16_to_str(MIN(dataset->length, filename_chars), dataset->filename); if (strchr(filename, '/')) { diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c index 196a9f7200..81cf5ab7a5 100644 --- a/hw/usb/hcd-ohci.c +++ b/hw/usb/hcd-ohci.c @@ -1200,7 +1200,7 @@ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head, int completion) if (head == 0) return 0; - for (cur = head; cur; cur = next_ed) { + for (cur = head; cur && link_cnt++ < ED_LINK_LIMIT; cur = next_ed) { if (ohci_read_ed(ohci, cur, &ed)) { trace_usb_ohci_ed_read_error(cur); ohci_die(ohci); @@ -1209,11 +1209,6 @@ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head, int completion) next_ed = ed.next & OHCI_DPTR_MASK; - if (++link_cnt > ED_LINK_LIMIT) { - ohci_die(ohci); - return 0; - } - if ((ed.head & OHCI_ED_H) || (ed.flags & OHCI_ED_K)) { uint32_t addr; /* Cancel pending packets for ED that have been paused. */ diff --git a/hw/usb/trace-events b/hw/usb/trace-events index 99b1e8b8ce..2d3713351c 100644 --- a/hw/usb/trace-events +++ b/hw/usb/trace-events @@ -1,16 +1,16 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/usb/core.c +# core.c usb_packet_state_change(int bus, const char *port, int ep, void *p, const char *o, const char *n) "bus %d, port %s, ep %d, packet %p, state %s -> %s" usb_packet_state_fault(int bus, const char *port, int ep, void *p, const char *o, const char *n) "bus %d, port %s, ep %d, packet %p, state %s, expected %s" -# hw/usb/bus.c +# bus.c usb_port_claim(int bus, const char *port) "bus %d, port %s" usb_port_attach(int bus, const char *port, const char *devspeed, const char *portspeed) "bus %d, port %s, devspeed %s, portspeed %s" usb_port_detach(int bus, const char *port) "bus %d, port %s" usb_port_release(int bus, const char *port) "bus %d, port %s" -# hw/usb/hcd-ohci.c +# hcd-ohci.c usb_ohci_iso_td_read_failed(uint32_t addr) "ISO_TD read error at 0x%x" usb_ohci_iso_td_head(uint32_t head, uint32_t tail, uint32_t flags, uint32_t bp, uint32_t next, uint32_t be, uint32_t framenum, uint32_t startframe, uint32_t framecount, int rel_frame_num) "ISO_TD ED head 0x%.8x tailp 0x%.8x\n0x%.8x 0x%.8x 0x%.8x 0x%.8x\nframe_number 0x%.8x starting_frame 0x%.8x\nframe_count 0x%.8x relative %d" usb_ohci_iso_td_head_offset(uint32_t o0, uint32_t o1, uint32_t o2, uint32_t o3, uint32_t o4, uint32_t o5, uint32_t o6, uint32_t o7) "0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x" @@ -67,7 +67,7 @@ usb_ohci_init_time(int64_t frametime, int64_t bittime) "usb_bit_time=%" PRId64 " usb_ohci_die(void) "" usb_ohci_async_complete(void) "" -# hw/usb/hcd-ehci.c +# hcd-ehci.c usb_ehci_reset(void) "=== RESET ===" usb_ehci_unrealize(void) "=== UNREALIZE ===" usb_ehci_opreg_read(uint32_t addr, const char *str, uint32_t val) "rd mmio 0x%04x [%s] = 0x%x" @@ -100,7 +100,7 @@ usb_ehci_doorbell_ring(void) "" usb_ehci_doorbell_ack(void) "" usb_ehci_dma_error(void) "" -# hw/usb/hcd-uhci.c +# hcd-uhci.c usb_uhci_reset(void) "=== RESET ===" usb_uhci_exit(void) "=== EXIT ===" usb_uhci_schedule_start(void) "" @@ -130,7 +130,7 @@ usb_uhci_td_nextqh(uint32_t qh, uint32_t td) "qh 0x%x, td 0x%x" usb_uhci_td_async(uint32_t qh, uint32_t td) "qh 0x%x, td 0x%x" usb_uhci_td_complete(uint32_t qh, uint32_t td) "qh 0x%x, td 0x%x" -# hw/usb/hcd-xhci.c +# hcd-xhci.c usb_xhci_reset(void) "=== RESET ===" usb_xhci_exit(void) "=== EXIT ===" usb_xhci_run(void) "" @@ 
-176,7 +176,7 @@ usb_xhci_xfer_error(void *xfer, uint32_t ret) "%p: ret %d" usb_xhci_unimplemented(const char *item, int nr) "%s (0x%x)" usb_xhci_enforced_limit(const char *item) "%s" -# hw/usb/desc.c +# desc.c usb_desc_device(int addr, int len, int ret) "dev %d query device, len %d, ret %d" usb_desc_device_qualifier(int addr, int len, int ret) "dev %d query device qualifier, len %d, ret %d" usb_desc_config(int addr, int index, int len, int ret) "dev %d query config %d, len %d, ret %d" @@ -190,7 +190,7 @@ usb_set_interface(int addr, int iface, int alt, int ret) "dev %d, interface %d, usb_clear_device_feature(int addr, int feature, int ret) "dev %d, feature %d, ret %d" usb_set_device_feature(int addr, int feature, int ret) "dev %d, feature %d, ret %d" -# hw/usb/dev-hub.c +# dev-hub.c usb_hub_reset(int addr) "dev %d" usb_hub_control(int addr, int request, int value, int index, int length) "dev %d, req 0x%x, value %d, index %d, langth %d" usb_hub_get_port_status(int addr, int nr, int status, int changed) "dev %d, port %d, status 0x%x, changed 0x%x" @@ -200,7 +200,7 @@ usb_hub_attach(int addr, int nr) "dev %d, port %d" usb_hub_detach(int addr, int nr) "dev %d, port %d" usb_hub_status_report(int addr, int status) "dev %d, status 0x%x" -# hw/usb/dev-uas.c +# dev-uas.c usb_uas_reset(int addr) "dev %d" usb_uas_command(int addr, uint16_t tag, int lun, uint32_t lun64_1, uint32_t lun64_2) "dev %d, tag 0x%x, lun %d, lun64 0x%08x-0x%08x" usb_uas_response(int addr, uint16_t tag, uint8_t code) "dev %d, tag 0x%x, code 0x%x" @@ -214,7 +214,7 @@ usb_uas_tmf_abort_task(int addr, uint16_t tag, uint16_t task_tag) "dev %d, tag 0 usb_uas_tmf_logical_unit_reset(int addr, uint16_t tag, int lun) "dev %d, tag 0x%x, lun %d" usb_uas_tmf_unsupported(int addr, uint16_t tag, uint32_t function) "dev %d, tag 0x%x, function 0x%x" -# hw/usb/dev-mtp.c +# dev-mtp.c usb_mtp_reset(int addr) "dev %d" usb_mtp_command(int dev, uint16_t code, uint32_t trans, uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint32_t arg4) "dev %d, code 0x%x, trans 0x%x, args 0x%x, 0x%x, 0x%x, 0x%x, 0x%x" usb_mtp_success(int dev, uint32_t trans, uint32_t arg0, uint32_t arg1) "dev %d, trans 0x%x, args 0x%x, 0x%x" @@ -239,7 +239,7 @@ usb_mtp_object_free(int dev, uint32_t handle, const char *path) "dev %d, handle usb_mtp_add_child(int dev, uint32_t handle, const char *path) "dev %d, handle 0x%x, path %s" usb_mtp_file_monitor_event(int dev, const char *path, const char *s) "dev %d, path %s event %s" -# hw/usb/host-libusb.c +# host-libusb.c usb_host_open_started(int bus, int addr) "dev %d:%d" usb_host_open_success(int bus, int addr) "dev %d:%d" usb_host_open_failure(int bus, int addr) "dev %d:%d" diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c index 9246729a75..31dd3a2a87 100644 --- a/hw/vfio/ccw.c +++ b/hw/vfio/ccw.c @@ -21,12 +21,12 @@ #include "hw/vfio/vfio.h" #include "hw/vfio/vfio-common.h" #include "hw/s390x/s390-ccw.h" +#include "hw/s390x/vfio-ccw.h" #include "hw/s390x/ccw-device.h" #include "exec/address-spaces.h" #include "qemu/error-report.h" -#define TYPE_VFIO_CCW "vfio-ccw" -typedef struct VFIOCCWDevice { +struct VFIOCCWDevice { S390CCWDevice cdev; VFIODevice vdev; uint64_t io_region_size; @@ -35,7 +35,7 @@ typedef struct VFIOCCWDevice { EventNotifier io_notifier; bool force_orb_pfch; bool warned_orb_pfch; -} VFIOCCWDevice; +}; static inline void warn_once_pfch(VFIOCCWDevice *vcdev, SubchDev *sch, const char *msg) @@ -130,8 +130,8 @@ static void vfio_ccw_io_notifier_handler(void *opaque) S390CCWDevice *cdev = S390_CCW_DEVICE(vcdev); 
CcwDevice *ccw_dev = CCW_DEVICE(cdev); SubchDev *sch = ccw_dev->sch; - SCSW *s = &sch->curr_status.scsw; - PMCW *p = &sch->curr_status.pmcw; + SCHIB *schib = &sch->curr_status; + SCSW s; IRB irb; int size; @@ -145,33 +145,33 @@ static void vfio_ccw_io_notifier_handler(void *opaque) switch (errno) { case ENODEV: /* Generate a deferred cc 3 condition. */ - s->flags |= SCSW_FLAGS_MASK_CC; - s->ctrl &= ~SCSW_CTRL_MASK_STCTL; - s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND); + schib->scsw.flags |= SCSW_FLAGS_MASK_CC; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND); goto read_err; case EFAULT: /* Memory problem, generate channel data check. */ - s->ctrl &= ~SCSW_ACTL_START_PEND; - s->cstat = SCSW_CSTAT_DATA_CHECK; - s->ctrl &= ~SCSW_CTRL_MASK_STCTL; - s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.cstat = SCSW_CSTAT_DATA_CHECK; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; goto read_err; default: /* Error, generate channel program check. */ - s->ctrl &= ~SCSW_ACTL_START_PEND; - s->cstat = SCSW_CSTAT_PROG_CHECK; - s->ctrl &= ~SCSW_CTRL_MASK_STCTL; - s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; goto read_err; } } else if (size != vcdev->io_region_size) { /* Information transfer error, generate channel-control check. */ - s->ctrl &= ~SCSW_ACTL_START_PEND; - s->cstat = SCSW_CSTAT_CHN_CTRL_CHK; - s->ctrl &= ~SCSW_CTRL_MASK_STCTL; - s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.cstat = SCSW_CSTAT_CHN_CTRL_CHK; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; goto read_err; } @@ -179,11 +179,13 @@ static void vfio_ccw_io_notifier_handler(void *opaque) memcpy(&irb, region->irb_area, sizeof(IRB)); /* Update control block via irb. */ - copy_scsw_to_guest(s, &irb.scsw); + s = schib->scsw; + copy_scsw_to_guest(&s, &irb.scsw); + schib->scsw = s; /* If a uint check is pending, copy sense data. 
*/ - if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) && - (p->chars & PMCW_CHARS_MASK_CSENSE)) { + if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) && + (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) { memcpy(sch->sense_data, irb.ecw, sizeof(irb.ecw)); } diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c index 40a12001f5..29b2697fe1 100644 --- a/hw/vfio/pci-quirks.c +++ b/hw/vfio/pci-quirks.c @@ -2180,3 +2180,134 @@ int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp) return 0; } + +static void vfio_pci_nvlink2_get_tgt(Object *obj, Visitor *v, + const char *name, + void *opaque, Error **errp) +{ + uint64_t tgt = (uintptr_t) opaque; + visit_type_uint64(v, name, &tgt, errp); +} + +static void vfio_pci_nvlink2_get_link_speed(Object *obj, Visitor *v, + const char *name, + void *opaque, Error **errp) +{ + uint32_t link_speed = (uint32_t)(uintptr_t) opaque; + visit_type_uint32(v, name, &link_speed, errp); +} + +int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp) +{ + int ret; + void *p; + struct vfio_region_info *nv2reg = NULL; + struct vfio_info_cap_header *hdr; + struct vfio_region_info_cap_nvlink2_ssatgt *cap; + VFIOQuirk *quirk; + + ret = vfio_get_dev_region_info(&vdev->vbasedev, + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | + PCI_VENDOR_ID_NVIDIA, + VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM, + &nv2reg); + if (ret) { + return ret; + } + + hdr = vfio_get_region_info_cap(nv2reg, VFIO_REGION_INFO_CAP_NVLINK2_SSATGT); + if (!hdr) { + ret = -ENODEV; + goto free_exit; + } + cap = (void *) hdr; + + p = mmap(NULL, nv2reg->size, PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_SHARED, vdev->vbasedev.fd, nv2reg->offset); + if (p == MAP_FAILED) { + ret = -errno; + goto free_exit; + } + + quirk = vfio_quirk_alloc(1); + memory_region_init_ram_ptr(&quirk->mem[0], OBJECT(vdev), "nvlink2-mr", + nv2reg->size, p); + QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next); + + object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64", + vfio_pci_nvlink2_get_tgt, NULL, NULL, + (void *) (uintptr_t) cap->tgt, NULL); + trace_vfio_pci_nvidia_gpu_setup_quirk(vdev->vbasedev.name, cap->tgt, + nv2reg->size); +free_exit: + g_free(nv2reg); + + return ret; +} + +int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp) +{ + int ret; + void *p; + struct vfio_region_info *atsdreg = NULL; + struct vfio_info_cap_header *hdr; + struct vfio_region_info_cap_nvlink2_ssatgt *captgt; + struct vfio_region_info_cap_nvlink2_lnkspd *capspeed; + VFIOQuirk *quirk; + + ret = vfio_get_dev_region_info(&vdev->vbasedev, + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | + PCI_VENDOR_ID_IBM, + VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD, + &atsdreg); + if (ret) { + return ret; + } + + hdr = vfio_get_region_info_cap(atsdreg, + VFIO_REGION_INFO_CAP_NVLINK2_SSATGT); + if (!hdr) { + ret = -ENODEV; + goto free_exit; + } + captgt = (void *) hdr; + + hdr = vfio_get_region_info_cap(atsdreg, + VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD); + if (!hdr) { + ret = -ENODEV; + goto free_exit; + } + capspeed = (void *) hdr; + + /* Some NVLink bridges may not have assigned ATSD */ + if (atsdreg->size) { + p = mmap(NULL, atsdreg->size, PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_SHARED, vdev->vbasedev.fd, atsdreg->offset); + if (p == MAP_FAILED) { + ret = -errno; + goto free_exit; + } + + quirk = vfio_quirk_alloc(1); + memory_region_init_ram_device_ptr(&quirk->mem[0], OBJECT(vdev), + "nvlink2-atsd-mr", atsdreg->size, p); + QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next); + } + + object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64", + vfio_pci_nvlink2_get_tgt, NULL, NULL, + (void *) 
(uintptr_t) captgt->tgt, NULL); + trace_vfio_pci_nvlink2_setup_quirk_ssatgt(vdev->vbasedev.name, captgt->tgt, + atsdreg->size); + + object_property_add(OBJECT(vdev), "nvlink2-link-speed", "uint32", + vfio_pci_nvlink2_get_link_speed, NULL, NULL, + (void *) (uintptr_t) capspeed->link_speed, NULL); + trace_vfio_pci_nvlink2_setup_quirk_lnkspd(vdev->vbasedev.name, + capspeed->link_speed); +free_exit: + g_free(atsdreg); + + return ret; +} diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index 504019c458..8cecb53d5c 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -947,8 +947,10 @@ static void vfio_pci_size_rom(VFIOPCIDevice *vdev) if (vdev->pdev.romfile || !vdev->pdev.rom_bar) { /* Since pci handles romfile, just print a message and return */ if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) { - error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified romfile\n", - vdev->vbasedev.name); + warn_report("Device at %s is known to cause system instability" + " issues during option rom execution", + vdev->vbasedev.name); + error_printf("Proceeding anyway since user specified romfile\n"); } return; } @@ -973,11 +975,16 @@ static void vfio_pci_size_rom(VFIOPCIDevice *vdev) if (vfio_blacklist_opt_rom(vdev)) { if (dev->opts && qemu_opt_get(dev->opts, "rombar")) { - error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified non zero value for rombar\n", - vdev->vbasedev.name); + warn_report("Device at %s is known to cause system instability" + " issues during option rom execution", + vdev->vbasedev.name); + error_printf("Proceeding anyway since user specified" + " non zero value for rombar\n"); } else { - error_printf("Warning : Rom loading for device at %s has been disabled due to system instability issues. 
Specify rombar=1 or romfile to force\n", - vdev->vbasedev.name); + warn_report("Rom loading for device at %s has been disabled" + " due to system instability issues", + vdev->vbasedev.name); + error_printf("Specify rombar=1 or romfile to force\n"); return; } } @@ -3079,6 +3086,20 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) } } + if (vdev->vendor_id == PCI_VENDOR_ID_NVIDIA) { + ret = vfio_pci_nvidia_v100_ram_init(vdev, errp); + if (ret && ret != -ENODEV) { + error_report("Failed to setup NVIDIA V100 GPU RAM"); + } + } + + if (vdev->vendor_id == PCI_VENDOR_ID_IBM) { + ret = vfio_pci_nvlink2_init(vdev, errp); + if (ret && ret != -ENODEV) { + error_report("Failed to setup NVlink2 bridge"); + } + } + vfio_register_err_notifier(vdev); vfio_register_req_notifier(vdev); vfio_setup_resetfn_quirk(vdev); diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h index c11c3f1670..cfcd1a81b8 100644 --- a/hw/vfio/pci.h +++ b/hw/vfio/pci.h @@ -196,6 +196,8 @@ int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp); int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev, struct vfio_region_info *info, Error **errp); +int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp); +int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp); void vfio_display_reset(VFIOPCIDevice *vdev); int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp); diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c index 57fe758e54..96c0ad9d9b 100644 --- a/hw/vfio/spapr.c +++ b/hw/vfio/spapr.c @@ -148,7 +148,7 @@ int vfio_spapr_create_window(VFIOContainer *container, uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr); unsigned entries, bits_total, bits_per_level, max_levels; struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) }; - long rampagesize = qemu_getrampagesize(); + long rampagesize = qemu_minrampagesize(); /* * The host might not support the guest supported IOMMU page size, diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index 22019728e0..b1ef55a33f 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. 
-# hw/vfio/pci.c +# pci.c vfio_intx_interrupt(const char *name, char line) " (%s) Pin %c" vfio_intx_eoi(const char *name) " (%s) EOI" vfio_intx_enable_kvm(const char *name) " (%s) KVM INTx accel enabled" @@ -16,7 +16,6 @@ vfio_msix_pba_disable(const char *name) " (%s)" vfio_msix_pba_enable(const char *name) " (%s)" vfio_msix_disable(const char *name) " (%s)" vfio_msix_fixup(const char *name, int bar, uint64_t start, uint64_t end) " (%s) MSI-X region %d mmap fixup [0x%"PRIx64" - 0x%"PRIx64"]" -vfio_msix_relo_cost(const char *name, int bar, uint64_t cost) " (%s) BAR %d cost 0x%"PRIx64"" vfio_msix_relo(const char *name, int bar, uint64_t offset) " (%s) BAR %d offset 0x%"PRIx64"" vfio_msi_enable(const char *name, int nr_vectors) " (%s) Enabled %d MSI vectors" vfio_msi_disable(const char *name) " (%s)" @@ -49,7 +48,7 @@ vfio_pci_emulated_device_id(const char *name, uint16_t val) "%s 0x%04x" vfio_pci_emulated_sub_vendor_id(const char *name, uint16_t val) "%s 0x%04x" vfio_pci_emulated_sub_device_id(const char *name, uint16_t val) "%s 0x%04x" -# hw/vfio/pci-quirks.c +# pci-quirks.c vfio_quirk_rom_blacklisted(const char *name, uint16_t vid, uint16_t did) "%s %04x:%04x" vfio_quirk_generic_window_address_write(const char *name, const char * region_name, uint64_t data) "%s %s 0x%"PRIx64 vfio_quirk_generic_window_data_read(const char *name, const char * region_name, uint64_t data) "%s %s 0x%"PRIx64 @@ -87,11 +86,16 @@ vfio_pci_igd_opregion_enabled(const char *name) "%s" vfio_pci_igd_host_bridge_enabled(const char *name) "%s" vfio_pci_igd_lpc_bridge_enabled(const char *name) "%s" -# hw/vfio/common.c +vfio_pci_nvidia_gpu_setup_quirk(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64 +vfio_pci_nvlink2_setup_quirk_ssatgt(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64 +vfio_pci_nvlink2_setup_quirk_lnkspd(const char *name, uint32_t link_speed) "%s link_speed=0x%x" + +# common.c vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)" vfio_region_read(char *name, int index, uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64 vfio_iommu_map_notify(const char *op, uint64_t iova_start, uint64_t iova_end) "iommu %s @ 0x%"PRIx64" - 0x%"PRIx64 vfio_listener_region_add_skip(uint64_t start, uint64_t end) "SKIPPING region_add 0x%"PRIx64" - 0x%"PRIx64 +vfio_spapr_group_attach(int groupfd, int tablefd) "Attached groupfd %d to liobn fd %d" vfio_listener_region_add_iommu(uint64_t start, uint64_t end) "region_add [iommu] 0x%"PRIx64" - 0x%"PRIx64 vfio_listener_region_add_ram(uint64_t iova_start, uint64_t iova_end, void *vaddr) "region_add [ram] 0x%"PRIx64" - 0x%"PRIx64" [%p]" vfio_listener_region_add_no_dma_map(const char *name, uint64_t iova, uint64_t size, uint64_t page_size) "Region \"%s\" 0x%"PRIx64" size=0x%"PRIx64" is not aligned to 0x%"PRIx64" and cannot be mapped for DMA" @@ -112,7 +116,7 @@ vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sp vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%0x8" vfio_dma_unmap_overflow_workaround(void) "" -# hw/vfio/platform.c +# platform.c vfio_platform_base_device_init(char *name, int groupid) "%s belongs to group #%d" vfio_platform_realize(char *name, char *compat) "vfio device %s, compat = %s" vfio_platform_eoi(int pin, int fd) "EOI IRQ pin %d (fd=%d)" @@ -124,16 +128,15 @@ vfio_intp_interrupt_set_pending(int 
index) "irq %d is set PENDING" vfio_platform_start_level_irqfd_injection(int index, int fd, int resamplefd) "IRQ index=%d, fd = %d, resamplefd = %d" vfio_platform_start_edge_irqfd_injection(int index, int fd) "IRQ index=%d, fd = %d" -# hw/vfio/spapr.c +# spapr.c vfio_prereg_listener_region_add_skip(uint64_t start, uint64_t end) "0x%"PRIx64" - 0x%"PRIx64 vfio_prereg_listener_region_del_skip(uint64_t start, uint64_t end) "0x%"PRIx64" - 0x%"PRIx64 vfio_prereg_register(uint64_t va, uint64_t size, int ret) "va=0x%"PRIx64" size=0x%"PRIx64" ret=%d" vfio_prereg_unregister(uint64_t va, uint64_t size, int ret) "va=0x%"PRIx64" size=0x%"PRIx64" ret=%d" vfio_spapr_create_window(int ps, unsigned int levels, uint64_t ws, uint64_t off) "pageshift=0x%x levels=%u winsize=0x%"PRIx64" offset=0x%"PRIx64 vfio_spapr_remove_window(uint64_t off) "offset=0x%"PRIx64 -vfio_spapr_group_attach(int groupfd, int tablefd) "Attached groupfd %d to liobn fd %d" -# hw/vfio/display.c +# display.c vfio_display_edid_available(void) "" vfio_display_edid_link_up(void) "" vfio_display_edid_link_down(void) "" diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index 07bcbe9e85..60c649c4bc 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/virtio/vhost.c +# vhost.c vhost_commit(bool started, bool changed) "Started: %d Changed: %d" vhost_region_add_section(const char *name, uint64_t gpa, uint64_t size, uint64_t host) "%s: 0x%"PRIx64"+0x%"PRIx64" @ 0x%"PRIx64 vhost_region_add_section_merge(const char *name, uint64_t new_size, uint64_t gpa, uint64_t owr) "%s: size: 0x%"PRIx64 " gpa: 0x%"PRIx64 " owr: 0x%"PRIx64 @@ -8,7 +8,7 @@ vhost_region_add_section_aligned(const char *name, uint64_t gpa, uint64_t size, vhost_section(const char *name, int r) "%s:%d" vhost_iotlb_miss(void *dev, int step) "%p step %d" -# hw/virtio/vhost-user.c +# vhost-user.c vhost_user_postcopy_end_entry(void) "" vhost_user_postcopy_end_exit(void) "" vhost_user_postcopy_fault_handler(const char *name, uint64_t fault_address, int nregions) "%s: @0x%"PRIx64" nregions:%d" @@ -21,7 +21,7 @@ vhost_user_postcopy_waker(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64 vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64 vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64 -# hw/virtio/virtio.c +# virtio.c virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u" virtqueue_fill(void *vq, const void *elem, unsigned int len, unsigned int idx) "vq %p elem %p len %u idx %u" virtqueue_flush(void *vq, unsigned int count) "vq %p count %u" @@ -31,7 +31,7 @@ virtio_notify_irqfd(void *vdev, void *vq) "vdev %p vq %p" virtio_notify(void *vdev, void *vq) "vdev %p vq %p" virtio_set_status(void *vdev, uint8_t val) "vdev %p val %u" -# hw/virtio/virtio-rng.c +# virtio-rng.c virtio_rng_guest_not_ready(void *rng) "rng %p: guest not ready" virtio_rng_cpu_is_stopped(void *rng, int size) "rng %p: cpu is stopped, dropping %d bytes" virtio_rng_popped(void *rng) "rng %p: elem popped" @@ -39,7 +39,7 @@ virtio_rng_pushed(void *rng, size_t len) "rng %p: %zd bytes pushed" virtio_rng_request(void *rng, size_t size, unsigned quota) "rng %p: %zd bytes requested, %u bytes quota left" virtio_rng_vm_state_change(void *rng, int running, int state) "rng %p: state change to running %d state %d" -# hw/virtio/virtio-balloon.c +# virtio-balloon.c # virtio_balloon_bad_addr(uint64_t gpa) "0x%"PRIx64 
virtio_balloon_handle_output(const char *name, uint64_t gpa) "section name: %s gpa: 0x%"PRIx64 diff --git a/hw/virtio/vhost-stub.c b/hw/virtio/vhost-stub.c index 049089b5e2..c175148fce 100644 --- a/hw/virtio/vhost-stub.c +++ b/hw/virtio/vhost-stub.c @@ -7,9 +7,9 @@ bool vhost_has_free_slot(void) return true; } -VhostUserState *vhost_user_init(void) +bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp) { - return NULL; + return false; } void vhost_user_cleanup(VhostUserState *user) diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index 0d6c64e5ca..553319c7ac 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -56,6 +56,7 @@ enum VhostUserProtocolFeature { VHOST_USER_PROTOCOL_F_CONFIG = 9, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10, VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11, + VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12, VHOST_USER_PROTOCOL_F_MAX }; @@ -93,6 +94,8 @@ typedef enum VhostUserRequest { VHOST_USER_POSTCOPY_ADVISE = 28, VHOST_USER_POSTCOPY_LISTEN = 29, VHOST_USER_POSTCOPY_END = 30, + VHOST_USER_GET_INFLIGHT_FD = 31, + VHOST_USER_SET_INFLIGHT_FD = 32, VHOST_USER_MAX } VhostUserRequest; @@ -151,6 +154,13 @@ typedef struct VhostUserVringArea { uint64_t offset; } VhostUserVringArea; +typedef struct VhostUserInflight { + uint64_t mmap_size; + uint64_t mmap_offset; + uint16_t num_queues; + uint16_t queue_size; +} VhostUserInflight; + typedef struct { VhostUserRequest request; @@ -173,6 +183,7 @@ typedef union { VhostUserConfig config; VhostUserCryptoSession session; VhostUserVringArea area; + VhostUserInflight inflight; } VhostUserPayload; typedef struct VhostUserMsg { @@ -214,7 +225,7 @@ static bool ioeventfd_enabled(void) return !kvm_enabled() || kvm_eventfds_enabled(); } -static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) +static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg) { struct vhost_user *u = dev->opaque; CharBackend *chr = u->user->chr; @@ -225,7 +236,7 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) if (r != size) { error_report("Failed to read msg header. Read %d instead of %d." " Original request %d.", r, size, msg->hdr.request); - goto fail; + return -1; } /* validate received flags */ @@ -233,7 +244,21 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) error_report("Failed to read msg header." " Flags 0x%x instead of 0x%x.", msg->hdr.flags, VHOST_USER_REPLY_MASK | VHOST_USER_VERSION); - goto fail; + return -1; + } + + return 0; +} + +static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) +{ + struct vhost_user *u = dev->opaque; + CharBackend *chr = u->user->chr; + uint8_t *p = (uint8_t *) msg; + int r, size; + + if (vhost_user_read_header(dev, msg) < 0) { + return -1; } /* validate message size is sane */ @@ -241,7 +266,7 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) error_report("Failed to read msg header." " Size %d exceeds the maximum %zu.", msg->hdr.size, VHOST_USER_PAYLOAD_SIZE); - goto fail; + return -1; } if (msg->hdr.size) { @@ -251,14 +276,11 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) if (r != size) { error_report("Failed to read msg payload." 
" Read %d instead of %d.", r, msg->hdr.size); - goto fail; + return -1; } } return 0; - -fail: - return -1; } static int process_message_reply(struct vhost_dev *dev, @@ -968,7 +990,10 @@ static void slave_read(void *opaque) iov.iov_base = &hdr; iov.iov_len = VHOST_USER_HDR_SIZE; - size = recvmsg(u->slave_fd, &msgh, 0); + do { + size = recvmsg(u->slave_fd, &msgh, 0); + } while (size < 0 && (errno == EINTR || errno == EAGAIN)); + if (size != VHOST_USER_HDR_SIZE) { error_report("Failed to read from slave."); goto err; @@ -997,7 +1022,10 @@ static void slave_read(void *opaque) } /* Read payload */ - size = read(u->slave_fd, &payload, hdr.size); + do { + size = read(u->slave_fd, &payload, hdr.size); + } while (size < 0 && (errno == EINTR || errno == EAGAIN)); + if (size != hdr.size) { error_report("Failed to read payload from slave."); goto err; @@ -1045,7 +1073,10 @@ static void slave_read(void *opaque) iovec[1].iov_base = &payload; iovec[1].iov_len = hdr.size; - size = writev(u->slave_fd, iovec, ARRAY_SIZE(iovec)); + do { + size = writev(u->slave_fd, iovec, ARRAY_SIZE(iovec)); + } while (size < 0 && (errno == EINTR || errno == EAGAIN)); + if (size != VHOST_USER_HDR_SIZE + hdr.size) { error_report("Failed to send msg reply to slave."); goto err; @@ -1750,17 +1781,118 @@ static bool vhost_user_mem_section_filter(struct vhost_dev *dev, return result; } -VhostUserState *vhost_user_init(void) +static int vhost_user_get_inflight_fd(struct vhost_dev *dev, + uint16_t queue_size, + struct vhost_inflight *inflight) +{ + void *addr; + int fd; + struct vhost_user *u = dev->opaque; + CharBackend *chr = u->user->chr; + VhostUserMsg msg = { + .hdr.request = VHOST_USER_GET_INFLIGHT_FD, + .hdr.flags = VHOST_USER_VERSION, + .payload.inflight.num_queues = dev->nvqs, + .payload.inflight.queue_size = queue_size, + .hdr.size = sizeof(msg.payload.inflight), + }; + + if (!virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { + return 0; + } + + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + return -1; + } + + if (vhost_user_read(dev, &msg) < 0) { + return -1; + } + + if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) { + error_report("Received unexpected msg type. 
" + "Expected %d received %d", + VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request); + return -1; + } + + if (msg.hdr.size != sizeof(msg.payload.inflight)) { + error_report("Received bad msg size."); + return -1; + } + + if (!msg.payload.inflight.mmap_size) { + return 0; + } + + fd = qemu_chr_fe_get_msgfd(chr); + if (fd < 0) { + error_report("Failed to get mem fd"); + return -1; + } + + addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE, + MAP_SHARED, fd, msg.payload.inflight.mmap_offset); + + if (addr == MAP_FAILED) { + error_report("Failed to mmap mem fd"); + close(fd); + return -1; + } + + inflight->addr = addr; + inflight->fd = fd; + inflight->size = msg.payload.inflight.mmap_size; + inflight->offset = msg.payload.inflight.mmap_offset; + inflight->queue_size = queue_size; + + return 0; +} + +static int vhost_user_set_inflight_fd(struct vhost_dev *dev, + struct vhost_inflight *inflight) { - VhostUserState *user = g_new0(struct VhostUserState, 1); + VhostUserMsg msg = { + .hdr.request = VHOST_USER_SET_INFLIGHT_FD, + .hdr.flags = VHOST_USER_VERSION, + .payload.inflight.mmap_size = inflight->size, + .payload.inflight.mmap_offset = inflight->offset, + .payload.inflight.num_queues = dev->nvqs, + .payload.inflight.queue_size = inflight->queue_size, + .hdr.size = sizeof(msg.payload.inflight), + }; - return user; + if (!virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { + return 0; + } + + if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) { + return -1; + } + + return 0; +} + +bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp) +{ + if (user->chr) { + error_setg(errp, "Cannot initialize vhost-user state"); + return false; + } + user->chr = chr; + return true; } void vhost_user_cleanup(VhostUserState *user) { int i; + if (!user->chr) { + return; + } + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { if (user->notifier[i].addr) { object_unparent(OBJECT(&user->notifier[i].mr)); @@ -1768,6 +1900,7 @@ void vhost_user_cleanup(VhostUserState *user) user->notifier[i].addr = NULL; } } + user->chr = NULL; } const VhostOps user_ops = { @@ -1801,4 +1934,6 @@ const VhostOps user_ops = { .vhost_crypto_create_session = vhost_user_crypto_create_session, .vhost_crypto_close_session = vhost_user_crypto_close_session, .vhost_backend_mem_section_filter = vhost_user_mem_section_filter, + .vhost_get_inflight_fd = vhost_user_get_inflight_fd, + .vhost_set_inflight_fd = vhost_user_set_inflight_fd, }; diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index 311432f190..7f61018f2a 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -1481,6 +1481,102 @@ void vhost_dev_set_config_notifier(struct vhost_dev *hdev, hdev->config_ops = ops; } +void vhost_dev_free_inflight(struct vhost_inflight *inflight) +{ + if (inflight->addr) { + qemu_memfd_free(inflight->addr, inflight->size, inflight->fd); + inflight->addr = NULL; + inflight->fd = -1; + } +} + +static int vhost_dev_resize_inflight(struct vhost_inflight *inflight, + uint64_t new_size) +{ + Error *err = NULL; + int fd = -1; + void *addr = qemu_memfd_alloc("vhost-inflight", new_size, + F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, + &fd, &err); + + if (err) { + error_report_err(err); + return -1; + } + + vhost_dev_free_inflight(inflight); + inflight->offset = 0; + inflight->addr = addr; + inflight->fd = fd; + inflight->size = new_size; + + return 0; +} + +void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f) +{ + if (inflight->addr) { + qemu_put_be64(f, inflight->size); + 
qemu_put_be16(f, inflight->queue_size); + qemu_put_buffer(f, inflight->addr, inflight->size); + } else { + qemu_put_be64(f, 0); + } +} + +int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f) +{ + uint64_t size; + + size = qemu_get_be64(f); + if (!size) { + return 0; + } + + if (inflight->size != size) { + if (vhost_dev_resize_inflight(inflight, size)) { + return -1; + } + } + inflight->queue_size = qemu_get_be16(f); + + qemu_get_buffer(f, inflight->addr, size); + + return 0; +} + +int vhost_dev_set_inflight(struct vhost_dev *dev, + struct vhost_inflight *inflight) +{ + int r; + + if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) { + r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight); + if (r) { + VHOST_OPS_DEBUG("vhost_set_inflight_fd failed"); + return -errno; + } + } + + return 0; +} + +int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, + struct vhost_inflight *inflight) +{ + int r; + + if (dev->vhost_ops->vhost_get_inflight_fd) { + r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight); + if (r) { + VHOST_OPS_DEBUG("vhost_get_inflight_fd failed"); + return -errno; + } + } + + return 0; +} + /* Host notifiers must be enabled at this point. */ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) { diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index e3a65940ef..2112874055 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -82,7 +82,7 @@ static void balloon_inflate_page(VirtIOBalloon *balloon, /* We've partially ballooned part of a host page, but now * we're trying to balloon part of a different one. Too hard, * give up on the old partial page */ - free(balloon->pbp); + g_free(balloon->pbp); balloon->pbp = NULL; } @@ -107,11 +107,61 @@ static void balloon_inflate_page(VirtIOBalloon *balloon, * has already reported them, and failing to discard a balloon * page is not fatal */ - free(balloon->pbp); + g_free(balloon->pbp); balloon->pbp = NULL; } } +static void balloon_deflate_page(VirtIOBalloon *balloon, + MemoryRegion *mr, hwaddr offset) +{ + void *addr = memory_region_get_ram_ptr(mr) + offset; + RAMBlock *rb; + size_t rb_page_size; + ram_addr_t ram_offset, host_page_base; + void *host_addr; + int ret; + + /* XXX is there a better way to get to the RAMBlock than via a + * host address? */ + rb = qemu_ram_block_from_host(addr, false, &ram_offset); + rb_page_size = qemu_ram_pagesize(rb); + host_page_base = ram_offset & ~(rb_page_size - 1); + + if (balloon->pbp + && rb == balloon->pbp->rb + && host_page_base == balloon->pbp->base) { + int subpages = rb_page_size / BALLOON_PAGE_SIZE; + + /* + * This means the guest has asked to discard some of the 4kiB + * subpages of a host page, but then changed its mind and + * asked to keep them after all. It's exceedingly unlikely + * for a guest to do this in practice, but handle it anyway, + * since getting it wrong could mean discarding memory the + * guest is still using. 
*/ + bitmap_clear(balloon->pbp->bitmap, + (ram_offset - balloon->pbp->base) / BALLOON_PAGE_SIZE, + subpages); + + if (bitmap_empty(balloon->pbp->bitmap, subpages)) { + g_free(balloon->pbp); + balloon->pbp = NULL; + } + } + + host_addr = (void *)((uintptr_t)addr & ~(rb_page_size - 1)); + + /* When a page is deflated, we hint the whole host page it lives + * on, since we can't do anything smaller */ + ret = qemu_madvise(host_addr, rb_page_size, QEMU_MADV_WILLNEED); + if (ret != 0) { + warn_report("Couldn't MADV_WILLNEED on balloon deflate: %s", + strerror(errno)); + /* Otherwise ignore, failing to page hint shouldn't be fatal */ + } +} + static const char *balloon_stat_names[] = { [VIRTIO_BALLOON_S_SWAP_IN] = "stat-swap-in", [VIRTIO_BALLOON_S_SWAP_OUT] = "stat-swap-out", @@ -315,8 +365,15 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq) trace_virtio_balloon_handle_output(memory_region_name(section.mr), pa); - if (!qemu_balloon_is_inhibited() && vq != s->dvq) { - balloon_inflate_page(s, section.mr, section.offset_within_region); + if (!qemu_balloon_is_inhibited()) { + if (vq == s->ivq) { + balloon_inflate_page(s, section.mr, + section.offset_within_region); + } else if (vq == s->dvq) { + balloon_deflate_page(s, section.mr, section.offset_within_region); + } else { + g_assert_not_reached(); + } } memory_region_unref(section.mr); } @@ -391,6 +448,7 @@ static bool get_free_page_hints(VirtIOBalloon *dev) VirtQueueElement *elem; VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtQueue *vq = dev->free_page_vq; + bool ret = true; while (dev->block_iothread) { qemu_cond_wait(&dev->free_page_cond, &dev->free_page_lock); @@ -405,13 +463,12 @@ static bool get_free_page_hints(VirtIOBalloon *dev) uint32_t id; size_t size = iov_to_buf(elem->out_sg, elem->out_num, 0, &id, sizeof(id)); - virtqueue_push(vq, elem, size); - g_free(elem); virtio_tswap32s(vdev, &id); if (unlikely(size != sizeof(id))) { virtio_error(vdev, "received an incorrect cmd id"); - return false; + ret = false; + goto out; } if (id == dev->free_page_report_cmd_id) { dev->free_page_report_status = FREE_PAGE_REPORT_S_START; @@ -431,11 +488,12 @@ static bool get_free_page_hints(VirtIOBalloon *dev) qemu_guest_free_page_hint(elem->in_sg[0].iov_base, elem->in_sg[0].iov_len); } - virtqueue_push(vq, elem, 1); - g_free(elem); } - return true; +out: + virtqueue_push(vq, elem, 1); + g_free(elem); + return ret; } static void virtio_ballloon_get_free_page_hints(void *opaque) diff --git a/hw/watchdog/trace-events b/hw/watchdog/trace-events index fee95847df..ab94d7df50 100644 --- a/hw/watchdog/trace-events +++ b/hw/watchdog/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. -# hw/char/cmsdk_apb_watchdog.c +# cmsdk-apb-watchdog.c cmsdk_apb_watchdog_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_watchdog_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_watchdog_reset(void) "CMSDK APB watchdog: reset" diff --git a/hw/xen/trace-events b/hw/xen/trace-events index f6944624b2..bc82ecb1a5 100644 --- a/hw/xen/trace-events +++ b/hw/xen/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.txt for syntax documentation. 
-# include/hw/xen/xen_common.h +# ../../include/hw/xen/xen_common.h xen_default_ioreq_server(void) "" xen_ioreq_server_create(uint32_t id) "id: %u" xen_ioreq_server_destroy(uint32_t id) "id: %u" @@ -13,7 +13,7 @@ xen_map_pcidev(uint32_t id, uint8_t bus, uint8_t dev, uint8_t func) "id: %u bdf: xen_unmap_pcidev(uint32_t id, uint8_t bus, uint8_t dev, uint8_t func) "id: %u bdf: %02x.%02x.%02x" xen_domid_restrict(int err) "err: %u" -# include/hw/xen/xen-bus.c +# xen-bus.c xen_bus_realize(void) "" xen_bus_unrealize(void) "" xen_bus_enumerate(void) "" @@ -31,7 +31,7 @@ xen_device_frontend_state(const char *type, char *name, const char *state) "type xen_device_frontend_changed(const char *type, char *name) "type: %s name: %s" xen_device_unplug(const char *type, char *name) "type: %s name: %s" -# include/hw/xen/xen-bus-helper.c +# xen-bus-helper.c xs_node_create(const char *node) "%s" xs_node_destroy(const char *node) "%s" xs_node_vprintf(char *path, char *value) "%s %s"
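A side note on the hw/virtio/vhost-user.c hunks above: the loops added around recvmsg(), read() and writev() on the slave channel simply restart the call when it is interrupted (EINTR) or temporarily unable to make progress (EAGAIN). A minimal, self-contained sketch of that retry pattern in plain POSIX C is given below; the helper name retry_read() is hypothetical and used only for illustration, it is not a QEMU API.

/*
 * Hedged sketch (not QEMU code): restart a read() that was interrupted by a
 * signal or returned a transient EAGAIN, mirroring the retry loops added
 * around recvmsg()/read()/writev() in slave_read(). retry_read() is a
 * made-up helper name for this example only.
 */
#include <errno.h>
#include <unistd.h>

ssize_t retry_read(int fd, void *buf, size_t len)
{
    ssize_t r;

    do {
        r = read(fd, buf, len);
    } while (r < 0 && (errno == EINTR || errno == EAGAIN));

    return r;
}

As in the patched slave_read(), this assumes the descriptor is effectively blocking or that data is imminent; on a genuinely non-blocking descriptor the EAGAIN case would spin, and a poll()-based wait would be the better choice there.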