Diffstat (limited to 'hw')
298 files changed, 10671 insertions, 3997 deletions
diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h index c314cf381d..df1b583a5e 100644 --- a/hw/9pfs/9p-util.h +++ b/hw/9pfs/9p-util.h @@ -13,6 +13,8 @@ #ifndef QEMU_9P_UTIL_H #define QEMU_9P_UTIL_H +#include "qemu/error-report.h" + #ifdef O_PATH #define O_PATH_9P_UTIL O_PATH #else @@ -95,6 +97,7 @@ static inline int errno_to_dotl(int err) { #endif #define qemu_openat openat +#define qemu_fstat fstat #define qemu_fstatat fstatat #define qemu_mkdirat mkdirat #define qemu_renameat renameat @@ -108,6 +111,38 @@ static inline void close_preserve_errno(int fd) errno = serrno; } +/** + * close_if_special_file() - Close @fd if neither regular file nor directory. + * + * @fd: file descriptor of open file + * Return: 0 on regular file or directory, -1 otherwise + * + * CVE-2023-2861: Prohibit opening any special file directly on host + * (especially device files), as a compromised client could potentially gain + * access outside exported tree under certain, unsafe setups. We expect + * client to handle I/O on special files exclusively on guest side. + */ +static inline int close_if_special_file(int fd) +{ + struct stat stbuf; + + if (qemu_fstat(fd, &stbuf) < 0) { + close_preserve_errno(fd); + return -1; + } + if (!S_ISREG(stbuf.st_mode) && !S_ISDIR(stbuf.st_mode)) { + error_report_once( + "9p: broken or compromised client detected; attempt to open " + "special file (i.e. neither regular file, nor directory)" + ); + close(fd); + errno = ENXIO; + return -1; + } + + return 0; +} + static inline int openat_dir(int dirfd, const char *name) { return qemu_openat(dirfd, name, @@ -142,6 +177,10 @@ again: return -1; } + if (close_if_special_file(fd) < 0) { + return -1; + } + serrno = errno; /* O_NONBLOCK was only needed to open the file. Let's drop it. We don't * do that with O_PATH since fcntl(F_SETFL) isn't supported, and openat() diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c index 9621ec1341..991645adca 100644 --- a/hw/9pfs/9p.c +++ b/hw/9pfs/9p.c @@ -738,15 +738,14 @@ static VariLenAffix affixForIndex(uint64_t index) return invertAffix(&prefix); /* convert prefix to suffix */ } -/* creative abuse of tb_hash_func7, which is based on xxhash */ static uint32_t qpp_hash(QppEntry e) { - return qemu_xxhash7(e.ino_prefix, e.dev, 0, 0, 0); + return qemu_xxhash4(e.ino_prefix, e.dev); } static uint32_t qpf_hash(QpfEntry e) { - return qemu_xxhash7(e.ino, e.dev, 0, 0, 0); + return qemu_xxhash4(e.ino, e.dev); } static bool qpd_cmp_func(const void *obj, const void *userp) diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h index 2fce4140d1..1b0d805b9c 100644 --- a/hw/9pfs/9p.h +++ b/hw/9pfs/9p.h @@ -203,7 +203,7 @@ typedef struct V9fsDir { QemuMutex readdir_mutex_L; } V9fsDir; -static inline void v9fs_readdir_lock(V9fsDir *dir) +static inline void coroutine_fn v9fs_readdir_lock(V9fsDir *dir) { if (dir->proto_version == V9FS_PROTO_2000U) { qemu_co_mutex_lock(&dir->readdir_mutex_u); @@ -212,7 +212,7 @@ static inline void v9fs_readdir_lock(V9fsDir *dir) } } -static inline void v9fs_readdir_unlock(V9fsDir *dir) +static inline void coroutine_fn v9fs_readdir_unlock(V9fsDir *dir) { if (dir->proto_version == V9FS_PROTO_2000U) { qemu_co_mutex_unlock(&dir->readdir_mutex_u); diff --git a/hw/9pfs/codir.c b/hw/9pfs/codir.c index 7ba63be489..2068a4779d 100644 --- a/hw/9pfs/codir.c +++ b/hw/9pfs/codir.c @@ -68,9 +68,9 @@ int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp, * * See v9fs_co_readdir_many() (as its only user) below for details. 
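For reference, the CVE-2023-2861 guard added in 9p-util.h above boils down to an fstat() on the freshly opened descriptor followed by an S_ISREG()/S_ISDIR() test. Below is a minimal standalone POSIX sketch of that same check, not QEMU code; the helper name and the /dev/null path are only for illustration.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Accept only regular files and directories, mirroring
 * close_if_special_file(): anything else (device node, FIFO,
 * socket, ...) is closed again and reported as ENXIO. */
static int open_no_special(const char *path)
{
    int fd = open(path, O_RDONLY);
    struct stat st;

    if (fd < 0) {
        return -1;
    }
    if (fstat(fd, &st) < 0) {
        int serrno = errno;   /* keep the fstat() errno across close() */
        close(fd);
        errno = serrno;
        return -1;
    }
    if (!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode)) {
        close(fd);
        errno = ENXIO;
        return -1;
    }
    return fd;
}

int main(void)
{
    int fd = open_no_special("/dev/null");     /* character device: rejected */

    if (fd < 0) {
        perror("open_no_special(/dev/null)");  /* expected: ENXIO */
    } else {
        close(fd);
    }
    return 0;
}

The ENXIO here mirrors the errno the patched helper sets, consistent with the comment above that clients are expected to handle I/O on special files exclusively on the guest side.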
*/ -static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, - struct V9fsDirEnt **entries, off_t offset, - int32_t maxsize, bool dostat) +static int coroutine_fn +do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, struct V9fsDirEnt **entries, + off_t offset, int32_t maxsize, bool dostat) { V9fsState *s = pdu->s; V9fsString name; diff --git a/hw/9pfs/coth.c b/hw/9pfs/coth.c index 2802d41cce..598f46add9 100644 --- a/hw/9pfs/coth.c +++ b/hw/9pfs/coth.c @@ -41,6 +41,5 @@ static int coroutine_enter_func(void *arg) void co_run_in_worker_bh(void *opaque) { Coroutine *co = opaque; - thread_pool_submit_aio(aio_get_thread_pool(qemu_get_aio_context()), - coroutine_enter_func, co, coroutine_enter_cb, co); + thread_pool_submit_aio(coroutine_enter_func, co, coroutine_enter_cb, co); } diff --git a/hw/9pfs/meson.build b/hw/9pfs/meson.build index fd37b7a02d..2944ea63c3 100644 --- a/hw/9pfs/meson.build +++ b/hw/9pfs/meson.build @@ -16,6 +16,6 @@ fs_ss.add(files( fs_ss.add(when: 'CONFIG_LINUX', if_true: files('9p-util-linux.c')) fs_ss.add(when: 'CONFIG_DARWIN', if_true: files('9p-util-darwin.c')) fs_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-9p-backend.c')) -softmmu_ss.add_all(when: 'CONFIG_FSDEV_9P', if_true: fs_ss) +system_ss.add_all(when: 'CONFIG_FSDEV_9P', if_true: fs_ss) specific_ss.add(when: 'CONFIG_VIRTIO_9P', if_true: files('virtio-9p-device.c')) diff --git a/hw/9pfs/trace-events b/hw/9pfs/trace-events index 6c77966c0b..a12e55c165 100644 --- a/hw/9pfs/trace-events +++ b/hw/9pfs/trace-events @@ -48,3 +48,9 @@ v9fs_readlink(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d" v9fs_readlink_return(uint16_t tag, uint8_t id, char* target) "tag %d id %d name %s" v9fs_setattr(uint16_t tag, uint8_t id, int32_t fid, int32_t valid, int32_t mode, int32_t uid, int32_t gid, int64_t size, int64_t atime_sec, int64_t mtime_sec) "tag %u id %u fid %d iattr={valid %d mode %d uid %d gid %d size %"PRId64" atime=%"PRId64" mtime=%"PRId64" }" v9fs_setattr_return(uint16_t tag, uint8_t id) "tag %u id %u" + +# xen-9p-backend.c +xen_9pfs_alloc(char *name) "name %s" +xen_9pfs_connect(char *name) "name %s" +xen_9pfs_disconnect(char *name) "name %s" +xen_9pfs_free(char *name) "name %s" diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c index 74f3a05f88..4aa9c8c736 100644 --- a/hw/9pfs/xen-9p-backend.c +++ b/hw/9pfs/xen-9p-backend.c @@ -25,6 +25,8 @@ #include "qemu/iov.h" #include "fsdev/qemu-fsdev.h" +#include "trace.h" + #define VERSIONS "1" #define MAX_RINGS 8 #define MAX_RING_ORDER 9 @@ -61,6 +63,7 @@ typedef struct Xen9pfsDev { int num_rings; Xen9pfsRing *rings; + MemReentrancyGuard mem_reentrancy_guard; } Xen9pfsDev; static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev); @@ -336,6 +339,8 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev) Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); int i; + trace_xen_9pfs_disconnect(xendev->name); + for (i = 0; i < xen_9pdev->num_rings; i++) { if (xen_9pdev->rings[i].evtchndev != NULL) { qemu_set_fd_handler(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev), @@ -344,40 +349,41 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev) xen_9pdev->rings[i].local_port); xen_9pdev->rings[i].evtchndev = NULL; } - } -} - -static int xen_9pfs_free(struct XenLegacyDevice *xendev) -{ - Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); - int i; - - if (xen_9pdev->rings[0].evtchndev != NULL) { - xen_9pfs_disconnect(xendev); - } - - for (i = 0; i < xen_9pdev->num_rings; i++) { if 
(xen_9pdev->rings[i].data != NULL) { xen_be_unmap_grant_refs(&xen_9pdev->xendev, xen_9pdev->rings[i].data, xen_9pdev->rings[i].intf->ref, (1 << xen_9pdev->rings[i].ring_order)); + xen_9pdev->rings[i].data = NULL; } if (xen_9pdev->rings[i].intf != NULL) { xen_be_unmap_grant_ref(&xen_9pdev->xendev, xen_9pdev->rings[i].intf, xen_9pdev->rings[i].ref); + xen_9pdev->rings[i].intf = NULL; } if (xen_9pdev->rings[i].bh != NULL) { qemu_bh_delete(xen_9pdev->rings[i].bh); + xen_9pdev->rings[i].bh = NULL; } } g_free(xen_9pdev->id); + xen_9pdev->id = NULL; g_free(xen_9pdev->tag); + xen_9pdev->tag = NULL; g_free(xen_9pdev->path); + xen_9pdev->path = NULL; g_free(xen_9pdev->security_model); + xen_9pdev->security_model = NULL; g_free(xen_9pdev->rings); + xen_9pdev->rings = NULL; +} + +static int xen_9pfs_free(struct XenLegacyDevice *xendev) +{ + trace_xen_9pfs_free(xendev->name); + return 0; } @@ -389,6 +395,8 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev) V9fsState *s = &xen_9pdev->state; QemuOpts *fsdev; + trace_xen_9pfs_connect(xendev->name); + if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings", &xen_9pdev->num_rings) == -1 || xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) { @@ -443,7 +451,9 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev) xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data + XEN_FLEX_RING_SIZE(ring_order); - xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]); + xen_9pdev->rings[i].bh = qemu_bh_new_guarded(xen_9pfs_bh, + &xen_9pdev->rings[i], + &xen_9pdev->mem_reentrancy_guard); xen_9pdev->rings[i].out_cons = 0; xen_9pdev->rings[i].out_size = 0; xen_9pdev->rings[i].inprogress = false; @@ -496,6 +506,8 @@ out: static void xen_9pfs_alloc(struct XenLegacyDevice *xendev) { + trace_xen_9pfs_alloc(xendev->name); + xenstore_write_be_str(xendev, "versions", VERSIONS); xenstore_write_be_int(xendev, "max-rings", MAX_RINGS); xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER); diff --git a/hw/acpi/cxl.c b/hw/acpi/cxl.c index 2bf8c07993..92b46bc932 100644 --- a/hw/acpi/cxl.c +++ b/hw/acpi/cxl.c @@ -30,9 +30,10 @@ #include "qapi/error.h" #include "qemu/uuid.h" -static void cedt_build_chbs(GArray *table_data, PXBDev *cxl) +static void cedt_build_chbs(GArray *table_data, PXBCXLDev *cxl) { - SysBusDevice *sbd = SYS_BUS_DEVICE(cxl->cxl.cxl_host_bridge); + PXBDev *pxb = PXB_DEV(cxl); + SysBusDevice *sbd = SYS_BUS_DEVICE(cxl->cxl_host_bridge); struct MemoryRegion *mr = sbd->mmio[0].memory; /* Type */ @@ -45,7 +46,7 @@ static void cedt_build_chbs(GArray *table_data, PXBDev *cxl) build_append_int_noprefix(table_data, 32, 2); /* UID - currently equal to bus number */ - build_append_int_noprefix(table_data, cxl->bus_nr, 4); + build_append_int_noprefix(table_data, pxb->bus_nr, 4); /* Version */ build_append_int_noprefix(table_data, 1, 4); @@ -112,7 +113,7 @@ static void cedt_build_cfmws(GArray *table_data, CXLState *cxls) /* Host Bridge List (list of UIDs - currently bus_nr) */ for (i = 0; i < fw->num_targets; i++) { g_assert(fw->target_hbs[i]); - build_append_int_noprefix(table_data, fw->target_hbs[i]->bus_nr, 4); + build_append_int_noprefix(table_data, PXB_DEV(fw->target_hbs[i])->bus_nr, 4); } } } @@ -121,7 +122,7 @@ static int cxl_foreach_pxb_hb(Object *obj, void *opaque) { Aml *cedt = opaque; - if (object_dynamic_cast(obj, TYPE_PXB_CXL_DEVICE)) { + if (object_dynamic_cast(obj, TYPE_PXB_CXL_DEV)) { cedt_build_chbs(cedt->buf, PXB_CXL_DEV(obj)); } diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build index 
e0bf39bf4c..fc1b952379 100644 --- a/hw/acpi/meson.build +++ b/hw/acpi/meson.build @@ -30,12 +30,12 @@ acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c')) if have_tpm acpi_ss.add(files('tpm.c')) endif -softmmu_ss.add(when: 'CONFIG_ACPI', if_false: files('acpi-stub.c', 'aml-build-stub.c', 'ghes-stub.c', 'acpi_interface.c')) -softmmu_ss.add(when: 'CONFIG_ACPI_PCI_BRIDGE', if_false: files('pci-bridge-stub.c')) -softmmu_ss.add_all(when: 'CONFIG_ACPI', if_true: acpi_ss) -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-stub.c', 'aml-build-stub.c', +system_ss.add(when: 'CONFIG_ACPI', if_false: files('acpi-stub.c', 'aml-build-stub.c', 'ghes-stub.c', 'acpi_interface.c')) +system_ss.add(when: 'CONFIG_ACPI_PCI_BRIDGE', if_false: files('pci-bridge-stub.c')) +system_ss.add_all(when: 'CONFIG_ACPI', if_true: acpi_ss) +system_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-stub.c', 'aml-build-stub.c', 'acpi-x86-stub.c', 'ipmi-stub.c', 'ghes-stub.c', 'acpi-mem-hotplug-stub.c', 'acpi-cpu-hotplug-stub.c', 'acpi-pci-hotplug-stub.c', 'acpi-nvdimm-stub.c', 'cxl-stub.c', 'pci-bridge-stub.c')) -softmmu_ss.add(files('acpi-qmp-cmds.c')) +system_ss.add(files('acpi-qmp-cmds.c')) diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c index dcfb779a7a..cdd6f775a1 100644 --- a/hw/acpi/pcihp.c +++ b/hw/acpi/pcihp.c @@ -357,6 +357,16 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev, * acpi_pcihp_eject_slot() when the operation is completed. */ pdev->qdev.pending_deleted_event = true; + /* if unplug was requested before OSPM is initialized, + * linux kernel will clear GPE0.sts[] bits during boot, which effectively + * hides unplug event. And than followup qmp_device_del() calls remain + * blocked by above flag permanently. + * Unblock qmp_device_del() by setting expire limit, so user can + * repeat unplug request later when OSPM has been booted. + */ + pdev->qdev.pending_deleted_expires_ms = + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); /* 1 msec */ + s->acpi_pcihp_pci_status[bsel].down |= (1U << slot); acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS); } diff --git a/hw/adc/meson.build b/hw/adc/meson.build index b29ac7ccdf..a4f85b7d46 100644 --- a/hw/adc/meson.build +++ b/hw/adc/meson.build @@ -1,5 +1,5 @@ -softmmu_ss.add(when: 'CONFIG_STM32F2XX_ADC', if_true: files('stm32f2xx_adc.c')) -softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_adc.c')) -softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_adc.c')) -softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq-xadc.c')) -softmmu_ss.add(when: 'CONFIG_MAX111X', if_true: files('max111x.c')) +system_ss.add(when: 'CONFIG_STM32F2XX_ADC', if_true: files('stm32f2xx_adc.c')) +system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_adc.c')) +system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_adc.c')) +system_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq-xadc.c')) +system_ss.add(when: 'CONFIG_MAX111X', if_true: files('max111x.c')) diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c index 4161f559a7..03495e1e60 100644 --- a/hw/alpha/dp264.c +++ b/hw/alpha/dp264.c @@ -49,6 +49,7 @@ static void clipper_init(MachineState *machine) const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; + MachineClass *mc = MACHINE_GET_CLASS(machine); AlphaCPU *cpus[4]; PCIBus *pci_bus; PCIDevice *pci_dev; @@ -124,7 +125,7 @@ static void clipper_init(MachineState *machine) /* Network setup. 
e1000 is good enough, failing Tulip support. */ for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, "e1000", NULL); + pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); } /* Super I/O */ @@ -213,6 +214,7 @@ static void clipper_machine_init(MachineClass *mc) mc->is_default = true; mc->default_cpu_type = ALPHA_CPU_TYPE_NAME("ev67"); mc->default_ram_id = "ram"; + mc->default_nic = "e1000"; } DEFINE_MACHINE("clipper", clipper_machine_init) diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index b53bd7f0b2..7de17d1e8c 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -35,20 +35,28 @@ config ARM_VIRT config CHEETAH bool + default y + depends on TCG && ARM select OMAP select TSC210X config CUBIEBOARD bool + default y + depends on TCG && ARM select ALLWINNER_A10 config DIGIC bool + default y + depends on TCG && ARM select PTIMER select PFLASH_CFI02 config EXYNOS4 bool + default y + depends on TCG && ARM imply I2C_DEVICES select A9MPCORE select I2C @@ -61,6 +69,8 @@ config EXYNOS4 config HIGHBANK bool + default y + depends on TCG && ARM select A9MPCORE select A15MPCORE select AHCI @@ -75,6 +85,8 @@ config HIGHBANK config INTEGRATOR bool + default y + depends on TCG && ARM select ARM_TIMER select INTEGRATOR_DEBUG select PL011 # UART @@ -87,12 +99,16 @@ config INTEGRATOR config MAINSTONE bool + default y + depends on TCG && ARM select PXA2XX select PFLASH_CFI01 select SMC91C111 config MUSCA bool + default y + depends on TCG && ARM select ARMSSE select PL011 select PL031 @@ -104,6 +120,8 @@ config MARVELL_88W8618 config MUSICPAL bool + default y + depends on TCG && ARM select OR_IRQ select BITBANG_I2C select MARVELL_88W8618 @@ -114,18 +132,26 @@ config MUSICPAL config NETDUINO2 bool + default y + depends on TCG && ARM select STM32F205_SOC config NETDUINOPLUS2 bool + default y + depends on TCG && ARM select STM32F405_SOC config OLIMEX_STM32_H405 bool + default y + depends on TCG && ARM select STM32F405_SOC config NSERIES bool + default y + depends on TCG && ARM select OMAP select TMP105 # temperature sensor select BLIZZARD # LCD/TV controller @@ -158,12 +184,16 @@ config PXA2XX config GUMSTIX bool + default y + depends on TCG && ARM select PFLASH_CFI01 select SMC91C111 select PXA2XX config TOSA bool + default y + depends on TCG && ARM select ZAURUS # scoop select MICRODRIVE select PXA2XX @@ -171,6 +201,8 @@ config TOSA config SPITZ bool + default y + depends on TCG && ARM select ADS7846 # touch-screen controller select MAX111X # A/D converter select WM8750 # audio codec @@ -183,6 +215,8 @@ config SPITZ config Z2 bool + default y + depends on TCG && ARM select PFLASH_CFI01 select WM8750 select PL011 # UART @@ -190,6 +224,8 @@ config Z2 config REALVIEW bool + default y + depends on TCG && ARM imply PCI_DEVICES imply PCI_TESTDEV imply I2C_DEVICES @@ -218,6 +254,8 @@ config REALVIEW config SBSA_REF bool + default y + depends on TCG && AARCH64 imply PCI_DEVICES select AHCI select ARM_SMMUV3 @@ -230,14 +268,19 @@ config SBSA_REF select PL061 # GPIO select USB_EHCI_SYSBUS select WDT_SBSA + select BOCHS_DISPLAY config SABRELITE bool + default y + depends on TCG && ARM select FSL_IMX6 select SSI_M25P80 config STELLARIS bool + default y + depends on TCG && ARM imply I2C_DEVICES select ARM_V7M select CMSDK_APB_WATCHDOG @@ -255,6 +298,8 @@ config STELLARIS config STM32VLDISCOVERY bool + default y + depends on TCG && ARM select STM32F100_SOC config STRONGARM @@ -263,16 +308,22 @@ config STRONGARM config COLLIE bool + default y + depends on TCG && ARM select PFLASH_CFI01 select 
ZAURUS # scoop select STRONGARM config SX1 bool + default y + depends on TCG && ARM select OMAP config VERSATILE bool + default y + depends on TCG && ARM select ARM_TIMER # sp804 select PFLASH_CFI01 select LSI_SCSI_PCI @@ -284,6 +335,8 @@ config VERSATILE config VEXPRESS bool + default y + depends on TCG && ARM select A9MPCORE select A15MPCORE select ARM_MPTIMER @@ -299,6 +352,8 @@ config VEXPRESS config ZYNQ bool + default y + depends on TCG && ARM select A9MPCORE select CADENCE # UART select PFLASH_CFI02 @@ -315,9 +370,9 @@ config ZYNQ config ARM_V7M bool # currently v7M must be included in a TCG build due to translate.c - default y if TCG && (ARM || AARCH64) + default y + depends on TCG && ARM select PTIMER - select ARM_COMPATIBLE_SEMIHOSTING config ALLWINNER_A10 bool @@ -329,12 +384,14 @@ config ALLWINNER_A10 select ALLWINNER_WDT select ALLWINNER_EMAC select ALLWINNER_I2C - select AXP209_PMU + select AXP2XX_PMU select SERIAL select UNIMP config ALLWINNER_H3 bool + default y + depends on TCG && ARM select ALLWINNER_A10_PIT select ALLWINNER_SUN8I_EMAC select ALLWINNER_I2C @@ -347,8 +404,22 @@ config ALLWINNER_H3 select USB_EHCI_SYSBUS select SD +config ALLWINNER_R40 + bool + default y if TCG && ARM + select ALLWINNER_SRAMC + select ALLWINNER_A10_PIT + select AXP2XX_PMU + select SERIAL + select ARM_TIMER + select ARM_GIC + select UNIMP + select SD + config RASPI bool + default y + depends on TCG && ARM select FRAMEBUFFER select PL011 # UART select SDHCI @@ -379,6 +450,8 @@ config STM32F405_SOC config XLNX_ZYNQMP_ARM bool + default y + depends on TCG && AARCH64 select AHCI select ARM_GIC select CADENCE @@ -396,6 +469,8 @@ config XLNX_ZYNQMP_ARM config XLNX_VERSAL bool + default y + depends on TCG && AARCH64 select ARM_GIC select PL011 select CADENCE @@ -406,9 +481,12 @@ config XLNX_VERSAL select OR_IRQ select XLNX_BBRAM select XLNX_EFUSE_VERSAL + select XLNX_USB_SUBSYS config NPCM7XX bool + default y + depends on TCG && ARM select A9MPCORE select ADM1272 select ARM_GIC @@ -425,6 +503,8 @@ config NPCM7XX config FSL_IMX25 bool + default y + depends on TCG && ARM imply I2C_DEVICES select IMX select IMX_FEC @@ -434,6 +514,8 @@ config FSL_IMX25 config FSL_IMX31 bool + default y + depends on TCG && ARM imply I2C_DEVICES select SERIAL select IMX @@ -454,6 +536,8 @@ config FSL_IMX6 config ASPEED_SOC bool + default y + depends on TCG && ARM select DS1338 select FTGMAC100 select I2C @@ -474,6 +558,8 @@ config ASPEED_SOC config MPS2 bool + default y + depends on TCG && ARM imply I2C_DEVICES select ARMSSE select LAN9118 @@ -489,6 +575,8 @@ config MPS2 config FSL_IMX7 bool + default y + depends on TCG && ARM imply PCI_DEVICES imply TEST_DEVICES imply I2C_DEVICES @@ -507,6 +595,8 @@ config ARM_SMMUV3 config FSL_IMX6UL bool + default y + depends on TCG && ARM imply I2C_DEVICES select A15MPCORE select IMX @@ -518,6 +608,8 @@ config FSL_IMX6UL config MICROBIT bool + default y + depends on TCG && ARM select NRF51_SOC config NRF51_SOC @@ -529,6 +621,8 @@ config NRF51_SOC config EMCRAFT_SF2 bool + default y + depends on TCG && ARM select MSF2 select SSI_M25P80 diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c new file mode 100644 index 0000000000..7d29eb224f --- /dev/null +++ b/hw/arm/allwinner-r40.c @@ -0,0 +1,526 @@ +/* + * Allwinner R40/A40i/T3 System on Chip emulation + * + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free 
Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/bswap.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "hw/qdev-core.h" +#include "hw/sysbus.h" +#include "hw/char/serial.h" +#include "hw/misc/unimp.h" +#include "hw/usb/hcd-ehci.h" +#include "hw/loader.h" +#include "sysemu/sysemu.h" +#include "hw/arm/allwinner-r40.h" +#include "hw/misc/allwinner-r40-dramc.h" + +/* Memory map */ +const hwaddr allwinner_r40_memmap[] = { + [AW_R40_DEV_SRAM_A1] = 0x00000000, + [AW_R40_DEV_SRAM_A2] = 0x00004000, + [AW_R40_DEV_SRAM_A3] = 0x00008000, + [AW_R40_DEV_SRAM_A4] = 0x0000b400, + [AW_R40_DEV_SRAMC] = 0x01c00000, + [AW_R40_DEV_EMAC] = 0x01c0b000, + [AW_R40_DEV_MMC0] = 0x01c0f000, + [AW_R40_DEV_MMC1] = 0x01c10000, + [AW_R40_DEV_MMC2] = 0x01c11000, + [AW_R40_DEV_MMC3] = 0x01c12000, + [AW_R40_DEV_CCU] = 0x01c20000, + [AW_R40_DEV_PIT] = 0x01c20c00, + [AW_R40_DEV_UART0] = 0x01c28000, + [AW_R40_DEV_UART1] = 0x01c28400, + [AW_R40_DEV_UART2] = 0x01c28800, + [AW_R40_DEV_UART3] = 0x01c28c00, + [AW_R40_DEV_UART4] = 0x01c29000, + [AW_R40_DEV_UART5] = 0x01c29400, + [AW_R40_DEV_UART6] = 0x01c29800, + [AW_R40_DEV_UART7] = 0x01c29c00, + [AW_R40_DEV_TWI0] = 0x01c2ac00, + [AW_R40_DEV_GMAC] = 0x01c50000, + [AW_R40_DEV_DRAMCOM] = 0x01c62000, + [AW_R40_DEV_DRAMCTL] = 0x01c63000, + [AW_R40_DEV_DRAMPHY] = 0x01c65000, + [AW_R40_DEV_GIC_DIST] = 0x01c81000, + [AW_R40_DEV_GIC_CPU] = 0x01c82000, + [AW_R40_DEV_GIC_HYP] = 0x01c84000, + [AW_R40_DEV_GIC_VCPU] = 0x01c86000, + [AW_R40_DEV_SDRAM] = 0x40000000 +}; + +/* List of unimplemented devices */ +struct AwR40Unimplemented { + const char *device_name; + hwaddr base; + hwaddr size; +}; + +static struct AwR40Unimplemented r40_unimplemented[] = { + { "d-engine", 0x01000000, 4 * MiB }, + { "d-inter", 0x01400000, 128 * KiB }, + { "dma", 0x01c02000, 4 * KiB }, + { "nfdc", 0x01c03000, 4 * KiB }, + { "ts", 0x01c04000, 4 * KiB }, + { "spi0", 0x01c05000, 4 * KiB }, + { "spi1", 0x01c06000, 4 * KiB }, + { "cs0", 0x01c09000, 4 * KiB }, + { "keymem", 0x01c0a000, 4 * KiB }, + { "usb0-otg", 0x01c13000, 4 * KiB }, + { "usb0-host", 0x01c14000, 4 * KiB }, + { "crypto", 0x01c15000, 4 * KiB }, + { "spi2", 0x01c17000, 4 * KiB }, + { "sata", 0x01c18000, 4 * KiB }, + { "usb1-host", 0x01c19000, 4 * KiB }, + { "sid", 0x01c1b000, 4 * KiB }, + { "usb2-host", 0x01c1c000, 4 * KiB }, + { "cs1", 0x01c1d000, 4 * KiB }, + { "spi3", 0x01c1f000, 4 * KiB }, + { "rtc", 0x01c20400, 1 * KiB }, + { "pio", 0x01c20800, 1 * KiB }, + { "owa", 0x01c21000, 1 * KiB }, + { "ac97", 0x01c21400, 1 * KiB }, + { "cir0", 0x01c21800, 1 * KiB }, + { "cir1", 0x01c21c00, 1 * KiB }, + { "pcm0", 0x01c22000, 1 * KiB }, + { "pcm1", 0x01c22400, 1 * KiB }, + { "pcm2", 0x01c22800, 1 * KiB }, + { "audio", 0x01c22c00, 1 * KiB }, + { "keypad", 0x01c23000, 1 * KiB }, + { "pwm", 0x01c23400, 1 * KiB }, + { "keyadc", 0x01c24400, 1 * KiB }, + { "ths", 0x01c24c00, 1 * KiB }, + { "rtp", 0x01c25000, 1 * KiB }, + { "pmu", 0x01c25400, 1 * KiB }, + { "cpu-cfg", 0x01c25c00, 1 * KiB }, + { "uart0", 
0x01c28000, 1 * KiB }, + { "uart1", 0x01c28400, 1 * KiB }, + { "uart2", 0x01c28800, 1 * KiB }, + { "uart3", 0x01c28c00, 1 * KiB }, + { "uart4", 0x01c29000, 1 * KiB }, + { "uart5", 0x01c29400, 1 * KiB }, + { "uart6", 0x01c29800, 1 * KiB }, + { "uart7", 0x01c29c00, 1 * KiB }, + { "ps20", 0x01c2a000, 1 * KiB }, + { "ps21", 0x01c2a400, 1 * KiB }, + { "twi1", 0x01c2b000, 1 * KiB }, + { "twi2", 0x01c2b400, 1 * KiB }, + { "twi3", 0x01c2b800, 1 * KiB }, + { "twi4", 0x01c2c000, 1 * KiB }, + { "scr", 0x01c2c400, 1 * KiB }, + { "tvd-top", 0x01c30000, 4 * KiB }, + { "tvd0", 0x01c31000, 4 * KiB }, + { "tvd1", 0x01c32000, 4 * KiB }, + { "tvd2", 0x01c33000, 4 * KiB }, + { "tvd3", 0x01c34000, 4 * KiB }, + { "gpu", 0x01c40000, 64 * KiB }, + { "hstmr", 0x01c60000, 4 * KiB }, + { "tcon-top", 0x01c70000, 4 * KiB }, + { "lcd0", 0x01c71000, 4 * KiB }, + { "lcd1", 0x01c72000, 4 * KiB }, + { "tv0", 0x01c73000, 4 * KiB }, + { "tv1", 0x01c74000, 4 * KiB }, + { "tve-top", 0x01c90000, 16 * KiB }, + { "tve0", 0x01c94000, 16 * KiB }, + { "tve1", 0x01c98000, 16 * KiB }, + { "mipi_dsi", 0x01ca0000, 4 * KiB }, + { "mipi_dphy", 0x01ca1000, 4 * KiB }, + { "ve", 0x01d00000, 1024 * KiB }, + { "mp", 0x01e80000, 128 * KiB }, + { "hdmi", 0x01ee0000, 128 * KiB }, + { "prcm", 0x01f01400, 1 * KiB }, + { "debug", 0x3f500000, 64 * KiB }, + { "cpubist", 0x3f501000, 4 * KiB }, + { "dcu", 0x3fff0000, 64 * KiB }, + { "hstmr", 0x01c60000, 4 * KiB }, + { "brom", 0xffff0000, 36 * KiB } +}; + +/* Per Processor Interrupts */ +enum { + AW_R40_GIC_PPI_MAINT = 9, + AW_R40_GIC_PPI_HYPTIMER = 10, + AW_R40_GIC_PPI_VIRTTIMER = 11, + AW_R40_GIC_PPI_SECTIMER = 13, + AW_R40_GIC_PPI_PHYSTIMER = 14 +}; + +/* Shared Processor Interrupts */ +enum { + AW_R40_GIC_SPI_UART0 = 1, + AW_R40_GIC_SPI_UART1 = 2, + AW_R40_GIC_SPI_UART2 = 3, + AW_R40_GIC_SPI_UART3 = 4, + AW_R40_GIC_SPI_TWI0 = 7, + AW_R40_GIC_SPI_UART4 = 17, + AW_R40_GIC_SPI_UART5 = 18, + AW_R40_GIC_SPI_UART6 = 19, + AW_R40_GIC_SPI_UART7 = 20, + AW_R40_GIC_SPI_TIMER0 = 22, + AW_R40_GIC_SPI_TIMER1 = 23, + AW_R40_GIC_SPI_MMC0 = 32, + AW_R40_GIC_SPI_MMC1 = 33, + AW_R40_GIC_SPI_MMC2 = 34, + AW_R40_GIC_SPI_MMC3 = 35, + AW_R40_GIC_SPI_EMAC = 55, + AW_R40_GIC_SPI_GMAC = 85, +}; + +/* Allwinner R40 general constants */ +enum { + AW_R40_GIC_NUM_SPI = 128 +}; + +#define BOOT0_MAGIC "eGON.BT0" + +/* The low 8-bits of the 'boot_media' field in the SPL header */ +#define SUNXI_BOOTED_FROM_MMC0 0 +#define SUNXI_BOOTED_FROM_NAND 1 +#define SUNXI_BOOTED_FROM_MMC2 2 +#define SUNXI_BOOTED_FROM_SPI 3 + +struct boot_file_head { + uint32_t b_instruction; + uint8_t magic[8]; + uint32_t check_sum; + uint32_t length; + uint32_t pub_head_size; + uint32_t fel_script_address; + uint32_t fel_uEnv_length; + uint32_t dt_name_offset; + uint32_t dram_size; + uint32_t boot_media; + uint32_t string_pool[13]; +}; + +bool allwinner_r40_bootrom_setup(AwR40State *s, BlockBackend *blk, int unit) +{ + const int64_t rom_size = 32 * KiB; + g_autofree uint8_t *buffer = g_new0(uint8_t, rom_size); + struct boot_file_head *head = (struct boot_file_head *)buffer; + + if (blk_pread(blk, 8 * KiB, rom_size, buffer, 0) < 0) { + error_setg(&error_fatal, "%s: failed to read BlockBackend data", + __func__); + return false; + } + + /* we only check the magic string here. */ + if (memcmp(head->magic, BOOT0_MAGIC, sizeof(head->magic))) { + return false; + } + + /* + * Simulate the behavior of the bootROM, it will change the boot_media + * flag to indicate where the chip is booting from. 
R40 can boot from + * mmc0 or mmc2, the default value of boot_media is zero + * (SUNXI_BOOTED_FROM_MMC0), let's fix this flag when it is booting from + * the others. + */ + if (unit == 2) { + head->boot_media = cpu_to_le32(SUNXI_BOOTED_FROM_MMC2); + } else { + head->boot_media = cpu_to_le32(SUNXI_BOOTED_FROM_MMC0); + } + + rom_add_blob("allwinner-r40.bootrom", buffer, rom_size, + rom_size, s->memmap[AW_R40_DEV_SRAM_A1], + NULL, NULL, NULL, NULL, false); + return true; +} + +static void allwinner_r40_init(Object *obj) +{ + static const char *mmc_names[AW_R40_NUM_MMCS] = { + "mmc0", "mmc1", "mmc2", "mmc3" + }; + AwR40State *s = AW_R40(obj); + + s->memmap = allwinner_r40_memmap; + + for (int i = 0; i < AW_R40_NUM_CPUS; i++) { + object_initialize_child(obj, "cpu[*]", &s->cpus[i], + ARM_CPU_TYPE_NAME("cortex-a7")); + } + + object_initialize_child(obj, "gic", &s->gic, TYPE_ARM_GIC); + + object_initialize_child(obj, "timer", &s->timer, TYPE_AW_A10_PIT); + object_property_add_alias(obj, "clk0-freq", OBJECT(&s->timer), + "clk0-freq"); + object_property_add_alias(obj, "clk1-freq", OBJECT(&s->timer), + "clk1-freq"); + + object_initialize_child(obj, "ccu", &s->ccu, TYPE_AW_R40_CCU); + + for (int i = 0; i < AW_R40_NUM_MMCS; i++) { + object_initialize_child(obj, mmc_names[i], &s->mmc[i], + TYPE_AW_SDHOST_SUN50I_A64); + } + + object_initialize_child(obj, "twi0", &s->i2c0, TYPE_AW_I2C_SUN6I); + + object_initialize_child(obj, "emac", &s->emac, TYPE_AW_EMAC); + object_initialize_child(obj, "gmac", &s->gmac, TYPE_AW_SUN8I_EMAC); + object_property_add_alias(obj, "gmac-phy-addr", + OBJECT(&s->gmac), "phy-addr"); + + object_initialize_child(obj, "dramc", &s->dramc, TYPE_AW_R40_DRAMC); + object_property_add_alias(obj, "ram-addr", OBJECT(&s->dramc), + "ram-addr"); + object_property_add_alias(obj, "ram-size", OBJECT(&s->dramc), + "ram-size"); + + object_initialize_child(obj, "sramc", &s->sramc, TYPE_AW_SRAMC_SUN8I_R40); +} + +static void allwinner_r40_realize(DeviceState *dev, Error **errp) +{ + const char *r40_nic_models[] = { "gmac", "emac", NULL }; + AwR40State *s = AW_R40(dev); + unsigned i; + + /* CPUs */ + for (i = 0; i < AW_R40_NUM_CPUS; i++) { + + /* + * Disable secondary CPUs. Guest EL3 firmware will start + * them via CPU reset control registers. 
+ */ + qdev_prop_set_bit(DEVICE(&s->cpus[i]), "start-powered-off", + i > 0); + + /* All exception levels required */ + qdev_prop_set_bit(DEVICE(&s->cpus[i]), "has_el3", true); + qdev_prop_set_bit(DEVICE(&s->cpus[i]), "has_el2", true); + + /* Mark realized */ + qdev_realize(DEVICE(&s->cpus[i]), NULL, &error_fatal); + } + + /* Generic Interrupt Controller */ + qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", AW_R40_GIC_NUM_SPI + + GIC_INTERNAL); + qdev_prop_set_uint32(DEVICE(&s->gic), "revision", 2); + qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", AW_R40_NUM_CPUS); + qdev_prop_set_bit(DEVICE(&s->gic), "has-security-extensions", false); + qdev_prop_set_bit(DEVICE(&s->gic), "has-virtualization-extensions", true); + sysbus_realize(SYS_BUS_DEVICE(&s->gic), &error_fatal); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 0, s->memmap[AW_R40_DEV_GIC_DIST]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 1, s->memmap[AW_R40_DEV_GIC_CPU]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 2, s->memmap[AW_R40_DEV_GIC_HYP]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 3, s->memmap[AW_R40_DEV_GIC_VCPU]); + + /* + * Wire the outputs from each CPU's generic timer and the GICv2 + * maintenance interrupt signal to the appropriate GIC PPI inputs, + * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs. + */ + for (i = 0; i < AW_R40_NUM_CPUS; i++) { + DeviceState *cpudev = DEVICE(&s->cpus[i]); + int ppibase = AW_R40_GIC_NUM_SPI + i * GIC_INTERNAL + GIC_NR_SGIS; + int irq; + /* + * Mapping from the output timer irq lines from the CPU to the + * GIC PPI inputs used for this board. + */ + const int timer_irq[] = { + [GTIMER_PHYS] = AW_R40_GIC_PPI_PHYSTIMER, + [GTIMER_VIRT] = AW_R40_GIC_PPI_VIRTTIMER, + [GTIMER_HYP] = AW_R40_GIC_PPI_HYPTIMER, + [GTIMER_SEC] = AW_R40_GIC_PPI_SECTIMER, + }; + + /* Connect CPU timer outputs to GIC PPI inputs */ + for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { + qdev_connect_gpio_out(cpudev, irq, + qdev_get_gpio_in(DEVICE(&s->gic), + ppibase + timer_irq[irq])); + } + + /* Connect GIC outputs to CPU interrupt inputs */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i, + qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + AW_R40_NUM_CPUS, + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + (2 * AW_R40_NUM_CPUS), + qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + (3 * AW_R40_NUM_CPUS), + qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); + + /* GIC maintenance signal */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + (4 * AW_R40_NUM_CPUS), + qdev_get_gpio_in(DEVICE(&s->gic), + ppibase + AW_R40_GIC_PPI_MAINT)); + } + + /* Timer */ + sysbus_realize(SYS_BUS_DEVICE(&s->timer), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->timer), 0, s->memmap[AW_R40_DEV_PIT]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer), 0, + qdev_get_gpio_in(DEVICE(&s->gic), + AW_R40_GIC_SPI_TIMER0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer), 1, + qdev_get_gpio_in(DEVICE(&s->gic), + AW_R40_GIC_SPI_TIMER1)); + + /* SRAM */ + sysbus_realize(SYS_BUS_DEVICE(&s->sramc), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->sramc), 0, s->memmap[AW_R40_DEV_SRAMC]); + + memory_region_init_ram(&s->sram_a1, OBJECT(dev), "sram A1", + 16 * KiB, &error_abort); + memory_region_init_ram(&s->sram_a2, OBJECT(dev), "sram A2", + 16 * KiB, &error_abort); + memory_region_init_ram(&s->sram_a3, OBJECT(dev), "sram A3", + 13 * KiB, &error_abort); + memory_region_init_ram(&s->sram_a4, OBJECT(dev), "sram A4", + 3 * 
KiB, &error_abort); + memory_region_add_subregion(get_system_memory(), + s->memmap[AW_R40_DEV_SRAM_A1], &s->sram_a1); + memory_region_add_subregion(get_system_memory(), + s->memmap[AW_R40_DEV_SRAM_A2], &s->sram_a2); + memory_region_add_subregion(get_system_memory(), + s->memmap[AW_R40_DEV_SRAM_A3], &s->sram_a3); + memory_region_add_subregion(get_system_memory(), + s->memmap[AW_R40_DEV_SRAM_A4], &s->sram_a4); + + /* Clock Control Unit */ + sysbus_realize(SYS_BUS_DEVICE(&s->ccu), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccu), 0, s->memmap[AW_R40_DEV_CCU]); + + /* SD/MMC */ + for (int i = 0; i < AW_R40_NUM_MMCS; i++) { + qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->gic), + AW_R40_GIC_SPI_MMC0 + i); + const hwaddr addr = s->memmap[AW_R40_DEV_MMC0 + i]; + + object_property_set_link(OBJECT(&s->mmc[i]), "dma-memory", + OBJECT(get_system_memory()), &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(&s->mmc[i]), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->mmc[i]), 0, addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->mmc[i]), 0, irq); + } + + /* UART0. For future clocktree API: All UARTS are connected to APB2_CLK. */ + for (int i = 0; i < AW_R40_NUM_UARTS; i++) { + static const int uart_irqs[AW_R40_NUM_UARTS] = { + AW_R40_GIC_SPI_UART0, + AW_R40_GIC_SPI_UART1, + AW_R40_GIC_SPI_UART2, + AW_R40_GIC_SPI_UART3, + AW_R40_GIC_SPI_UART4, + AW_R40_GIC_SPI_UART5, + AW_R40_GIC_SPI_UART6, + AW_R40_GIC_SPI_UART7, + }; + const hwaddr addr = s->memmap[AW_R40_DEV_UART0 + i]; + + serial_mm_init(get_system_memory(), addr, 2, + qdev_get_gpio_in(DEVICE(&s->gic), uart_irqs[i]), + 115200, serial_hd(i), DEVICE_NATIVE_ENDIAN); + } + + /* I2C */ + sysbus_realize(SYS_BUS_DEVICE(&s->i2c0), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c0), 0, s->memmap[AW_R40_DEV_TWI0]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c0), 0, + qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_TWI0)); + + /* DRAMC */ + sysbus_realize(SYS_BUS_DEVICE(&s->dramc), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 0, + s->memmap[AW_R40_DEV_DRAMCOM]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 1, + s->memmap[AW_R40_DEV_DRAMCTL]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 2, + s->memmap[AW_R40_DEV_DRAMPHY]); + + /* nic support gmac and emac */ + for (int i = 0; i < ARRAY_SIZE(r40_nic_models) - 1; i++) { + NICInfo *nic = &nd_table[i]; + + if (!nic->used) { + continue; + } + if (qemu_show_nic_models(nic->model, r40_nic_models)) { + exit(0); + } + + switch (qemu_find_nic_model(nic, r40_nic_models, r40_nic_models[0])) { + case 0: /* gmac */ + qdev_set_nic_properties(DEVICE(&s->gmac), nic); + break; + case 1: /* emac */ + qdev_set_nic_properties(DEVICE(&s->emac), nic); + break; + default: + exit(1); + break; + } + } + + /* GMAC */ + object_property_set_link(OBJECT(&s->gmac), "dma-memory", + OBJECT(get_system_memory()), &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(&s->gmac), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gmac), 0, s->memmap[AW_R40_DEV_GMAC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gmac), 0, + qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_GMAC)); + + /* EMAC */ + sysbus_realize(SYS_BUS_DEVICE(&s->emac), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->emac), 0, s->memmap[AW_R40_DEV_EMAC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->emac), 0, + qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_EMAC)); + + /* Unimplemented devices */ + for (i = 0; i < ARRAY_SIZE(r40_unimplemented); i++) { + create_unimplemented_device(r40_unimplemented[i].device_name, + r40_unimplemented[i].base, + 
r40_unimplemented[i].size); + } +} + +static void allwinner_r40_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = allwinner_r40_realize; + /* Reason: uses serial_hd() in realize function */ + dc->user_creatable = false; +} + +static const TypeInfo allwinner_r40_type_info = { + .name = TYPE_AW_R40, + .parent = TYPE_DEVICE, + .instance_size = sizeof(AwR40State), + .instance_init = allwinner_r40_init, + .class_init = allwinner_r40_class_init, +}; + +static void allwinner_r40_register_types(void) +{ + type_register_static(&allwinner_r40_type_info); +} + +type_init(allwinner_r40_register_types) diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index c1f2b9cfca..6880998484 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -40,7 +40,9 @@ struct AspeedMachineState { /* Public */ AspeedSoCState soc; + MemoryRegion boot_rom; bool mmio_exec; + uint32_t uart_chosen; char *fmc_model; char *spi_model; }; @@ -200,33 +202,35 @@ struct AspeedMachineState { static void aspeed_write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) { - static const uint32_t poll_mailbox_ready[] = { + AddressSpace *as = arm_boot_address_space(cpu, info); + static const ARMInsnFixup poll_mailbox_ready[] = { /* * r2 = per-cpu go sign value * r1 = AST_SMP_MBOX_FIELD_ENTRY * r0 = AST_SMP_MBOX_FIELD_GOSIGN */ - 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5 */ - 0xe21000ff, /* ands r0, r0, #255 */ - 0xe59f201c, /* ldr r2, [pc, #28] */ - 0xe1822000, /* orr r2, r2, r0 */ - - 0xe59f1018, /* ldr r1, [pc, #24] */ - 0xe59f0018, /* ldr r0, [pc, #24] */ - - 0xe320f002, /* wfe */ - 0xe5904000, /* ldr r4, [r0] */ - 0xe1520004, /* cmp r2, r4 */ - 0x1afffffb, /* bne <wfe> */ - 0xe591f000, /* ldr pc, [r1] */ - AST_SMP_MBOX_GOSIGN, - AST_SMP_MBOX_FIELD_ENTRY, - AST_SMP_MBOX_FIELD_GOSIGN, + { 0xee100fb0 }, /* mrc p15, 0, r0, c0, c0, 5 */ + { 0xe21000ff }, /* ands r0, r0, #255 */ + { 0xe59f201c }, /* ldr r2, [pc, #28] */ + { 0xe1822000 }, /* orr r2, r2, r0 */ + + { 0xe59f1018 }, /* ldr r1, [pc, #24] */ + { 0xe59f0018 }, /* ldr r0, [pc, #24] */ + + { 0xe320f002 }, /* wfe */ + { 0xe5904000 }, /* ldr r4, [r0] */ + { 0xe1520004 }, /* cmp r2, r4 */ + { 0x1afffffb }, /* bne <wfe> */ + { 0xe591f000 }, /* ldr pc, [r1] */ + { AST_SMP_MBOX_GOSIGN }, + { AST_SMP_MBOX_FIELD_ENTRY }, + { AST_SMP_MBOX_FIELD_GOSIGN }, + { 0, FIXUP_TERMINATOR } }; + static const uint32_t fixupcontext[FIXUP_MAX] = { 0 }; - rom_add_blob_fixed("aspeed.smpboot", poll_mailbox_ready, - sizeof(poll_mailbox_ready), - info->smp_loader_start); + arm_write_bootloader("aspeed.smpboot", as, info->smp_loader_start, + poll_mailbox_ready, fixupcontext); } static void aspeed_reset_secondary(ARMCPU *cpu, @@ -273,15 +277,15 @@ static void write_boot_rom(BlockBackend *blk, hwaddr addr, size_t rom_size, * Create a ROM and copy the flash contents at the expected address * (0x0). Boots faster than execute-in-place. 
*/ -static void aspeed_install_boot_rom(AspeedSoCState *soc, BlockBackend *blk, +static void aspeed_install_boot_rom(AspeedMachineState *bmc, BlockBackend *blk, uint64_t rom_size) { - MemoryRegion *boot_rom = g_new(MemoryRegion, 1); + AspeedSoCState *soc = &bmc->soc; - memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", rom_size, + memory_region_init_rom(&bmc->boot_rom, NULL, "aspeed.boot_rom", rom_size, &error_abort); memory_region_add_subregion_overlap(&soc->spi_boot_container, 0, - boot_rom, 1); + &bmc->boot_rom, 1); write_boot_rom(blk, ASPEED_SOC_SPI_BOOT_ADDR, rom_size, &error_abort); } @@ -330,10 +334,11 @@ static void connect_serial_hds_to_uarts(AspeedMachineState *bmc) AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(bmc); AspeedSoCState *s = &bmc->soc; AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + int uart_chosen = bmc->uart_chosen ? bmc->uart_chosen : amc->uart_default; - aspeed_soc_uart_set_chr(s, amc->uart_default, serial_hd(0)); + aspeed_soc_uart_set_chr(s, uart_chosen, serial_hd(0)); for (int i = 1, uart = ASPEED_DEV_UART1; i < sc->uarts_num; i++, uart++) { - if (uart == amc->uart_default) { + if (uart == uart_chosen) { continue; } aspeed_soc_uart_set_chr(s, uart, serial_hd(i)); @@ -429,8 +434,7 @@ static void aspeed_machine_init(MachineState *machine) if (mtd0) { uint64_t rom_size = memory_region_size(&bmc->soc.spi_boot); - aspeed_install_boot_rom(&bmc->soc, blk_by_legacy_dinfo(mtd0), - rom_size); + aspeed_install_boot_rom(bmc, blk_by_legacy_dinfo(mtd0), rom_size); } } @@ -786,8 +790,10 @@ static void rainier_bmc_i2c_init(AspeedMachineState *bmc) 0x48); i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), TYPE_TMP105, 0x4a); - at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 8), 0x50, 64 * KiB); - at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 8), 0x51, 64 * KiB); + at24c_eeprom_init_rom(aspeed_i2c_get_bus(&soc->i2c, 8), 0x50, + 64 * KiB, rainier_bb_fruid, rainier_bb_fruid_len); + at24c_eeprom_init_rom(aspeed_i2c_get_bus(&soc->i2c, 8), 0x51, + 64 * KiB, rainier_bmc_fruid, rainier_bmc_fruid_len); create_pca9552(soc, 8, 0x60); create_pca9552(soc, 8, 0x61); /* Bus 8: ucd90320@11 */ @@ -1074,6 +1080,35 @@ static void aspeed_set_spi_model(Object *obj, const char *value, Error **errp) bmc->spi_model = g_strdup(value); } +static char *aspeed_get_bmc_console(Object *obj, Error **errp) +{ + AspeedMachineState *bmc = ASPEED_MACHINE(obj); + AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(bmc); + int uart_chosen = bmc->uart_chosen ? 
bmc->uart_chosen : amc->uart_default; + + return g_strdup_printf("uart%d", uart_chosen - ASPEED_DEV_UART1 + 1); +} + +static void aspeed_set_bmc_console(Object *obj, const char *value, Error **errp) +{ + AspeedMachineState *bmc = ASPEED_MACHINE(obj); + AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(bmc); + AspeedSoCClass *sc = ASPEED_SOC_CLASS(object_class_by_name(amc->soc_name)); + int val; + + if (sscanf(value, "uart%u", &val) != 1) { + error_setg(errp, "Bad value for \"uart\" property"); + return; + } + + /* The number of UART depends on the SoC */ + if (val < 1 || val > sc->uarts_num) { + error_setg(errp, "\"uart\" should be in range [1 - %d]", sc->uarts_num); + return; + } + bmc->uart_chosen = ASPEED_DEV_UART1 + val - 1; +} + static void aspeed_machine_class_props_init(ObjectClass *oc) { object_class_property_add_bool(oc, "execute-in-place", @@ -1082,6 +1117,11 @@ static void aspeed_machine_class_props_init(ObjectClass *oc) object_class_property_set_description(oc, "execute-in-place", "boot directly from CE0 flash device"); + object_class_property_add_str(oc, "bmc-console", aspeed_get_bmc_console, + aspeed_set_bmc_console); + object_class_property_set_description(oc, "bmc-console", + "Change the default UART to \"uartX\""); + object_class_property_add_str(oc, "fmc-model", aspeed_get_fmc_model, aspeed_set_fmc_model); object_class_property_set_description(oc, "fmc-model", diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c index 1bf1246148..a8b3a8065a 100644 --- a/hw/arm/aspeed_ast2600.c +++ b/hw/arm/aspeed_ast2600.c @@ -316,6 +316,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) &error_abort); object_property_set_bool(OBJECT(&s->cpu[i]), "neon", false, &error_abort); + object_property_set_bool(OBJECT(&s->cpu[i]), "vfp-d32", false, + &error_abort); object_property_set_link(OBJECT(&s->cpu[i]), "memory", OBJECT(s->memory), &error_abort); diff --git a/hw/arm/aspeed_eeprom.c b/hw/arm/aspeed_eeprom.c index dc33a88a54..ace5266cec 100644 --- a/hw/arm/aspeed_eeprom.c +++ b/hw/arm/aspeed_eeprom.c @@ -119,9 +119,52 @@ const uint8_t yosemitev2_bmc_fruid[] = { 0x6e, 0x66, 0x69, 0x67, 0x20, 0x41, 0xc1, 0x45, }; +const uint8_t rainier_bb_fruid[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, + 0x28, 0x00, 0x52, 0x54, 0x04, 0x56, 0x48, 0x44, 0x52, 0x56, 0x44, 0x02, + 0x01, 0x00, 0x50, 0x54, 0x0e, 0x56, 0x54, 0x4f, 0x43, 0x00, 0x00, 0x37, + 0x00, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x46, 0x08, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0x00, 0x52, 0x54, + 0x04, 0x56, 0x54, 0x4f, 0x43, 0x50, 0x54, 0x38, 0x56, 0x49, 0x4e, 0x49, + 0x00, 0x00, 0x81, 0x00, 0x3a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x53, + 0x59, 0x53, 0x00, 0x00, 0xbb, 0x00, 0x27, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x56, 0x43, 0x45, 0x4e, 0x00, 0x00, 0xe2, 0x00, 0x27, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x56, 0x53, 0x42, 0x50, 0x00, 0x00, 0x09, 0x01, 0x19, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x50, 0x46, 0x01, 0x00, 0x00, 0x00, 0x36, 0x00, + 0x52, 0x54, 0x04, 0x56, 0x49, 0x4e, 0x49, 0x44, 0x52, 0x04, 0x44, 0x45, + 0x53, 0x43, 0x48, 0x57, 0x02, 0x30, 0x31, 0x43, 0x43, 0x04, 0x33, 0x34, + 0x35, 0x36, 0x46, 0x4e, 0x04, 0x46, 0x52, 0x34, 0x39, 0x53, 0x4e, 0x04, + 0x53, 0x52, 0x31, 0x32, 0x50, 0x4e, 0x04, 0x50, 0x52, 0x39, 0x39, 0x50, + 0x46, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x52, 0x54, + 0x04, 0x56, 0x53, 0x59, 0x53, 0x53, 0x45, 0x07, 0x49, 0x42, 0x4d, 0x53, + 0x59, 0x53, 0x31, 0x54, 0x4d, 0x08, 0x32, 0x32, 0x32, 0x32, 0x2d, 0x32, + 
0x32, 0x32, 0x50, 0x46, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, + 0x00, 0x52, 0x54, 0x04, 0x56, 0x43, 0x45, 0x4e, 0x53, 0x45, 0x07, 0x31, + 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x46, 0x43, 0x08, 0x31, 0x31, 0x31, + 0x31, 0x2d, 0x31, 0x31, 0x31, 0x50, 0x46, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x15, 0x00, 0x52, 0x54, 0x04, 0x56, 0x53, 0x42, 0x50, 0x49, + 0x4d, 0x04, 0x50, 0x00, 0x10, 0x01, 0x50, 0x46, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +/* Rainier BMC FRU */ +const uint8_t rainier_bmc_fruid[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, + 0x28, 0x00, 0x52, 0x54, 0x04, 0x56, 0x48, 0x44, 0x52, 0x56, 0x44, 0x02, + 0x01, 0x00, 0x50, 0x54, 0x0e, 0x56, 0x54, 0x4f, 0x43, 0x00, 0x00, 0x37, + 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x46, 0x08, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x52, 0x54, + 0x04, 0x56, 0x54, 0x4f, 0x43, 0x50, 0x54, 0x0e, 0x56, 0x49, 0x4e, 0x49, + 0x00, 0x00, 0x57, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x46, + 0x01, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x52, 0x54, 0x04, 0x56, 0x49, 0x4e, + 0x49, 0x44, 0x52, 0x04, 0x44, 0x45, 0x53, 0x43, 0x48, 0x57, 0x02, 0x30, + 0x31, 0x50, 0x46, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + const size_t tiogapass_bmc_fruid_len = sizeof(tiogapass_bmc_fruid); const size_t fby35_nic_fruid_len = sizeof(fby35_nic_fruid); const size_t fby35_bb_fruid_len = sizeof(fby35_bb_fruid); const size_t fby35_bmc_fruid_len = sizeof(fby35_bmc_fruid); - const size_t yosemitev2_bmc_fruid_len = sizeof(yosemitev2_bmc_fruid); +const size_t rainier_bb_fruid_len = sizeof(rainier_bb_fruid); +const size_t rainier_bmc_fruid_len = sizeof(rainier_bmc_fruid); diff --git a/hw/arm/aspeed_eeprom.h b/hw/arm/aspeed_eeprom.h index 86db6f0479..bbf9e54365 100644 --- a/hw/arm/aspeed_eeprom.h +++ b/hw/arm/aspeed_eeprom.h @@ -22,4 +22,9 @@ extern const size_t fby35_bmc_fruid_len; extern const uint8_t yosemitev2_bmc_fruid[]; extern const size_t yosemitev2_bmc_fruid_len; +extern const uint8_t rainier_bb_fruid[]; +extern const size_t rainier_bb_fruid_len; +extern const uint8_t rainier_bmc_fruid[]; +extern const size_t rainier_bmc_fruid_len; + #endif diff --git a/hw/arm/bananapi_m2u.c b/hw/arm/bananapi_m2u.c new file mode 100644 index 0000000000..74121d8966 --- /dev/null +++ b/hw/arm/bananapi_m2u.c @@ -0,0 +1,145 @@ +/* + * Bananapi M2U emulation + * + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "exec/address-spaces.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "hw/boards.h" +#include "hw/i2c/i2c.h" +#include "hw/qdev-properties.h" +#include "hw/arm/allwinner-r40.h" + +static struct arm_boot_info bpim2u_binfo; + +/* + * R40 can boot from mmc0 and mmc2, and bpim2u has two mmc interface, one is + * connected to sdcard and another mount an emmc media. + * Attach the mmc driver and try loading bootloader. + */ +static void mmc_attach_drive(AwR40State *s, AwSdHostState *mmc, int unit, + bool load_bootroom, bool *bootroom_loaded) +{ + DriveInfo *di = drive_get(IF_SD, 0, unit); + BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL; + BusState *bus; + DeviceState *carddev; + + bus = qdev_get_child_bus(DEVICE(mmc), "sd-bus"); + if (bus == NULL) { + error_report("No SD bus found in SOC object"); + exit(1); + } + + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_realize_and_unref(carddev, bus, &error_fatal); + + if (load_bootroom && blk && blk_is_available(blk)) { + /* Use Boot ROM to copy data from SD card to SRAM */ + *bootroom_loaded = allwinner_r40_bootrom_setup(s, blk, unit); + } +} + +static void bpim2u_init(MachineState *machine) +{ + bool bootroom_loaded = false; + AwR40State *r40; + I2CBus *i2c; + + /* BIOS is not supported by this board */ + if (machine->firmware) { + error_report("BIOS not supported for this machine"); + exit(1); + } + + /* Only allow Cortex-A7 for this board */ + if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a7")) != 0) { + error_report("This board can only be used with cortex-a7 CPU"); + exit(1); + } + + r40 = AW_R40(object_new(TYPE_AW_R40)); + object_property_add_child(OBJECT(machine), "soc", OBJECT(r40)); + object_unref(OBJECT(r40)); + + /* Setup timer properties */ + object_property_set_int(OBJECT(r40), "clk0-freq", 32768, &error_abort); + object_property_set_int(OBJECT(r40), "clk1-freq", 24 * 1000 * 1000, + &error_abort); + + /* DRAMC */ + r40->ram_size = machine->ram_size / MiB; + object_property_set_uint(OBJECT(r40), "ram-addr", + r40->memmap[AW_R40_DEV_SDRAM], &error_abort); + object_property_set_int(OBJECT(r40), "ram-size", + r40->ram_size, &error_abort); + + /* GMAC PHY */ + object_property_set_uint(OBJECT(r40), "gmac-phy-addr", 1, &error_abort); + + /* Mark R40 object realized */ + qdev_realize(DEVICE(r40), NULL, &error_abort); + + /* + * Plug in SD card and try load bootrom, R40 has 4 mmc controllers but can + * only booting from mmc0 and mmc2. 
+ */ + for (int i = 0; i < AW_R40_NUM_MMCS; i++) { + switch (i) { + case 0: + case 2: + mmc_attach_drive(r40, &r40->mmc[i], i, + !machine->kernel_filename && !bootroom_loaded, + &bootroom_loaded); + break; + default: + mmc_attach_drive(r40, &r40->mmc[i], i, false, NULL); + break; + } + } + + /* Connect AXP221 */ + i2c = I2C_BUS(qdev_get_child_bus(DEVICE(&r40->i2c0), "i2c")); + i2c_slave_create_simple(i2c, "axp221_pmu", 0x34); + + /* SDRAM */ + memory_region_add_subregion(get_system_memory(), + r40->memmap[AW_R40_DEV_SDRAM], machine->ram); + + bpim2u_binfo.loader_start = r40->memmap[AW_R40_DEV_SDRAM]; + bpim2u_binfo.ram_size = machine->ram_size; + bpim2u_binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC; + arm_load_kernel(ARM_CPU(first_cpu), machine, &bpim2u_binfo); +} + +static void bpim2u_machine_init(MachineClass *mc) +{ + mc->desc = "Bananapi M2U (Cortex-A7)"; + mc->init = bpim2u_init; + mc->min_cpus = AW_R40_NUM_CPUS; + mc->max_cpus = AW_R40_NUM_CPUS; + mc->default_cpus = AW_R40_NUM_CPUS; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); + mc->default_ram_size = 1 * GiB; + mc->default_ram_id = "bpim2u.ram"; +} + +DEFINE_MACHINE("bpim2u", bpim2u_machine_init) diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c index 3c2a4160cd..0233038b95 100644 --- a/hw/arm/bcm2835_peripherals.c +++ b/hw/arm/bcm2835_peripherals.c @@ -90,6 +90,8 @@ static void bcm2835_peripherals_init(Object *obj) TYPE_BCM2835_PROPERTY); object_property_add_alias(obj, "board-rev", OBJECT(&s->property), "board-rev"); + object_property_add_alias(obj, "command-line", OBJECT(&s->property), + "command-line"); object_property_add_const_link(OBJECT(&s->property), "fb", OBJECT(&s->fb)); diff --git a/hw/arm/bcm2836.c b/hw/arm/bcm2836.c index f894338fc6..166dc896c0 100644 --- a/hw/arm/bcm2836.c +++ b/hw/arm/bcm2836.c @@ -55,6 +55,8 @@ static void bcm2836_init(Object *obj) TYPE_BCM2835_PERIPHERALS); object_property_add_alias(obj, "board-rev", OBJECT(&s->peripherals), "board-rev"); + object_property_add_alias(obj, "command-line", OBJECT(&s->peripherals), + "command-line"); object_property_add_alias(obj, "vcram-size", OBJECT(&s->peripherals), "vcram-size"); } diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 54f6a3e0b3..720f22531a 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -60,26 +60,6 @@ AddressSpace *arm_boot_address_space(ARMCPU *cpu, return cpu_get_address_space(cs, asidx); } -typedef enum { - FIXUP_NONE = 0, /* do nothing */ - FIXUP_TERMINATOR, /* end of insns */ - FIXUP_BOARDID, /* overwrite with board ID number */ - FIXUP_BOARD_SETUP, /* overwrite with board specific setup code address */ - FIXUP_ARGPTR_LO, /* overwrite with pointer to kernel args */ - FIXUP_ARGPTR_HI, /* overwrite with pointer to kernel args (high half) */ - FIXUP_ENTRYPOINT_LO, /* overwrite with kernel entry point */ - FIXUP_ENTRYPOINT_HI, /* overwrite with kernel entry point (high half) */ - FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */ - FIXUP_BOOTREG, /* overwrite with boot register address */ - FIXUP_DSB, /* overwrite with correct DSB insn for cpu */ - FIXUP_MAX, -} FixupType; - -typedef struct ARMInsnFixup { - uint32_t insn; - FixupType fixup; -} ARMInsnFixup; - static const ARMInsnFixup bootloader_aarch64[] = { { 0x580000c0 }, /* ldr x0, arg ; Load the lower 32-bits of DTB */ { 0xaa1f03e1 }, /* mov x1, xzr */ @@ -150,9 +130,10 @@ static const ARMInsnFixup smpboot[] = { { 0, FIXUP_TERMINATOR } }; -static void write_bootloader(const char *name, hwaddr addr, - const ARMInsnFixup *insns, uint32_t 
*fixupcontext, - AddressSpace *as) +void arm_write_bootloader(const char *name, + AddressSpace *as, hwaddr addr, + const ARMInsnFixup *insns, + const uint32_t *fixupcontext) { /* Fix up the specified bootloader fragment and write it into * guest memory using rom_add_blob_fixed(). fixupcontext is @@ -214,8 +195,8 @@ static void default_write_secondary(ARMCPU *cpu, fixupcontext[FIXUP_DSB] = CP15_DSB_INSN; } - write_bootloader("smpboot", info->smp_loader_start, - smpboot, fixupcontext, as); + arm_write_bootloader("smpboot", as, info->smp_loader_start, + smpboot, fixupcontext); } void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu, @@ -1186,8 +1167,8 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, fixupcontext[FIXUP_ENTRYPOINT_LO] = entry; fixupcontext[FIXUP_ENTRYPOINT_HI] = entry >> 32; - write_bootloader("bootloader", info->loader_start, - primary_loader, fixupcontext, as); + arm_write_bootloader("bootloader", as, info->loader_start, + primary_loader, fixupcontext); if (info->write_board_setup) { info->write_board_setup(cpu, info); diff --git a/hw/arm/fby35.c b/hw/arm/fby35.c index f4600c290b..f2ff6c1abf 100644 --- a/hw/arm/fby35.c +++ b/hw/arm/fby35.c @@ -70,8 +70,6 @@ static void fby35_bmc_write_boot_rom(DriveInfo *dinfo, MemoryRegion *mr, static void fby35_bmc_init(Fby35State *s) { - DriveInfo *drive0 = drive_get(IF_MTD, 0, 0); - object_initialize_child(OBJECT(s), "bmc", &s->bmc, "ast2600-a3"); memory_region_init(&s->bmc_memory, OBJECT(&s->bmc), "bmc-memory", @@ -95,18 +93,21 @@ static void fby35_bmc_init(Fby35State *s) aspeed_board_init_flashes(&s->bmc.fmc, "n25q00", 2, 0); /* Install first FMC flash content as a boot rom. */ - if (drive0) { - AspeedSMCFlash *fl = &s->bmc.fmc.flashes[0]; - MemoryRegion *boot_rom = g_new(MemoryRegion, 1); - uint64_t size = memory_region_size(&fl->mmio); - - if (!s->mmio_exec) { - memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", - size, &error_abort); - memory_region_add_subregion(&s->bmc_memory, FBY35_BMC_FIRMWARE_ADDR, - boot_rom); - fby35_bmc_write_boot_rom(drive0, boot_rom, FBY35_BMC_FIRMWARE_ADDR, - size, &error_abort); + if (!s->mmio_exec) { + DriveInfo *mtd0 = drive_get(IF_MTD, 0, 0); + + if (mtd0) { + AspeedSoCState *bmc = &s->bmc; + uint64_t rom_size = memory_region_size(&bmc->spi_boot); + + memory_region_init_rom(&s->bmc_boot_rom, NULL, "aspeed.boot_rom", + rom_size, &error_abort); + memory_region_add_subregion_overlap(&bmc->spi_boot_container, 0, + &s->bmc_boot_rom, 1); + + fby35_bmc_write_boot_rom(mtd0, &s->bmc_boot_rom, + FBY35_BMC_FIRMWARE_ADDR, + rom_size, &error_abort); } } } diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c index 00dafe3f62..4fa7f0b95e 100644 --- a/hw/arm/fsl-imx6.c +++ b/hw/arm/fsl-imx6.c @@ -53,6 +53,8 @@ static void fsl_imx6_init(Object *obj) object_initialize_child(obj, "src", &s->src, TYPE_IMX6_SRC); + object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS); + for (i = 0; i < FSL_IMX6_NUM_UARTS; i++) { snprintf(name, NAME_SIZE, "uart%d", i + 1); object_initialize_child(obj, name, &s->uart[i], TYPE_IMX_SERIAL); @@ -391,6 +393,12 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) FSL_IMX6_ENET_MAC_1588_IRQ)); /* + * SNVS + */ + sysbus_realize(SYS_BUS_DEVICE(&s->snvs), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX6_SNVSHP_ADDR); + + /* * Watchdog */ for (i = 0; i < FSL_IMX6_NUM_WDTS; i++) { diff --git a/hw/arm/meson.build b/hw/arm/meson.build index b545ba0e4f..11eb9112f8 100644 --- a/hw/arm/meson.build +++ b/hw/arm/meson.build @@ -37,6 +37,7 @@ 
arm_ss.add(when: 'CONFIG_OMAP', if_true: files('omap1.c', 'omap2.c')) arm_ss.add(when: 'CONFIG_STRONGARM', if_true: files('strongarm.c')) arm_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('allwinner-a10.c', 'cubieboard.c')) arm_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3.c', 'orangepi.c')) +arm_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40.c', 'bananapi_m2u.c')) arm_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2836.c', 'raspi.c')) arm_ss.add(when: 'CONFIG_STM32F100_SOC', if_true: files('stm32f100_soc.c')) arm_ss.add(when: 'CONFIG_STM32F205_SOC', if_true: files('stm32f205_soc.c')) @@ -62,10 +63,12 @@ arm_ss.add(when: 'CONFIG_FSL_IMX7', if_true: files('fsl-imx7.c', 'mcimx7d-sabre. arm_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmuv3.c')) arm_ss.add(when: 'CONFIG_FSL_IMX6UL', if_true: files('fsl-imx6ul.c', 'mcimx6ul-evk.c')) arm_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_soc.c')) +arm_ss.add(when: 'CONFIG_XEN', if_true: files('xen_arm.c')) +arm_ss.add_all(xen_ss) -softmmu_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmu-common.c')) -softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4_boards.c')) -softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_peripherals.c')) -softmmu_ss.add(when: 'CONFIG_TOSA', if_true: files('tosa.c')) +system_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmu-common.c')) +system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4_boards.c')) +system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_peripherals.c')) +system_ss.add(when: 'CONFIG_TOSA', if_true: files('tosa.c')) hw_arch += {'arm': arm_ss} diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c index 58f3d30c9b..dc4e43e0ee 100644 --- a/hw/arm/musicpal.c +++ b/hw/arm/musicpal.c @@ -1250,7 +1250,7 @@ static void musicpal_init(MachineState *machine) uart_orgate = DEVICE(object_new(TYPE_OR_IRQ)); object_property_set_int(OBJECT(uart_orgate), "num-lines", 2, &error_fatal); qdev_realize_and_unref(uart_orgate, NULL, &error_fatal); - qdev_connect_gpio_out(DEVICE(uart_orgate), 0, + qdev_connect_gpio_out(uart_orgate, 0, qdev_get_gpio_in(pic, MP_UART_SHARED_IRQ)); serial_mm_init(address_space_mem, MP_UART1_BASE, 2, diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c index 92d068d1f9..cc4c4ec9bf 100644 --- a/hw/arm/raspi.c +++ b/hw/arm/raspi.c @@ -16,6 +16,7 @@ #include "qemu/units.h" #include "qemu/cutils.h" #include "qapi/error.h" +#include "hw/arm/boot.h" #include "hw/arm/bcm2836.h" #include "hw/registerfields.h" #include "qemu/error-report.h" @@ -124,20 +125,22 @@ static const char *board_type(uint32_t board_rev) static void write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) { - static const uint32_t smpboot[] = { - 0xe1a0e00f, /* mov lr, pc */ - 0xe3a0fe00 + (BOARDSETUP_ADDR >> 4), /* mov pc, BOARDSETUP_ADDR */ - 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5;get core ID */ - 0xe7e10050, /* ubfx r0, r0, #0, #2 ;extract LSB */ - 0xe59f5014, /* ldr r5, =0x400000CC ;load mbox base */ - 0xe320f001, /* 1: yield */ - 0xe7953200, /* ldr r3, [r5, r0, lsl #4] ;read mbox for our core*/ - 0xe3530000, /* cmp r3, #0 ;spin while zero */ - 0x0afffffb, /* beq 1b */ - 0xe7853200, /* str r3, [r5, r0, lsl #4] ;clear mbox */ - 0xe12fff13, /* bx r3 ;jump to target */ - 0x400000cc, /* (constant: mailbox 3 read/clear base) */ + static const ARMInsnFixup smpboot[] = { + { 0xe1a0e00f }, /* mov lr, pc */ + { 0xe3a0fe00 + (BOARDSETUP_ADDR >> 4) }, /* mov pc, BOARDSETUP_ADDR */ + { 0xee100fb0 }, /* mrc p15, 0, r0, c0, c0, 5;get core ID */ + { 
0xe7e10050 }, /* ubfx r0, r0, #0, #2 ;extract LSB */ + { 0xe59f5014 }, /* ldr r5, =0x400000CC ;load mbox base */ + { 0xe320f001 }, /* 1: yield */ + { 0xe7953200 }, /* ldr r3, [r5, r0, lsl #4] ;read mbox for our core */ + { 0xe3530000 }, /* cmp r3, #0 ;spin while zero */ + { 0x0afffffb }, /* beq 1b */ + { 0xe7853200 }, /* str r3, [r5, r0, lsl #4] ;clear mbox */ + { 0xe12fff13 }, /* bx r3 ;jump to target */ + { 0x400000cc }, /* (constant: mailbox 3 read/clear base) */ + { 0, FIXUP_TERMINATOR } }; + static const uint32_t fixupcontext[FIXUP_MAX] = { 0 }; /* check that we don't overrun board setup vectors */ QEMU_BUILD_BUG_ON(SMPBOOT_ADDR + sizeof(smpboot) > MVBAR_ADDR); @@ -145,9 +148,8 @@ static void write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) QEMU_BUILD_BUG_ON((BOARDSETUP_ADDR & 0xf) != 0 || (BOARDSETUP_ADDR >> 4) >= 0x100); - rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot), - info->smp_loader_start, - arm_boot_address_space(cpu, info)); + arm_write_bootloader("raspi_smpboot", arm_boot_address_space(cpu, info), + info->smp_loader_start, smpboot, fixupcontext); } static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info) @@ -161,26 +163,28 @@ static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info) * the primary CPU goes into the kernel. We put these variables inside * a rom blob, so that the reset for ROM contents zeroes them for us. */ - static const uint32_t smpboot[] = { - 0xd2801b05, /* mov x5, 0xd8 */ - 0xd53800a6, /* mrs x6, mpidr_el1 */ - 0x924004c6, /* and x6, x6, #0x3 */ - 0xd503205f, /* spin: wfe */ - 0xf86678a4, /* ldr x4, [x5,x6,lsl #3] */ - 0xb4ffffc4, /* cbz x4, spin */ - 0xd2800000, /* mov x0, #0x0 */ - 0xd2800001, /* mov x1, #0x0 */ - 0xd2800002, /* mov x2, #0x0 */ - 0xd2800003, /* mov x3, #0x0 */ - 0xd61f0080, /* br x4 */ + static const ARMInsnFixup smpboot[] = { + { 0xd2801b05 }, /* mov x5, 0xd8 */ + { 0xd53800a6 }, /* mrs x6, mpidr_el1 */ + { 0x924004c6 }, /* and x6, x6, #0x3 */ + { 0xd503205f }, /* spin: wfe */ + { 0xf86678a4 }, /* ldr x4, [x5,x6,lsl #3] */ + { 0xb4ffffc4 }, /* cbz x4, spin */ + { 0xd2800000 }, /* mov x0, #0x0 */ + { 0xd2800001 }, /* mov x1, #0x0 */ + { 0xd2800002 }, /* mov x2, #0x0 */ + { 0xd2800003 }, /* mov x3, #0x0 */ + { 0xd61f0080 }, /* br x4 */ + { 0, FIXUP_TERMINATOR } }; + static const uint32_t fixupcontext[FIXUP_MAX] = { 0 }; static const uint64_t spintables[] = { 0, 0, 0, 0 }; - rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot), - info->smp_loader_start, as); + arm_write_bootloader("raspi_smpboot", as, info->smp_loader_start, + smpboot, fixupcontext); rom_add_blob_fixed_as("raspi_spintables", spintables, sizeof(spintables), SPINTABLE_ADDR, as); } @@ -280,6 +284,8 @@ static void raspi_machine_init(MachineState *machine) object_property_add_const_link(OBJECT(&s->soc), "ram", OBJECT(machine->ram)); object_property_set_int(OBJECT(&s->soc), "board-rev", board_rev, &error_abort); + object_property_set_str(OBJECT(&s->soc), "command-line", + machine->kernel_cmdline, &error_abort); qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); /* Create and plug in the SD cards */ diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c index 0b93558dde..de21200ff9 100644 --- a/hw/arm/sbsa-ref.c +++ b/hw/arm/sbsa-ref.c @@ -29,6 +29,7 @@ #include "exec/hwaddr.h" #include "kvm_arm.h" #include "hw/arm/boot.h" +#include "hw/arm/fdt.h" #include "hw/arm/smmuv3.h" #include "hw/block/flash.h" #include "hw/boards.h" @@ -168,6 +169,20 @@ static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, 
int idx) return arm_cpu_mp_affinity(idx, clustersz); } +static void sbsa_fdt_add_gic_node(SBSAMachineState *sms) +{ + char *nodename; + + nodename = g_strdup_printf("/intc"); + qemu_fdt_add_subnode(sms->fdt, nodename); + qemu_fdt_setprop_sized_cells(sms->fdt, nodename, "reg", + 2, sbsa_ref_memmap[SBSA_GIC_DIST].base, + 2, sbsa_ref_memmap[SBSA_GIC_DIST].size, + 2, sbsa_ref_memmap[SBSA_GIC_REDIST].base, + 2, sbsa_ref_memmap[SBSA_GIC_REDIST].size); + + g_free(nodename); +} /* * Firmware on this machine only uses ACPI table to load OS, these limited * device tree nodes are just to let firmware know the info which varies from @@ -204,7 +219,7 @@ static void create_fdt(SBSAMachineState *sms) * fw compatibility. */ qemu_fdt_setprop_cell(fdt, "/", "machine-version-major", 0); - qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 0); + qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 1); if (ms->numa_state->have_numa_distance) { int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t); @@ -260,6 +275,8 @@ static void create_fdt(SBSAMachineState *sms) g_free(nodename); } + + sbsa_fdt_add_gic_node(sms); } #define SBSA_FLASH_SECTOR_SIZE (256 * KiB) @@ -596,6 +613,7 @@ static void create_pcie(SBSAMachineState *sms) hwaddr size_mmio_high = sbsa_ref_memmap[SBSA_PCIE_MMIO_HIGH].size; hwaddr base_pio = sbsa_ref_memmap[SBSA_PCIE_PIO].base; int irq = sbsa_ref_irqmap[SBSA_PCIE]; + MachineClass *mc = MACHINE_GET_CLASS(sms); MemoryRegion *mmio_alias, *mmio_alias_high, *mmio_reg; MemoryRegion *ecam_alias, *ecam_reg; DeviceState *dev; @@ -641,14 +659,14 @@ static void create_pcie(SBSAMachineState *sms) NICInfo *nd = &nd_table[i]; if (!nd->model) { - nd->model = g_strdup("e1000e"); + nd->model = g_strdup(mc->default_nic); } pci_nic_init_nofail(nd, pci->bus, nd->model, NULL); } } - pci_create_simple(pci->bus, -1, "VGA"); + pci_create_simple(pci->bus, -1, "bochs-display"); create_smmu(sms, pci->bus); } @@ -852,12 +870,13 @@ static void sbsa_ref_class_init(ObjectClass *oc, void *data) mc->init = sbsa_ref_init; mc->desc = "QEMU 'SBSA Reference' ARM Virtual Machine"; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a57"); + mc->default_cpu_type = ARM_CPU_TYPE_NAME("neoverse-n1"); mc->max_cpus = 512; mc->pci_allow_0_address = true; mc->minimum_page_bits = 12; mc->block_default_type = IF_IDE; mc->no_cdrom = 1; + mc->default_nic = "e1000e"; mc->default_ram_size = 1 * GiB; mc->default_ram_id = "sbsa-ref.ram"; mc->default_cpus = 4; diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index e7f1c1f219..5ab9d45d58 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -38,7 +38,7 @@ static guint smmu_iotlb_key_hash(gconstpointer v) /* Jenkins hash */ a = b = c = JHASH_INITVAL + sizeof(*key); - a += key->asid + key->level + key->tg; + a += key->asid + key->vmid + key->level + key->tg; b += extract64(key->iova, 0, 32); c += extract64(key->iova, 32, 32); @@ -53,13 +53,15 @@ static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2) SMMUIOTLBKey *k1 = (SMMUIOTLBKey *)v1, *k2 = (SMMUIOTLBKey *)v2; return (k1->asid == k2->asid) && (k1->iova == k2->iova) && - (k1->level == k2->level) && (k1->tg == k2->tg); + (k1->level == k2->level) && (k1->tg == k2->tg) && + (k1->vmid == k2->vmid); } -SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova, +SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova, uint8_t tg, uint8_t level) { - SMMUIOTLBKey key = {.asid = asid, .iova = iova, .tg = tg, .level = level}; + SMMUIOTLBKey key = {.asid = asid, .vmid = vmid, .iova = 
iova, + .tg = tg, .level = level}; return key; } @@ -78,7 +80,8 @@ SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg, uint64_t mask = subpage_size - 1; SMMUIOTLBKey key; - key = smmu_get_iotlb_key(cfg->asid, iova & ~mask, tg, level); + key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, + iova & ~mask, tg, level); entry = g_hash_table_lookup(bs->iotlb, &key); if (entry) { break; @@ -88,13 +91,13 @@ SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg, if (entry) { cfg->iotlb_hits++; - trace_smmu_iotlb_lookup_hit(cfg->asid, iova, + trace_smmu_iotlb_lookup_hit(cfg->asid, cfg->s2cfg.vmid, iova, cfg->iotlb_hits, cfg->iotlb_misses, 100 * cfg->iotlb_hits / (cfg->iotlb_hits + cfg->iotlb_misses)); } else { cfg->iotlb_misses++; - trace_smmu_iotlb_lookup_miss(cfg->asid, iova, + trace_smmu_iotlb_lookup_miss(cfg->asid, cfg->s2cfg.vmid, iova, cfg->iotlb_hits, cfg->iotlb_misses, 100 * cfg->iotlb_hits / (cfg->iotlb_hits + cfg->iotlb_misses)); @@ -111,8 +114,10 @@ void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new) smmu_iotlb_inv_all(bs); } - *key = smmu_get_iotlb_key(cfg->asid, new->entry.iova, tg, new->level); - trace_smmu_iotlb_insert(cfg->asid, new->entry.iova, tg, new->level); + *key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, new->entry.iova, + tg, new->level); + trace_smmu_iotlb_insert(cfg->asid, cfg->s2cfg.vmid, new->entry.iova, + tg, new->level); g_hash_table_insert(bs->iotlb, key, new); } @@ -131,7 +136,16 @@ static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value, return SMMU_IOTLB_ASID(*iotlb_key) == asid; } -static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value, +static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value, + gpointer user_data) +{ + uint16_t vmid = *(uint16_t *)user_data; + SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key; + + return SMMU_IOTLB_VMID(*iotlb_key) == vmid; +} + +static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value, gpointer user_data) { SMMUTLBEntry *iter = (SMMUTLBEntry *)value; @@ -142,18 +156,21 @@ static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value, if (info->asid >= 0 && info->asid != SMMU_IOTLB_ASID(iotlb_key)) { return false; } + if (info->vmid >= 0 && info->vmid != SMMU_IOTLB_VMID(iotlb_key)) { + return false; + } return ((info->iova & ~entry->addr_mask) == entry->iova) || ((entry->iova & ~info->mask) == info->iova); } -void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova, +void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova, uint8_t tg, uint64_t num_pages, uint8_t ttl) { /* if tg is not set we use 4KB range invalidation */ uint8_t granule = tg ? 
tg * 2 + 10 : 12; if (ttl && (num_pages == 1) && (asid >= 0)) { - SMMUIOTLBKey key = smmu_get_iotlb_key(asid, iova, tg, ttl); + SMMUIOTLBKey key = smmu_get_iotlb_key(asid, vmid, iova, tg, ttl); if (g_hash_table_remove(s->iotlb, &key)) { return; @@ -166,10 +183,11 @@ void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova, SMMUIOTLBPageInvInfo info = { .asid = asid, .iova = iova, + .vmid = vmid, .mask = (num_pages * 1 << granule) - 1}; g_hash_table_foreach_remove(s->iotlb, - smmu_hash_remove_by_asid_iova, + smmu_hash_remove_by_asid_vmid_iova, &info); } @@ -179,6 +197,12 @@ void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid) g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid); } +inline void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid) +{ + trace_smmu_iotlb_inv_vmid(vmid); + g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid); +} + /* VMSAv8-64 Translation */ /** @@ -264,7 +288,7 @@ SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova) } /** - * smmu_ptw_64 - VMSAv8-64 Walk of the page tables for a given IOVA + * smmu_ptw_64_s1 - VMSAv8-64 Walk of the page tables for a given IOVA * @cfg: translation config * @iova: iova to translate * @perm: access type @@ -276,9 +300,9 @@ SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova) * Upon success, @tlbe is filled with translated_addr and entry * permission rights. */ -static int smmu_ptw_64(SMMUTransCfg *cfg, - dma_addr_t iova, IOMMUAccessFlags perm, - SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) +static int smmu_ptw_64_s1(SMMUTransCfg *cfg, + dma_addr_t iova, IOMMUAccessFlags perm, + SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) { dma_addr_t baseaddr, indexmask; int stage = cfg->stage; @@ -291,14 +315,14 @@ static int smmu_ptw_64(SMMUTransCfg *cfg, } granule_sz = tt->granule_sz; - stride = granule_sz - 3; + stride = VMSA_STRIDE(granule_sz); inputsize = 64 - tt->tsz; level = 4 - (inputsize - 4) / stride; - indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; + indexmask = VMSA_IDXMSK(inputsize, stride, level); baseaddr = extract64(tt->ttb, 0, 48); baseaddr &= ~indexmask; - while (level <= 3) { + while (level < VMSA_LEVELS) { uint64_t subpage_size = 1ULL << level_shift(level, granule_sz); uint64_t mask = subpage_size - 1; uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz); @@ -309,7 +333,7 @@ static int smmu_ptw_64(SMMUTransCfg *cfg, if (get_pte(baseaddr, offset, &pte, info)) { goto error; } - trace_smmu_ptw_level(level, iova, subpage_size, + trace_smmu_ptw_level(stage, level, iova, subpage_size, baseaddr, offset, pte); if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) { @@ -358,6 +382,128 @@ static int smmu_ptw_64(SMMUTransCfg *cfg, info->type = SMMU_PTW_ERR_TRANSLATION; error: + info->stage = 1; + tlbe->entry.perm = IOMMU_NONE; + return -EINVAL; +} + +/** + * smmu_ptw_64_s2 - VMSAv8-64 Walk of the page tables for a given ipa + * for stage-2. + * @cfg: translation config + * @ipa: ipa to translate + * @perm: access type + * @tlbe: SMMUTLBEntry (out) + * @info: handle to an error info + * + * Return 0 on success, < 0 on error. In case of error, @info is filled + * and tlbe->perm is set to IOMMU_NONE. + * Upon success, @tlbe is filled with translated_addr and entry + * permission rights. + */ +static int smmu_ptw_64_s2(SMMUTransCfg *cfg, + dma_addr_t ipa, IOMMUAccessFlags perm, + SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) +{ + const int stage = 2; + int granule_sz = cfg->s2cfg.granule_sz; + /* ARM DDI0487I.a: Table D8-7. 
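
The smmu-common.c changes above widen the IOTLB key so a cached translation is identified by (asid, vmid, iova, tg, level), and smmu_iotlb_inv_vmid() drops matching entries with g_hash_table_foreach_remove() and a VMID predicate. A self-contained sketch of that general pattern (custom hash/equal functions over a composite key plus predicate-based removal); the key layout and names here are simplified illustrations, not the QEMU structures:

#include <glib.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint16_t asid;
    uint16_t vmid;
    uint64_t iova;
} TlbKey;

static guint tlb_key_hash(gconstpointer v)
{
    const TlbKey *k = v;
    /* Any mixing of all the key fields works; QEMU uses a Jenkins hash. */
    return (guint)(k->iova ^ (k->iova >> 32)) ^ (k->asid * 31u) ^ (k->vmid * 131u);
}

static gboolean tlb_key_equal(gconstpointer a, gconstpointer b)
{
    const TlbKey *k1 = a, *k2 = b;
    return k1->asid == k2->asid && k1->vmid == k2->vmid && k1->iova == k2->iova;
}

/* GHRFunc predicate: drop every entry cached for the given VMID. */
static gboolean remove_by_vmid(gpointer key, gpointer value, gpointer user_data)
{
    uint16_t vmid = *(uint16_t *)user_data;
    return ((TlbKey *)key)->vmid == vmid;
}

int main(void)
{
    GHashTable *tlb = g_hash_table_new_full(tlb_key_hash, tlb_key_equal,
                                            g_free, g_free);
    TlbKey *key = g_new0(TlbKey, 1);

    *key = (TlbKey){ .asid = 1, .vmid = 7, .iova = 0x1000 };
    g_hash_table_insert(tlb, key, g_strdup("translation"));

    uint16_t vmid = 7;
    printf("removed %u entries for vmid %u\n",
           g_hash_table_foreach_remove(tlb, remove_by_vmid, &vmid), vmid);

    g_hash_table_destroy(tlb);
    return 0;
}
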
*/ + int inputsize = 64 - cfg->s2cfg.tsz; + int level = get_start_level(cfg->s2cfg.sl0, granule_sz); + int stride = VMSA_STRIDE(granule_sz); + int idx = pgd_concat_idx(level, granule_sz, ipa); + /* + * Get the ttb from concatenated structure. + * The offset is the idx * size of each ttb(number of ptes * (sizeof(pte)) + */ + uint64_t baseaddr = extract64(cfg->s2cfg.vttb, 0, 48) + (1 << stride) * + idx * sizeof(uint64_t); + dma_addr_t indexmask = VMSA_IDXMSK(inputsize, stride, level); + + baseaddr &= ~indexmask; + + /* + * On input, a stage 2 Translation fault occurs if the IPA is outside the + * range configured by the relevant S2T0SZ field of the STE. + */ + if (ipa >= (1ULL << inputsize)) { + info->type = SMMU_PTW_ERR_TRANSLATION; + goto error; + } + + while (level < VMSA_LEVELS) { + uint64_t subpage_size = 1ULL << level_shift(level, granule_sz); + uint64_t mask = subpage_size - 1; + uint32_t offset = iova_level_offset(ipa, inputsize, level, granule_sz); + uint64_t pte, gpa; + dma_addr_t pte_addr = baseaddr + offset * sizeof(pte); + uint8_t s2ap; + + if (get_pte(baseaddr, offset, &pte, info)) { + goto error; + } + trace_smmu_ptw_level(stage, level, ipa, subpage_size, + baseaddr, offset, pte); + if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) { + trace_smmu_ptw_invalid_pte(stage, level, baseaddr, + pte_addr, offset, pte); + break; + } + + if (is_table_pte(pte, level)) { + baseaddr = get_table_pte_address(pte, granule_sz); + level++; + continue; + } else if (is_page_pte(pte, level)) { + gpa = get_page_pte_address(pte, granule_sz); + trace_smmu_ptw_page_pte(stage, level, ipa, + baseaddr, pte_addr, pte, gpa); + } else { + uint64_t block_size; + + gpa = get_block_pte_address(pte, level, granule_sz, + &block_size); + trace_smmu_ptw_block_pte(stage, level, baseaddr, + pte_addr, pte, ipa, gpa, + block_size >> 20); + } + + /* + * If S2AFFD and PTE.AF are 0 => fault. (5.2. Stream Table Entry) + * An Access fault takes priority over a Permission fault. + */ + if (!PTE_AF(pte) && !cfg->s2cfg.affd) { + info->type = SMMU_PTW_ERR_ACCESS; + goto error; + } + + s2ap = PTE_AP(pte); + if (is_permission_fault_s2(s2ap, perm)) { + info->type = SMMU_PTW_ERR_PERMISSION; + goto error; + } + + /* + * The address output from the translation causes a stage 2 Address + * Size fault if it exceeds the effective PA output range. + */ + if (gpa >= (1ULL << cfg->s2cfg.eff_ps)) { + info->type = SMMU_PTW_ERR_ADDR_SIZE; + goto error; + } + + tlbe->entry.translated_addr = gpa; + tlbe->entry.iova = ipa & ~mask; + tlbe->entry.addr_mask = mask; + tlbe->entry.perm = s2ap; + tlbe->level = level; + tlbe->granule = granule_sz; + return 0; + } + info->type = SMMU_PTW_ERR_TRANSLATION; + +error: + info->stage = 2; tlbe->entry.perm = IOMMU_NONE; return -EINVAL; } @@ -376,15 +522,26 @@ error: int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm, SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) { - if (!cfg->aa64) { + if (cfg->stage == 1) { + return smmu_ptw_64_s1(cfg, iova, perm, tlbe, info); + } else if (cfg->stage == 2) { /* - * This code path is not entered as we check this while decoding - * the configuration data in the derived SMMU model. + * If bypassing stage 1(or unimplemented), the input address is passed + * directly to stage 2 as IPA. If the input address of a transaction + * exceeds the size of the IAS, a stage 1 Address Size fault occurs. 
+ * For AA64, IAS = OAS according to (IHI 0070.E.a) "3.4 Address sizes" */ - g_assert_not_reached(); + if (iova >= (1ULL << cfg->oas)) { + info->type = SMMU_PTW_ERR_ADDR_SIZE; + info->stage = 1; + tlbe->entry.perm = IOMMU_NONE; + return -EINVAL; + } + + return smmu_ptw_64_s2(cfg, iova, perm, tlbe, info); } - return smmu_ptw_64(cfg, iova, perm, tlbe, info); + g_assert_not_reached(); } /** diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h index 2d75b31953..843bebb185 100644 --- a/hw/arm/smmu-internal.h +++ b/hw/arm/smmu-internal.h @@ -66,6 +66,8 @@ #define PTE_APTABLE(pte) \ (extract64(pte, 61, 2)) +#define PTE_AF(pte) \ + (extract64(pte, 10, 1)) /* * TODO: At the moment all transactions are considered as privileged (EL1) * as IOMMU translation callback does not pass user/priv attributes. @@ -73,6 +75,9 @@ #define is_permission_fault(ap, perm) \ (((perm) & IOMMU_WO) && ((ap) & 0x2)) +#define is_permission_fault_s2(s2ap, perm) \ + (!(((s2ap) & (perm)) == (perm))) + #define PTE_AP_TO_PERM(ap) \ (IOMMU_ACCESS_FLAG(true, !((ap) & 0x2))) @@ -96,10 +101,42 @@ uint64_t iova_level_offset(uint64_t iova, int inputsize, MAKE_64BIT_MASK(0, gsz - 3); } +/* FEAT_LPA2 and FEAT_TTST are not implemented. */ +static inline int get_start_level(int sl0 , int granule_sz) +{ + /* ARM DDI0487I.a: Table D8-12. */ + if (granule_sz == 12) { + return 2 - sl0; + } + /* ARM DDI0487I.a: Table D8-22 and Table D8-31. */ + return 3 - sl0; +} + +/* + * Index in a concatenated first level stage-2 page table. + * ARM DDI0487I.a: D8.2.2 Concatenated translation tables. + */ +static inline int pgd_concat_idx(int start_level, int granule_sz, + dma_addr_t ipa) +{ + uint64_t ret; + /* + * Get the number of bits handled by next levels, then any extra bits in + * the address should index the concatenated tables. 
This relation can be + * deduced from tables in ARM DDI0487I.a: D8.2.7-9 + */ + int shift = level_shift(start_level - 1, granule_sz); + + ret = ipa >> shift; + return ret; +} + #define SMMU_IOTLB_ASID(key) ((key).asid) +#define SMMU_IOTLB_VMID(key) ((key).vmid) typedef struct SMMUIOTLBPageInvInfo { int asid; + int vmid; uint64_t iova; uint64_t mask; } SMMUIOTLBPageInvInfo; diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h index e8f0ebf25e..6d1c1edab7 100644 --- a/hw/arm/smmuv3-internal.h +++ b/hw/arm/smmuv3-internal.h @@ -34,10 +34,12 @@ typedef enum SMMUTranslationStatus { /* MMIO Registers */ REG32(IDR0, 0x0) + FIELD(IDR0, S2P, 0 , 1) FIELD(IDR0, S1P, 1 , 1) FIELD(IDR0, TTF, 2 , 2) FIELD(IDR0, COHACC, 4 , 1) FIELD(IDR0, ASID16, 12, 1) + FIELD(IDR0, VMID16, 18, 1) FIELD(IDR0, TTENDIAN, 21, 2) FIELD(IDR0, STALL_MODEL, 24, 2) FIELD(IDR0, TERM_MODEL, 26, 1) @@ -524,9 +526,13 @@ typedef struct CD { #define STE_S2TG(x) extract32((x)->word[5], 14, 2) #define STE_S2PS(x) extract32((x)->word[5], 16, 3) #define STE_S2AA64(x) extract32((x)->word[5], 19, 1) -#define STE_S2HD(x) extract32((x)->word[5], 24, 1) -#define STE_S2HA(x) extract32((x)->word[5], 25, 1) -#define STE_S2S(x) extract32((x)->word[5], 26, 1) +#define STE_S2ENDI(x) extract32((x)->word[5], 20, 1) +#define STE_S2AFFD(x) extract32((x)->word[5], 21, 1) +#define STE_S2HD(x) extract32((x)->word[5], 23, 1) +#define STE_S2HA(x) extract32((x)->word[5], 24, 1) +#define STE_S2S(x) extract32((x)->word[5], 25, 1) +#define STE_S2R(x) extract32((x)->word[5], 26, 1) + #define STE_CTXPTR(x) \ ({ \ unsigned long addr; \ diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 270c80b665..932f009697 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -21,6 +21,7 @@ #include "hw/irq.h" #include "hw/sysbus.h" #include "migration/vmstate.h" +#include "hw/qdev-properties.h" #include "hw/qdev-core.h" #include "hw/pci/pci.h" #include "cpu.h" @@ -33,6 +34,9 @@ #include "smmuv3-internal.h" #include "smmu-internal.h" +#define PTW_RECORD_FAULT(cfg) (((cfg)->stage == 1) ? (cfg)->record_faults : \ + (cfg)->s2cfg.record_faults) + /** * smmuv3_trigger_irq - pulse @irq if enabled and update * GERROR register in case of GERROR interrupt @@ -238,14 +242,17 @@ void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info) static void smmuv3_init_regs(SMMUv3State *s) { - /** - * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID, - * multi-level stream table - */ - s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */ + /* Based on sys property, the stages supported in smmu will be advertised.*/ + if (s->stage && !strcmp("2", s->stage)) { + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1); + } else { + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); + } + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */ + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, VMID16, 1); /* 16-bit VMID */ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */ /* terminated transaction will always be aborted/error returned */ @@ -329,11 +336,138 @@ static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid, return 0; } +/* + * Max valid value is 39 when SMMU_IDR3.STT == 0. 
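
get_start_level() and pgd_concat_idx() in the smmu-internal.h hunk implement the VMSAv8-64 stage-2 rules: STE.S2SL0 selects the starting lookup level for the chosen granule, and any IPA bits above what a single table at that level covers index up to 16 concatenated top-level tables. A worked standalone sketch of that arithmetic, mirroring the shape of those helpers (FEAT_LPA2/FEAT_TTST are ignored, as in the hunk; the example numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Bits resolved below a given level for a given granule (log2 page size). */
static int level_shift(int level, int granule_sz)
{
    return granule_sz + (3 - level) * (granule_sz - 3);
}

/* Starting lookup level selected by STE.S2SL0 for a given granule. */
static int get_start_level(int sl0, int granule_sz)
{
    return (granule_sz == 12 ? 2 : 3) - sl0;
}

/* IPA bits above the start level index the concatenated top-level tables. */
static int pgd_concat_idx(int start_level, int granule_sz, uint64_t ipa)
{
    return ipa >> level_shift(start_level - 1, granule_sz);
}

int main(void)
{
    int granule_sz = 12;    /* 4KB granule */
    int sl0 = 1;            /* STE.S2SL0 */
    int t0sz = 24;          /* 40-bit IPA space */

    int level = get_start_level(sl0, granule_sz);
    uint64_t ipa_bits = 64 - t0sz;
    uint64_t max_ipa = (1ULL << ipa_bits) - 1;
    int nr_concat = pgd_concat_idx(level, granule_sz, max_ipa) + 1;

    /* A 40-bit IPA space starting at level 1 needs 2 concatenated tables (<= 16). */
    printf("start level %d, %d concatenated table(s)\n", level, nr_concat);
    return 0;
}
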
+ * In architectures after SMMUv3.0: + * - If STE.S2TG selects a 4KB or 16KB granule, the minimum valid value for this + * field is MAX(16, 64-IAS) + * - If STE.S2TG selects a 64KB granule, the minimum valid value for this field + * is (64-IAS). + * As we only support AA64, IAS = OAS. + */ +static bool s2t0sz_valid(SMMUTransCfg *cfg) +{ + if (cfg->s2cfg.tsz > 39) { + return false; + } + + if (cfg->s2cfg.granule_sz == 16) { + return (cfg->s2cfg.tsz >= 64 - oas2bits(SMMU_IDR5_OAS)); + } + + return (cfg->s2cfg.tsz >= MAX(64 - oas2bits(SMMU_IDR5_OAS), 16)); +} + +/* + * Return true if s2 page table config is valid. + * This checks with the configured start level, ias_bits and granularity we can + * have a valid page table as described in ARM ARM D8.2 Translation process. + * The idea here is to see for the highest possible number of IPA bits, how + * many concatenated tables we would need, if it is more than 16, then this is + * not possible. + */ +static bool s2_pgtable_config_valid(uint8_t sl0, uint8_t t0sz, uint8_t gran) +{ + int level = get_start_level(sl0, gran); + uint64_t ipa_bits = 64 - t0sz; + uint64_t max_ipa = (1ULL << ipa_bits) - 1; + int nr_concat = pgd_concat_idx(level, gran, max_ipa) + 1; + + return nr_concat <= VMSA_MAX_S2_CONCAT; +} + +static int decode_ste_s2_cfg(SMMUTransCfg *cfg, STE *ste) +{ + cfg->stage = 2; + + if (STE_S2AA64(ste) == 0x0) { + qemu_log_mask(LOG_UNIMP, + "SMMUv3 AArch32 tables not supported\n"); + g_assert_not_reached(); + } + + switch (STE_S2TG(ste)) { + case 0x0: /* 4KB */ + cfg->s2cfg.granule_sz = 12; + break; + case 0x1: /* 64KB */ + cfg->s2cfg.granule_sz = 16; + break; + case 0x2: /* 16KB */ + cfg->s2cfg.granule_sz = 14; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SMMUv3 bad STE S2TG: %x\n", STE_S2TG(ste)); + goto bad_ste; + } + + cfg->s2cfg.vttb = STE_S2TTB(ste); + + cfg->s2cfg.sl0 = STE_S2SL0(ste); + /* FEAT_TTST not supported. */ + if (cfg->s2cfg.sl0 == 0x3) { + qemu_log_mask(LOG_UNIMP, "SMMUv3 S2SL0 = 0x3 has no meaning!\n"); + goto bad_ste; + } + + /* For AA64, The effective S2PS size is capped to the OAS. */ + cfg->s2cfg.eff_ps = oas2bits(MIN(STE_S2PS(ste), SMMU_IDR5_OAS)); + /* + * It is ILLEGAL for the address in S2TTB to be outside the range + * described by the effective S2PS value. + */ + if (cfg->s2cfg.vttb & ~(MAKE_64BIT_MASK(0, cfg->s2cfg.eff_ps))) { + qemu_log_mask(LOG_GUEST_ERROR, + "SMMUv3 S2TTB too large 0x%" PRIx64 + ", effective PS %d bits\n", + cfg->s2cfg.vttb, cfg->s2cfg.eff_ps); + goto bad_ste; + } + + cfg->s2cfg.tsz = STE_S2T0SZ(ste); + + if (!s2t0sz_valid(cfg)) { + qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 bad STE S2T0SZ = %d\n", + cfg->s2cfg.tsz); + goto bad_ste; + } + + if (!s2_pgtable_config_valid(cfg->s2cfg.sl0, cfg->s2cfg.tsz, + cfg->s2cfg.granule_sz)) { + qemu_log_mask(LOG_GUEST_ERROR, + "SMMUv3 STE stage 2 config not valid!\n"); + goto bad_ste; + } + + /* Only LE supported(IDR0.TTENDIAN). */ + if (STE_S2ENDI(ste)) { + qemu_log_mask(LOG_GUEST_ERROR, + "SMMUv3 STE_S2ENDI only supports LE!\n"); + goto bad_ste; + } + + cfg->s2cfg.affd = STE_S2AFFD(ste); + + cfg->s2cfg.record_faults = STE_S2R(ste); + /* As stall is not supported. 
*/ + if (STE_S2S(ste)) { + qemu_log_mask(LOG_UNIMP, "SMMUv3 Stall not implemented!\n"); + goto bad_ste; + } + + return 0; + +bad_ste: + return -EINVAL; +} + /* Returns < 0 in case of invalid STE, 0 otherwise */ static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg, STE *ste, SMMUEventInfo *event) { uint32_t config; + int ret; if (!STE_VALID(ste)) { if (!event->inval_ste_allowed) { @@ -354,11 +488,39 @@ static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg, return 0; } - if (STE_CFG_S2_ENABLED(config)) { - qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n"); + /* + * If a stage is enabled in SW while not advertised, throw bad ste + * according to user manual(IHI0070E) "5.2 Stream Table Entry". + */ + if (!STAGE1_SUPPORTED(s) && STE_CFG_S1_ENABLED(config)) { + qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S1 used but not supported.\n"); + goto bad_ste; + } + if (!STAGE2_SUPPORTED(s) && STE_CFG_S2_ENABLED(config)) { + qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S2 used but not supported.\n"); goto bad_ste; } + if (STAGE2_SUPPORTED(s)) { + /* VMID is considered even if s2 is disabled. */ + cfg->s2cfg.vmid = STE_S2VMID(ste); + } else { + /* Default to -1 */ + cfg->s2cfg.vmid = -1; + } + + if (STE_CFG_S2_ENABLED(config)) { + /* + * Stage-1 OAS defaults to OAS even if not enabled as it would be used + * in input address check for stage-2. + */ + cfg->oas = oas2bits(SMMU_IDR5_OAS); + ret = decode_ste_s2_cfg(cfg, ste); + if (ret) { + goto bad_ste; + } + } + if (STE_S1CDMAX(ste) != 0) { qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support multiple context descriptors yet\n"); @@ -559,6 +721,9 @@ static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg, STE ste; CD cd; + /* ASID defaults to -1 (if s1 is not supported). */ + cfg->asid = -1; + ret = smmu_find_ste(s, sid, &ste, event); if (ret) { return ret; @@ -569,7 +734,7 @@ static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg, return ret; } - if (cfg->aborted || cfg->bypassed) { + if (cfg->aborted || cfg->bypassed || (cfg->stage == 2)) { return 0; } @@ -656,6 +821,11 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, .addr_mask = ~(hwaddr)0, .perm = IOMMU_NONE, }; + /* + * Combined attributes used for TLB lookup, as only one stage is supported, + * it will hold attributes based on the enabled stage. + */ + SMMUTransTableInfo tt_combined; qemu_mutex_lock(&s->mutex); @@ -684,25 +854,45 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, goto epilogue; } - tt = select_tt(cfg, addr); - if (!tt) { - if (cfg->record_faults) { - event.type = SMMU_EVT_F_TRANSLATION; - event.u.f_translation.addr = addr; - event.u.f_translation.rnw = flag & 0x1; + if (cfg->stage == 1) { + /* Select stage1 translation table. */ + tt = select_tt(cfg, addr); + if (!tt) { + if (cfg->record_faults) { + event.type = SMMU_EVT_F_TRANSLATION; + event.u.f_translation.addr = addr; + event.u.f_translation.rnw = flag & 0x1; + } + status = SMMU_TRANS_ERROR; + goto epilogue; } - status = SMMU_TRANS_ERROR; - goto epilogue; - } + tt_combined.granule_sz = tt->granule_sz; + tt_combined.tsz = tt->tsz; - page_mask = (1ULL << (tt->granule_sz)) - 1; + } else { + /* Stage2. */ + tt_combined.granule_sz = cfg->s2cfg.granule_sz; + tt_combined.tsz = cfg->s2cfg.tsz; + } + /* + * TLB lookup looks for granule and input size for a translation stage, + * as only one stage is supported right now, choose the right values + * from the configuration. 
+ */ + page_mask = (1ULL << tt_combined.granule_sz) - 1; aligned_addr = addr & ~page_mask; - cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr); + cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, aligned_addr); if (cached_entry) { if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) { status = SMMU_TRANS_ERROR; - if (cfg->record_faults) { + /* + * We know that the TLB only contains either stage-1 or stage-2 as + * nesting is not supported. So it is sufficient to check the + * translation stage to know the TLB stage for now. + */ + event.u.f_walk_eabt.s2 = (cfg->stage == 2); + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_PERMISSION; event.u.f_permission.addr = addr; event.u.f_permission.rnw = flag & 0x1; @@ -716,6 +906,8 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, cached_entry = g_new0(SMMUTLBEntry, 1); if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) { + /* All faults from PTW has S2 field. */ + event.u.f_walk_eabt.s2 = (ptw_info.stage == 2); g_free(cached_entry); switch (ptw_info.type) { case SMMU_PTW_ERR_WALK_EABT: @@ -726,28 +918,28 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, event.u.f_walk_eabt.addr2 = ptw_info.addr; break; case SMMU_PTW_ERR_TRANSLATION: - if (cfg->record_faults) { + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_TRANSLATION; event.u.f_translation.addr = addr; event.u.f_translation.rnw = flag & 0x1; } break; case SMMU_PTW_ERR_ADDR_SIZE: - if (cfg->record_faults) { + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_ADDR_SIZE; event.u.f_addr_size.addr = addr; event.u.f_addr_size.rnw = flag & 0x1; } break; case SMMU_PTW_ERR_ACCESS: - if (cfg->record_faults) { + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_ACCESS; event.u.f_access.addr = addr; event.u.f_access.rnw = flag & 0x1; } break; case SMMU_PTW_ERR_PERMISSION: - if (cfg->record_faults) { + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_PERMISSION; event.u.f_permission.addr = addr; event.u.f_permission.rnw = flag & 0x1; @@ -808,18 +1000,21 @@ epilogue: * @mr: IOMMU mr region handle * @n: notifier to be called * @asid: address space ID or negative value if we don't care + * @vmid: virtual machine ID or negative value if we don't care * @iova: iova * @tg: translation granule (if communicated through range invalidation) * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1 */ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, IOMMUNotifier *n, - int asid, dma_addr_t iova, - uint8_t tg, uint64_t num_pages) + int asid, int vmid, + dma_addr_t iova, uint8_t tg, + uint64_t num_pages) { SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu); IOMMUTLBEvent event; uint8_t granule; + SMMUv3State *s = sdev->smmu; if (!tg) { SMMUEventInfo event = {.inval_ste_allowed = true}; @@ -834,11 +1029,20 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, return; } - tt = select_tt(cfg, iova); - if (!tt) { + if (vmid >= 0 && cfg->s2cfg.vmid != vmid) { return; } - granule = tt->granule_sz; + + if (STAGE1_SUPPORTED(s)) { + tt = select_tt(cfg, iova); + if (!tt) { + return; + } + granule = tt->granule_sz; + } else { + granule = cfg->s2cfg.granule_sz; + } + } else { granule = tg * 2 + 10; } @@ -852,9 +1056,10 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, memory_region_notify_iommu_one(n, &event); } -/* invalidate an asid/iova range tuple in all mr's */ -static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova, - uint8_t tg, uint64_t num_pages) +/* 
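
Both smmuv3_notify_iova() above and the range-invalidation handling below derive the page-size shift from the TG field of the command, with tg == 0 meaning "assume 4KB": shift = tg ? tg * 2 + 10 : 12, so tg = 1/2/3 select 4KB/16KB/64KB granules. A tiny standalone check of that encoding (the helper name is illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* TG encoding used by the TLBI range commands: 1 = 4KB, 2 = 16KB, 3 = 64KB. */
static unsigned granule_bits(uint8_t tg)
{
    return tg ? tg * 2 + 10 : 12;   /* tg == 0: fall back to 4KB pages */
}

int main(void)
{
    for (uint8_t tg = 0; tg <= 3; tg++) {
        /* num_pages pages of this granule span num_pages << granule_bits bytes. */
        uint64_t page_bytes = UINT64_C(1) << granule_bits(tg);

        printf("tg=%u -> shift %u (%" PRIu64 " KiB pages)\n",
               tg, granule_bits(tg), page_bytes / 1024);
    }
    return 0;
}
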
invalidate an asid/vmid/iova range tuple in all mr's */ +static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid, + dma_addr_t iova, uint8_t tg, + uint64_t num_pages) { SMMUDevice *sdev; @@ -862,20 +1067,20 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova, IOMMUMemoryRegion *mr = &sdev->iommu; IOMMUNotifier *n; - trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova, - tg, num_pages); + trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, vmid, + iova, tg, num_pages); IOMMU_NOTIFIER_FOREACH(n, mr) { - smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages); + smmuv3_notify_iova(mr, n, asid, vmid, iova, tg, num_pages); } } } -static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd) +static void smmuv3_range_inval(SMMUState *s, Cmd *cmd) { dma_addr_t end, addr = CMD_ADDR(cmd); uint8_t type = CMD_TYPE(cmd); - uint16_t vmid = CMD_VMID(cmd); + int vmid = -1; uint8_t scale = CMD_SCALE(cmd); uint8_t num = CMD_NUM(cmd); uint8_t ttl = CMD_TTL(cmd); @@ -884,15 +1089,21 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd) uint64_t num_pages; uint8_t granule; int asid = -1; + SMMUv3State *smmuv3 = ARM_SMMUV3(s); + + /* Only consider VMID if stage-2 is supported. */ + if (STAGE2_SUPPORTED(smmuv3)) { + vmid = CMD_VMID(cmd); + } if (type == SMMU_CMD_TLBI_NH_VA) { asid = CMD_ASID(cmd); } if (!tg) { - trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf); - smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1); - smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl); + trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf); + smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1); + smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl); return; } @@ -908,9 +1119,9 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd) uint64_t mask = dma_aligned_pow2_mask(addr, end, 64); num_pages = (mask + 1) >> granule; - trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf); - smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages); - smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl); + trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf); + smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, num_pages); + smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl); addr += mask + 1; } } @@ -1042,12 +1253,22 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) { uint16_t asid = CMD_ASID(&cmd); + if (!STAGE1_SUPPORTED(s)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + trace_smmuv3_cmdq_tlbi_nh_asid(asid); smmu_inv_notifiers_all(&s->smmu_state); smmu_iotlb_inv_asid(bs, asid); break; } case SMMU_CMD_TLBI_NH_ALL: + if (!STAGE1_SUPPORTED(s)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + QEMU_FALLTHROUGH; case SMMU_CMD_TLBI_NSNH_ALL: trace_smmuv3_cmdq_tlbi_nh(); smmu_inv_notifiers_all(&s->smmu_state); @@ -1055,7 +1276,36 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) break; case SMMU_CMD_TLBI_NH_VAA: case SMMU_CMD_TLBI_NH_VA: - smmuv3_s1_range_inval(bs, &cmd); + if (!STAGE1_SUPPORTED(s)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + smmuv3_range_inval(bs, &cmd); + break; + case SMMU_CMD_TLBI_S12_VMALL: + { + uint16_t vmid = CMD_VMID(&cmd); + + if (!STAGE2_SUPPORTED(s)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + + trace_smmuv3_cmdq_tlbi_s12_vmid(vmid); + smmu_inv_notifiers_all(&s->smmu_state); + smmu_iotlb_inv_vmid(bs, vmid); + break; + } + case SMMU_CMD_TLBI_S2_IPA: + if (!STAGE2_SUPPORTED(s)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + /* + * As currently only either s1 or s2 are supported + * we 
can reuse same function for s2. + */ + smmuv3_range_inval(bs, &cmd); break; case SMMU_CMD_TLBI_EL3_ALL: case SMMU_CMD_TLBI_EL3_VA: @@ -1063,8 +1313,6 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) case SMMU_CMD_TLBI_EL2_ASID: case SMMU_CMD_TLBI_EL2_VA: case SMMU_CMD_TLBI_EL2_VAA: - case SMMU_CMD_TLBI_S12_VMALL: - case SMMU_CMD_TLBI_S2_IPA: case SMMU_CMD_ATC_INV: case SMMU_CMD_PRI_RESP: case SMMU_CMD_RESUME: @@ -1073,12 +1321,14 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) break; default: cmd_error = SMMU_CERROR_ILL; - qemu_log_mask(LOG_GUEST_ERROR, - "Illegal command type: %d\n", CMD_TYPE(&cmd)); break; } qemu_mutex_unlock(&s->mutex); if (cmd_error) { + if (cmd_error == SMMU_CERROR_ILL) { + qemu_log_mask(LOG_GUEST_ERROR, + "Illegal command type: %d\n", CMD_TYPE(&cmd)); + } break; } /* @@ -1555,6 +1805,17 @@ static const VMStateDescription vmstate_smmuv3 = { } }; +static Property smmuv3_properties[] = { + /* + * Stages of translation advertised. + * "1": Stage 1 + * "2": Stage 2 + * Defaults to stage 1 + */ + DEFINE_PROP_STRING("stage", SMMUv3State, stage), + DEFINE_PROP_END_OF_LIST() +}; + static void smmuv3_instance_init(Object *obj) { /* Nothing much to do here as of now */ @@ -1571,6 +1832,7 @@ static void smmuv3_class_init(ObjectClass *klass, void *data) &c->parent_phases); c->parent_realize = dc->realize; dc->realize = smmu_realize; + device_class_set_props(dc, smmuv3_properties); } static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu, diff --git a/hw/arm/trace-events b/hw/arm/trace-events index 2dee296c8f..cdc1ea06a8 100644 --- a/hw/arm/trace-events +++ b/hw/arm/trace-events @@ -5,18 +5,19 @@ virt_acpi_setup(void) "No fw cfg or ACPI disabled. Bailing out." # smmu-common.c smmu_add_mr(const char *name) "%s" -smmu_ptw_level(int level, uint64_t iova, size_t subpage_size, uint64_t baseaddr, uint32_t offset, uint64_t pte) "level=%d iova=0x%"PRIx64" subpage_sz=0x%zx baseaddr=0x%"PRIx64" offset=%d => pte=0x%"PRIx64 +smmu_ptw_level(int stage, int level, uint64_t iova, size_t subpage_size, uint64_t baseaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d iova=0x%"PRIx64" subpage_sz=0x%zx baseaddr=0x%"PRIx64" offset=%d => pte=0x%"PRIx64 smmu_ptw_invalid_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" offset=%d pte=0x%"PRIx64 smmu_ptw_page_pte(int stage, int level, uint64_t iova, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t address) "stage=%d level=%d iova=0x%"PRIx64" base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" page address = 0x%"PRIx64 smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t iova, uint64_t gpa, int bsize_mb) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" iova=0x%"PRIx64" block address = 0x%"PRIx64" block size = %d MiB" smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64 smmu_iotlb_inv_all(void) "IOTLB invalidate all" smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d" +smmu_iotlb_inv_vmid(uint16_t vmid) "IOTLB invalidate vmid=%d" smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64 smmu_inv_notifiers_mr(const char *name) "iommu mr=%s" -smmu_iotlb_lookup_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" -smmu_iotlb_lookup_miss(uint16_t 
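
smmuv3_init_regs() above now advertises either IDR0.S1P or IDR0.S2P depending on the new "stage" string property ("1" is the default, "2" selects stage-2 only), and commands aimed at the unsupported stage are rejected with SMMU_CERROR_ILL. A hedged sketch of how board or test code inside the QEMU tree could instantiate a stage-2-only SMMU; only the "stage" property name comes from the hunk above, the helper itself is illustrative and leaves the event-queue IRQ wiring out:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/arm/smmuv3.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"

/* Create an SMMUv3 that only advertises stage-2 translation. */
static DeviceState *create_stage2_smmuv3(hwaddr base, PCIBus *bus)
{
    DeviceState *dev = qdev_new(TYPE_ARM_SMMUV3);

    /* Advertise stage 2 instead of the default stage 1. */
    qdev_prop_set_string(dev, "stage", "2");
    /* The SMMU must know which PCI bus it translates for. */
    object_property_set_link(OBJECT(dev), "primary-bus", OBJECT(bus),
                             &error_abort);

    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    return dev;
}
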
asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" -smmu_iotlb_insert(uint16_t asid, uint64_t addr, uint8_t tg, uint8_t level) "IOTLB ++ asid=%d addr=0x%"PRIx64" tg=%d level=%d" +smmu_iotlb_lookup_hit(uint16_t asid, uint16_t vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" +smmu_iotlb_lookup_miss(uint16_t asid, uint16_t vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" +smmu_iotlb_insert(uint16_t asid, uint16_t vmid, uint64_t addr, uint8_t tg, uint8_t level) "IOTLB ++ asid=%d vmid=%d addr=0x%"PRIx64" tg=%d level=%d" # smmuv3.c smmuv3_read_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)" @@ -45,11 +46,12 @@ smmuv3_cmdq_cfgi_ste_range(int start, int end) "start=0x%x - end=0x%x" smmuv3_cmdq_cfgi_cd(uint32_t sid) "sid=0x%x" smmuv3_config_cache_hit(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache HIT for sid=0x%x (hits=%d, misses=%d, hit rate=%d)" smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache MISS for sid=0x%x (hits=%d, misses=%d, hit rate=%d)" -smmuv3_s1_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d" +smmuv3_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d" smmuv3_cmdq_tlbi_nh(void) "" smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d" +smmuv3_cmdq_tlbi_s12_vmid(uint16_t vmid) "vmid=%d" smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x" smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s" smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s" -smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint64_t iova, uint8_t tg, uint64_t num_pages) "iommu mr=%s asid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64 +smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint16_t vmid, uint64_t iova, uint8_t tg, uint64_t num_pages) "iommu mr=%s asid=%d vmid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64 diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c index 34b012b528..56abadd9b8 100644 --- a/hw/arm/vexpress.c +++ b/hw/arm/vexpress.c @@ -173,6 +173,11 @@ struct VexpressMachineClass { struct VexpressMachineState { MachineState parent; + MemoryRegion vram; + MemoryRegion sram; + MemoryRegion flashalias; + MemoryRegion lowram; + MemoryRegion a15sram; bool secure; bool virt; }; @@ -182,7 +187,7 @@ struct VexpressMachineState { #define TYPE_VEXPRESS_A15_MACHINE MACHINE_TYPE_NAME("vexpress-a15") OBJECT_DECLARE_TYPE(VexpressMachineState, VexpressMachineClass, VEXPRESS_MACHINE) -typedef void DBoardInitFn(const VexpressMachineState *machine, +typedef void DBoardInitFn(VexpressMachineState *machine, ram_addr_t ram_size, const char *cpu_type, qemu_irq *pic); @@ -263,14 +268,13 @@ static void init_cpus(MachineState *ms, const char *cpu_type, } } -static void a9_daughterboard_init(const VexpressMachineState *vms, +static void a9_daughterboard_init(VexpressMachineState *vms, ram_addr_t ram_size, const char *cpu_type, qemu_irq *pic) { MachineState *machine = MACHINE(vms); 
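
The vexpress.c changes beginning above replace one-off g_new(MemoryRegion, 1) allocations with MemoryRegion fields embedded in VexpressMachineState, keeping each region owned by and reachable from the machine object. A minimal sketch of that embedding pattern for QEMU board code; the machine type and names are invented for illustration:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/boards.h"

typedef struct DemoMachineState {
    MachineState parent;
    MemoryRegion sram;      /* embedded: no g_new(), nothing to leak */
} DemoMachineState;

static void demo_machine_init_sram(DemoMachineState *s)
{
    /* Same calls as the hunks above, but on a field instead of a heap block. */
    memory_region_init_ram(&s->sram, NULL, "demo.sram", 0x2000000,
                           &error_fatal);
    memory_region_add_subregion(get_system_memory(), 0x2e000000, &s->sram);
}
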
MemoryRegion *sysmem = get_system_memory(); - MemoryRegion *lowram = g_new(MemoryRegion, 1); ram_addr_t low_ram_size; if (ram_size > 0x40000000) { @@ -287,9 +291,9 @@ static void a9_daughterboard_init(const VexpressMachineState *vms, * address space should in theory be remappable to various * things including ROM or RAM; we always map the RAM there. */ - memory_region_init_alias(lowram, NULL, "vexpress.lowmem", machine->ram, - 0, low_ram_size); - memory_region_add_subregion(sysmem, 0x0, lowram); + memory_region_init_alias(&vms->lowram, NULL, "vexpress.lowmem", + machine->ram, 0, low_ram_size); + memory_region_add_subregion(sysmem, 0x0, &vms->lowram); memory_region_add_subregion(sysmem, 0x60000000, machine->ram); /* 0x1e000000 A9MPCore (SCU) private memory region */ @@ -348,14 +352,13 @@ static VEDBoardInfo a9_daughterboard = { .init = a9_daughterboard_init, }; -static void a15_daughterboard_init(const VexpressMachineState *vms, +static void a15_daughterboard_init(VexpressMachineState *vms, ram_addr_t ram_size, const char *cpu_type, qemu_irq *pic) { MachineState *machine = MACHINE(vms); MemoryRegion *sysmem = get_system_memory(); - MemoryRegion *sram = g_new(MemoryRegion, 1); { /* We have to use a separate 64 bit variable here to avoid the gcc @@ -386,9 +389,9 @@ static void a15_daughterboard_init(const VexpressMachineState *vms, /* 0x2b060000: SP805 watchdog: not modelled */ /* 0x2b0a0000: PL341 dynamic memory controller: not modelled */ /* 0x2e000000: system SRAM */ - memory_region_init_ram(sram, NULL, "vexpress.a15sram", 0x10000, + memory_region_init_ram(&vms->a15sram, NULL, "vexpress.a15sram", 0x10000, &error_fatal); - memory_region_add_subregion(sysmem, 0x2e000000, sram); + memory_region_add_subregion(sysmem, 0x2e000000, &vms->a15sram); /* 0x7ffb0000: DMA330 DMA controller: not modelled */ /* 0x7ffd0000: PL354 static memory controller: not modelled */ @@ -547,10 +550,6 @@ static void vexpress_common_init(MachineState *machine) I2CBus *i2c; ram_addr_t vram_size, sram_size; MemoryRegion *sysmem = get_system_memory(); - MemoryRegion *vram = g_new(MemoryRegion, 1); - MemoryRegion *sram = g_new(MemoryRegion, 1); - MemoryRegion *flashalias = g_new(MemoryRegion, 1); - MemoryRegion *flash0mem; const hwaddr *map = daughterboard->motherboard_map; int i; @@ -662,24 +661,25 @@ static void vexpress_common_init(MachineState *machine) if (map[VE_NORFLASHALIAS] != -1) { /* Map flash 0 as an alias into low memory */ + MemoryRegion *flash0mem; flash0mem = sysbus_mmio_get_region(SYS_BUS_DEVICE(pflash0), 0); - memory_region_init_alias(flashalias, NULL, "vexpress.flashalias", + memory_region_init_alias(&vms->flashalias, NULL, "vexpress.flashalias", flash0mem, 0, VEXPRESS_FLASH_SIZE); - memory_region_add_subregion(sysmem, map[VE_NORFLASHALIAS], flashalias); + memory_region_add_subregion(sysmem, map[VE_NORFLASHALIAS], &vms->flashalias); } dinfo = drive_get(IF_PFLASH, 0, 1); ve_pflash_cfi01_register(map[VE_NORFLASH1], "vexpress.flash1", dinfo); sram_size = 0x2000000; - memory_region_init_ram(sram, NULL, "vexpress.sram", sram_size, + memory_region_init_ram(&vms->sram, NULL, "vexpress.sram", sram_size, &error_fatal); - memory_region_add_subregion(sysmem, map[VE_SRAM], sram); + memory_region_add_subregion(sysmem, map[VE_SRAM], &vms->sram); vram_size = 0x800000; - memory_region_init_ram(vram, NULL, "vexpress.vram", vram_size, + memory_region_init_ram(&vms->vram, NULL, "vexpress.vram", vram_size, &error_fatal); - memory_region_add_subregion(sysmem, map[VE_VIDEORAM], vram); + memory_region_add_subregion(sysmem, 
map[VE_VIDEORAM], &vms->vram); /* 0x4e000000 LAN9118 Ethernet */ if (nd_table[0].used) { diff --git a/hw/arm/virt.c b/hw/arm/virt.c index a89d699f0b..9b9f7d9c68 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -206,16 +206,16 @@ static const int a15irqmap[] = { static const char *valid_cpus[] = { #ifdef CONFIG_TCG ARM_CPU_TYPE_NAME("cortex-a7"), -#endif ARM_CPU_TYPE_NAME("cortex-a15"), ARM_CPU_TYPE_NAME("cortex-a35"), - ARM_CPU_TYPE_NAME("cortex-a53"), ARM_CPU_TYPE_NAME("cortex-a55"), - ARM_CPU_TYPE_NAME("cortex-a57"), ARM_CPU_TYPE_NAME("cortex-a72"), ARM_CPU_TYPE_NAME("cortex-a76"), ARM_CPU_TYPE_NAME("a64fx"), ARM_CPU_TYPE_NAME("neoverse-n1"), +#endif + ARM_CPU_TYPE_NAME("cortex-a53"), + ARM_CPU_TYPE_NAME("cortex-a57"), ARM_CPU_TYPE_NAME("host"), ARM_CPU_TYPE_NAME("max"), }; @@ -1426,6 +1426,7 @@ static void create_pcie(VirtMachineState *vms) int i, ecam_id; PCIHostState *pci; MachineState *ms = MACHINE(vms); + MachineClass *mc = MACHINE_GET_CLASS(ms); dev = qdev_new(TYPE_GPEX_HOST); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -1479,7 +1480,7 @@ static void create_pcie(VirtMachineState *vms) NICInfo *nd = &nd_table[i]; if (!nd->model) { - nd->model = g_strdup("virtio"); + nd->model = g_strdup(mc->default_nic); } pci_nic_init_nofail(nd, pci->bus, nd->model, NULL); @@ -3033,6 +3034,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) mc->auto_enable_numa_with_memhp = true; mc->auto_enable_numa_with_memdev = true; mc->default_ram_id = "mach-virt.ram"; + mc->default_nic = "virtio-net-pci"; object_class_property_add(oc, "acpi", "OnOffAuto", virt_get_acpi, virt_set_acpi, diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c new file mode 100644 index 0000000000..19b1cb81ad --- /dev/null +++ b/hw/arm/xen_arm.c @@ -0,0 +1,181 @@ +/* + * QEMU ARM Xen PVH Machine + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/qapi-commands-migration.h" +#include "qapi/visitor.h" +#include "hw/boards.h" +#include "hw/sysbus.h" +#include "sysemu/block-backend.h" +#include "sysemu/tpm_backend.h" +#include "sysemu/sysemu.h" +#include "hw/xen/xen-hvm-common.h" +#include "sysemu/tpm.h" +#include "hw/xen/arch_hvm.h" + +#define TYPE_XEN_ARM MACHINE_TYPE_NAME("xenpvh") +OBJECT_DECLARE_SIMPLE_TYPE(XenArmState, XEN_ARM) + +static MemoryListener xen_memory_listener = { + .region_add = xen_region_add, + .region_del = xen_region_del, + .log_start = NULL, + .log_stop = NULL, + .log_sync = NULL, + .log_global_start = NULL, + .log_global_stop = NULL, + .priority = 10, +}; + +struct XenArmState { + /*< private >*/ + MachineState parent; + + XenIOState *state; + + struct { + uint64_t tpm_base_addr; + } cfg; +}; + +void arch_handle_ioreq(XenIOState *state, ioreq_t *req) +{ + hw_error("Invalid ioreq type 0x%x\n", req->type); + + return; +} + +void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section, + bool add) +{ +} + +void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) +{ +} + +void qmp_xen_set_global_dirty_log(bool enable, Error **errp) +{ +} + +#ifdef CONFIG_TPM +static void xen_enable_tpm(XenArmState *xam) +{ + Error *errp = NULL; + DeviceState *dev; + SysBusDevice *busdev; + + TPMBackend *be = qemu_find_tpm_be("tpm0"); + if (be == NULL) { + DPRINTF("Couldn't fine the backend for tpm0\n"); + return; + } + dev = qdev_new(TYPE_TPM_TIS_SYSBUS); + object_property_set_link(OBJECT(dev), "tpmdev", OBJECT(be), &errp); + object_property_set_str(OBJECT(dev), "tpmdev", be->id, &errp); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, xam->cfg.tpm_base_addr); + + DPRINTF("Connected tpmdev at address 0x%lx\n", xam->cfg.tpm_base_addr); +} +#endif + +static void xen_arm_init(MachineState *machine) +{ + XenArmState *xam = XEN_ARM(machine); + + xam->state = g_new0(XenIOState, 1); + + xen_register_ioreq(xam->state, machine->smp.cpus, xen_memory_listener); + +#ifdef CONFIG_TPM + if (xam->cfg.tpm_base_addr) { + xen_enable_tpm(xam); + } else { + DPRINTF("tpm-base-addr is not provided. 
TPM will not be enabled\n"); + } +#endif +} + +#ifdef CONFIG_TPM +static void xen_arm_get_tpm_base_addr(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + XenArmState *xam = XEN_ARM(obj); + uint64_t value = xam->cfg.tpm_base_addr; + + visit_type_uint64(v, name, &value, errp); +} + +static void xen_arm_set_tpm_base_addr(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + XenArmState *xam = XEN_ARM(obj); + uint64_t value; + + if (!visit_type_uint64(v, name, &value, errp)) { + return; + } + + xam->cfg.tpm_base_addr = value; +} +#endif + +static void xen_arm_machine_class_init(ObjectClass *oc, void *data) +{ + + MachineClass *mc = MACHINE_CLASS(oc); + mc->desc = "Xen Para-virtualized PC"; + mc->init = xen_arm_init; + mc->max_cpus = 1; + mc->default_machine_opts = "accel=xen"; + +#ifdef CONFIG_TPM + object_class_property_add(oc, "tpm-base-addr", "uint64_t", + xen_arm_get_tpm_base_addr, + xen_arm_set_tpm_base_addr, + NULL, NULL); + object_class_property_set_description(oc, "tpm-base-addr", + "Set Base address for TPM device."); + + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS); +#endif +} + +static const TypeInfo xen_arm_machine_type = { + .name = TYPE_XEN_ARM, + .parent = TYPE_MACHINE, + .class_init = xen_arm_machine_class_init, + .instance_size = sizeof(XenArmState), +}; + +static void xen_arm_machine_register_types(void) +{ + type_register_static(&xen_arm_machine_type); +} + +type_init(xen_arm_machine_register_types) diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c index 668a9d65a4..1ee2b8697f 100644 --- a/hw/arm/xlnx-versal-virt.c +++ b/hw/arm/xlnx-versal-virt.c @@ -40,9 +40,11 @@ struct VersalVirt { uint32_t clk_25Mhz; uint32_t usb; uint32_t dwc; + uint32_t canfd[2]; } phandle; struct arm_boot_info binfo; + CanBusState *canbus[XLNX_VERSAL_NR_CANFD]; struct { bool secure; } cfg; @@ -235,6 +237,38 @@ static void fdt_add_uart_nodes(VersalVirt *s) } } +static void fdt_add_canfd_nodes(VersalVirt *s) +{ + uint64_t addrs[] = { MM_CANFD1, MM_CANFD0 }; + uint32_t size[] = { MM_CANFD1_SIZE, MM_CANFD0_SIZE }; + unsigned int irqs[] = { VERSAL_CANFD1_IRQ_0, VERSAL_CANFD0_IRQ_0 }; + const char clocknames[] = "can_clk\0s_axi_aclk"; + int i; + + /* Create and connect CANFD0 and CANFD1 nodes to canbus0. 
*/ + for (i = 0; i < ARRAY_SIZE(addrs); i++) { + char *name = g_strdup_printf("/canfd@%" PRIx64, addrs[i]); + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_cell(s->fdt, name, "rx-fifo-depth", 0x40); + qemu_fdt_setprop_cell(s->fdt, name, "tx-mailbox-count", 0x20); + + qemu_fdt_setprop_cells(s->fdt, name, "clocks", + s->phandle.clk_25Mhz, s->phandle.clk_25Mhz); + qemu_fdt_setprop(s->fdt, name, "clock-names", + clocknames, sizeof(clocknames)); + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, irqs[i], + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, addrs[i], 2, size[i]); + qemu_fdt_setprop_string(s->fdt, name, "compatible", + "xlnx,canfd-2.0"); + + g_free(name); + } +} + static void fdt_add_fixed_link_nodes(VersalVirt *s, char *gemname, uint32_t phandle) { @@ -639,12 +673,17 @@ static void versal_virt_init(MachineState *machine) TYPE_XLNX_VERSAL); object_property_set_link(OBJECT(&s->soc), "ddr", OBJECT(machine->ram), &error_abort); + object_property_set_link(OBJECT(&s->soc), "canbus0", OBJECT(s->canbus[0]), + &error_abort); + object_property_set_link(OBJECT(&s->soc), "canbus1", OBJECT(s->canbus[1]), + &error_abort); sysbus_realize(SYS_BUS_DEVICE(&s->soc), &error_fatal); fdt_create(s); create_virtio_regions(s); fdt_add_gem_nodes(s); fdt_add_uart_nodes(s); + fdt_add_canfd_nodes(s); fdt_add_gic_nodes(s); fdt_add_timer_nodes(s); fdt_add_zdma_nodes(s); @@ -712,6 +751,20 @@ static void versal_virt_init(MachineState *machine) static void versal_virt_machine_instance_init(Object *obj) { + VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(obj); + + /* + * User can set canbus0 and canbus1 properties to can-bus object and connect + * to socketcan(optional) interface via command line. + */ + object_property_add_link(obj, "canbus0", TYPE_CAN_BUS, + (Object **)&s->canbus[0], + object_property_allow_set_link, + 0); + object_property_add_link(obj, "canbus1", TYPE_CAN_BUS, + (Object **)&s->canbus[1], + object_property_allow_set_link, + 0); } static void versal_virt_machine_class_init(ObjectClass *oc, void *data) diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c index 69b1b99e93..60bf5fe657 100644 --- a/hw/arm/xlnx-versal.c +++ b/hw/arm/xlnx-versal.c @@ -184,6 +184,38 @@ static void versal_create_uarts(Versal *s, qemu_irq *pic) } } +static void versal_create_canfds(Versal *s, qemu_irq *pic) +{ + int i; + uint32_t irqs[] = { VERSAL_CANFD0_IRQ_0, VERSAL_CANFD1_IRQ_0}; + uint64_t addrs[] = { MM_CANFD0, MM_CANFD1 }; + + for (i = 0; i < ARRAY_SIZE(s->lpd.iou.canfd); i++) { + char *name = g_strdup_printf("canfd%d", i); + SysBusDevice *sbd; + MemoryRegion *mr; + + object_initialize_child(OBJECT(s), name, &s->lpd.iou.canfd[i], + TYPE_XILINX_CANFD); + sbd = SYS_BUS_DEVICE(&s->lpd.iou.canfd[i]); + + object_property_set_int(OBJECT(&s->lpd.iou.canfd[i]), "ext_clk_freq", + XLNX_VERSAL_CANFD_REF_CLK , &error_abort); + + object_property_set_link(OBJECT(&s->lpd.iou.canfd[i]), "canfdbus", + OBJECT(s->lpd.iou.canbus[i]), + &error_abort); + + sysbus_realize(sbd, &error_fatal); + + mr = sysbus_mmio_get_region(sbd, 0); + memory_region_add_subregion(&s->mr_ps, addrs[i], mr); + + sysbus_connect_irq(sbd, 0, pic[irqs[i]]); + g_free(name); + } +} + static void versal_create_usbs(Versal *s, qemu_irq *pic) { DeviceState *dev; @@ -327,7 +359,7 @@ static void versal_create_rtc(Versal *s, qemu_irq *pic) object_initialize_child(OBJECT(s), "rtc", &s->pmc.rtc, TYPE_XLNX_ZYNQMP_RTC); sbd = SYS_BUS_DEVICE(&s->pmc.rtc); - sysbus_realize(SYS_BUS_DEVICE(sbd), &error_fatal); 
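For readers skimming the fdt_add_canfd_nodes() hunk above, this is QEMU's usual libfdt helper pattern for describing a new peripheral to the guest. A minimal sketch under assumed names (a hypothetical "myuart" node with a made-up address and compatible string; not part of this patch):

#include "qemu/osdep.h"
#include "sysemu/device_tree.h"

/* Sketch only: adds /myuart@f0001000 with "reg", "clocks" and "compatible". */
static void fdt_add_myuart_node(void *fdt, uint32_t clk_phandle)
{
    g_autofree char *name = g_strdup_printf("/myuart@%x", 0xf0001000);

    qemu_fdt_add_subnode(fdt, name);
    qemu_fdt_setprop_cell(fdt, name, "clocks", clk_phandle);
    /* Pairs of (number of cells, value): a 2-cell address and a 2-cell size. */
    qemu_fdt_setprop_sized_cells(fdt, name, "reg",
                                 2, 0xf0001000ULL, 2, 0x1000ULL);
    qemu_fdt_setprop_string(fdt, name, "compatible", "vendor,myuart");
}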
+ sysbus_realize(sbd, &error_fatal); mr = sysbus_mmio_get_region(sbd, 0); memory_region_add_subregion(&s->mr_ps, MM_PMC_RTC, mr); @@ -718,6 +750,7 @@ static void versal_realize(DeviceState *dev, Error **errp) versal_create_apu_gic(s, pic); versal_create_rpu_cpus(s); versal_create_uarts(s, pic); + versal_create_canfds(s, pic); versal_create_usbs(s, pic); versal_create_gems(s, pic); versal_create_admas(s, pic); @@ -757,6 +790,10 @@ static void versal_init(Object *obj) static Property versal_properties[] = { DEFINE_PROP_LINK("ddr", Versal, cfg.mr_ddr, TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_LINK("canbus0", Versal, lpd.iou.canbus[0], + TYPE_CAN_BUS, CanBusState *), + DEFINE_PROP_LINK("canbus1", Versal, lpd.iou.canbus[1], + TYPE_CAN_BUS, CanBusState *), DEFINE_PROP_END_OF_LIST() }; diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c index 335cfc417d..5905a33015 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -213,7 +213,7 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s, const char *boot_cpu, Error **errp) { int i; - int num_rpus = MIN(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS, + int num_rpus = MIN((int)(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS), XLNX_ZYNQMP_NUM_RPU_CPUS); if (num_rpus <= 0) { diff --git a/hw/audio/meson.build b/hw/audio/meson.build index e48a9fc73d..d0fda1009e 100644 --- a/hw/audio/meson.build +++ b/hw/audio/meson.build @@ -1,14 +1,14 @@ -softmmu_ss.add(files('soundhw.c')) -softmmu_ss.add(when: 'CONFIG_AC97', if_true: files('ac97.c')) -softmmu_ss.add(when: 'CONFIG_ADLIB', if_true: files('fmopl.c', 'adlib.c')) -softmmu_ss.add(when: 'CONFIG_CS4231', if_true: files('cs4231.c')) -softmmu_ss.add(when: 'CONFIG_CS4231A', if_true: files('cs4231a.c')) -softmmu_ss.add(when: 'CONFIG_ES1370', if_true: files('es1370.c')) -softmmu_ss.add(when: 'CONFIG_GUS', if_true: files('gus.c', 'gusemu_hal.c', 'gusemu_mixer.c')) -softmmu_ss.add(when: 'CONFIG_HDA', if_true: files('intel-hda.c', 'hda-codec.c')) -softmmu_ss.add(when: 'CONFIG_MARVELL_88W8618', if_true: files('marvell_88w8618.c')) -softmmu_ss.add(when: 'CONFIG_PCSPK', if_true: files('pcspk.c')) -softmmu_ss.add(when: 'CONFIG_PL041', if_true: files('pl041.c', 'lm4549.c')) -softmmu_ss.add(when: 'CONFIG_SB16', if_true: files('sb16.c')) -softmmu_ss.add(when: 'CONFIG_VT82C686', if_true: files('via-ac97.c')) -softmmu_ss.add(when: 'CONFIG_WM8750', if_true: files('wm8750.c')) +system_ss.add(files('soundhw.c')) +system_ss.add(when: 'CONFIG_AC97', if_true: files('ac97.c')) +system_ss.add(when: 'CONFIG_ADLIB', if_true: files('fmopl.c', 'adlib.c')) +system_ss.add(when: 'CONFIG_CS4231', if_true: files('cs4231.c')) +system_ss.add(when: 'CONFIG_CS4231A', if_true: files('cs4231a.c')) +system_ss.add(when: 'CONFIG_ES1370', if_true: files('es1370.c')) +system_ss.add(when: 'CONFIG_GUS', if_true: files('gus.c', 'gusemu_hal.c', 'gusemu_mixer.c')) +system_ss.add(when: 'CONFIG_HDA', if_true: files('intel-hda.c', 'hda-codec.c')) +system_ss.add(when: 'CONFIG_MARVELL_88W8618', if_true: files('marvell_88w8618.c')) +system_ss.add(when: 'CONFIG_PCSPK', if_true: files('pcspk.c')) +system_ss.add(when: 'CONFIG_PL041', if_true: files('pl041.c', 'lm4549.c')) +system_ss.add(when: 'CONFIG_SB16', if_true: files('sb16.c')) +system_ss.add(when: 'CONFIG_VT82C686', if_true: files('via-ac97.c')) +system_ss.add(when: 'CONFIG_WM8750', if_true: files('wm8750.c')) diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c index b28d81737e..b90456c08c 100644 --- a/hw/block/dataplane/virtio-blk.c +++ 
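The (int) cast added to the RPU count in the xlnx-zynqmp hunk above matters because ms->smp.cpus is unsigned: with fewer than XLNX_ZYNQMP_NUM_APU_CPUS CPUs the subtraction wraps to a huge value, MIN() then returns the RPU maximum, and the num_rpus <= 0 guard never fires. A standalone illustration with made-up values (QEMU's MIN() behaves like the local macro here):

#include <stdio.h>
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    unsigned int cpus = 1;              /* fewer than the 4 APU cores */
    unsigned int apus = 4, rpus_max = 2;

    /* Unsigned subtraction wraps, so MIN() picks the RPU maximum: prints 2. */
    printf("%u\n", MIN(cpus - apus, rpus_max));

    /* Casting keeps the result negative (-3 on the usual two's-complement
     * targets), so a "<= 0" check can reject the configuration. */
    printf("%d\n", MIN((int)(cpus - apus), (int)rpus_max));
    return 0;
}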
b/hw/block/dataplane/virtio-blk.c @@ -127,7 +127,8 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, } else { s->ctx = qemu_get_aio_context(); } - s->bh = aio_bh_new(s->ctx, notify_guest_bh, s); + s->bh = aio_bh_new_guarded(s->ctx, notify_guest_bh, s, + &DEVICE(vdev)->mem_reentrancy_guard); s->batch_notify_vqs = bitmap_new(conf->num_queues); *dataplane = s; @@ -245,13 +246,15 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev) } /* Get this show started by hooking up our callbacks */ - aio_context_acquire(s->ctx); - for (i = 0; i < nvqs; i++) { - VirtQueue *vq = virtio_get_queue(s->vdev, i); + if (!blk_in_drain(s->conf->conf.blk)) { + aio_context_acquire(s->ctx); + for (i = 0; i < nvqs; i++) { + VirtQueue *vq = virtio_get_queue(s->vdev, i); - virtio_queue_aio_attach_host_notifier(vq, s->ctx); + virtio_queue_aio_attach_host_notifier(vq, s->ctx); + } + aio_context_release(s->ctx); } - aio_context_release(s->ctx); return 0; fail_aio_context: @@ -286,8 +289,15 @@ static void virtio_blk_data_plane_stop_bh(void *opaque) for (i = 0; i < s->conf->num_queues; i++) { VirtQueue *vq = virtio_get_queue(s->vdev, i); + EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq); virtio_queue_aio_detach_host_notifier(vq, s->ctx); + + /* + * Test and clear notifier after disabling event, in case poll callback + * didn't have time to run. + */ + virtio_queue_host_notifier_read(host_notifier); } } @@ -314,8 +324,11 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev) s->stopping = true; trace_virtio_blk_data_plane_stop(s); + if (!blk_in_drain(s->conf->conf.blk)) { + aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s); + } + aio_context_acquire(s->ctx); - aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s); /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */ blk_drain(s->conf->conf.blk); diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c index 734da42ea7..3b6f2b0aa2 100644 --- a/hw/block/dataplane/xen-block.c +++ b/hw/block/dataplane/xen-block.c @@ -537,7 +537,7 @@ static bool xen_block_handle_requests(XenBlockDataPlane *dataplane) * is below us. 
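The virtio-blk dataplane hunk above (and the xen-block one that follows) switches to the guarded bottom-half constructor so BH invocation is tracked against the owning device's MemReentrancyGuard, as part of the memory re-entrancy hardening. A minimal sketch, assuming a hypothetical MyDev that embeds a DeviceState:

#include "qemu/osdep.h"
#include "block/aio.h"
#include "hw/qdev-core.h"

typedef struct MyDev {
    DeviceState parent_obj;
    QEMUBH *bh;
} MyDev;

static void mydev_bh_cb(void *opaque)
{
    /* Deferred work for MyDev runs here. */
}

static void mydev_create_bh(MyDev *d, AioContext *ctx)
{
    /*
     * The guard lives in DeviceState; re-entrant invocation while the
     * device's own handlers are on the stack can then be detected.
     */
    d->bh = aio_bh_new_guarded(ctx, mydev_bh_cb, d,
                               &DEVICE(d)->mem_reentrancy_guard);
}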
*/ if (inflight_atstart > IO_PLUG_THRESHOLD) { - blk_io_plug(dataplane->blk); + blk_io_plug(); } while (rc != rp) { /* pull request from ring */ @@ -577,12 +577,12 @@ static bool xen_block_handle_requests(XenBlockDataPlane *dataplane) if (inflight_atstart > IO_PLUG_THRESHOLD && batched >= inflight_atstart) { - blk_io_unplug(dataplane->blk); + blk_io_unplug(); } xen_block_do_aio(request); if (inflight_atstart > IO_PLUG_THRESHOLD) { if (batched >= inflight_atstart) { - blk_io_plug(dataplane->blk); + blk_io_plug(); batched = 0; } else { batched++; @@ -590,7 +590,7 @@ static bool xen_block_handle_requests(XenBlockDataPlane *dataplane) } } if (inflight_atstart > IO_PLUG_THRESHOLD) { - blk_io_unplug(dataplane->blk); + blk_io_unplug(); } return done_something; @@ -633,8 +633,9 @@ XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev, } else { dataplane->ctx = qemu_get_aio_context(); } - dataplane->bh = aio_bh_new(dataplane->ctx, xen_block_dataplane_bh, - dataplane); + dataplane->bh = aio_bh_new_guarded(dataplane->ctx, xen_block_dataplane_bh, + dataplane, + &DEVICE(xendev)->mem_reentrancy_guard); return dataplane; } @@ -663,6 +664,30 @@ void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane) g_free(dataplane); } +void xen_block_dataplane_detach(XenBlockDataPlane *dataplane) +{ + if (!dataplane || !dataplane->event_channel) { + return; + } + + /* Only reason for failure is a NULL channel */ + xen_device_set_event_channel_context(dataplane->xendev, + dataplane->event_channel, + NULL, &error_abort); +} + +void xen_block_dataplane_attach(XenBlockDataPlane *dataplane) +{ + if (!dataplane || !dataplane->event_channel) { + return; + } + + /* Only reason for failure is a NULL channel */ + xen_device_set_event_channel_context(dataplane->xendev, + dataplane->event_channel, + dataplane->ctx, &error_abort); +} + void xen_block_dataplane_stop(XenBlockDataPlane *dataplane) { XenDevice *xendev; @@ -673,13 +698,11 @@ void xen_block_dataplane_stop(XenBlockDataPlane *dataplane) xendev = dataplane->xendev; - aio_context_acquire(dataplane->ctx); - if (dataplane->event_channel) { - /* Only reason for failure is a NULL channel */ - xen_device_set_event_channel_context(xendev, dataplane->event_channel, - qemu_get_aio_context(), - &error_abort); + if (!blk_in_drain(dataplane->blk)) { + xen_block_dataplane_detach(dataplane); } + + aio_context_acquire(dataplane->ctx); /* Xen doesn't have multiple users for nodes, so this can't fail */ blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort); aio_context_release(dataplane->ctx); @@ -818,11 +841,9 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane, blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL); aio_context_release(old_context); - /* Only reason for failure is a NULL channel */ - aio_context_acquire(dataplane->ctx); - xen_device_set_event_channel_context(xendev, dataplane->event_channel, - dataplane->ctx, &error_abort); - aio_context_release(dataplane->ctx); + if (!blk_in_drain(dataplane->blk)) { + xen_block_dataplane_attach(dataplane); + } return; diff --git a/hw/block/dataplane/xen-block.h b/hw/block/dataplane/xen-block.h index 76dcd51c3d..7b8e9df09f 100644 --- a/hw/block/dataplane/xen-block.h +++ b/hw/block/dataplane/xen-block.h @@ -26,5 +26,7 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane, unsigned int protocol, Error **errp); void xen_block_dataplane_stop(XenBlockDataPlane *dataplane); +void xen_block_dataplane_attach(XenBlockDataPlane *dataplane); +void xen_block_dataplane_detach(XenBlockDataPlane 
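The same xen-block hunks also adopt the new argument-less blk_io_plug()/blk_io_unplug() API, which batches request submission per thread instead of per BlockBackend. The calling pattern, sketched with a hypothetical burst of writes (submit_batch and its parameters are illustrative):

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "block/aio.h"
#include "sysemu/block-backend.h"

/* Sketch: batch a burst of writes so they are submitted together. */
static void submit_batch(BlockBackend *blk, QEMUIOVector *qiovs,
                         int64_t *offsets, int n,
                         BlockCompletionFunc *done, void *opaque)
{
    blk_io_plug();                      /* start batching (per thread) */
    for (int i = 0; i < n; i++) {
        blk_aio_pwritev(blk, offsets[i], &qiovs[i], 0, done, opaque);
    }
    blk_io_unplug();                    /* flush the batched requests */
}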
*dataplane); #endif /* HW_BLOCK_DATAPLANE_XEN_BLOCK_H */ diff --git a/hw/block/meson.build b/hw/block/meson.build index cc2a75cc50..8aa4dc3893 100644 --- a/hw/block/meson.build +++ b/hw/block/meson.build @@ -1,21 +1,21 @@ -softmmu_ss.add(files( +system_ss.add(files( 'block.c', 'cdrom.c', 'hd-geometry.c' )) -softmmu_ss.add(when: 'CONFIG_ECC', if_true: files('ecc.c')) -softmmu_ss.add(when: 'CONFIG_FDC', if_true: files('fdc.c')) -softmmu_ss.add(when: 'CONFIG_FDC_ISA', if_true: files('fdc-isa.c')) -softmmu_ss.add(when: 'CONFIG_FDC_SYSBUS', if_true: files('fdc-sysbus.c')) -softmmu_ss.add(when: 'CONFIG_NAND', if_true: files('nand.c')) -softmmu_ss.add(when: 'CONFIG_ONENAND', if_true: files('onenand.c')) -softmmu_ss.add(when: 'CONFIG_PFLASH_CFI01', if_true: files('pflash_cfi01.c')) -softmmu_ss.add(when: 'CONFIG_PFLASH_CFI02', if_true: files('pflash_cfi02.c')) -softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80.c')) -softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80_sfdp.c')) -softmmu_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c')) -softmmu_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-block.c')) -softmmu_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c')) +system_ss.add(when: 'CONFIG_ECC', if_true: files('ecc.c')) +system_ss.add(when: 'CONFIG_FDC', if_true: files('fdc.c')) +system_ss.add(when: 'CONFIG_FDC_ISA', if_true: files('fdc-isa.c')) +system_ss.add(when: 'CONFIG_FDC_SYSBUS', if_true: files('fdc-sysbus.c')) +system_ss.add(when: 'CONFIG_NAND', if_true: files('nand.c')) +system_ss.add(when: 'CONFIG_ONENAND', if_true: files('onenand.c')) +system_ss.add(when: 'CONFIG_PFLASH_CFI01', if_true: files('pflash_cfi01.c')) +system_ss.add(when: 'CONFIG_PFLASH_CFI02', if_true: files('pflash_cfi02.c')) +system_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80.c')) +system_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80_sfdp.c')) +system_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c')) +system_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-block.c')) +system_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c')) specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c', 'virtio-blk-common.c')) specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c', 'virtio-blk-common.c')) diff --git a/hw/block/trace-events b/hw/block/trace-events index 2c45a62bd5..34be8b9135 100644 --- a/hw/block/trace-events +++ b/hw/block/trace-events @@ -44,9 +44,16 @@ pflash_write_unknown(const char *name, uint8_t cmd) "%s: unknown command 0x%02x" # virtio-blk.c virtio_blk_req_complete(void *vdev, void *req, int status) "vdev %p req %p status %d" virtio_blk_rw_complete(void *vdev, void *req, int ret) "vdev %p req %p ret %d" +virtio_blk_zone_report_complete(void *vdev, void *req, unsigned int nr_zones, int ret) "vdev %p req %p nr_zones %u ret %d" +virtio_blk_zone_mgmt_complete(void *vdev, void *req, int ret) "vdev %p req %p ret %d" +virtio_blk_zone_append_complete(void *vdev, void *req, int64_t sector, int ret) "vdev %p req %p, append sector 0x%" PRIx64 " ret %d" virtio_blk_handle_write(void *vdev, void *req, uint64_t sector, size_t nsectors) "vdev %p req %p sector %"PRIu64" nsectors %zu" virtio_blk_handle_read(void *vdev, void *req, uint64_t sector, size_t nsectors) "vdev %p req %p sector %"PRIu64" nsectors %zu" virtio_blk_submit_multireq(void *vdev, void *mrb, int start, int num_reqs, uint64_t offset, size_t size, bool is_write) "vdev %p mrb %p start %d num_reqs %d offset %"PRIu64" size %zu is_write %d" 
+virtio_blk_handle_zone_report(void *vdev, void *req, int64_t sector, unsigned int nr_zones) "vdev %p req %p sector 0x%" PRIx64 " nr_zones %u" +virtio_blk_handle_zone_mgmt(void *vdev, void *req, uint8_t op, int64_t sector, int64_t len) "vdev %p req %p op 0x%x sector 0x%" PRIx64 " len 0x%" PRIx64 "" +virtio_blk_handle_zone_reset_all(void *vdev, void *req, int64_t sector, int64_t len) "vdev %p req %p sector 0x%" PRIx64 " cap 0x%" PRIx64 "" +virtio_blk_handle_zone_append(void *vdev, void *req, int64_t sector) "vdev %p req %p, append sector 0x%" PRIx64 "" # hd-geometry.c hd_geometry_lchs_guess(void *blk, int cyls, int heads, int secs) "blk %p LCHS %d %d %d" diff --git a/hw/block/virtio-blk-common.c b/hw/block/virtio-blk-common.c index ac52d7c176..e2f8e2f6da 100644 --- a/hw/block/virtio-blk-common.c +++ b/hw/block/virtio-blk-common.c @@ -29,6 +29,8 @@ static const VirtIOFeature feature_sizes[] = { .end = endof(struct virtio_blk_config, discard_sector_alignment)}, {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES, .end = endof(struct virtio_blk_config, write_zeroes_may_unmap)}, + {.flags = 1ULL << VIRTIO_BLK_F_ZONED, + .end = endof(struct virtio_blk_config, zoned)}, {} }; diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index cefca93b31..39e7f23fab 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -17,6 +17,7 @@ #include "qemu/module.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" +#include "block/block_int.h" #include "trace.h" #include "hw/block/block.h" #include "hw/qdev-properties.h" @@ -601,6 +602,351 @@ err: return err_status; } +typedef struct ZoneCmdData { + VirtIOBlockReq *req; + struct iovec *in_iov; + unsigned in_num; + union { + struct { + unsigned int nr_zones; + BlockZoneDescriptor *zones; + } zone_report_data; + struct { + int64_t offset; + } zone_append_data; + }; +} ZoneCmdData; + +/* + * check zoned_request: error checking before issuing requests. If all checks + * passed, return true. + * append: true if only zone append requests issued. 
+ */ +static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len, + bool append, uint8_t *status) { + BlockDriverState *bs = blk_bs(s->blk); + int index; + + if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) { + *status = VIRTIO_BLK_S_UNSUPP; + return false; + } + + if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS) + || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) { + *status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + return false; + } + + if (append) { + if (bs->bl.write_granularity) { + if ((offset % bs->bl.write_granularity) != 0) { + *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP; + return false; + } + } + + index = offset / bs->bl.zone_size; + if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) { + *status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + return false; + } + + if (len / 512 > bs->bl.max_append_sectors) { + if (bs->bl.max_append_sectors == 0) { + *status = VIRTIO_BLK_S_UNSUPP; + } else { + *status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + } + return false; + } + } + return true; +} + +static void virtio_blk_zone_report_complete(void *opaque, int ret) +{ + ZoneCmdData *data = opaque; + VirtIOBlockReq *req = data->req; + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); + struct iovec *in_iov = data->in_iov; + unsigned in_num = data->in_num; + int64_t zrp_size, n, j = 0; + int64_t nz = data->zone_report_data.nr_zones; + int8_t err_status = VIRTIO_BLK_S_OK; + + trace_virtio_blk_zone_report_complete(vdev, req, nz, ret); + if (ret) { + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + goto out; + } + + struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) { + .nr_zones = cpu_to_le64(nz), + }; + zrp_size = sizeof(struct virtio_blk_zone_report) + + sizeof(struct virtio_blk_zone_descriptor) * nz; + n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr)); + if (n != sizeof(zrp_hdr)) { + virtio_error(vdev, "Driver provided input buffer that is too small!"); + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + goto out; + } + + for (size_t i = sizeof(zrp_hdr); i < zrp_size; + i += sizeof(struct virtio_blk_zone_descriptor), ++j) { + struct virtio_blk_zone_descriptor desc = + (struct virtio_blk_zone_descriptor) { + .z_start = cpu_to_le64(data->zone_report_data.zones[j].start + >> BDRV_SECTOR_BITS), + .z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap + >> BDRV_SECTOR_BITS), + .z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp + >> BDRV_SECTOR_BITS), + }; + + switch (data->zone_report_data.zones[j].type) { + case BLK_ZT_CONV: + desc.z_type = VIRTIO_BLK_ZT_CONV; + break; + case BLK_ZT_SWR: + desc.z_type = VIRTIO_BLK_ZT_SWR; + break; + case BLK_ZT_SWP: + desc.z_type = VIRTIO_BLK_ZT_SWP; + break; + default: + g_assert_not_reached(); + } + + switch (data->zone_report_data.zones[j].state) { + case BLK_ZS_RDONLY: + desc.z_state = VIRTIO_BLK_ZS_RDONLY; + break; + case BLK_ZS_OFFLINE: + desc.z_state = VIRTIO_BLK_ZS_OFFLINE; + break; + case BLK_ZS_EMPTY: + desc.z_state = VIRTIO_BLK_ZS_EMPTY; + break; + case BLK_ZS_CLOSED: + desc.z_state = VIRTIO_BLK_ZS_CLOSED; + break; + case BLK_ZS_FULL: + desc.z_state = VIRTIO_BLK_ZS_FULL; + break; + case BLK_ZS_EOPEN: + desc.z_state = VIRTIO_BLK_ZS_EOPEN; + break; + case BLK_ZS_IOPEN: + desc.z_state = VIRTIO_BLK_ZS_IOPEN; + break; + case BLK_ZS_NOT_WP: + desc.z_state = VIRTIO_BLK_ZS_NOT_WP; + break; + default: + g_assert_not_reached(); + } + + /* TODO: it takes O(n^2) time complexity. Optimizations required. 
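virtio_blk_zone_report_complete() above depends on iov_from_buf() returning the number of bytes it actually copied; comparing that against the struct size is how an undersized guest buffer is caught. A minimal sketch with a made-up header (my_hdr and copy_hdr_to_guest are illustrative):

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "qemu/bswap.h"

struct my_hdr {
    uint64_t nr_entries;
};

/* Returns false if the guest-supplied iovec cannot hold the header. */
static bool copy_hdr_to_guest(struct iovec *in_iov, unsigned in_num,
                              uint64_t nr_entries)
{
    struct my_hdr hdr = { .nr_entries = cpu_to_le64(nr_entries) };
    size_t n = iov_from_buf(in_iov, in_num, 0, &hdr, sizeof(hdr));

    return n == sizeof(hdr);
}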
*/ + n = iov_from_buf(in_iov, in_num, i, &desc, sizeof(desc)); + if (n != sizeof(desc)) { + virtio_error(vdev, "Driver provided input buffer " + "for descriptors that is too small!"); + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + } + } + +out: + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); + g_free(data->zone_report_data.zones); + g_free(data); +} + +static void virtio_blk_handle_zone_report(VirtIOBlockReq *req, + struct iovec *in_iov, + unsigned in_num) +{ + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + unsigned int nr_zones; + ZoneCmdData *data; + int64_t zone_size, offset; + uint8_t err_status; + + if (req->in_len < sizeof(struct virtio_blk_inhdr) + + sizeof(struct virtio_blk_zone_report) + + sizeof(struct virtio_blk_zone_descriptor)) { + virtio_error(vdev, "in buffer too small for zone report"); + return; + } + + /* start byte offset of the zone report */ + offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS; + if (!check_zoned_request(s, offset, 0, false, &err_status)) { + goto out; + } + nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) - + sizeof(struct virtio_blk_zone_report)) / + sizeof(struct virtio_blk_zone_descriptor); + trace_virtio_blk_handle_zone_report(vdev, req, + offset >> BDRV_SECTOR_BITS, nr_zones); + + zone_size = sizeof(BlockZoneDescriptor) * nr_zones; + data = g_malloc(sizeof(ZoneCmdData)); + data->req = req; + data->in_iov = in_iov; + data->in_num = in_num; + data->zone_report_data.nr_zones = nr_zones; + data->zone_report_data.zones = g_malloc(zone_size), + + blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones, + data->zone_report_data.zones, + virtio_blk_zone_report_complete, data); + return; +out: + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); +} + +static void virtio_blk_zone_mgmt_complete(void *opaque, int ret) +{ + VirtIOBlockReq *req = opaque; + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + int8_t err_status = VIRTIO_BLK_S_OK; + trace_virtio_blk_zone_mgmt_complete(vdev, req,ret); + + if (ret) { + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + } + + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); +} + +static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) +{ + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + BlockDriverState *bs = blk_bs(s->blk); + int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS; + uint64_t len; + uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS; + uint8_t err_status = VIRTIO_BLK_S_OK; + + uint32_t type = virtio_ldl_p(vdev, &req->out.type); + if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) { + /* Entire drive capacity */ + offset = 0; + len = capacity; + trace_virtio_blk_handle_zone_reset_all(vdev, req, 0, + bs->total_sectors); + } else { + if (bs->bl.zone_size > capacity - offset) { + /* The zoned device allows the last smaller zone. 
*/ + len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1); + } else { + len = bs->bl.zone_size; + } + trace_virtio_blk_handle_zone_mgmt(vdev, req, op, + offset >> BDRV_SECTOR_BITS, + len >> BDRV_SECTOR_BITS); + } + + if (!check_zoned_request(s, offset, len, false, &err_status)) { + goto out; + } + + blk_aio_zone_mgmt(s->blk, op, offset, len, + virtio_blk_zone_mgmt_complete, req); + + return 0; +out: + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); + return err_status; +} + +static void virtio_blk_zone_append_complete(void *opaque, int ret) +{ + ZoneCmdData *data = opaque; + VirtIOBlockReq *req = data->req; + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); + int64_t append_sector, n; + uint8_t err_status = VIRTIO_BLK_S_OK; + + if (ret) { + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + goto out; + } + + virtio_stq_p(vdev, &append_sector, + data->zone_append_data.offset >> BDRV_SECTOR_BITS); + n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector, + sizeof(append_sector)); + if (n != sizeof(append_sector)) { + virtio_error(vdev, "Driver provided input buffer less than size of " + "append_sector"); + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + goto out; + } + trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret); + +out: + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); + g_free(data); +} + +static int virtio_blk_handle_zone_append(VirtIOBlockReq *req, + struct iovec *out_iov, + struct iovec *in_iov, + uint64_t out_num, + unsigned in_num) { + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + uint8_t err_status = VIRTIO_BLK_S_OK; + + int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS; + int64_t len = iov_size(out_iov, out_num); + + trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS); + if (!check_zoned_request(s, offset, len, true, &err_status)) { + goto out; + } + + ZoneCmdData *data = g_malloc(sizeof(ZoneCmdData)); + data->req = req; + data->in_iov = in_iov; + data->in_num = in_num; + data->zone_append_data.offset = offset; + qemu_iovec_init_external(&req->qiov, out_iov, out_num); + + block_acct_start(blk_get_stats(s->blk), &req->acct, len, + BLOCK_ACCT_ZONE_APPEND); + + blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0, + virtio_blk_zone_append_complete, data); + return 0; + +out: + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); + return err_status; +} + static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) { uint32_t type; @@ -687,6 +1033,24 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) case VIRTIO_BLK_T_FLUSH: virtio_blk_handle_flush(req, mrb); break; + case VIRTIO_BLK_T_ZONE_REPORT: + virtio_blk_handle_zone_report(req, in_iov, in_num); + break; + case VIRTIO_BLK_T_ZONE_OPEN: + virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN); + break; + case VIRTIO_BLK_T_ZONE_CLOSE: + virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE); + break; + case VIRTIO_BLK_T_ZONE_FINISH: + virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH); + break; + case VIRTIO_BLK_T_ZONE_RESET: + virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET); + break; + case VIRTIO_BLK_T_ZONE_RESET_ALL: + 
virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET); + break; case VIRTIO_BLK_T_SCSI_CMD: virtio_blk_handle_scsi(req); break; @@ -705,6 +1069,14 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) virtio_blk_free_request(req); break; } + case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT: + /* + * Passing out_iov/out_num and in_iov/in_num is not safe + * to access req->elem.out_sg directly because it may be + * modified by virtio_blk_handle_request(). + */ + virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num); + break; /* * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with * VIRTIO_BLK_T_OUT flag set. We masked this flag in the switch statement, @@ -762,7 +1134,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) bool suppress_notifications = virtio_queue_get_notification(vq); aio_context_acquire(blk_get_aio_context(s->blk)); - blk_io_plug(s->blk); + blk_io_plug(); do { if (suppress_notifications) { @@ -786,7 +1158,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) virtio_blk_submit_multireq(s, &mrb); } - blk_io_unplug(s->blk); + blk_io_unplug(); aio_context_release(blk_get_aio_context(s->blk)); } @@ -890,6 +1262,7 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) { VirtIOBlock *s = VIRTIO_BLK(vdev); BlockConf *conf = &s->conf.conf; + BlockDriverState *bs = blk_bs(s->blk); struct virtio_blk_config blkcfg; uint64_t capacity; int64_t length; @@ -954,6 +1327,30 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) blkcfg.write_zeroes_may_unmap = 1; virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1); } + if (bs->bl.zoned != BLK_Z_NONE) { + switch (bs->bl.zoned) { + case BLK_Z_HM: + blkcfg.zoned.model = VIRTIO_BLK_Z_HM; + break; + case BLK_Z_HA: + blkcfg.zoned.model = VIRTIO_BLK_Z_HA; + break; + default: + g_assert_not_reached(); + } + + virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors, + bs->bl.zone_size / 512); + virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones, + bs->bl.max_active_zones); + virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones, + bs->bl.max_open_zones); + virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size); + virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors, + bs->bl.max_append_sectors); + } else { + blkcfg.zoned.model = VIRTIO_BLK_Z_NONE; + } memcpy(config, &blkcfg, s->config_size); } @@ -1109,8 +1506,44 @@ static void virtio_blk_resize(void *opaque) aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev); } +/* Suspend virtqueue ioeventfd processing during drain */ +static void virtio_blk_drained_begin(void *opaque) +{ + VirtIOBlock *s = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(opaque); + AioContext *ctx = blk_get_aio_context(s->conf.conf.blk); + + if (!s->dataplane || !s->dataplane_started) { + return; + } + + for (uint16_t i = 0; i < s->conf.num_queues; i++) { + VirtQueue *vq = virtio_get_queue(vdev, i); + virtio_queue_aio_detach_host_notifier(vq, ctx); + } +} + +/* Resume virtqueue ioeventfd processing after drain */ +static void virtio_blk_drained_end(void *opaque) +{ + VirtIOBlock *s = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(opaque); + AioContext *ctx = blk_get_aio_context(s->conf.conf.blk); + + if (!s->dataplane || !s->dataplane_started) { + return; + } + + for (uint16_t i = 0; i < s->conf.num_queues; i++) { + VirtQueue *vq = virtio_get_queue(vdev, i); + virtio_queue_aio_attach_host_notifier(vq, ctx); + } +} + static const BlockDevOps virtio_block_ops = { - .resize_cb = virtio_blk_resize, + .resize_cb = 
virtio_blk_resize, + .drained_begin = virtio_blk_drained_begin, + .drained_end = virtio_blk_drained_end, }; static void virtio_blk_device_realize(DeviceState *dev, Error **errp) @@ -1163,6 +1596,14 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) return; } + BlockDriverState *bs = blk_bs(conf->conf.blk); + if (bs->bl.zoned != BLK_Z_NONE) { + virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED); + if (bs->bl.zoned == BLK_Z_HM) { + virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD); + } + } + if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) && (!conf->max_discard_sectors || conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) { diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c index f5a744589d..f099914831 100644 --- a/hw/block/xen-block.c +++ b/hw/block/xen-block.c @@ -189,8 +189,26 @@ static void xen_block_resize_cb(void *opaque) xen_device_backend_printf(xendev, "state", "%u", state); } +/* Suspend request handling */ +static void xen_block_drained_begin(void *opaque) +{ + XenBlockDevice *blockdev = opaque; + + xen_block_dataplane_detach(blockdev->dataplane); +} + +/* Resume request handling */ +static void xen_block_drained_end(void *opaque) +{ + XenBlockDevice *blockdev = opaque; + + xen_block_dataplane_attach(blockdev->dataplane); +} + static const BlockDevOps xen_block_dev_ops = { - .resize_cb = xen_block_resize_cb, + .resize_cb = xen_block_resize_cb, + .drained_begin = xen_block_drained_begin, + .drained_end = xen_block_drained_end, }; static void xen_block_realize(XenDevice *xendev, Error **errp) @@ -242,8 +260,6 @@ static void xen_block_realize(XenDevice *xendev, Error **errp) return; } - blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev); - if (conf->discard_granularity == -1) { conf->discard_granularity = conf->physical_block_size; } @@ -277,6 +293,8 @@ static void xen_block_realize(XenDevice *xendev, Error **errp) blockdev->dataplane = xen_block_dataplane_create(xendev, blk, conf->logical_block_size, blockdev->props.iothread); + + blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev); } static void xen_block_frontend_changed(XenDevice *xendev, diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c index ee1375e26d..1b75a89588 100644 --- a/hw/char/imx_serial.c +++ b/hw/char/imx_serial.c @@ -80,7 +80,7 @@ static void imx_update(IMXSerialState *s) * TCEN and TXDC are both bit 3 * RDR and DREN are both bit 0 */ - mask |= s->ucr4 & (UCR4_TCEN | UCR4_DREN); + mask |= s->ucr4 & (UCR4_WKEN | UCR4_TCEN | UCR4_DREN); usr2 = s->usr2 & mask; @@ -321,6 +321,9 @@ static void imx_put_data(void *opaque, uint32_t value) static void imx_receive(void *opaque, const uint8_t *buf, int size) { + IMXSerialState *s = (IMXSerialState *)opaque; + + s->usr2 |= USR2_WAKE; imx_put_data(opaque, *buf); } diff --git a/hw/char/meson.build b/hw/char/meson.build index 0807e00ae4..006d20f1e2 100644 --- a/hw/char/meson.build +++ b/hw/char/meson.build @@ -1,39 +1,39 @@ -softmmu_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_uart.c')) -softmmu_ss.add(when: 'CONFIG_CMSDK_APB_UART', if_true: files('cmsdk-apb-uart.c')) -softmmu_ss.add(when: 'CONFIG_ESCC', if_true: files('escc.c')) -softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_ser.c')) -softmmu_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_apbuart.c')) -softmmu_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_uart.c')) -softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_serial.c')) -softmmu_ss.add(when: 'CONFIG_IPACK', if_true: files('ipoctal232.c')) -softmmu_ss.add(when: 
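Both block devices now hook drained_begin/drained_end through their BlockDevOps so request handling pauses while the block layer drains. The registration pattern, sketched for a hypothetical device (the mydev_* names are illustrative):

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"

static void mydev_drained_begin(void *opaque)
{
    /* Stop fetching new requests, e.g. detach ioeventfd handlers. */
}

static void mydev_drained_end(void *opaque)
{
    /* Resume request processing. */
}

static const BlockDevOps mydev_block_ops = {
    .drained_begin = mydev_drained_begin,
    .drained_end   = mydev_drained_end,
};

/* Register once the device owns its BlockBackend, before I/O can start. */
static void mydev_register_ops(BlockBackend *blk, void *dev)
{
    blk_set_dev_ops(blk, &mydev_block_ops, dev);
}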
'CONFIG_ISA_BUS', if_true: files('parallel-isa.c')) -softmmu_ss.add(when: 'CONFIG_ISA_DEBUG', if_true: files('debugcon.c')) -softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_uart.c')) -softmmu_ss.add(when: 'CONFIG_PARALLEL', if_true: files('parallel.c')) -softmmu_ss.add(when: 'CONFIG_PL011', if_true: files('pl011.c')) -softmmu_ss.add(when: 'CONFIG_SCLPCONSOLE', if_true: files('sclpconsole.c', 'sclpconsole-lm.c')) -softmmu_ss.add(when: 'CONFIG_SERIAL', if_true: files('serial.c')) -softmmu_ss.add(when: 'CONFIG_SERIAL_ISA', if_true: files('serial-isa.c')) -softmmu_ss.add(when: 'CONFIG_SERIAL_PCI', if_true: files('serial-pci.c')) -softmmu_ss.add(when: 'CONFIG_SERIAL_PCI_MULTI', if_true: files('serial-pci-multi.c')) -softmmu_ss.add(when: 'CONFIG_SHAKTI_UART', if_true: files('shakti_uart.c')) -softmmu_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-console.c')) -softmmu_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen_console.c')) -softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_uartlite.c')) +system_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_uart.c')) +system_ss.add(when: 'CONFIG_CMSDK_APB_UART', if_true: files('cmsdk-apb-uart.c')) +system_ss.add(when: 'CONFIG_ESCC', if_true: files('escc.c')) +system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_ser.c')) +system_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_apbuart.c')) +system_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_uart.c')) +system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_serial.c')) +system_ss.add(when: 'CONFIG_IPACK', if_true: files('ipoctal232.c')) +system_ss.add(when: 'CONFIG_ISA_BUS', if_true: files('parallel-isa.c')) +system_ss.add(when: 'CONFIG_ISA_DEBUG', if_true: files('debugcon.c')) +system_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_uart.c')) +system_ss.add(when: 'CONFIG_PARALLEL', if_true: files('parallel.c')) +system_ss.add(when: 'CONFIG_PL011', if_true: files('pl011.c')) +system_ss.add(when: 'CONFIG_SCLPCONSOLE', if_true: files('sclpconsole.c', 'sclpconsole-lm.c')) +system_ss.add(when: 'CONFIG_SERIAL', if_true: files('serial.c')) +system_ss.add(when: 'CONFIG_SERIAL_ISA', if_true: files('serial-isa.c')) +system_ss.add(when: 'CONFIG_SERIAL_PCI', if_true: files('serial-pci.c')) +system_ss.add(when: 'CONFIG_SERIAL_PCI_MULTI', if_true: files('serial-pci-multi.c')) +system_ss.add(when: 'CONFIG_SHAKTI_UART', if_true: files('shakti_uart.c')) +system_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-console.c')) +system_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen_console.c')) +system_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_uartlite.c')) -softmmu_ss.add(when: 'CONFIG_AVR_USART', if_true: files('avr_usart.c')) -softmmu_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_uart.c')) -softmmu_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic-uart.c')) -softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_uart.c')) -softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_uart.c')) -softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_aux.c')) -softmmu_ss.add(when: 'CONFIG_RENESAS_SCI', if_true: files('renesas_sci.c')) -softmmu_ss.add(when: 'CONFIG_SIFIVE_UART', if_true: files('sifive_uart.c')) -softmmu_ss.add(when: 'CONFIG_SH_SCI', if_true: files('sh_serial.c')) -softmmu_ss.add(when: 'CONFIG_STM32F2XX_USART', if_true: files('stm32f2xx_usart.c')) -softmmu_ss.add(when: 'CONFIG_MCHP_PFSOC_MMUART', if_true: files('mchp_pfsoc_mmuart.c')) -softmmu_ss.add(when: 'CONFIG_HTIF', if_true: files('riscv_htif.c')) 
-softmmu_ss.add(when: 'CONFIG_GOLDFISH_TTY', if_true: files('goldfish_tty.c')) +system_ss.add(when: 'CONFIG_AVR_USART', if_true: files('avr_usart.c')) +system_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_uart.c')) +system_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic-uart.c')) +system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_uart.c')) +system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_uart.c')) +system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_aux.c')) +system_ss.add(when: 'CONFIG_RENESAS_SCI', if_true: files('renesas_sci.c')) +system_ss.add(when: 'CONFIG_SIFIVE_UART', if_true: files('sifive_uart.c')) +system_ss.add(when: 'CONFIG_SH_SCI', if_true: files('sh_serial.c')) +system_ss.add(when: 'CONFIG_STM32F2XX_USART', if_true: files('stm32f2xx_usart.c')) +system_ss.add(when: 'CONFIG_MCHP_PFSOC_MMUART', if_true: files('mchp_pfsoc_mmuart.c')) +system_ss.add(when: 'CONFIG_HTIF', if_true: files('riscv_htif.c')) +system_ss.add(when: 'CONFIG_GOLDFISH_TTY', if_true: files('goldfish_tty.c')) specific_ss.add(when: 'CONFIG_TERMINAL3270', if_true: files('terminal3270.c')) specific_ss.add(when: 'CONFIG_VIRTIO', if_true: files('virtio-serial-bus.c')) diff --git a/hw/char/omap_uart.c b/hw/char/omap_uart.c index 1c890b9201..6848bddb4e 100644 --- a/hw/char/omap_uart.c +++ b/hw/char/omap_uart.c @@ -175,12 +175,3 @@ struct omap_uart_s *omap2_uart_init(MemoryRegion *sysmem, return s; } - -void omap_uart_attach(struct omap_uart_s *s, Chardev *chr) -{ - /* TODO: Should reuse or destroy current s->serial */ - s->serial = serial_mm_init(get_system_memory(), s->base, 2, s->irq, - omap_clk_getrate(s->fclk) / 16, - chr ?: qemu_chr_new("null", "null", NULL), - DEVICE_NATIVE_ENDIAN); -} diff --git a/hw/char/parallel-isa.c b/hw/char/parallel-isa.c index 1ccbb96e70..ab0f879998 100644 --- a/hw/char/parallel-isa.c +++ b/hw/char/parallel-isa.c @@ -13,6 +13,7 @@ #include "sysemu/sysemu.h" #include "hw/isa/isa.h" #include "hw/qdev-properties.h" +#include "hw/char/parallel-isa.h" #include "hw/char/parallel.h" #include "qapi/error.h" @@ -21,7 +22,7 @@ static void parallel_init(ISABus *bus, int index, Chardev *chr) DeviceState *dev; ISADevice *isadev; - isadev = isa_new("isa-parallel"); + isadev = isa_new(TYPE_ISA_PARALLEL); dev = DEVICE(isadev); qdev_prop_set_uint32(dev, "index", index); qdev_prop_set_chr(dev, "chardev", chr); diff --git a/hw/char/parallel.c b/hw/char/parallel.c index af551e7864..147c900f0d 100644 --- a/hw/char/parallel.c +++ b/hw/char/parallel.c @@ -27,13 +27,11 @@ #include "qapi/error.h" #include "qemu/module.h" #include "chardev/char-parallel.h" -#include "chardev/char-fe.h" #include "hw/acpi/acpi_aml_interface.h" -#include "hw/irq.h" -#include "hw/isa/isa.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "migration/vmstate.h" +#include "hw/char/parallel-isa.h" #include "hw/char/parallel.h" #include "sysemu/reset.h" #include "sysemu/sysemu.h" @@ -76,35 +74,6 @@ #define PARA_CTR_SIGNAL (PARA_CTR_SELECT|PARA_CTR_INIT|PARA_CTR_AUTOLF|PARA_CTR_STROBE) -typedef struct ParallelState { - MemoryRegion iomem; - uint8_t dataw; - uint8_t datar; - uint8_t status; - uint8_t control; - qemu_irq irq; - int irq_pending; - CharBackend chr; - int hw_driver; - int epp_timeout; - uint32_t last_read_offset; /* For debugging */ - /* Memory-mapped interface */ - int it_shift; - PortioList portio_list; -} ParallelState; - -#define TYPE_ISA_PARALLEL "isa-parallel" -OBJECT_DECLARE_SIMPLE_TYPE(ISAParallelState, ISA_PARALLEL) - -struct ISAParallelState { - 
ISADevice parent_obj; - - uint32_t index; - uint32_t iobase; - uint32_t isairq; - ParallelState state; -}; - static void parallel_update_irq(ParallelState *s) { if (s->irq_pending) diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c index 098de50e35..37d3ccc76b 100644 --- a/hw/char/riscv_htif.c +++ b/hw/char/riscv_htif.c @@ -29,6 +29,8 @@ #include "chardev/char-fe.h" #include "qemu/timer.h" #include "qemu/error-report.h" +#include "exec/address-spaces.h" +#include "sysemu/dma.h" #define RISCV_DEBUG_HTIF 0 #define HTIF_DEBUG(fmt, ...) \ @@ -51,7 +53,10 @@ /* PK system call number */ #define PK_SYS_WRITE 64 -static uint64_t fromhost_addr, tohost_addr; +const char *sig_file; +uint8_t line_size = 16; + +static uint64_t fromhost_addr, tohost_addr, begin_sig_addr, end_sig_addr; void htif_symbol_callback(const char *st_name, int st_info, uint64_t st_value, uint64_t st_size) @@ -68,6 +73,10 @@ void htif_symbol_callback(const char *st_name, int st_info, uint64_t st_value, error_report("HTIF tohost must be 8 bytes"); exit(1); } + } else if (strcmp("begin_signature", st_name) == 0) { + begin_sig_addr = st_value; + } else if (strcmp("end_signature", st_name) == 0) { + end_sig_addr = st_value; } } @@ -163,6 +172,39 @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written) if (payload & 0x1) { /* exit code */ int exit_code = payload >> 1; + + /* + * Dump signature data if sig_file is specified and + * begin/end_signature symbols exist. + */ + if (sig_file && begin_sig_addr && end_sig_addr) { + uint64_t sig_len = end_sig_addr - begin_sig_addr; + char *sig_data = g_malloc(sig_len); + dma_memory_read(&address_space_memory, begin_sig_addr, + sig_data, sig_len, MEMTXATTRS_UNSPECIFIED); + FILE *signature = fopen(sig_file, "w"); + if (signature == NULL) { + error_report("Unable to open %s with error %s", + sig_file, strerror(errno)); + exit(1); + } + + for (int i = 0; i < sig_len; i += line_size) { + for (int j = line_size; j > 0; j--) { + if (i + j <= sig_len) { + fprintf(signature, "%02x", + sig_data[i + j - 1] & 0xff); + } else { + fprintf(signature, "%02x", 0); + } + } + fprintf(signature, "\n"); + } + + fclose(signature); + g_free(sig_data); + } + exit(exit_code); } else { uint64_t syscall[8]; diff --git a/hw/char/serial-pci-multi.c b/hw/char/serial-pci-multi.c index f18b8dcce5..5d65c534cb 100644 --- a/hw/char/serial-pci-multi.c +++ b/hw/char/serial-pci-multi.c @@ -25,7 +25,7 @@ * THE SOFTWARE. */ -/* see docs/specs/pci-serial.txt */ +/* see docs/specs/pci-serial.rst */ #include "qemu/osdep.h" #include "qapi/error.h" diff --git a/hw/char/serial-pci.c b/hw/char/serial-pci.c index 801b769aba..087da3059a 100644 --- a/hw/char/serial-pci.c +++ b/hw/char/serial-pci.c @@ -23,7 +23,7 @@ * THE SOFTWARE. 
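For reference, the riscv_htif signature dump above emits line_size bytes per output line with the highest-addressed byte of each chunk first, zero-padding past the end of the signature region. A standalone sketch of the same formatting with made-up data:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint8_t sig[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
    const int line_size = 4;
    const int len = sizeof(sig);

    for (int i = 0; i < len; i += line_size) {
        /* Highest-addressed byte of the chunk first; zero-pad past the end. */
        for (int j = line_size; j > 0; j--) {
            printf("%02x", i + j <= len ? sig[i + j - 1] : 0);
        }
        printf("\n");        /* prints "44332211" then "00006655" */
    }
    return 0;
}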
*/ -/* see docs/specs/pci-serial.txt */ +/* see docs/specs/pci-serial.rst */ #include "qemu/osdep.h" #include "qapi/error.h" diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c index 7d4601cb5d..dd619f0731 100644 --- a/hw/char/virtio-serial-bus.c +++ b/hw/char/virtio-serial-bus.c @@ -985,7 +985,8 @@ static void virtser_port_device_realize(DeviceState *dev, Error **errp) return; } - port->bh = qemu_bh_new(flush_queued_data_bh, port); + port->bh = qemu_bh_new_guarded(flush_queued_data_bh, port, + &dev->mem_reentrancy_guard); port->elem = NULL; } diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c index 5ccc3837b6..ced66c2b34 100644 --- a/hw/core/cpu-common.c +++ b/hw/core/cpu-common.c @@ -32,7 +32,7 @@ #include "sysemu/tcg.h" #include "hw/boards.h" #include "hw/qdev-properties.h" -#include "trace/trace-root.h" +#include "trace.h" #include "qemu/plugin.h" CPUState *cpu_by_arch_id(int64_t id) @@ -113,7 +113,7 @@ void cpu_reset(CPUState *cpu) { device_cold_reset(DEVICE(cpu)); - trace_guest_cpu_reset(cpu); + trace_cpu_reset(cpu->cpu_index); } static void cpu_common_reset_hold(Object *obj) @@ -196,8 +196,7 @@ static void cpu_common_realizefn(DeviceState *dev, Error **errp) * no need to check the ignore_memory_transaction_failures board flag. */ if (object_dynamic_cast(machine, TYPE_MACHINE)) { - ObjectClass *oc = object_get_class(machine); - MachineClass *mc = MACHINE_CLASS(oc); + MachineClass *mc = MACHINE_GET_CLASS(machine); if (mc) { cpu->ignore_memory_transaction_failures = @@ -211,7 +210,6 @@ static void cpu_common_realizefn(DeviceState *dev, Error **errp) } /* NOTE: latest generic point where the cpu is fully realized */ - trace_init_vcpu(cpu); } static void cpu_common_unrealizefn(DeviceState *dev) @@ -219,7 +217,6 @@ static void cpu_common_unrealizefn(DeviceState *dev) CPUState *cpu = CPU(dev); /* NOTE: latest generic point before the cpu is fully unrealized */ - trace_fini_vcpu(cpu); cpu_exec_unrealizefn(cpu); } diff --git a/hw/core/machine-qmp-cmds.c b/hw/core/machine-qmp-cmds.c index b98ff15089..3860a50c3b 100644 --- a/hw/core/machine-qmp-cmds.c +++ b/hw/core/machine-qmp-cmds.c @@ -28,18 +28,6 @@ #include "sysemu/runstate.h" #include "sysemu/sysemu.h" -static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu) -{ -#ifdef TARGET_S390X - S390CPU *s390_cpu = S390_CPU(cpu); - CPUS390XState *env = &s390_cpu->env; - - info->cpu_state = env->cpu_state; -#else - abort(); -#endif -} - /* * fast means: we NEVER interrupt vCPU threads to retrieve * information from KVM. 
@@ -49,7 +37,7 @@ CpuInfoFastList *qmp_query_cpus_fast(Error **errp) MachineState *ms = MACHINE(qdev_get_machine()); MachineClass *mc = MACHINE_GET_CLASS(ms); CpuInfoFastList *head = NULL, **tail = &head; - SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME, + SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, target_name(), -1, &error_abort); CPUState *cpu; @@ -68,8 +56,8 @@ CpuInfoFastList *qmp_query_cpus_fast(Error **errp) } value->target = target; - if (target == SYS_EMU_TARGET_S390X) { - cpustate_to_cpuinfo_s390(&value->u.s390x, cpu); + if (cpu->cc->query_cpu_fast) { + cpu->cc->query_cpu_fast(cpu, value); } QAPI_LIST_APPEND(tail, value); @@ -129,7 +117,7 @@ TargetInfo *qmp_query_target(Error **errp) { TargetInfo *info = g_malloc0(sizeof(*info)); - info->arch = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME, -1, + info->arch = qapi_enum_parse(&SysEmuTarget_lookup, target_name(), -1, &error_abort); return info; diff --git a/hw/core/machine.c b/hw/core/machine.c index 2ce97a5d3b..1000406211 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -39,13 +39,16 @@ #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-pci.h" -GlobalProperty hw_compat_8_0[] = {}; +GlobalProperty hw_compat_8_0[] = { + { "migration", "multifd-flush-after-each-section", "on"}, +}; const size_t hw_compat_8_0_len = G_N_ELEMENTS(hw_compat_8_0); GlobalProperty hw_compat_7_2[] = { { "e1000e", "migrate-timadj", "off" }, { "virtio-mem", "x-early-migration", "false" }, { "migration", "x-preempt-pre-7-2", "true" }, + { TYPE_PCI_DEVICE, "x-pcie-err-unc-mask", "off" }, }; const size_t hw_compat_7_2_len = G_N_ELEMENTS(hw_compat_7_2); @@ -1335,6 +1338,14 @@ void machine_run_board_init(MachineState *machine, const char *mem_path, Error * } } else if (machine_class->default_ram_id && machine->ram_size && numa_uses_legacy_mem()) { + if (object_property_find(object_get_objects_root(), + machine_class->default_ram_id)) { + error_setg(errp, "object name '%s' is reserved for the default" + " RAM backend, it can't be used for any other purposes." 
+ " Change the object's 'id' to something else", + machine_class->default_ram_id); + return; + } if (!create_default_memdev(current_machine, mem_path, errp)) { return; } diff --git a/hw/core/meson.build b/hw/core/meson.build index ae977c9396..67dad04de5 100644 --- a/hw/core/meson.build +++ b/hw/core/meson.build @@ -24,23 +24,24 @@ endif common_ss.add(files('cpu-common.c')) common_ss.add(files('machine-smp.c')) -softmmu_ss.add(when: 'CONFIG_FITLOADER', if_true: files('loader-fit.c')) -softmmu_ss.add(when: 'CONFIG_GENERIC_LOADER', if_true: files('generic-loader.c')) -softmmu_ss.add(when: ['CONFIG_GUEST_LOADER', fdt], if_true: files('guest-loader.c')) -softmmu_ss.add(when: 'CONFIG_OR_IRQ', if_true: files('or-irq.c')) -softmmu_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('platform-bus.c')) -softmmu_ss.add(when: 'CONFIG_PTIMER', if_true: files('ptimer.c')) -softmmu_ss.add(when: 'CONFIG_REGISTER', if_true: files('register.c')) -softmmu_ss.add(when: 'CONFIG_SPLIT_IRQ', if_true: files('split-irq.c')) -softmmu_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('stream.c')) -softmmu_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('sysbus-fdt.c')) +system_ss.add(when: 'CONFIG_FITLOADER', if_true: files('loader-fit.c')) +system_ss.add(when: 'CONFIG_GENERIC_LOADER', if_true: files('generic-loader.c')) +system_ss.add(when: ['CONFIG_GUEST_LOADER', fdt], if_true: files('guest-loader.c')) +system_ss.add(when: 'CONFIG_OR_IRQ', if_true: files('or-irq.c')) +system_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('platform-bus.c')) +system_ss.add(when: 'CONFIG_PTIMER', if_true: files('ptimer.c')) +system_ss.add(when: 'CONFIG_REGISTER', if_true: files('register.c')) +system_ss.add(when: 'CONFIG_SPLIT_IRQ', if_true: files('split-irq.c')) +system_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('stream.c')) +system_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('sysbus-fdt.c')) -softmmu_ss.add(files( +system_ss.add(files( 'cpu-sysemu.c', 'fw-path-provider.c', 'gpio.c', 'loader.c', 'machine-hmp-cmds.c', + 'machine-qmp-cmds.c', 'machine.c', 'nmi.c', 'null-machine.c', @@ -51,7 +52,3 @@ softmmu_ss.add(files( 'vm-change-state-handler.c', 'clock-vmstate.c', )) - -specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: files( - 'machine-qmp-cmds.c', -)) diff --git a/hw/core/numa.c b/hw/core/numa.c index d8d36b16d8..f08956ddb0 100644 --- a/hw/core/numa.c +++ b/hw/core/numa.c @@ -531,10 +531,17 @@ static int parse_numa(void *opaque, QemuOpts *opts, Error **errp) /* Fix up legacy suffix-less format */ if ((object->type == NUMA_OPTIONS_TYPE_NODE) && object->u.node.has_mem) { const char *mem_str = qemu_opt_get(opts, "mem"); - qemu_strtosz_MiB(mem_str, NULL, &object->u.node.mem); + int ret = qemu_strtosz_MiB(mem_str, NULL, &object->u.node.mem); + + if (ret < 0) { + error_setg_errno(&err, -ret, "could not parse memory size '%s'", + mem_str); + } } - set_numa_options(ms, object, &err); + if (!err) { + set_numa_options(ms, object, &err); + } qapi_free_NumaOptions(object); if (err) { diff --git a/hw/core/trace-events b/hw/core/trace-events index 56da55bd71..2cf085ac66 100644 --- a/hw/core/trace-events +++ b/hw/core/trace-events @@ -29,3 +29,6 @@ clock_set(const char *clk, uint64_t old, uint64_t new) "'%s', %"PRIu64"Hz->%"PRI clock_propagate(const char *clk) "'%s'" clock_update(const char *clk, const char *src, uint64_t hz, int cb) "'%s', src='%s', val=%"PRIu64"Hz cb=%d" clock_set_mul_div(const char *clk, uint32_t oldmul, uint32_t mul, uint32_t olddiv, uint32_t div) "'%s', mul: %u -> %u, div: %u -> %u" + +# cpu-common.c 
+cpu_reset(int cpu_index) "%d" diff --git a/hw/cpu/meson.build b/hw/cpu/meson.build index e37490074f..6d319947ca 100644 --- a/hw/cpu/meson.build +++ b/hw/cpu/meson.build @@ -1,6 +1,6 @@ -softmmu_ss.add(files('core.c', 'cluster.c')) +system_ss.add(files('core.c', 'cluster.c')) -softmmu_ss.add(when: 'CONFIG_ARM11MPCORE', if_true: files('arm11mpcore.c')) -softmmu_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview_mpcore.c')) +system_ss.add(when: 'CONFIG_ARM11MPCORE', if_true: files('arm11mpcore.c')) +system_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview_mpcore.c')) specific_ss.add(when: 'CONFIG_A9MPCORE', if_true: files('a9mpcore.c')) specific_ss.add(when: 'CONFIG_A15MPCORE', if_true: files('a15mpcore.c')) diff --git a/hw/cxl/cxl-cdat.c b/hw/cxl/cxl-cdat.c index 137abd0992..d246d6885b 100644 --- a/hw/cxl/cxl-cdat.c +++ b/hw/cxl/cxl-cdat.c @@ -108,31 +108,21 @@ static void ct3_build_cdat(CDATObject *cdat, Error **errp) static void ct3_load_cdat(CDATObject *cdat, Error **errp) { g_autofree CDATEntry *cdat_st = NULL; + g_autofree char *buf = NULL; uint8_t sum = 0; int num_ent; - int i = 0, ent = 1, file_size = 0; + int i = 0, ent = 1; + gsize file_size = 0; CDATSubHeader *hdr; - FILE *fp = NULL; + GError *error = NULL; /* Read CDAT file and create its cache */ - fp = fopen(cdat->filename, "r"); - if (!fp) { - error_setg(errp, "CDAT: Unable to open file"); + if (!g_file_get_contents(cdat->filename, (gchar **)&buf, + &file_size, &error)) { + error_setg(errp, "CDAT: File read failed: %s", error->message); + g_error_free(error); return; } - - fseek(fp, 0, SEEK_END); - file_size = ftell(fp); - fseek(fp, 0, SEEK_SET); - cdat->buf = g_malloc0(file_size); - - if (fread(cdat->buf, file_size, 1, fp) == 0) { - error_setg(errp, "CDAT: File read failed"); - return; - } - - fclose(fp); - if (file_size < sizeof(CDATTableHeader)) { error_setg(errp, "CDAT: File too short"); return; @@ -140,9 +130,17 @@ static void ct3_load_cdat(CDATObject *cdat, Error **errp) i = sizeof(CDATTableHeader); num_ent = 1; while (i < file_size) { - hdr = (CDATSubHeader *)(cdat->buf + i); + hdr = (CDATSubHeader *)(buf + i); + if (i + sizeof(CDATSubHeader) > file_size) { + error_setg(errp, "CDAT: Truncated table"); + return; + } cdat_len_check(hdr, errp); i += hdr->length; + if (i > file_size) { + error_setg(errp, "CDAT: Truncated table"); + return; + } num_ent++; } if (i != file_size) { @@ -150,33 +148,26 @@ static void ct3_load_cdat(CDATObject *cdat, Error **errp) return; } - cdat_st = g_malloc0(sizeof(*cdat_st) * num_ent); - if (!cdat_st) { - error_setg(errp, "CDAT: Failed to allocate entry array"); - return; - } + cdat_st = g_new0(CDATEntry, num_ent); /* Set CDAT header, Entry = 0 */ - cdat_st[0].base = cdat->buf; + cdat_st[0].base = buf; cdat_st[0].length = sizeof(CDATTableHeader); i = 0; while (i < cdat_st[0].length) { - sum += cdat->buf[i++]; + sum += buf[i++]; } /* Read CDAT structures */ while (i < file_size) { - hdr = (CDATSubHeader *)(cdat->buf + i); - cdat_len_check(hdr, errp); - + hdr = (CDATSubHeader *)(buf + i); cdat_st[ent].base = hdr; cdat_st[ent].length = hdr->length; - while (cdat->buf + i < - (uint8_t *)cdat_st[ent].base + cdat_st[ent].length) { + while (buf + i < (char *)cdat_st[ent].base + cdat_st[ent].length) { assert(i < file_size); - sum += cdat->buf[i++]; + sum += buf[i++]; } ent++; @@ -187,6 +178,7 @@ static void ct3_load_cdat(CDATObject *cdat, Error **errp) } cdat->entry_len = num_ent; cdat->entry = g_steal_pointer(&cdat_st); + cdat->buf = g_steal_pointer(&buf); } void 
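The ct3_load_cdat() rewrite above replaces the hand-rolled fopen/fseek/fread sequence with GLib's g_file_get_contents(), which sizes the buffer and reports errors in one call. The general shape (read_whole_file is illustrative):

#include "qemu/osdep.h"
#include "qapi/error.h"

/* Reads a whole file into a heap buffer; the caller g_free()s *buf. */
static bool read_whole_file(const char *path, char **buf, gsize *len,
                            Error **errp)
{
    GError *gerr = NULL;

    if (!g_file_get_contents(path, buf, len, &gerr)) {
        error_setg(errp, "failed to read %s: %s", path, gerr->message);
        g_error_free(gerr);
        return false;
    }
    return true;
}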
cxl_doe_cdat_init(CXLComponentState *cxl_cstate, Error **errp) @@ -218,7 +210,5 @@ void cxl_doe_cdat_release(CXLComponentState *cxl_cstate) cdat->free_cdat_table(cdat->built_buf, cdat->built_buf_len, cdat->private); } - if (cdat->buf) { - free(cdat->buf); - } + g_free(cdat->buf); } diff --git a/hw/cxl/cxl-component-utils.c b/hw/cxl/cxl-component-utils.c index b665d4f565..378f1082ce 100644 --- a/hw/cxl/cxl-component-utils.c +++ b/hw/cxl/cxl-component-utils.c @@ -38,23 +38,25 @@ static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset, ComponentRegisters *cregs = &cxl_cstate->crb; uint32_t *cache_mem = cregs->cache_mem_registers; bool should_commit = false; + bool should_uncommit = false; switch (offset) { case A_CXL_HDM_DECODER0_CTRL: should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT); + should_uncommit = !should_commit; break; default: break; } - memory_region_transaction_begin(); - stl_le_p((uint8_t *)cache_mem + offset, value); if (should_commit) { - ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0); - ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0); - ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1); + value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0); + value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 1); + } else if (should_uncommit) { + value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0); + value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 0); } - memory_region_transaction_commit(); + stl_le_p((uint8_t *)cache_mem + offset, value); } static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value, diff --git a/hw/cxl/cxl-device-utils.c b/hw/cxl/cxl-device-utils.c index 4c5e88aaf5..86e1cea8ce 100644 --- a/hw/cxl/cxl-device-utils.c +++ b/hw/cxl/cxl-device-utils.c @@ -269,3 +269,18 @@ void cxl_device_register_init_common(CXLDeviceState *cxl_dstate) cxl_initialize_mailbox(cxl_dstate); } + +uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate) +{ + uint64_t time, delta; + uint64_t final_time = 0; + + if (cxl_dstate->timestamp.set) { + /* Find the delta from the last time the host set the time. */ + time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + delta = time - cxl_dstate->timestamp.last_set; + final_time = cxl_dstate->timestamp.host_set + delta; + } + + return final_time; +} diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c index 6e923ceeaf..034c7805b3 100644 --- a/hw/cxl/cxl-host.c +++ b/hw/cxl/cxl-host.c @@ -84,7 +84,7 @@ void cxl_fmws_link_targets(CXLState *cxl_state, Error **errp) bool ambig; o = object_resolve_path_type(fw->targets[i], - TYPE_PXB_CXL_DEVICE, + TYPE_PXB_CXL_DEV, &ambig); if (!o) { error_setg(errp, "Could not resolve CXLFM target %s", @@ -141,7 +141,7 @@ static PCIDevice *cxl_cfmws_find_device(CXLFixedWindow *fw, hwaddr addr) addr += fw->base; rb_index = (addr / cxl_decode_ig(fw->enc_int_gran)) % fw->num_targets; - hb = PCI_HOST_BRIDGE(fw->target_hbs[rb_index]->cxl.cxl_host_bridge); + hb = PCI_HOST_BRIDGE(fw->target_hbs[rb_index]->cxl_host_bridge); if (!hb || !hb->bus || !pci_bus_is_cxl(hb->bus)) { return NULL; } diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index 206e04a4b8..702e16ca20 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -23,7 +23,7 @@ * FOO = 0x7f, * #define BAR 0 * 2. Implement the handler - * static ret_code cmd_foo_bar(struct cxl_cmd *cmd, + * static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd, * CXLDeviceState *cxl_dstate, uint16_t *len) * 3. 
Add the command to the cxl_cmd_set[][] * [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y }, @@ -90,10 +90,10 @@ typedef enum { CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15, CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16, CXL_MBOX_MAX = 0x17 -} ret_code; +} CXLRetCode; struct cxl_cmd; -typedef ret_code (*opcode_handler)(struct cxl_cmd *cmd, +typedef CXLRetCode (*opcode_handler)(struct cxl_cmd *cmd, CXLDeviceState *cxl_dstate, uint16_t *len); struct cxl_cmd { const char *name; @@ -105,16 +105,16 @@ struct cxl_cmd { #define DEFINE_MAILBOX_HANDLER_ZEROED(name, size) \ uint16_t __zero##name = size; \ - static ret_code cmd_##name(struct cxl_cmd *cmd, \ - CXLDeviceState *cxl_dstate, uint16_t *len) \ + static CXLRetCode cmd_##name(struct cxl_cmd *cmd, \ + CXLDeviceState *cxl_dstate, uint16_t *len) \ { \ *len = __zero##name; \ memset(cmd->payload, 0, *len); \ return CXL_MBOX_SUCCESS; \ } #define DEFINE_MAILBOX_HANDLER_NOP(name) \ - static ret_code cmd_##name(struct cxl_cmd *cmd, \ - CXLDeviceState *cxl_dstate, uint16_t *len) \ + static CXLRetCode cmd_##name(struct cxl_cmd *cmd, \ + CXLDeviceState *cxl_dstate, uint16_t *len) \ { \ return CXL_MBOX_SUCCESS; \ } @@ -125,9 +125,9 @@ DEFINE_MAILBOX_HANDLER_ZEROED(events_get_interrupt_policy, 4); DEFINE_MAILBOX_HANDLER_NOP(events_set_interrupt_policy); /* 8.2.9.2.1 */ -static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_firmware_update_get_info(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { uint8_t slots_supported; @@ -141,7 +141,8 @@ static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd, } QEMU_PACKED *fw_info; QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50); - if (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER) { + if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) || + (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER)) { return CXL_MBOX_INTERNAL_ERROR; } @@ -158,21 +159,12 @@ static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd, } /* 8.2.9.3.1 */ -static ret_code cmd_timestamp_get(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_timestamp_get(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { - uint64_t time, delta; - uint64_t final_time = 0; - - if (cxl_dstate->timestamp.set) { - /* First find the delta from the last time the host set the time. 
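The block removed above, together with cxl_device_get_timestamp() introduced earlier in this diff, implements a record-at-set, extrapolate-at-get scheme: TIMESTAMP_SET stores the host-provided value plus the current QEMU_CLOCK_VIRTUAL reading, and TIMESTAMP_GET reports the stored value advanced by the elapsed virtual time. Roughly, with value_from_host standing in for the decoded payload:

    /* on TIMESTAMP_SET */
    cxl_dstate->timestamp.host_set = value_from_host;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    cxl_dstate->timestamp.set = true;

    /* on TIMESTAMP_GET (what cxl_device_get_timestamp() computes) */
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    reported = cxl_dstate->timestamp.host_set +
               (now - cxl_dstate->timestamp.last_set);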
*/ - time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - delta = time - cxl_dstate->timestamp.last_set; - final_time = cxl_dstate->timestamp.host_set + delta; - } + uint64_t final_time = cxl_device_get_timestamp(cxl_dstate); - /* Then adjust the actual time */ stq_le_p(cmd->payload, final_time); *len = 8; @@ -180,7 +172,7 @@ static ret_code cmd_timestamp_get(struct cxl_cmd *cmd, } /* 8.2.9.3.2 */ -static ret_code cmd_timestamp_set(struct cxl_cmd *cmd, +static CXLRetCode cmd_timestamp_set(struct cxl_cmd *cmd, CXLDeviceState *cxl_dstate, uint16_t *len) { @@ -200,9 +192,9 @@ static const QemuUUID cel_uuid = { }; /* 8.2.9.4.1 */ -static ret_code cmd_logs_get_supported(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_logs_get_supported(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { uint16_t entries; @@ -223,9 +215,9 @@ static ret_code cmd_logs_get_supported(struct cxl_cmd *cmd, } /* 8.2.9.4.2 */ -static ret_code cmd_logs_get_log(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_logs_get_log(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { QemuUUID uuid; @@ -264,9 +256,9 @@ static ret_code cmd_logs_get_log(struct cxl_cmd *cmd, } /* 8.2.9.5.1.1 */ -static ret_code cmd_identify_memory_device(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_identify_memory_device(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { char fw_revision[0x10]; @@ -288,29 +280,29 @@ static ret_code cmd_identify_memory_device(struct cxl_cmd *cmd, CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); - uint64_t size = cxl_dstate->pmem_size; - if (!QEMU_IS_ALIGNED(size, CXL_CAPACITY_MULTIPLIER)) { + if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || + (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) { return CXL_MBOX_INTERNAL_ERROR; } id = (void *)cmd->payload; memset(id, 0, sizeof(*id)); - /* PMEM only */ snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0); - id->total_capacity = size / CXL_CAPACITY_MULTIPLIER; - id->persistent_capacity = size / CXL_CAPACITY_MULTIPLIER; - id->lsa_size = cvc->get_lsa_size(ct3d); + stq_le_p(&id->total_capacity, cxl_dstate->mem_size / CXL_CAPACITY_MULTIPLIER); + stq_le_p(&id->persistent_capacity, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); + stq_le_p(&id->volatile_capacity, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); + stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d)); *len = sizeof(*id); return CXL_MBOX_SUCCESS; } -static ret_code cmd_ccls_get_partition_info(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_ccls_get_partition_info(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { uint64_t active_vmem; @@ -319,25 +311,28 @@ static ret_code cmd_ccls_get_partition_info(struct cxl_cmd *cmd, uint64_t next_pmem; } QEMU_PACKED *part_info = (void *)cmd->payload; QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20); - uint64_t size = cxl_dstate->pmem_size; - if (!QEMU_IS_ALIGNED(size, CXL_CAPACITY_MULTIPLIER)) { + if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || + (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) { return CXL_MBOX_INTERNAL_ERROR; } - /* PMEM only */ - part_info->active_vmem = 0; - part_info->next_vmem = 0; - part_info->active_pmem = size / 
CXL_CAPACITY_MULTIPLIER; - part_info->next_pmem = 0; + stq_le_p(&part_info->active_vmem, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); + /* + * When both next_vmem and next_pmem are 0, there is no pending change to + * partitioning. + */ + stq_le_p(&part_info->next_vmem, 0); + stq_le_p(&part_info->active_pmem, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); + stq_le_p(&part_info->next_pmem, 0); *len = sizeof(*part_info); return CXL_MBOX_SUCCESS; } -static ret_code cmd_ccls_get_lsa(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_ccls_get_lsa(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { uint32_t offset; @@ -360,9 +355,9 @@ static ret_code cmd_ccls_get_lsa(struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -static ret_code cmd_ccls_set_lsa(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_ccls_set_lsa(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct set_lsa_pl { uint32_t offset; diff --git a/hw/cxl/meson.build b/hw/cxl/meson.build index cfa95ffd40..1f9aa2ea1f 100644 --- a/hw/cxl/meson.build +++ b/hw/cxl/meson.build @@ -1,4 +1,4 @@ -softmmu_ss.add(when: 'CONFIG_CXL', +system_ss.add(when: 'CONFIG_CXL', if_true: files( 'cxl-component-utils.c', 'cxl-device-utils.c', @@ -10,4 +10,4 @@ softmmu_ss.add(when: 'CONFIG_CXL', 'cxl-host-stubs.c', )) -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('cxl-host-stubs.c')) +system_ss.add(when: 'CONFIG_ALL', if_true: files('cxl-host-stubs.c')) diff --git a/hw/display/meson.build b/hw/display/meson.build index 17165bd536..413ba4ab24 100644 --- a/hw/display/meson.build +++ b/hw/display/meson.build @@ -1,49 +1,49 @@ hw_display_modules = {} -softmmu_ss.add(when: 'CONFIG_DDC', if_true: files('i2c-ddc.c')) -softmmu_ss.add(when: 'CONFIG_EDID', if_true: files('edid-generate.c', 'edid-region.c')) - -softmmu_ss.add(when: 'CONFIG_FW_CFG_DMA', if_true: files('ramfb.c')) -softmmu_ss.add(when: 'CONFIG_FW_CFG_DMA', if_true: files('ramfb-standalone.c')) - -softmmu_ss.add(when: 'CONFIG_VGA_CIRRUS', if_true: files('cirrus_vga.c')) -softmmu_ss.add(when: ['CONFIG_VGA_CIRRUS', 'CONFIG_VGA_ISA'], if_true: files('cirrus_vga_isa.c')) -softmmu_ss.add(when: 'CONFIG_G364FB', if_true: files('g364fb.c')) -softmmu_ss.add(when: 'CONFIG_JAZZ_LED', if_true: files('jazz_led.c')) -softmmu_ss.add(when: 'CONFIG_PL110', if_true: files('pl110.c')) -softmmu_ss.add(when: 'CONFIG_SII9022', if_true: files('sii9022.c')) -softmmu_ss.add(when: 'CONFIG_SSD0303', if_true: files('ssd0303.c')) -softmmu_ss.add(when: 'CONFIG_SSD0323', if_true: files('ssd0323.c')) -softmmu_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xenfb.c')) - -softmmu_ss.add(when: 'CONFIG_VGA_PCI', if_true: files('vga-pci.c')) -softmmu_ss.add(when: 'CONFIG_VGA_ISA', if_true: files('vga-isa.c')) -softmmu_ss.add(when: 'CONFIG_VGA_MMIO', if_true: files('vga-mmio.c')) -softmmu_ss.add(when: 'CONFIG_VMWARE_VGA', if_true: files('vmware_vga.c')) -softmmu_ss.add(when: 'CONFIG_BOCHS_DISPLAY', if_true: files('bochs-display.c')) - -softmmu_ss.add(when: 'CONFIG_BLIZZARD', if_true: files('blizzard.c')) -softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_fimd.c')) -softmmu_ss.add(when: 'CONFIG_FRAMEBUFFER', if_true: files('framebuffer.c')) -softmmu_ss.add(when: 'CONFIG_ZAURUS', if_true: files('tc6393xb.c')) - -softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_dss.c')) -softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_lcd.c')) -softmmu_ss.add(when: 'CONFIG_RASPI', if_true: 
files('bcm2835_fb.c')) -softmmu_ss.add(when: 'CONFIG_SM501', if_true: files('sm501.c')) -softmmu_ss.add(when: 'CONFIG_TCX', if_true: files('tcx.c')) -softmmu_ss.add(when: 'CONFIG_CG3', if_true: files('cg3.c')) -softmmu_ss.add(when: 'CONFIG_MACFB', if_true: files('macfb.c')) -softmmu_ss.add(when: 'CONFIG_NEXTCUBE', if_true: files('next-fb.c')) - -softmmu_ss.add(when: 'CONFIG_VGA', if_true: files('vga.c')) +system_ss.add(when: 'CONFIG_DDC', if_true: files('i2c-ddc.c')) +system_ss.add(when: 'CONFIG_EDID', if_true: files('edid-generate.c', 'edid-region.c')) + +system_ss.add(when: 'CONFIG_FW_CFG_DMA', if_true: files('ramfb.c')) +system_ss.add(when: 'CONFIG_FW_CFG_DMA', if_true: files('ramfb-standalone.c')) + +system_ss.add(when: 'CONFIG_VGA_CIRRUS', if_true: files('cirrus_vga.c')) +system_ss.add(when: ['CONFIG_VGA_CIRRUS', 'CONFIG_VGA_ISA'], if_true: files('cirrus_vga_isa.c')) +system_ss.add(when: 'CONFIG_G364FB', if_true: files('g364fb.c')) +system_ss.add(when: 'CONFIG_JAZZ_LED', if_true: files('jazz_led.c')) +system_ss.add(when: 'CONFIG_PL110', if_true: files('pl110.c')) +system_ss.add(when: 'CONFIG_SII9022', if_true: files('sii9022.c')) +system_ss.add(when: 'CONFIG_SSD0303', if_true: files('ssd0303.c')) +system_ss.add(when: 'CONFIG_SSD0323', if_true: files('ssd0323.c')) +system_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xenfb.c')) + +system_ss.add(when: 'CONFIG_VGA_PCI', if_true: files('vga-pci.c')) +system_ss.add(when: 'CONFIG_VGA_ISA', if_true: files('vga-isa.c')) +system_ss.add(when: 'CONFIG_VGA_MMIO', if_true: files('vga-mmio.c')) +system_ss.add(when: 'CONFIG_VMWARE_VGA', if_true: files('vmware_vga.c')) +system_ss.add(when: 'CONFIG_BOCHS_DISPLAY', if_true: files('bochs-display.c')) + +system_ss.add(when: 'CONFIG_BLIZZARD', if_true: files('blizzard.c')) +system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_fimd.c')) +system_ss.add(when: 'CONFIG_FRAMEBUFFER', if_true: files('framebuffer.c')) +system_ss.add(when: 'CONFIG_ZAURUS', if_true: files('tc6393xb.c')) + +system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_dss.c')) +system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_lcd.c')) +system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_fb.c')) +system_ss.add(when: 'CONFIG_SM501', if_true: files('sm501.c')) +system_ss.add(when: 'CONFIG_TCX', if_true: files('tcx.c')) +system_ss.add(when: 'CONFIG_CG3', if_true: files('cg3.c')) +system_ss.add(when: 'CONFIG_MACFB', if_true: files('macfb.c')) +system_ss.add(when: 'CONFIG_NEXTCUBE', if_true: files('next-fb.c')) + +system_ss.add(when: 'CONFIG_VGA', if_true: files('vga.c')) if (config_all_devices.has_key('CONFIG_VGA_CIRRUS') or config_all_devices.has_key('CONFIG_VGA_PCI') or config_all_devices.has_key('CONFIG_VMWARE_VGA') or config_all_devices.has_key('CONFIG_ATI_VGA') ) - softmmu_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-vga.c'), + system_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-vga.c'), if_false: files('acpi-vga-stub.c')) endif @@ -56,12 +56,12 @@ if config_all_devices.has_key('CONFIG_QXL') hw_display_modules += {'qxl': qxl_ss} endif -softmmu_ss.add(when: 'CONFIG_DPCD', if_true: files('dpcd.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx_dp.c')) +system_ss.add(when: 'CONFIG_DPCD', if_true: files('dpcd.c')) +system_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx_dp.c')) -softmmu_ss.add(when: 'CONFIG_ARTIST', if_true: files('artist.c')) +system_ss.add(when: 'CONFIG_ARTIST', if_true: files('artist.c')) -softmmu_ss.add(when: [pixman, 'CONFIG_ATI_VGA'], if_true: 
files('ati.c', 'ati_2d.c', 'ati_dbg.c')) +system_ss.add(when: [pixman, 'CONFIG_ATI_VGA'], if_true: files('ati.c', 'ati_2d.c', 'ati_dbg.c')) if config_all_devices.has_key('CONFIG_VIRTIO_GPU') @@ -115,7 +115,7 @@ if config_all_devices.has_key('CONFIG_VIRTIO_VGA') hw_display_modules += {'virtio-vga-gl': virtio_vga_gl_ss} endif -softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_lcdc.c')) +system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_lcdc.c')) -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-vga-stub.c')) +system_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-vga-stub.c')) modules += { 'hw-display': hw_display_modules } diff --git a/hw/display/qxl.c b/hw/display/qxl.c index 80ce1e9a93..f1c0eb7dfc 100644 --- a/hw/display/qxl.c +++ b/hw/display/qxl.c @@ -2201,11 +2201,14 @@ static void qxl_realize_common(PCIQXLDevice *qxl, Error **errp) qemu_add_vm_change_state_handler(qxl_vm_change_state_handler, qxl); - qxl->update_irq = qemu_bh_new(qxl_update_irq_bh, qxl); + qxl->update_irq = qemu_bh_new_guarded(qxl_update_irq_bh, qxl, + &DEVICE(qxl)->mem_reentrancy_guard); qxl_reset_state(qxl); - qxl->update_area_bh = qemu_bh_new(qxl_render_update_area_bh, qxl); - qxl->ssd.cursor_bh = qemu_bh_new(qemu_spice_cursor_refresh_bh, &qxl->ssd); + qxl->update_area_bh = qemu_bh_new_guarded(qxl_render_update_area_bh, qxl, + &DEVICE(qxl)->mem_reentrancy_guard); + qxl->ssd.cursor_bh = qemu_bh_new_guarded(qemu_spice_cursor_refresh_bh, &qxl->ssd, + &DEVICE(qxl)->mem_reentrancy_guard); } static void qxl_realize_primary(PCIDevice *dev, Error **errp) diff --git a/hw/display/sm501.c b/hw/display/sm501.c index dbabbc4339..0eecd00701 100644 --- a/hw/display/sm501.c +++ b/hw/display/sm501.c @@ -901,7 +901,7 @@ static void sm501_2d_operation(SM501State *s) /* fallback when pixman failed or we don't want to call it */ uint8_t *d = s->local_mem + dst_base; unsigned int x, y, i; - for (y = 0; y < height; y++, i += dst_pitch * bypp) { + for (y = 0; y < height; y++) { i = (dst_x + (dst_y + y) * dst_pitch) * bypp; for (x = 0; x < width; x++, i += bypp) { stn_he_p(&d[i], bypp, color); diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c index 71dfd956b8..1386e869e5 100644 --- a/hw/display/vhost-user-gpu.c +++ b/hw/display/vhost-user-gpu.c @@ -364,11 +364,11 @@ vhost_user_gpu_gl_flushed(VirtIOGPUBase *b) VhostUserGPU *g = VHOST_USER_GPU(b); if (g->backend_blocked) { - vhost_user_gpu_unblock(VHOST_USER_GPU(g)); + vhost_user_gpu_unblock(g); g->backend_blocked = false; } - vhost_user_gpu_update_blocked(VHOST_USER_GPU(g), false); + vhost_user_gpu_update_blocked(g, false); } static bool diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 5e15c79b94..66cddd94d9 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -1289,6 +1289,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, /* load & apply scanout state */ vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1); for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { + /* FIXME: should take scanout.r.{x,y} into account */ scanout = &g->parent_obj.scanout[i]; if (!scanout->resource_id) { continue; @@ -1339,8 +1340,10 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) g->ctrl_vq = virtio_get_queue(vdev, 0); g->cursor_vq = virtio_get_queue(vdev, 1); - g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g); - g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g); + g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g, + &qdev->mem_reentrancy_guard); + g->cursor_bh = 
qemu_bh_new_guarded(virtio_gpu_cursor_bh, g, + &qdev->mem_reentrancy_guard); QTAILQ_INIT(&g->reslist); QTAILQ_INIT(&g->cmdq); QTAILQ_INIT(&g->fenceq); diff --git a/hw/dma/meson.build b/hw/dma/meson.build index f3f0661bc3..a96c1be2c8 100644 --- a/hw/dma/meson.build +++ b/hw/dma/meson.build @@ -1,16 +1,16 @@ -softmmu_ss.add(when: 'CONFIG_RC4030', if_true: files('rc4030.c')) -softmmu_ss.add(when: 'CONFIG_PL080', if_true: files('pl080.c')) -softmmu_ss.add(when: 'CONFIG_PL330', if_true: files('pl330.c')) -softmmu_ss.add(when: 'CONFIG_I82374', if_true: files('i82374.c')) -softmmu_ss.add(when: 'CONFIG_I8257', if_true: files('i8257.c')) -softmmu_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('xilinx_axidma.c')) -softmmu_ss.add(when: 'CONFIG_ZYNQ_DEVCFG', if_true: files('xlnx-zynq-devcfg.c')) -softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_dma.c')) -softmmu_ss.add(when: 'CONFIG_STP2000', if_true: files('sparc32_dma.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx_dpdma.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_ZDMA', if_true: files('xlnx-zdma.c')) -softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_dma.c', 'soc_dma.c')) -softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_dma.c')) -softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_dma.c')) -softmmu_ss.add(when: 'CONFIG_SIFIVE_PDMA', if_true: files('sifive_pdma.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_CSU_DMA', if_true: files('xlnx_csu_dma.c')) +system_ss.add(when: 'CONFIG_RC4030', if_true: files('rc4030.c')) +system_ss.add(when: 'CONFIG_PL080', if_true: files('pl080.c')) +system_ss.add(when: 'CONFIG_PL330', if_true: files('pl330.c')) +system_ss.add(when: 'CONFIG_I82374', if_true: files('i82374.c')) +system_ss.add(when: 'CONFIG_I8257', if_true: files('i8257.c')) +system_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('xilinx_axidma.c')) +system_ss.add(when: 'CONFIG_ZYNQ_DEVCFG', if_true: files('xlnx-zynq-devcfg.c')) +system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_dma.c')) +system_ss.add(when: 'CONFIG_STP2000', if_true: files('sparc32_dma.c')) +system_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx_dpdma.c')) +system_ss.add(when: 'CONFIG_XLNX_ZDMA', if_true: files('xlnx-zdma.c')) +system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_dma.c', 'soc_dma.c')) +system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_dma.c')) +system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_dma.c')) +system_ss.add(when: 'CONFIG_SIFIVE_PDMA', if_true: files('sifive_pdma.c')) +system_ss.add(when: 'CONFIG_XLNX_CSU_DMA', if_true: files('xlnx_csu_dma.c')) diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c index 6030c76435..12c90267df 100644 --- a/hw/dma/xilinx_axidma.c +++ b/hw/dma/xilinx_axidma.c @@ -168,6 +168,11 @@ static inline int stream_idle(struct Stream *s) return !!(s->regs[R_DMASR] & DMASR_IDLE); } +static inline int stream_halted(struct Stream *s) +{ + return !!(s->regs[R_DMASR] & DMASR_HALTED); +} + static void stream_reset(struct Stream *s) { s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. 
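The qxl and virtio-gpu hunks above replace qemu_bh_new() with qemu_bh_new_guarded(), attaching each bottom half to the owning device's MemReentrancyGuard as part of QEMU's DMA re-entrancy hardening, so the bottom half is not invoked while the device is already inside one of its own memory handlers. A minimal usage sketch for a hypothetical device (MyDevState/MYDEV are placeholders):

    static void mydev_bh(void *opaque)
    {
        /* deferred work that may touch guest memory goes here */
    }

    static void mydev_realize(DeviceState *dev, Error **errp)
    {
        MyDevState *s = MYDEV(dev);

        /* tie the BH to this device's re-entrancy guard */
        s->bh = qemu_bh_new_guarded(mydev_bh, s,
                                    &DEVICE(s)->mem_reentrancy_guard);
    }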
*/ @@ -269,7 +274,7 @@ static void stream_process_mem2s(struct Stream *s, StreamSink *tx_data_dev, uint64_t addr; bool eop; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { return; } @@ -326,7 +331,7 @@ static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf, unsigned int rxlen; size_t pos = 0; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { return 0; } @@ -407,7 +412,7 @@ xilinx_axidma_data_stream_can_push(StreamSink *obj, XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(obj); struct Stream *s = &ds->dma->streams[1]; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { ds->dma->notify = notify; ds->dma->notify_opaque = notify_opaque; return false; diff --git a/hw/gpio/meson.build b/hw/gpio/meson.build index b726e6d27a..066ea96480 100644 --- a/hw/gpio/meson.build +++ b/hw/gpio/meson.build @@ -1,14 +1,14 @@ -softmmu_ss.add(when: 'CONFIG_GPIO_KEY', if_true: files('gpio_key.c')) -softmmu_ss.add(when: 'CONFIG_GPIO_MPC8XXX', if_true: files('mpc8xxx.c')) -softmmu_ss.add(when: 'CONFIG_GPIO_PWR', if_true: files('gpio_pwr.c')) -softmmu_ss.add(when: 'CONFIG_MAX7310', if_true: files('max7310.c')) -softmmu_ss.add(when: 'CONFIG_PL061', if_true: files('pl061.c')) -softmmu_ss.add(when: 'CONFIG_ZAURUS', if_true: files('zaurus.c')) +system_ss.add(when: 'CONFIG_GPIO_KEY', if_true: files('gpio_key.c')) +system_ss.add(when: 'CONFIG_GPIO_MPC8XXX', if_true: files('mpc8xxx.c')) +system_ss.add(when: 'CONFIG_GPIO_PWR', if_true: files('gpio_pwr.c')) +system_ss.add(when: 'CONFIG_MAX7310', if_true: files('max7310.c')) +system_ss.add(when: 'CONFIG_PL061', if_true: files('pl061.c')) +system_ss.add(when: 'CONFIG_ZAURUS', if_true: files('zaurus.c')) -softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpio.c')) -softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_gpio.c')) -softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_gpio.c')) -softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_gpio.c')) -softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_gpio.c')) -softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_gpio.c')) -softmmu_ss.add(when: 'CONFIG_SIFIVE_GPIO', if_true: files('sifive_gpio.c')) +system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpio.c')) +system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_gpio.c')) +system_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_gpio.c')) +system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_gpio.c')) +system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_gpio.c')) +system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_gpio.c')) +system_ss.add(when: 'CONFIG_SIFIVE_GPIO', if_true: files('sifive_gpio.c')) diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c index 8fea5fa6b8..b00a91ecfe 100644 --- a/hw/hppa/machine.c +++ b/hw/hppa/machine.c @@ -177,6 +177,7 @@ static void machine_hppa_init(MachineState *machine) const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; + MachineClass *mc = MACHINE_GET_CLASS(machine); DeviceState *dev, *dino_dev, *lasi_dev; PCIBus *pci_bus; ISABus *isa_bus; @@ -272,7 +273,7 @@ static void machine_hppa_init(MachineState *machine) for (i = 0; i < nb_nics; i++) { if (!enable_lasi_lan()) { - pci_nic_init_nofail(&nd_table[i], pci_bus, "tulip", NULL); + 
pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); } } @@ -462,6 +463,7 @@ static void hppa_machine_init_class_init(ObjectClass *oc, void *data) mc->default_ram_size = 512 * MiB; mc->default_boot_order = "cd"; mc->default_ram_id = "ram"; + mc->default_nic = "tulip"; nc->nmi_monitor_handler = hppa_nmi; } diff --git a/hw/i2c/i2c_mux_pca954x.c b/hw/i2c/i2c_mux_pca954x.c index 3945de795c..db5db956a6 100644 --- a/hw/i2c/i2c_mux_pca954x.c +++ b/hw/i2c/i2c_mux_pca954x.c @@ -20,6 +20,7 @@ #include "hw/i2c/i2c_mux_pca954x.h" #include "hw/i2c/smbus_slave.h" #include "hw/qdev-core.h" +#include "hw/qdev-properties.h" #include "hw/sysbus.h" #include "qemu/log.h" #include "qemu/module.h" @@ -43,6 +44,8 @@ typedef struct Pca954xState { bool enabled[PCA9548_CHANNEL_COUNT]; I2CBus *bus[PCA9548_CHANNEL_COUNT]; + + char *name; } Pca954xState; /* @@ -181,6 +184,17 @@ static void pca9548_class_init(ObjectClass *klass, void *data) s->nchans = PCA9548_CHANNEL_COUNT; } +static void pca954x_realize(DeviceState *dev, Error **errp) +{ + Pca954xState *s = PCA954X(dev); + DeviceState *d = DEVICE(s); + if (s->name) { + d->id = g_strdup(s->name); + } else { + d->id = g_strdup_printf("pca954x[%x]", s->parent.i2c.address); + } +} + static void pca954x_init(Object *obj) { Pca954xState *s = PCA954X(obj); @@ -197,6 +211,11 @@ static void pca954x_init(Object *obj) } } +static Property pca954x_props[] = { + DEFINE_PROP_STRING("name", Pca954xState, name), + DEFINE_PROP_END_OF_LIST() +}; + static void pca954x_class_init(ObjectClass *klass, void *data) { I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); @@ -209,9 +228,12 @@ static void pca954x_class_init(ObjectClass *klass, void *data) rc->phases.enter = pca954x_enter_reset; dc->desc = "Pca954x i2c-mux"; + dc->realize = pca954x_realize; k->write_data = pca954x_write_data; k->receive_byte = pca954x_read_byte; + + device_class_set_props(dc, pca954x_props); } static const TypeInfo pca954x_info[] = { diff --git a/hw/i2c/meson.build b/hw/i2c/meson.build index 3996564c25..b58bc167db 100644 --- a/hw/i2c/meson.build +++ b/hw/i2c/meson.build @@ -17,4 +17,4 @@ i2c_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_i2c.c')) i2c_ss.add(when: 'CONFIG_PPC4XX', if_true: files('ppc4xx_i2c.c')) i2c_ss.add(when: 'CONFIG_PCA954X', if_true: files('i2c_mux_pca954x.c')) i2c_ss.add(when: 'CONFIG_PMBUS', if_true: files('pmbus_device.c')) -softmmu_ss.add_all(when: 'CONFIG_I2C', if_true: i2c_ss) +system_ss.add_all(when: 'CONFIG_I2C', if_true: i2c_ss) diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig index d40802d83f..9051083c1e 100644 --- a/hw/i386/Kconfig +++ b/hw/i386/Kconfig @@ -80,10 +80,10 @@ config I440FX config ISAPC bool + imply VGA_ISA select ISA_BUS select PC select IDE_ISA - select VGA_ISA # FIXME: it is in the same file as i440fx, and does not compile # if separated depends on I440FX diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index ec857a117e..512162003b 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -2395,9 +2395,11 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, /* IVHD length */ build_append_int_noprefix(table_data, ivhd_table_len, 2); /* DeviceID */ - build_append_int_noprefix(table_data, s->devid, 2); + build_append_int_noprefix(table_data, + object_property_get_int(OBJECT(&s->pci), "addr", + &error_abort), 2); /* Capability offset */ - build_append_int_noprefix(table_data, s->capab_offset, 2); + build_append_int_noprefix(table_data, s->pci.capab_offset, 2); /* IOMMU base address */ build_append_int_noprefix(table_data, 
s->mmio.addr, 8); /* PCI Segment Group */ @@ -2695,7 +2697,8 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) int legacy_table_size = ROUND_UP(tables_blob->len - aml_len + legacy_aml_len, ACPI_BUILD_ALIGN_SIZE); - if (tables_blob->len > legacy_table_size) { + if ((tables_blob->len > legacy_table_size) && + !pcmc->resizable_acpi_blob) { /* Should happen only with PCI bridges and -M pc-i440fx-2.0. */ warn_report("ACPI table size %u exceeds %d bytes," " migration may not work", @@ -2706,7 +2709,8 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) g_array_set_size(tables_blob, legacy_table_size); } else { /* Make sure we have a buffer in case we need to resize the tables. */ - if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) { + if ((tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) && + !pcmc->resizable_acpi_blob) { /* As of QEMU 2.1, this fires with 160 VCPUs and 255 memory slots. */ warn_report("ACPI table size %u exceeds %d bytes," " migration may not work", diff --git a/hw/i386/acpi-common.c b/hw/i386/acpi-common.c index 52e5c1439a..8a0932fe84 100644 --- a/hw/i386/acpi-common.c +++ b/hw/i386/acpi-common.c @@ -102,7 +102,7 @@ void acpi_build_madt(GArray *table_data, BIOSLinker *linker, MachineClass *mc = MACHINE_GET_CLASS(x86ms); const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(x86ms)); AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(adev); - AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = oem_id, + AcpiTable table = { .sig = "APIC", .rev = 3, .oem_id = oem_id, .oem_table_id = oem_table_id }; acpi_table_begin(&table, table_data); diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index bcd016f5c5..9c77304438 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -1509,23 +1509,48 @@ static void amdvi_init(AMDVIState *s) amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES, 0xffffffffffffffef, 0); amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67); +} + +static void amdvi_pci_realize(PCIDevice *pdev, Error **errp) +{ + AMDVIPCIState *s = AMD_IOMMU_PCI(pdev); + int ret; + + ret = pci_add_capability(pdev, AMDVI_CAPAB_ID_SEC, 0, + AMDVI_CAPAB_SIZE, errp); + if (ret < 0) { + return; + } + s->capab_offset = ret; + + ret = pci_add_capability(pdev, PCI_CAP_ID_MSI, 0, + AMDVI_CAPAB_REG_SIZE, errp); + if (ret < 0) { + return; + } + ret = pci_add_capability(pdev, PCI_CAP_ID_HT, 0, + AMDVI_CAPAB_REG_SIZE, errp); + if (ret < 0) { + return; + } + + if (msi_init(pdev, 0, 1, true, false, errp) < 0) { + return; + } /* reset device ident */ - pci_config_set_vendor_id(s->pci.dev.config, PCI_VENDOR_ID_AMD); - pci_config_set_prog_interface(s->pci.dev.config, 00); - pci_config_set_device_id(s->pci.dev.config, s->devid); - pci_config_set_class(s->pci.dev.config, 0x0806); + pci_config_set_prog_interface(pdev->config, 0); /* reset AMDVI specific capabilities, all r/o */ - pci_set_long(s->pci.dev.config + s->capab_offset, AMDVI_CAPAB_FEATURES); - pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_LOW, - s->mmio.addr & ~(0xffff0000)); - pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH, - (s->mmio.addr & ~(0xffff)) >> 16); - pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_RANGE, + pci_set_long(pdev->config + s->capab_offset, AMDVI_CAPAB_FEATURES); + pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_BAR_LOW, + AMDVI_BASE_ADDR & ~(0xffff0000)); + pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH, + (AMDVI_BASE_ADDR & ~(0xffff)) >> 16); + pci_set_long(pdev->config 
+ s->capab_offset + AMDVI_CAPAB_RANGE, 0xff000000); - pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, 0); - pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, + pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_MISC, 0); + pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_MISC, AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR); } @@ -1539,7 +1564,6 @@ static void amdvi_sysbus_reset(DeviceState *dev) static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) { - int ret = 0; AMDVIState *s = AMD_IOMMU_DEVICE(dev); MachineState *ms = MACHINE(qdev_get_machine()); PCMachineState *pcms = PC_MACHINE(ms); @@ -1553,23 +1577,6 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) if (!qdev_realize(DEVICE(&s->pci), &bus->qbus, errp)) { return; } - ret = pci_add_capability(&s->pci.dev, AMDVI_CAPAB_ID_SEC, 0, - AMDVI_CAPAB_SIZE, errp); - if (ret < 0) { - return; - } - s->capab_offset = ret; - - ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_MSI, 0, - AMDVI_CAPAB_REG_SIZE, errp); - if (ret < 0) { - return; - } - ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_HT, 0, - AMDVI_CAPAB_REG_SIZE, errp); - if (ret < 0) { - return; - } /* Pseudo address space under root PCI bus. */ x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID); @@ -1581,8 +1588,6 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio); sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR); pci_setup_iommu(bus, amdvi_host_dma_iommu, s); - s->devid = object_property_get_int(OBJECT(&s->pci), "addr", &error_abort); - msi_init(&s->pci.dev, 0, 1, true, false, errp); amdvi_init(s); } @@ -1625,6 +1630,11 @@ static const TypeInfo amdvi_sysbus = { static void amdvi_pci_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->vendor_id = PCI_VENDOR_ID_AMD; + k->class_id = 0x0806; + k->realize = amdvi_pci_realize; set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device"; diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h index 79d38a3e41..6da893ee57 100644 --- a/hw/i386/amd_iommu.h +++ b/hw/i386/amd_iommu.h @@ -300,27 +300,26 @@ struct irte_ga { OBJECT_DECLARE_SIMPLE_TYPE(AMDVIState, AMD_IOMMU_DEVICE) #define TYPE_AMD_IOMMU_PCI "AMDVI-PCI" +OBJECT_DECLARE_SIMPLE_TYPE(AMDVIPCIState, AMD_IOMMU_PCI) #define TYPE_AMD_IOMMU_MEMORY_REGION "amd-iommu-iommu-memory-region" typedef struct AMDVIAddressSpace AMDVIAddressSpace; /* functions to steal PCI config space */ -typedef struct AMDVIPCIState { +struct AMDVIPCIState { PCIDevice dev; /* The PCI device itself */ -} AMDVIPCIState; + uint32_t capab_offset; /* capability offset pointer */ +}; struct AMDVIState { X86IOMMUState iommu; /* IOMMU bus device */ AMDVIPCIState pci; /* IOMMU PCI device */ uint32_t version; - uint32_t capab_offset; /* capability offset pointer */ uint64_t mmio_addr; - uint32_t devid; /* auto-assigned devid */ - bool enabled; /* IOMMU enabled */ bool ats_enabled; /* address translation enabled */ bool cmdbuf_enabled; /* command buffer enabled */ diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index a62896759c..94d52f4205 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -64,8 +64,8 @@ struct vtd_as_key { struct vtd_iotlb_key { uint64_t gfn; uint32_t pasid; - uint32_t level; uint16_t sid; + uint8_t level; }; static void vtd_address_space_refresh_all(IntelIOMMUState *s); @@ 
-221,10 +221,11 @@ static gboolean vtd_iotlb_equal(gconstpointer v1, gconstpointer v2) static guint vtd_iotlb_hash(gconstpointer v) { const struct vtd_iotlb_key *key = v; + uint64_t hash64 = key->gfn | ((uint64_t)(key->sid) << VTD_IOTLB_SID_SHIFT) | + (uint64_t)(key->level - 1) << VTD_IOTLB_LVL_SHIFT | + (uint64_t)(key->pasid) << VTD_IOTLB_PASID_SHIFT; - return key->gfn | ((key->sid) << VTD_IOTLB_SID_SHIFT) | - (key->level) << VTD_IOTLB_LVL_SHIFT | - (key->pasid) << VTD_IOTLB_PASID_SHIFT; + return (guint)((hash64 >> 32) ^ (hash64 & 0xffffffffU)); } static gboolean vtd_as_equal(gconstpointer v1, gconstpointer v2) diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h index f090e61e11..2e61eec2f5 100644 --- a/hw/i386/intel_iommu_internal.h +++ b/hw/i386/intel_iommu_internal.h @@ -114,9 +114,9 @@ VTD_INTERRUPT_ADDR_FIRST + 1) /* The shift of source_id in the key of IOTLB hash table */ -#define VTD_IOTLB_SID_SHIFT 20 -#define VTD_IOTLB_LVL_SHIFT 28 -#define VTD_IOTLB_PASID_SHIFT 30 +#define VTD_IOTLB_SID_SHIFT 26 +#define VTD_IOTLB_LVL_SHIFT 42 +#define VTD_IOTLB_PASID_SHIFT 44 #define VTD_IOTLB_MAX_SIZE 1024 /* Max size of the hash table */ /* IOTLB_REG */ diff --git a/hw/i386/kvm/i8254.c b/hw/i386/kvm/i8254.c index 191a26fa57..6a7383d877 100644 --- a/hw/i386/kvm/i8254.c +++ b/hw/i386/kvm/i8254.c @@ -301,7 +301,6 @@ static void kvm_pit_realizefn(DeviceState *dev, Error **errp) } static Property kvm_pit_properties[] = { - DEFINE_PROP_UINT32("iobase", PITCommonState, iobase, -1), DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", KVMPITState, lost_tick_policy, LOST_TICK_POLICY_DELAY), DEFINE_PROP_END_OF_LIST(), diff --git a/hw/i386/kvm/meson.build b/hw/i386/kvm/meson.build index 6621ba5cd7..ab143d6474 100644 --- a/hw/i386/kvm/meson.build +++ b/hw/i386/kvm/meson.build @@ -19,4 +19,4 @@ xen_stubs_ss.add(when: 'CONFIG_XEN_EMU', if_false: files( 'xen-stubs.c', )) -specific_ss.add_all(when: 'CONFIG_SOFTMMU', if_true: xen_stubs_ss) +specific_ss.add_all(when: 'CONFIG_SYSTEM_ONLY', if_true: xen_stubs_ss) diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c index 3048329474..3d810dbd59 100644 --- a/hw/i386/kvm/xen_evtchn.c +++ b/hw/i386/kvm/xen_evtchn.c @@ -147,7 +147,10 @@ struct XenEvtchnState { QemuMutex port_lock; uint32_t nr_ports; XenEvtchnPort port_table[EVTCHN_2L_NR_CHANNELS]; - qemu_irq gsis[IOAPIC_NUM_PINS]; + + /* Connected to the system GSIs for raising callback as GSI / INTx */ + unsigned int nr_callback_gsis; + qemu_irq *callback_gsis; struct xenevtchn_handle *be_handles[EVTCHN_2L_NR_CHANNELS]; @@ -299,7 +302,7 @@ static void gsi_assert_bh(void *opaque) } } -void xen_evtchn_create(void) +void xen_evtchn_create(unsigned int nr_gsis, qemu_irq *system_gsis) { XenEvtchnState *s = XEN_EVTCHN(sysbus_create_simple(TYPE_XEN_EVTCHN, -1, NULL)); @@ -310,8 +313,19 @@ void xen_evtchn_create(void) qemu_mutex_init(&s->port_lock); s->gsi_bh = aio_bh_new(qemu_get_aio_context(), gsi_assert_bh, s); - for (i = 0; i < IOAPIC_NUM_PINS; i++) { - sysbus_init_irq(SYS_BUS_DEVICE(s), &s->gsis[i]); + /* + * These are the *output* GSI from event channel support, for + * signalling CPU0's events via GSI or PCI INTx instead of the + * per-CPU vector. We create a *set* of irqs and connect one to + * each of the system GSIs which were passed in from the platform + * code, and then just trigger the right one as appropriate from + * xen_evtchn_set_callback_level(). 
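In the vtd_iotlb_hash() change above, the old 20/28/30-bit shifts were applied to fields promoted to at most 32 bits, so the source-id shift could overflow, the PASID contributed only its lowest bits, and everything above bit 31 was dropped by the guint return, making collisions easy. The new code assembles the key in 64 bits with the wider 26/42/44 shifts and folds the result down to 32 bits. A standalone restatement of that fold (shift constants copied from the patch):

    static uint32_t iotlb_key_hash(uint64_t gfn, uint16_t sid, uint8_t level,
                                   uint32_t pasid)
    {
        uint64_t h = gfn
                   | ((uint64_t)sid << 26)
                   | ((uint64_t)(level - 1) << 42)
                   | ((uint64_t)pasid << 44);

        /* XOR the upper half into the lower half instead of truncating */
        return (uint32_t)((h >> 32) ^ (h & 0xffffffffULL));
    }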
+ */ + s->nr_callback_gsis = nr_gsis; + s->callback_gsis = g_new0(qemu_irq, nr_gsis); + for (i = 0; i < nr_gsis; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->callback_gsis[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(s), i, system_gsis[i]); } /* @@ -336,20 +350,6 @@ void xen_evtchn_create(void) xen_evtchn_ops = &emu_evtchn_backend_ops; } -void xen_evtchn_connect_gsis(qemu_irq *system_gsis) -{ - XenEvtchnState *s = xen_evtchn_singleton; - int i; - - if (!s) { - return; - } - - for (i = 0; i < IOAPIC_NUM_PINS; i++) { - sysbus_connect_irq(SYS_BUS_DEVICE(s), i, system_gsis[i]); - } -} - static void xen_evtchn_register_types(void) { type_register_static(&xen_evtchn_info); @@ -430,8 +430,8 @@ void xen_evtchn_set_callback_level(int level) return; } - if (s->callback_gsi && s->callback_gsi < IOAPIC_NUM_PINS) { - qemu_set_irq(s->gsis[s->callback_gsi], level); + if (s->callback_gsi && s->callback_gsi < s->nr_callback_gsis) { + qemu_set_irq(s->callback_gsis[s->callback_gsi], level); if (level) { /* Ensure the vCPU polls for deassertion */ kvm_xen_set_callback_asserted(); diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h index bfb67ac2bc..b740acfc0d 100644 --- a/hw/i386/kvm/xen_evtchn.h +++ b/hw/i386/kvm/xen_evtchn.h @@ -16,10 +16,9 @@ typedef uint32_t evtchn_port_t; -void xen_evtchn_create(void); +void xen_evtchn_create(unsigned int nr_gsis, qemu_irq *system_gsis); int xen_evtchn_soft_reset(void); int xen_evtchn_set_callback_param(uint64_t param); -void xen_evtchn_connect_gsis(qemu_irq *system_gsis); void xen_evtchn_set_callback_level(int level); int xen_evtchn_set_port(uint16_t port); diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c index 900679af8a..133d89e953 100644 --- a/hw/i386/kvm/xen_xenstore.c +++ b/hw/i386/kvm/xen_xenstore.c @@ -133,7 +133,7 @@ static void xen_xenstore_realize(DeviceState *dev, Error **errp) error_setg(errp, "Xenstore evtchn port init failed"); return; } - aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh), true, + aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh), xen_xenstore_event, NULL, NULL, NULL, s); s->impl = xs_impl_create(xen_domid); @@ -1688,7 +1688,7 @@ static struct qemu_xs_handle *xs_be_open(void) XenXenstoreState *s = xen_xenstore_singleton; struct qemu_xs_handle *h; - if (!s && !s->impl) { + if (!s || !s->impl) { errno = -ENOSYS; return NULL; } diff --git a/hw/i386/meson.build b/hw/i386/meson.build index 213e2e82b3..cfdbfdcbcb 100644 --- a/hw/i386/meson.build +++ b/hw/i386/meson.build @@ -33,5 +33,6 @@ subdir('kvm') subdir('xen') i386_ss.add_all(xenpv_ss) +i386_ss.add_all(xen_ss) hw_arch += {'i386': i386_ss} diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c index 3d606a20b4..7227a2156c 100644 --- a/hw/i386/microvm.c +++ b/hw/i386/microvm.c @@ -389,9 +389,8 @@ static void microvm_fix_kernel_cmdline(MachineState *machine) bus = sysbus_get_default(); QTAILQ_FOREACH(kid, &bus->children, sibling) { DeviceState *dev = kid->child; - ObjectClass *class = object_get_class(OBJECT(dev)); - if (class == object_class_by_name(TYPE_VIRTIO_MMIO)) { + if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MMIO)) { VirtIOMMIOProxy *mmio = VIRTIO_MMIO(OBJECT(dev)); VirtioBusState *mmio_virtio_bus = &mmio->bus; BusState *mmio_bus = &mmio_virtio_bus->parent_obj; diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 615e1d3d06..fc52772fdd 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -116,7 +116,9 @@ { "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\ { "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual 
CPU version " v, }, -GlobalProperty pc_compat_8_0[] = {}; +GlobalProperty pc_compat_8_0[] = { + { "virtio-mem", "unplugged-inaccessible", "auto" }, +}; const size_t pc_compat_8_0_len = G_N_ELEMENTS(pc_compat_8_0); GlobalProperty pc_compat_7_2[] = { @@ -950,7 +952,6 @@ static hwaddr pc_max_used_gpa(PCMachineState *pcms, uint64_t pci_hole64_size) void pc_memory_init(PCMachineState *pcms, MemoryRegion *system_memory, MemoryRegion *rom_memory, - MemoryRegion **ram_memory, uint64_t pci_hole64_size) { int linux_boot, i; @@ -1008,7 +1009,6 @@ void pc_memory_init(PCMachineState *pcms, * Split single memory region and use aliases to address portions of it, * done for backwards compatibility with older qemus. */ - *ram_memory = machine->ram; ram_below_4g = g_malloc(sizeof(*ram_below_4g)); memory_region_init_alias(ram_below_4g, NULL, "ram-below-4g", machine->ram, 0, x86ms->below_4g_mem_size); @@ -1265,7 +1265,7 @@ static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl, void pc_basic_device_init(struct PCMachineState *pcms, ISABus *isa_bus, qemu_irq *gsi, - ISADevice **rtc_state, + ISADevice *rtc_state, bool create_fdctrl, uint32_t hpet_irqs) { @@ -1318,11 +1318,24 @@ void pc_basic_device_init(struct PCMachineState *pcms, pit_alt_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_PIT_INT); rtc_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_RTC_INT); } - *rtc_state = ISA_DEVICE(mc146818_rtc_init(isa_bus, 2000, rtc_irq)); + + if (rtc_irq) { + qdev_connect_gpio_out(DEVICE(rtc_state), 0, rtc_irq); + } else { + uint32_t irq = object_property_get_uint(OBJECT(rtc_state), + "irq", + &error_fatal); + isa_connect_gpio_out(rtc_state, 0, irq); + } + object_property_add_alias(OBJECT(pcms), "rtc-time", OBJECT(rtc_state), + "date"); #ifdef CONFIG_XEN_EMU if (xen_mode == XEN_EMULATE) { - xen_evtchn_connect_gsis(gsi); + xen_overlay_create(); + xen_evtchn_create(IOAPIC_NUM_PINS, gsi); + xen_gnttab_create(); + xen_xenstore_create(); if (pcms->bus) { pci_create_simple(pcms->bus, -1, "xen-platform"); } @@ -1331,7 +1344,7 @@ void pc_basic_device_init(struct PCMachineState *pcms, } #endif - qemu_register_boot_set(pc_boot_set, *rtc_state); + qemu_register_boot_set(pc_boot_set, rtc_state); if (!xen_enabled() && (x86ms->pit == ON_OFF_AUTO_AUTO || x86ms->pit == ON_OFF_AUTO_ON)) { @@ -1354,12 +1367,13 @@ void pc_basic_device_init(struct PCMachineState *pcms, void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus) { + MachineClass *mc = MACHINE_CLASS(pcmc); int i; rom_set_order_override(FW_CFG_ORDER_OVERRIDE_NIC); for (i = 0; i < nb_nics; i++) { NICInfo *nd = &nd_table[i]; - const char *model = nd->model ? nd->model : pcmc->default_nic_model; + const char *model = nd->model ? 
nd->model : mc->default_nic; if (g_str_equal(model, "ne2k_isa")) { pc_init_ne2k_isa(isa_bus, nd); @@ -1871,14 +1885,6 @@ static void pc_machine_initfn(Object *obj) int pc_machine_kvm_type(MachineState *machine, const char *kvm_type) { -#ifdef CONFIG_XEN_EMU - if (xen_mode == XEN_EMULATE) { - xen_overlay_create(); - xen_evtchn_create(); - xen_gnttab_create(); - xen_xenstore_create(); - } -#endif return 0; } @@ -1946,6 +1952,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) pcmc->acpi_data_size = 0x20000 + 0x8000; pcmc->pvh_enabled = true; pcmc->kvmclock_create_always = true; + pcmc->resizable_acpi_blob = true; assert(!mc->get_hotplug_handler); mc->get_hotplug_handler = pc_get_hotplug_handler; mc->hotplug_allowed = pc_hotplug_allowed; diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index 21591dad8d..44146e6ff5 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -26,12 +26,14 @@ #include CONFIG_DEVICES #include "qemu/units.h" +#include "hw/char/parallel-isa.h" #include "hw/dma/i8257.h" #include "hw/loader.h" #include "hw/i386/x86.h" #include "hw/i386/pc.h" #include "hw/i386/apic.h" #include "hw/pci-host/i440fx.h" +#include "hw/rtc/mc146818rtc.h" #include "hw/southbridge/piix.h" #include "hw/display/ramfb.h" #include "hw/firmware/smbios.h" @@ -69,6 +71,7 @@ #include "kvm/kvm-cpu.h" #define MAX_IDE_BUS 2 +#define XEN_IOAPIC_NUM_PIRQS 128ULL #ifdef CONFIG_IDE_ISA static const int ide_iobase[MAX_IDE_BUS] = { 0x1f0, 0x170 }; @@ -87,6 +90,21 @@ static int pc_pci_slot_get_pirq(PCIDevice *pci_dev, int pci_intx) return (pci_intx + slot_addend) & 3; } +static void piix_intx_routing_notifier_xen(PCIDevice *dev) +{ + int i; + + /* Scan for updates to PCI link routes (0x60-0x63). */ + for (i = 0; i < PIIX_NUM_PIRQS; i++) { + uint8_t v = dev->config_read(dev, PIIX_PIRQCA + i, 1); + if (v & 0x80) { + v = 0; + } + v &= 0xf; + xen_set_pci_link_route(i, v); + } +} + /* PC hardware initialisation */ static void pc_init1(MachineState *machine, const char *host_type, const char *pci_type) @@ -144,6 +162,7 @@ static void pc_init1(MachineState *machine, if (xen_enabled()) { xen_hvm_init_pc(pcms, &ram_memory); } else { + ram_memory = machine->ram; if (!pcms->max_ram_below_4g) { pcms->max_ram_below_4g = 0xe0000000; /* default: 3.5G */ } @@ -198,7 +217,7 @@ static void pc_init1(MachineState *machine, if (pcmc->smbios_defaults) { MachineClass *mc = MACHINE_GET_CLASS(machine); /* These values are guest ABI, do not change */ - smbios_set_defaults("QEMU", "Standard PC (i440FX + PIIX, 1996)", + smbios_set_defaults("QEMU", mc->desc, mc->name, pcmc->smbios_legacy_mode, pcmc->smbios_uuid_encoded, pcms->smbios_entry_point_type); @@ -206,8 +225,7 @@ static void pc_init1(MachineState *machine, /* allocate ram and load rom/bios */ if (!xen_enabled()) { - pc_memory_init(pcms, system_memory, - rom_memory, &ram_memory, hole64_size); + pc_memory_init(pcms, system_memory, rom_memory, hole64_size); } else { pc_system_flash_cleanup_unused(pcms); if (machine->kernel_filename != NULL) { @@ -221,8 +239,6 @@ static void pc_init1(MachineState *machine, if (pcmc->pci_enabled) { PIIX3State *piix3; PCIDevice *pci_dev; - const char *type = xen_enabled() ? 
TYPE_PIIX3_XEN_DEVICE - : TYPE_PIIX3_DEVICE; pci_bus = i440fx_init(pci_type, i440fx_host, @@ -235,15 +251,38 @@ static void pc_init1(MachineState *machine, : pc_pci_slot_get_pirq); pcms->bus = pci_bus; - pci_dev = pci_create_simple_multifunction(pci_bus, -1, true, type); + pci_dev = pci_create_simple_multifunction(pci_bus, -1, true, + TYPE_PIIX3_DEVICE); + + if (xen_enabled()) { + pci_device_set_intx_routing_notifier( + pci_dev, piix_intx_routing_notifier_xen); + + /* + * Xen supports additional interrupt routes from the PCI devices to + * the IOAPIC: the four pins of each PCI device on the bus are also + * connected to the IOAPIC directly. + * These additional routes can be discovered through ACPI. + */ + pci_bus_irqs(pci_bus, xen_intx_set_irq, pci_dev, + XEN_IOAPIC_NUM_PIRQS); + } + piix3 = PIIX3_PCI_DEVICE(pci_dev); piix3->pic = x86ms->gsi; piix3_devfn = piix3->dev.devfn; isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix3), "isa.0")); + rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(pci_dev), + "rtc")); } else { pci_bus = NULL; - isa_bus = isa_bus_new(NULL, get_system_memory(), system_io, + isa_bus = isa_bus_new(NULL, system_memory, system_io, &error_abort); + + rtc_state = isa_new(TYPE_MC146818_RTC); + qdev_prop_set_int32(DEVICE(rtc_state), "base_year", 2000); + isa_realize_and_unref(rtc_state, isa_bus, &error_fatal); + i8257_dma_init(isa_bus, 0); pcms->hpet_enabled = false; } @@ -269,7 +308,7 @@ static void pc_init1(MachineState *machine, } /* init basic PC hardware */ - pc_basic_device_init(pcms, isa_bus, x86ms->gsi, &rtc_state, true, + pc_basic_device_init(pcms, isa_bus, x86ms->gsi, rtc_state, true, 0x4); pc_nic_init(pcmc, isa_bus, pci_bus); @@ -442,7 +481,6 @@ static void pc_xen_hvm_init(MachineState *machine) static void pc_i440fx_machine_options(MachineClass *m) { PCMachineClass *pcmc = PC_MACHINE_CLASS(m); - pcmc->default_nic_model = "e1000"; pcmc->pci_root_uid = 0; pcmc->default_cpu_version = 1; @@ -450,6 +488,8 @@ static void pc_i440fx_machine_options(MachineClass *m) m->desc = "Standard PC (i440FX + PIIX, 1996)"; m->default_machine_opts = "firmware=bios-256k.bin"; m->default_display = "std"; + m->default_nic = "e1000"; + m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE); } @@ -756,6 +796,7 @@ static void pc_i440fx_2_2_machine_options(MachineClass *m) compat_props_add(m->compat_props, hw_compat_2_2, hw_compat_2_2_len); compat_props_add(m->compat_props, pc_compat_2_2, pc_compat_2_2_len); pcmc->rsdp_in_ram = false; + pcmc->resizable_acpi_blob = false; } DEFINE_I440FX_MACHINE(v2_2, "pc-i440fx-2.2", pc_compat_2_2_fn, @@ -875,8 +916,9 @@ static void isapc_machine_options(MachineClass *m) pcmc->gigabyte_align = false; pcmc->smbios_legacy_mode = true; pcmc->has_reserved_memory = false; - pcmc->default_nic_model = "ne2k_isa"; + m->default_nic = "ne2k_isa"; m->default_cpu_type = X86_CPU_TYPE_NAME("486"); + m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); } DEFINE_PC_MACHINE(isapc, "isapc", pc_init_isa, diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index f02919d92c..a9a59ed42b 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -30,6 +30,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "hw/char/parallel-isa.h" #include "hw/loader.h" #include "hw/i2c/smbus_eeprom.h" #include "hw/rtc/mc146818rtc.h" @@ -126,10 +127,10 @@ static void pc_q35_init(MachineState *machine) DeviceState *lpc_dev; BusState 
*idebus[MAX_SATA_PORTS]; ISADevice *rtc_state; + MemoryRegion *system_memory = get_system_memory(); MemoryRegion *system_io = get_system_io(); MemoryRegion *pci_memory; MemoryRegion *rom_memory; - MemoryRegion *ram_memory; GSIState *gsi_state; ISABus *isa_bus; int i; @@ -192,14 +193,14 @@ static void pc_q35_init(MachineState *machine) rom_memory = pci_memory; } else { pci_memory = NULL; - rom_memory = get_system_memory(); + rom_memory = system_memory; } pc_guest_info_init(pcms); if (pcmc->smbios_defaults) { /* These values are guest ABI, do not change */ - smbios_set_defaults("QEMU", "Standard PC (Q35 + ICH9, 2009)", + smbios_set_defaults("QEMU", mc->desc, mc->name, pcmc->smbios_legacy_mode, pcmc->smbios_uuid_encoded, pcms->smbios_entry_point_type); @@ -215,16 +216,15 @@ static void pc_q35_init(MachineState *machine) } /* allocate ram and load rom/bios */ - pc_memory_init(pcms, get_system_memory(), rom_memory, &ram_memory, - pci_hole64_size); + pc_memory_init(pcms, system_memory, rom_memory, pci_hole64_size); - object_property_add_child(qdev_get_machine(), "q35", OBJECT(q35_host)); + object_property_add_child(OBJECT(machine), "q35", OBJECT(q35_host)); object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_RAM_MEM, - OBJECT(ram_memory), NULL); + OBJECT(machine->ram), NULL); object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_PCI_MEM, OBJECT(pci_memory), NULL); object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_SYSTEM_MEM, - OBJECT(get_system_memory()), NULL); + OBJECT(system_memory), NULL); object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_IO_MEM, OBJECT(system_io), NULL); object_property_set_int(OBJECT(q35_host), PCI_HOST_BELOW_4G_MEM_SIZE, @@ -242,6 +242,8 @@ static void pc_q35_init(MachineState *machine) x86_machine_is_smm_enabled(x86ms)); pci_realize_and_unref(lpc, host_bus, &error_fatal); + rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(lpc), "rtc")); + object_property_add_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP, TYPE_HOTPLUG_HANDLER, (Object **)&x86ms->acpi_dev, @@ -291,7 +293,7 @@ static void pc_q35_init(MachineState *machine) } /* init basic PC hardware */ - pc_basic_device_init(pcms, isa_bus, x86ms->gsi, &rtc_state, !mc->no_floppy, + pc_basic_device_init(pcms, isa_bus, x86ms->gsi, rtc_state, !mc->no_floppy, 0xff0104); if (pcms->sata_enabled) { @@ -355,7 +357,6 @@ static void pc_q35_init(MachineState *machine) static void pc_q35_machine_options(MachineClass *m) { PCMachineClass *pcmc = PC_MACHINE_CLASS(m); - pcmc->default_nic_model = "e1000e"; pcmc->pci_root_uid = 0; pcmc->default_cpu_version = 1; @@ -364,8 +365,10 @@ static void pc_q35_machine_options(MachineClass *m) m->units_per_default_bus = 1; m->default_machine_opts = "firmware=bios-256k.bin"; m->default_display = "std"; + m->default_nic = "e1000e"; m->default_kernel_irqchip_split = false; m->no_floppy = 1; + m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); machine_class_allow_dynamic_sysbus_dev(m, TYPE_AMD_IOMMU_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_INTEL_IOMMU_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE); @@ -576,10 +579,8 @@ DEFINE_Q35_MACHINE(v2_12, "pc-q35-2.12", NULL, static void pc_q35_2_11_machine_options(MachineClass *m) { - PCMachineClass *pcmc = PC_MACHINE_CLASS(m); - pc_q35_2_12_machine_options(m); - pcmc->default_nic_model = "e1000"; + m->default_nic = "e1000"; compat_props_add(m->compat_props, hw_compat_2_11, hw_compat_2_11_len); compat_props_add(m->compat_props, pc_compat_2_11, pc_compat_2_11_len); } diff --git 
a/hw/i386/xen/meson.build b/hw/i386/xen/meson.build index 2e64a34e16..3dc4c4f106 100644 --- a/hw/i386/xen/meson.build +++ b/hw/i386/xen/meson.build @@ -1,6 +1,5 @@ i386_ss.add(when: 'CONFIG_XEN', if_true: files( 'xen-hvm.c', - 'xen-mapcache.c', 'xen_apic.c', 'xen_pvdevice.c', )) diff --git a/hw/i386/xen/trace-events b/hw/i386/xen/trace-events index 5d6be61090..5d0a8d6dcf 100644 --- a/hw/i386/xen/trace-events +++ b/hw/i386/xen/trace-events @@ -7,22 +7,3 @@ xen_platform_log(char *s) "xen platform: %s" xen_pv_mmio_read(uint64_t addr) "WARNING: read from Xen PV Device MMIO space (address 0x%"PRIx64")" xen_pv_mmio_write(uint64_t addr) "WARNING: write to Xen PV Device MMIO space (address 0x%"PRIx64")" -# xen-hvm.c -xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: 0x%lx, size 0x%lx" -xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "0x%"PRIx64" size 0x%lx, log_dirty %i" -handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" -handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p read type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" -handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" -cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" -cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d" -cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d" -cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" -xen_map_resource_ioreq(uint32_t id, void *addr) "id: %u addr: %p" -cpu_ioreq_config_read(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x" -cpu_ioreq_config_write(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x" - -# xen-mapcache.c -xen_map_cache(uint64_t phys_addr) "want 0x%"PRIx64 -xen_remap_bucket(uint64_t index) "index 0x%"PRIx64 -xen_map_cache_return(void* ptr) "%p" - diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c index 56641a550e..5dc5e80535 100644 --- a/hw/i386/xen/xen-hvm.c +++ b/hw/i386/xen/xen-hvm.c @@ -10,43 +10,21 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-migration.h" +#include "trace.h" -#include "cpu.h" -#include "hw/pci/pci.h" -#include "hw/pci/pci_host.h" #include "hw/i386/pc.h" #include "hw/irq.h" -#include "hw/hw.h" #include "hw/i386/apic-msidef.h" -#include "hw/xen/xen_native.h" -#include "hw/xen/xen-legacy-backend.h" -#include "hw/xen/xen-bus.h" #include "hw/xen/xen-x86.h" -#include "qapi/error.h" -#include "qapi/qapi-commands-migration.h" -#include 
"qemu/error-report.h" -#include "qemu/main-loop.h" #include "qemu/range.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" -#include "sysemu/xen.h" -#include "sysemu/xen-mapcache.h" -#include "trace.h" -#include <xen/hvm/ioreq.h> +#include "hw/xen/xen-hvm-common.h" +#include "hw/xen/arch_hvm.h" #include <xen/hvm/e820.h> -//#define DEBUG_XEN_HVM - -#ifdef DEBUG_XEN_HVM -#define DPRINTF(fmt, ...) \ - do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0) -#else -#define DPRINTF(fmt, ...) \ - do { } while (0) -#endif - -static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi; +static MemoryRegion ram_640k, ram_lo, ram_hi; static MemoryRegion *framebuffer; static bool xen_in_migration; @@ -75,66 +53,14 @@ struct shared_vmport_iopage { typedef struct shared_vmport_iopage shared_vmport_iopage_t; #endif -static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i) -{ - return shared_page->vcpu_ioreq[i].vp_eport; -} -static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu) -{ - return &shared_page->vcpu_ioreq[vcpu]; -} - -#define BUFFER_IO_MAX_DELAY 100 - -typedef struct XenPhysmap { - hwaddr start_addr; - ram_addr_t size; - const char *name; - hwaddr phys_offset; - - QLIST_ENTRY(XenPhysmap) list; -} XenPhysmap; +static shared_vmport_iopage_t *shared_vmport_page; static QLIST_HEAD(, XenPhysmap) xen_physmap; - -typedef struct XenPciDevice { - PCIDevice *pci_dev; - uint32_t sbdf; - QLIST_ENTRY(XenPciDevice) entry; -} XenPciDevice; - -typedef struct XenIOState { - ioservid_t ioservid; - shared_iopage_t *shared_page; - shared_vmport_iopage_t *shared_vmport_page; - buffered_iopage_t *buffered_io_page; - xenforeignmemory_resource_handle *fres; - QEMUTimer *buffered_io_timer; - CPUState **cpu_by_vcpu_id; - /* the evtchn port for polling the notification, */ - evtchn_port_t *ioreq_local_port; - /* evtchn remote and local ports for buffered io */ - evtchn_port_t bufioreq_remote_port; - evtchn_port_t bufioreq_local_port; - /* the evtchn fd for polling */ - xenevtchn_handle *xce_handle; - /* which vcpu we are serving */ - int send_vcpu; - - struct xs_handle *xenstore; - MemoryListener memory_listener; - MemoryListener io_listener; - QLIST_HEAD(, XenPciDevice) dev_list; - DeviceListener device_listener; - hwaddr free_phys_offset; - const XenPhysmap *log_for_dirtybit; - /* Buffer used by xen_sync_dirty_bitmap */ - unsigned long *dirty_bitmap; - - Notifier exit; - Notifier suspend; - Notifier wakeup; -} XenIOState; +static const XenPhysmap *log_for_dirtybit; +/* Buffer used by xen_sync_dirty_bitmap */ +static unsigned long *dirty_bitmap; +static Notifier suspend; +static Notifier wakeup; /* Xen specific function for piix pci */ @@ -143,7 +69,7 @@ int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num) return irq_num + (PCI_SLOT(pci_dev->devfn) << 2); } -void xen_piix3_set_irq(void *opaque, int irq_num, int level) +void xen_intx_set_irq(void *opaque, int irq_num, int level) { xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2, irq_num & 3, level); @@ -248,42 +174,6 @@ static void xen_ram_init(PCMachineState *pcms, } } -void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr, - Error **errp) -{ - unsigned long nr_pfn; - xen_pfn_t *pfn_list; - int i; - - if (runstate_check(RUN_STATE_INMIGRATE)) { - /* RAM already populated in Xen */ - fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT - " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n", - __func__, size, ram_addr); - return; - } - - if (mr == &ram_memory) { - return; - } - - 
trace_xen_ram_alloc(ram_addr, size); - - nr_pfn = size >> TARGET_PAGE_BITS; - pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn); - - for (i = 0; i < nr_pfn; i++) { - pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i; - } - - if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) { - error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT, - ram_addr); - } - - g_free(pfn_list); -} - static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size) { XenPhysmap *physmap = NULL; @@ -463,154 +353,16 @@ static int xen_remove_from_physmap(XenIOState *state, } QLIST_REMOVE(physmap, list); - if (state->log_for_dirtybit == physmap) { - state->log_for_dirtybit = NULL; - g_free(state->dirty_bitmap); - state->dirty_bitmap = NULL; + if (log_for_dirtybit == physmap) { + log_for_dirtybit = NULL; + g_free(dirty_bitmap); + dirty_bitmap = NULL; } g_free(physmap); return 0; } -static void xen_set_memory(struct MemoryListener *listener, - MemoryRegionSection *section, - bool add) -{ - XenIOState *state = container_of(listener, XenIOState, memory_listener); - hwaddr start_addr = section->offset_within_address_space; - ram_addr_t size = int128_get64(section->size); - bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA); - hvmmem_type_t mem_type; - - if (section->mr == &ram_memory) { - return; - } else { - if (add) { - xen_map_memory_section(xen_domid, state->ioservid, - section); - } else { - xen_unmap_memory_section(xen_domid, state->ioservid, - section); - } - } - - if (!memory_region_is_ram(section->mr)) { - return; - } - - if (log_dirty != add) { - return; - } - - trace_xen_client_set_memory(start_addr, size, log_dirty); - - start_addr &= TARGET_PAGE_MASK; - size = TARGET_PAGE_ALIGN(size); - - if (add) { - if (!memory_region_is_rom(section->mr)) { - xen_add_to_physmap(state, start_addr, size, - section->mr, section->offset_within_region); - } else { - mem_type = HVMMEM_ram_ro; - if (xen_set_mem_type(xen_domid, mem_type, - start_addr >> TARGET_PAGE_BITS, - size >> TARGET_PAGE_BITS)) { - DPRINTF("xen_set_mem_type error, addr: "HWADDR_FMT_plx"\n", - start_addr); - } - } - } else { - if (xen_remove_from_physmap(state, start_addr, size) < 0) { - DPRINTF("physmapping does not exist at "HWADDR_FMT_plx"\n", start_addr); - } - } -} - -static void xen_region_add(MemoryListener *listener, - MemoryRegionSection *section) -{ - memory_region_ref(section->mr); - xen_set_memory(listener, section, true); -} - -static void xen_region_del(MemoryListener *listener, - MemoryRegionSection *section) -{ - xen_set_memory(listener, section, false); - memory_region_unref(section->mr); -} - -static void xen_io_add(MemoryListener *listener, - MemoryRegionSection *section) -{ - XenIOState *state = container_of(listener, XenIOState, io_listener); - MemoryRegion *mr = section->mr; - - if (mr->ops == &unassigned_io_ops) { - return; - } - - memory_region_ref(mr); - - xen_map_io_section(xen_domid, state->ioservid, section); -} - -static void xen_io_del(MemoryListener *listener, - MemoryRegionSection *section) -{ - XenIOState *state = container_of(listener, XenIOState, io_listener); - MemoryRegion *mr = section->mr; - - if (mr->ops == &unassigned_io_ops) { - return; - } - - xen_unmap_io_section(xen_domid, state->ioservid, section); - - memory_region_unref(mr); -} - -static void xen_device_realize(DeviceListener *listener, - DeviceState *dev) -{ - XenIOState *state = container_of(listener, XenIOState, device_listener); - - if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { - 
PCIDevice *pci_dev = PCI_DEVICE(dev); - XenPciDevice *xendev = g_new(XenPciDevice, 1); - - xendev->pci_dev = pci_dev; - xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev), - pci_dev->devfn); - QLIST_INSERT_HEAD(&state->dev_list, xendev, entry); - - xen_map_pcidev(xen_domid, state->ioservid, pci_dev); - } -} - -static void xen_device_unrealize(DeviceListener *listener, - DeviceState *dev) -{ - XenIOState *state = container_of(listener, XenIOState, device_listener); - - if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { - PCIDevice *pci_dev = PCI_DEVICE(dev); - XenPciDevice *xendev, *next; - - xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev); - - QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) { - if (xendev->pci_dev == pci_dev) { - QLIST_REMOVE(xendev, entry); - g_free(xendev); - break; - } - } - } -} - static void xen_sync_dirty_bitmap(XenIOState *state, hwaddr start_addr, ram_addr_t size) @@ -627,16 +379,16 @@ static void xen_sync_dirty_bitmap(XenIOState *state, return; } - if (state->log_for_dirtybit == NULL) { - state->log_for_dirtybit = physmap; - state->dirty_bitmap = g_new(unsigned long, bitmap_size); - } else if (state->log_for_dirtybit != physmap) { + if (log_for_dirtybit == NULL) { + log_for_dirtybit = physmap; + dirty_bitmap = g_new(unsigned long, bitmap_size); + } else if (log_for_dirtybit != physmap) { /* Only one range for dirty bitmap can be tracked. */ return; } rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS, - npages, state->dirty_bitmap); + npages, dirty_bitmap); if (rc < 0) { #ifndef ENODATA #define ENODATA ENOENT @@ -651,7 +403,7 @@ static void xen_sync_dirty_bitmap(XenIOState *state, } for (i = 0; i < bitmap_size; i++) { - unsigned long map = state->dirty_bitmap[i]; + unsigned long map = dirty_bitmap[i]; while (map != 0) { j = ctzl(map); map &= ~(1ul << j); @@ -677,12 +429,10 @@ static void xen_log_start(MemoryListener *listener, static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section, int old, int new) { - XenIOState *state = container_of(listener, XenIOState, memory_listener); - if (old & ~new & (1 << DIRTY_MEMORY_VGA)) { - state->log_for_dirtybit = NULL; - g_free(state->dirty_bitmap); - state->dirty_bitmap = NULL; + log_for_dirtybit = NULL; + g_free(dirty_bitmap); + dirty_bitmap = NULL; /* Disable dirty bit tracking */ xen_track_dirty_vram(xen_domid, 0, 0, NULL); } @@ -720,277 +470,6 @@ static MemoryListener xen_memory_listener = { .priority = 10, }; -static MemoryListener xen_io_listener = { - .name = "xen-io", - .region_add = xen_io_add, - .region_del = xen_io_del, - .priority = 10, -}; - -static DeviceListener xen_device_listener = { - .realize = xen_device_realize, - .unrealize = xen_device_unrealize, -}; - -/* get the ioreq packets from share mem */ -static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu) -{ - ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu); - - if (req->state != STATE_IOREQ_READY) { - DPRINTF("I/O request not ready: " - "%x, ptr: %x, port: %"PRIx64", " - "data: %"PRIx64", count: %u, size: %u\n", - req->state, req->data_is_ptr, req->addr, - req->data, req->count, req->size); - return NULL; - } - - xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */ - - req->state = STATE_IOREQ_INPROCESS; - return req; -} - -/* use poll to get the port notification */ -/* ioreq_vec--out,the */ -/* retval--the number of ioreq packet */ -static ioreq_t *cpu_get_ioreq(XenIOState *state) -{ - MachineState *ms = MACHINE(qdev_get_machine()); - unsigned int 
max_cpus = ms->smp.max_cpus; - int i; - evtchn_port_t port; - - port = qemu_xen_evtchn_pending(state->xce_handle); - if (port == state->bufioreq_local_port) { - timer_mod(state->buffered_io_timer, - BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); - return NULL; - } - - if (port != -1) { - for (i = 0; i < max_cpus; i++) { - if (state->ioreq_local_port[i] == port) { - break; - } - } - - if (i == max_cpus) { - hw_error("Fatal error while trying to get io event!\n"); - } - - /* unmask the wanted port again */ - qemu_xen_evtchn_unmask(state->xce_handle, port); - - /* get the io packet from shared memory */ - state->send_vcpu = i; - return cpu_get_ioreq_from_shared_memory(state, i); - } - - /* read error or read nothing */ - return NULL; -} - -static uint32_t do_inp(uint32_t addr, unsigned long size) -{ - switch (size) { - case 1: - return cpu_inb(addr); - case 2: - return cpu_inw(addr); - case 4: - return cpu_inl(addr); - default: - hw_error("inp: bad size: %04x %lx", addr, size); - } -} - -static void do_outp(uint32_t addr, - unsigned long size, uint32_t val) -{ - switch (size) { - case 1: - return cpu_outb(addr, val); - case 2: - return cpu_outw(addr, val); - case 4: - return cpu_outl(addr, val); - default: - hw_error("outp: bad size: %04x %lx", addr, size); - } -} - -/* - * Helper functions which read/write an object from/to physical guest - * memory, as part of the implementation of an ioreq. - * - * Equivalent to - * cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i, - * val, req->size, 0/1) - * except without the integer overflow problems. - */ -static void rw_phys_req_item(hwaddr addr, - ioreq_t *req, uint32_t i, void *val, int rw) -{ - /* Do everything unsigned so overflow just results in a truncated result - * and accesses to undesired parts of guest memory, which is up - * to the guest */ - hwaddr offset = (hwaddr)req->size * i; - if (req->df) { - addr -= offset; - } else { - addr += offset; - } - cpu_physical_memory_rw(addr, val, req->size, rw); -} - -static inline void read_phys_req_item(hwaddr addr, - ioreq_t *req, uint32_t i, void *val) -{ - rw_phys_req_item(addr, req, i, val, 0); -} -static inline void write_phys_req_item(hwaddr addr, - ioreq_t *req, uint32_t i, void *val) -{ - rw_phys_req_item(addr, req, i, val, 1); -} - - -static void cpu_ioreq_pio(ioreq_t *req) -{ - uint32_t i; - - trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr, - req->data, req->count, req->size); - - if (req->size > sizeof(uint32_t)) { - hw_error("PIO: bad size (%u)", req->size); - } - - if (req->dir == IOREQ_READ) { - if (!req->data_is_ptr) { - req->data = do_inp(req->addr, req->size); - trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr, - req->size); - } else { - uint32_t tmp; - - for (i = 0; i < req->count; i++) { - tmp = do_inp(req->addr, req->size); - write_phys_req_item(req->data, req, i, &tmp); - } - } - } else if (req->dir == IOREQ_WRITE) { - if (!req->data_is_ptr) { - trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr, - req->size); - do_outp(req->addr, req->size, req->data); - } else { - for (i = 0; i < req->count; i++) { - uint32_t tmp = 0; - - read_phys_req_item(req->data, req, i, &tmp); - do_outp(req->addr, req->size, tmp); - } - } - } -} - -static void cpu_ioreq_move(ioreq_t *req) -{ - uint32_t i; - - trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr, - req->data, req->count, req->size); - - if (req->size > sizeof(req->data)) { - hw_error("MMIO: bad size (%u)", req->size); - } - - if (!req->data_is_ptr) { - 
if (req->dir == IOREQ_READ) { - for (i = 0; i < req->count; i++) { - read_phys_req_item(req->addr, req, i, &req->data); - } - } else if (req->dir == IOREQ_WRITE) { - for (i = 0; i < req->count; i++) { - write_phys_req_item(req->addr, req, i, &req->data); - } - } - } else { - uint64_t tmp; - - if (req->dir == IOREQ_READ) { - for (i = 0; i < req->count; i++) { - read_phys_req_item(req->addr, req, i, &tmp); - write_phys_req_item(req->data, req, i, &tmp); - } - } else if (req->dir == IOREQ_WRITE) { - for (i = 0; i < req->count; i++) { - read_phys_req_item(req->data, req, i, &tmp); - write_phys_req_item(req->addr, req, i, &tmp); - } - } - } -} - -static void cpu_ioreq_config(XenIOState *state, ioreq_t *req) -{ - uint32_t sbdf = req->addr >> 32; - uint32_t reg = req->addr; - XenPciDevice *xendev; - - if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) && - req->size != sizeof(uint32_t)) { - hw_error("PCI config access: bad size (%u)", req->size); - } - - if (req->count != 1) { - hw_error("PCI config access: bad count (%u)", req->count); - } - - QLIST_FOREACH(xendev, &state->dev_list, entry) { - if (xendev->sbdf != sbdf) { - continue; - } - - if (!req->data_is_ptr) { - if (req->dir == IOREQ_READ) { - req->data = pci_host_config_read_common( - xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, - req->size); - trace_cpu_ioreq_config_read(req, xendev->sbdf, reg, - req->size, req->data); - } else if (req->dir == IOREQ_WRITE) { - trace_cpu_ioreq_config_write(req, xendev->sbdf, reg, - req->size, req->data); - pci_host_config_write_common( - xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, - req->data, req->size); - } - } else { - uint32_t tmp; - - if (req->dir == IOREQ_READ) { - tmp = pci_host_config_read_common( - xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, - req->size); - trace_cpu_ioreq_config_read(req, xendev->sbdf, reg, - req->size, tmp); - write_phys_req_item(req->data, req, 0, &tmp); - } else if (req->dir == IOREQ_WRITE) { - read_phys_req_item(req->data, req, 0, &tmp); - trace_cpu_ioreq_config_write(req, xendev->sbdf, reg, - req->size, tmp); - pci_host_config_write_common( - xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, - tmp, req->size); - } - } - } -} - static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req) { X86CPU *cpu; @@ -1022,9 +501,9 @@ static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req) { vmware_regs_t *vmport_regs; - assert(state->shared_vmport_page); + assert(shared_vmport_page); vmport_regs = - &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu]; + &shared_vmport_page->vcpu_vmport_regs[state->send_vcpu]; QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs)); current_cpu = state->cpu_by_vcpu_id[state->send_vcpu]; @@ -1034,226 +513,6 @@ static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req) current_cpu = NULL; } -static void handle_ioreq(XenIOState *state, ioreq_t *req) -{ - trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr, - req->addr, req->data, req->count, req->size); - - if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) && - (req->size < sizeof (target_ulong))) { - req->data &= ((target_ulong) 1 << (8 * req->size)) - 1; - } - - if (req->dir == IOREQ_WRITE) - trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr, - req->addr, req->data, req->count, req->size); - - switch (req->type) { - case IOREQ_TYPE_PIO: - cpu_ioreq_pio(req); - break; - case IOREQ_TYPE_COPY: - cpu_ioreq_move(req); - break; - case IOREQ_TYPE_VMWARE_PORT: - handle_vmport_ioreq(state, req); - break; - case IOREQ_TYPE_TIMEOFFSET: - 
break; - case IOREQ_TYPE_INVALIDATE: - xen_invalidate_map_cache(); - break; - case IOREQ_TYPE_PCI_CONFIG: - cpu_ioreq_config(state, req); - break; - default: - hw_error("Invalid ioreq type 0x%x\n", req->type); - } - if (req->dir == IOREQ_READ) { - trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr, - req->addr, req->data, req->count, req->size); - } -} - -static bool handle_buffered_iopage(XenIOState *state) -{ - buffered_iopage_t *buf_page = state->buffered_io_page; - buf_ioreq_t *buf_req = NULL; - bool handled_ioreq = false; - ioreq_t req; - int qw; - - if (!buf_page) { - return 0; - } - - memset(&req, 0x00, sizeof(req)); - req.state = STATE_IOREQ_READY; - req.count = 1; - req.dir = IOREQ_WRITE; - - for (;;) { - uint32_t rdptr = buf_page->read_pointer, wrptr; - - xen_rmb(); - wrptr = buf_page->write_pointer; - xen_rmb(); - if (rdptr != buf_page->read_pointer) { - continue; - } - if (rdptr == wrptr) { - break; - } - buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM]; - req.size = 1U << buf_req->size; - req.addr = buf_req->addr; - req.data = buf_req->data; - req.type = buf_req->type; - xen_rmb(); - qw = (req.size == 8); - if (qw) { - if (rdptr + 1 == wrptr) { - hw_error("Incomplete quad word buffered ioreq"); - } - buf_req = &buf_page->buf_ioreq[(rdptr + 1) % - IOREQ_BUFFER_SLOT_NUM]; - req.data |= ((uint64_t)buf_req->data) << 32; - xen_rmb(); - } - - handle_ioreq(state, &req); - - /* Only req.data may get updated by handle_ioreq(), albeit even that - * should not happen as such data would never make it to the guest (we - * can only usefully see writes here after all). - */ - assert(req.state == STATE_IOREQ_READY); - assert(req.count == 1); - assert(req.dir == IOREQ_WRITE); - assert(!req.data_is_ptr); - - qatomic_add(&buf_page->read_pointer, qw + 1); - handled_ioreq = true; - } - - return handled_ioreq; -} - -static void handle_buffered_io(void *opaque) -{ - XenIOState *state = opaque; - - if (handle_buffered_iopage(state)) { - timer_mod(state->buffered_io_timer, - BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); - } else { - timer_del(state->buffered_io_timer); - qemu_xen_evtchn_unmask(state->xce_handle, state->bufioreq_local_port); - } -} - -static void cpu_handle_ioreq(void *opaque) -{ - XenIOState *state = opaque; - ioreq_t *req = cpu_get_ioreq(state); - - handle_buffered_iopage(state); - if (req) { - ioreq_t copy = *req; - - xen_rmb(); - handle_ioreq(state, &copy); - req->data = copy.data; - - if (req->state != STATE_IOREQ_INPROCESS) { - fprintf(stderr, "Badness in I/O request ... not in service?!: " - "%x, ptr: %x, port: %"PRIx64", " - "data: %"PRIx64", count: %u, size: %u, type: %u\n", - req->state, req->data_is_ptr, req->addr, - req->data, req->count, req->size, req->type); - destroy_hvm_domain(false); - return; - } - - xen_wmb(); /* Update ioreq contents /then/ update state. */ - - /* - * We do this before we send the response so that the tools - * have the opportunity to pick up on the reset before the - * guest resumes and does a hlt with interrupts disabled which - * causes Xen to powerdown the domain. 
- */ - if (runstate_is_running()) { - ShutdownCause request; - - if (qemu_shutdown_requested_get()) { - destroy_hvm_domain(false); - } - request = qemu_reset_requested_get(); - if (request) { - qemu_system_reset(request); - destroy_hvm_domain(true); - } - } - - req->state = STATE_IORESP_READY; - qemu_xen_evtchn_notify(state->xce_handle, - state->ioreq_local_port[state->send_vcpu]); - } -} - -static void xen_main_loop_prepare(XenIOState *state) -{ - int evtchn_fd = -1; - - if (state->xce_handle != NULL) { - evtchn_fd = qemu_xen_evtchn_fd(state->xce_handle); - } - - state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io, - state); - - if (evtchn_fd != -1) { - CPUState *cpu_state; - - DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__); - CPU_FOREACH(cpu_state) { - DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n", - __func__, cpu_state->cpu_index, cpu_state); - state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state; - } - qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state); - } -} - - -static void xen_hvm_change_state_handler(void *opaque, bool running, - RunState rstate) -{ - XenIOState *state = opaque; - - if (running) { - xen_main_loop_prepare(state); - } - - xen_set_ioreq_server_state(xen_domid, - state->ioservid, - (rstate == RUN_STATE_RUNNING)); -} - -static void xen_exit_notifier(Notifier *n, void *data) -{ - XenIOState *state = container_of(n, XenIOState, exit); - - xen_destroy_ioreq_server(xen_domid, state->ioservid); - if (state->fres != NULL) { - xenforeignmemory_unmap_resource(xen_fmem, state->fres); - } - - qemu_xen_evtchn_close(state->xce_handle); - xs_daemon_close(state->xenstore); -} - #ifdef XEN_COMPAT_PHYSMAP static void xen_read_physmap(XenIOState *state) { @@ -1313,135 +572,34 @@ static void xen_wakeup_notifier(Notifier *notifier, void *data) xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0); } -static int xen_map_ioreq_server(XenIOState *state) -{ - void *addr = NULL; - xen_pfn_t ioreq_pfn; - xen_pfn_t bufioreq_pfn; - evtchn_port_t bufioreq_evtchn; - int rc; - - /* - * Attempt to map using the resource API and fall back to normal - * foreign mapping if this is not supported. - */ - QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0); - QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1); - state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid, - XENMEM_resource_ioreq_server, - state->ioservid, 0, 2, - &addr, - PROT_READ | PROT_WRITE, 0); - if (state->fres != NULL) { - trace_xen_map_resource_ioreq(state->ioservid, addr); - state->buffered_io_page = addr; - state->shared_page = addr + TARGET_PAGE_SIZE; - } else if (errno != EOPNOTSUPP) { - error_report("failed to map ioreq server resources: error %d handle=%p", - errno, xen_xc); - return -1; - } - - rc = xen_get_ioreq_server_info(xen_domid, state->ioservid, - (state->shared_page == NULL) ? - &ioreq_pfn : NULL, - (state->buffered_io_page == NULL) ? 
- &bufioreq_pfn : NULL, - &bufioreq_evtchn); - if (rc < 0) { - error_report("failed to get ioreq server info: error %d handle=%p", - errno, xen_xc); - return rc; - } - - if (state->shared_page == NULL) { - DPRINTF("shared page at pfn %lx\n", ioreq_pfn); - - state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid, - PROT_READ | PROT_WRITE, - 1, &ioreq_pfn, NULL); - if (state->shared_page == NULL) { - error_report("map shared IO page returned error %d handle=%p", - errno, xen_xc); - } - } - - if (state->buffered_io_page == NULL) { - DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn); - - state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid, - PROT_READ | PROT_WRITE, - 1, &bufioreq_pfn, - NULL); - if (state->buffered_io_page == NULL) { - error_report("map buffered IO page returned error %d", errno); - return -1; - } - } - - if (state->shared_page == NULL || state->buffered_io_page == NULL) { - return -1; - } - - DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn); - - state->bufioreq_remote_port = bufioreq_evtchn; - - return 0; -} - void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory) { MachineState *ms = MACHINE(pcms); unsigned int max_cpus = ms->smp.max_cpus; - int i, rc; + int rc; xen_pfn_t ioreq_pfn; XenIOState *state; - setup_xen_backend_ops(); - state = g_new0(XenIOState, 1); - state->xce_handle = qemu_xen_evtchn_open(); - if (state->xce_handle == NULL) { - perror("xen: event channel open"); - goto err; - } - - state->xenstore = xs_daemon_open(); - if (state->xenstore == NULL) { - perror("xen: xenstore open"); - goto err; - } - - xen_create_ioreq_server(xen_domid, &state->ioservid); - - state->exit.notify = xen_exit_notifier; - qemu_add_exit_notifier(&state->exit); + xen_register_ioreq(state, max_cpus, xen_memory_listener); - state->suspend.notify = xen_suspend_notifier; - qemu_register_suspend_notifier(&state->suspend); + QLIST_INIT(&xen_physmap); + xen_read_physmap(state); - state->wakeup.notify = xen_wakeup_notifier; - qemu_register_wakeup_notifier(&state->wakeup); + suspend.notify = xen_suspend_notifier; + qemu_register_suspend_notifier(&suspend); - /* - * Register wake-up support in QMP query-current-machine API - */ - qemu_register_wakeup_support(); - - rc = xen_map_ioreq_server(state); - if (rc < 0) { - goto err; - } + wakeup.notify = xen_wakeup_notifier; + qemu_register_wakeup_notifier(&wakeup); rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn); if (!rc) { DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn); - state->shared_vmport_page = + shared_vmport_page = xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE, 1, &ioreq_pfn, NULL); - if (state->shared_vmport_page == NULL) { + if (shared_vmport_page == NULL) { error_report("map shared vmport IO page returned error %d handle=%p", errno, xen_xc); goto err; @@ -1452,65 +610,8 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory) goto err; } - /* Note: cpus is empty at this point in init */ - state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus); - - rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true); - if (rc < 0) { - error_report("failed to enable ioreq server info: error %d handle=%p", - errno, xen_xc); - goto err; - } - - state->ioreq_local_port = g_new0(evtchn_port_t, max_cpus); - - /* FIXME: how about if we overflow the page here? 
*/ - for (i = 0; i < max_cpus; i++) { - rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid, - xen_vcpu_eport(state->shared_page, - i)); - if (rc == -1) { - error_report("shared evtchn %d bind error %d", i, errno); - goto err; - } - state->ioreq_local_port[i] = rc; - } - - rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid, - state->bufioreq_remote_port); - if (rc == -1) { - error_report("buffered evtchn bind error %d", errno); - goto err; - } - state->bufioreq_local_port = rc; - - /* Init RAM management */ -#ifdef XEN_COMPAT_PHYSMAP - xen_map_cache_init(xen_phys_offset_to_gaddr, state); -#else - xen_map_cache_init(NULL, state); -#endif xen_ram_init(pcms, ms->ram_size, ram_memory); - qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); - - state->memory_listener = xen_memory_listener; - memory_listener_register(&state->memory_listener, &address_space_memory); - state->log_for_dirtybit = NULL; - - state->io_listener = xen_io_listener; - memory_listener_register(&state->io_listener, &address_space_io); - - state->device_listener = xen_device_listener; - QLIST_INIT(&state->dev_list); - device_listener_register(&state->device_listener); - - xen_bus_init(); - xen_be_init(); - - QLIST_INIT(&xen_physmap); - xen_read_physmap(state); - /* Disable ACPI build because Xen handles it */ pcms->acpi_build_enabled = false; @@ -1521,59 +622,11 @@ err: exit(1); } -void destroy_hvm_domain(bool reboot) -{ - xc_interface *xc_handle; - int sts; - int rc; - - unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff; - - if (xen_dmod) { - rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason); - if (!rc) { - return; - } - if (errno != ENOTTY /* old Xen */) { - perror("xendevicemodel_shutdown failed"); - } - /* well, try the old thing then */ - } - - xc_handle = xc_interface_open(0, 0, 0); - if (xc_handle == NULL) { - fprintf(stderr, "Cannot acquire xenctrl handle\n"); - } else { - sts = xc_domain_shutdown(xc_handle, xen_domid, reason); - if (sts != 0) { - fprintf(stderr, "xc_domain_shutdown failed to issue %s, " - "sts %d, %s\n", reboot ? "reboot" : "poweroff", - sts, strerror(errno)); - } else { - fprintf(stderr, "Issued domain %d %s\n", xen_domid, - reboot ? "reboot" : "poweroff"); - } - xc_interface_close(xc_handle); - } -} - void xen_register_framebuffer(MemoryRegion *mr) { framebuffer = mr; } -void xen_shutdown_fatal_error(const char *fmt, ...) 
-{ - va_list ap; - - va_start(ap, fmt); - vfprintf(stderr, fmt, ap); - va_end(ap); - fprintf(stderr, "Will destroy the domain.\n"); - /* destroy the domain */ - qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR); -} - void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) { if (unlikely(xen_in_migration)) { @@ -1605,3 +658,57 @@ void qmp_xen_set_global_dirty_log(bool enable, Error **errp) memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); } } + +void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section, + bool add) +{ + hwaddr start_addr = section->offset_within_address_space; + ram_addr_t size = int128_get64(section->size); + bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA); + hvmmem_type_t mem_type; + + if (!memory_region_is_ram(section->mr)) { + return; + } + + if (log_dirty != add) { + return; + } + + trace_xen_client_set_memory(start_addr, size, log_dirty); + + start_addr &= TARGET_PAGE_MASK; + size = TARGET_PAGE_ALIGN(size); + + if (add) { + if (!memory_region_is_rom(section->mr)) { + xen_add_to_physmap(state, start_addr, size, + section->mr, section->offset_within_region); + } else { + mem_type = HVMMEM_ram_ro; + if (xen_set_mem_type(xen_domid, mem_type, + start_addr >> TARGET_PAGE_BITS, + size >> TARGET_PAGE_BITS)) { + DPRINTF("xen_set_mem_type error, addr: "HWADDR_FMT_plx"\n", + start_addr); + } + } + } else { + if (xen_remove_from_physmap(state, start_addr, size) < 0) { + DPRINTF("physmapping does not exist at "HWADDR_FMT_plx"\n", start_addr); + } + } +} + +void arch_handle_ioreq(XenIOState *state, ioreq_t *req) +{ + switch (req->type) { + case IOREQ_TYPE_VMWARE_PORT: + handle_vmport_ioreq(state, req); + break; + default: + hw_error("Invalid ioreq type 0x%x\n", req->type); + } + + return; +} diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index 55902e1df7..48d550f633 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -690,7 +690,7 @@ static void ahci_reset_port(AHCIState *s, int port) s->dev[port].port_state = STATE_RUN; if (ide_state->drive_kind == IDE_CD) { - ahci_set_signature(d, SATA_SIGNATURE_CDROM);\ + ahci_set_signature(d, SATA_SIGNATURE_CDROM); ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT; } else { ahci_set_signature(d, SATA_SIGNATURE_DISK); @@ -1509,7 +1509,8 @@ static void ahci_cmd_done(const IDEDMA *dma) ahci_write_fis_d2h(ad); if (ad->port_regs.cmd_issue && !ad->check_bh) { - ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad); + ad->check_bh = qemu_bh_new_guarded(ahci_check_cmd_bh, ad, + &ad->mem_reentrancy_guard); qemu_bh_schedule(ad->check_bh); } } diff --git a/hw/ide/ahci_internal.h b/hw/ide/ahci_internal.h index 303fcd7235..2480455372 100644 --- a/hw/ide/ahci_internal.h +++ b/hw/ide/ahci_internal.h @@ -321,6 +321,7 @@ struct AHCIDevice { bool init_d2h_sent; AHCICmdHdr *cur_cmd; NCQTransferState ncq_tfs[AHCI_MAX_CMDS]; + MemReentrancyGuard mem_reentrancy_guard; }; struct AHCIPCIState { diff --git a/hw/ide/core.c b/hw/ide/core.c index 45d14a25e9..de48ff9f86 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -513,6 +513,7 @@ BlockAIOCB *ide_issue_trim( BlockCompletionFunc *cb, void *cb_opaque, void *opaque) { IDEState *s = opaque; + IDEDevice *dev = s->unit ? 
s->bus->slave : s->bus->master; TrimAIOCB *iocb; /* Paired with a decrement in ide_trim_bh_cb() */ @@ -520,7 +521,8 @@ BlockAIOCB *ide_issue_trim( iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque); iocb->s = s; - iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb); + iocb->bh = qemu_bh_new_guarded(ide_trim_bh_cb, iocb, + &DEVICE(dev)->mem_reentrancy_guard); iocb->ret = 0; iocb->qiov = qiov; iocb->i = -1; diff --git a/hw/ide/meson.build b/hw/ide/meson.build index ddcb3b28d2..e050eef942 100644 --- a/hw/ide/meson.build +++ b/hw/ide/meson.build @@ -1,14 +1,14 @@ -softmmu_ss.add(when: 'CONFIG_AHCI', if_true: files('ahci.c')) -softmmu_ss.add(when: 'CONFIG_AHCI_ICH9', if_true: files('ich.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('ahci-allwinner.c')) -softmmu_ss.add(when: 'CONFIG_IDE_CMD646', if_true: files('cmd646.c')) -softmmu_ss.add(when: 'CONFIG_IDE_CORE', if_true: files('core.c', 'atapi.c')) -softmmu_ss.add(when: 'CONFIG_IDE_ISA', if_true: files('isa.c', 'ioport.c')) -softmmu_ss.add(when: 'CONFIG_IDE_MACIO', if_true: files('macio.c')) -softmmu_ss.add(when: 'CONFIG_IDE_MMIO', if_true: files('mmio.c')) -softmmu_ss.add(when: 'CONFIG_IDE_PCI', if_true: files('pci.c')) -softmmu_ss.add(when: 'CONFIG_IDE_PIIX', if_true: files('piix.c', 'ioport.c')) -softmmu_ss.add(when: 'CONFIG_IDE_QDEV', if_true: files('qdev.c')) -softmmu_ss.add(when: 'CONFIG_IDE_SII3112', if_true: files('sii3112.c')) -softmmu_ss.add(when: 'CONFIG_IDE_VIA', if_true: files('via.c')) -softmmu_ss.add(when: 'CONFIG_MICRODRIVE', if_true: files('microdrive.c')) +system_ss.add(when: 'CONFIG_AHCI', if_true: files('ahci.c')) +system_ss.add(when: 'CONFIG_AHCI_ICH9', if_true: files('ich.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('ahci-allwinner.c')) +system_ss.add(when: 'CONFIG_IDE_CMD646', if_true: files('cmd646.c')) +system_ss.add(when: 'CONFIG_IDE_CORE', if_true: files('core.c', 'atapi.c')) +system_ss.add(when: 'CONFIG_IDE_ISA', if_true: files('isa.c', 'ioport.c')) +system_ss.add(when: 'CONFIG_IDE_MACIO', if_true: files('macio.c')) +system_ss.add(when: 'CONFIG_IDE_MMIO', if_true: files('mmio.c')) +system_ss.add(when: 'CONFIG_IDE_PCI', if_true: files('pci.c')) +system_ss.add(when: 'CONFIG_IDE_PIIX', if_true: files('piix.c', 'ioport.c')) +system_ss.add(when: 'CONFIG_IDE_QDEV', if_true: files('qdev.c')) +system_ss.add(when: 'CONFIG_IDE_SII3112', if_true: files('sii3112.c')) +system_ss.add(when: 'CONFIG_IDE_VIA', if_true: files('via.c')) +system_ss.add(when: 'CONFIG_MICRODRIVE', if_true: files('microdrive.c')) diff --git a/hw/input/meson.build b/hw/input/meson.build index 8deb011d4a..c0d4482180 100644 --- a/hw/input/meson.build +++ b/hw/input/meson.build @@ -1,18 +1,18 @@ -softmmu_ss.add(files('hid.c')) -softmmu_ss.add(when: 'CONFIG_ADB', if_true: files('adb.c', 'adb-mouse.c', 'adb-kbd.c')) -softmmu_ss.add(when: 'CONFIG_ADS7846', if_true: files('ads7846.c')) -softmmu_ss.add(when: 'CONFIG_LM832X', if_true: files('lm832x.c')) -softmmu_ss.add(when: 'CONFIG_PCKBD', if_true: files('pckbd.c')) -softmmu_ss.add(when: 'CONFIG_PL050', if_true: files('pl050.c')) -softmmu_ss.add(when: 'CONFIG_PS2', if_true: files('ps2.c')) -softmmu_ss.add(when: 'CONFIG_STELLARIS_INPUT', if_true: files('stellaris_input.c')) -softmmu_ss.add(when: 'CONFIG_TSC2005', if_true: files('tsc2005.c')) +system_ss.add(files('hid.c')) +system_ss.add(when: 'CONFIG_ADB', if_true: files('adb.c', 'adb-mouse.c', 'adb-kbd.c')) +system_ss.add(when: 'CONFIG_ADS7846', if_true: files('ads7846.c')) +system_ss.add(when: 'CONFIG_LM832X', if_true: 
files('lm832x.c')) +system_ss.add(when: 'CONFIG_PCKBD', if_true: files('pckbd.c')) +system_ss.add(when: 'CONFIG_PL050', if_true: files('pl050.c')) +system_ss.add(when: 'CONFIG_PS2', if_true: files('ps2.c')) +system_ss.add(when: 'CONFIG_STELLARIS_INPUT', if_true: files('stellaris_input.c')) +system_ss.add(when: 'CONFIG_TSC2005', if_true: files('tsc2005.c')) -softmmu_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input.c')) -softmmu_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input-hid.c')) -softmmu_ss.add(when: 'CONFIG_VIRTIO_INPUT_HOST', if_true: files('virtio-input-host.c')) -softmmu_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input.c')) +system_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input.c')) +system_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input-hid.c')) +system_ss.add(when: 'CONFIG_VIRTIO_INPUT_HOST', if_true: files('virtio-input-host.c')) +system_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input.c')) -softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_keypad.c')) -softmmu_ss.add(when: 'CONFIG_TSC210X', if_true: files('tsc210x.c')) -softmmu_ss.add(when: 'CONFIG_LASIPS2', if_true: files('lasips2.c')) +system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_keypad.c')) +system_ss.add(when: 'CONFIG_TSC210X', if_true: files('tsc210x.c')) +system_ss.add(when: 'CONFIG_LASIPS2', if_true: files('lasips2.c')) diff --git a/hw/input/virtio-input-hid.c b/hw/input/virtio-input-hid.c index a7a244a95d..7053ad72d4 100644 --- a/hw/input/virtio-input-hid.c +++ b/hw/input/virtio-input-hid.c @@ -16,9 +16,10 @@ #include "standard-headers/linux/input.h" -#define VIRTIO_ID_NAME_KEYBOARD "QEMU Virtio Keyboard" -#define VIRTIO_ID_NAME_MOUSE "QEMU Virtio Mouse" -#define VIRTIO_ID_NAME_TABLET "QEMU Virtio Tablet" +#define VIRTIO_ID_NAME_KEYBOARD "QEMU Virtio Keyboard" +#define VIRTIO_ID_NAME_MOUSE "QEMU Virtio Mouse" +#define VIRTIO_ID_NAME_TABLET "QEMU Virtio Tablet" +#define VIRTIO_ID_NAME_MULTITOUCH "QEMU Virtio MultiTouch" /* ----------------------------------------------------------------- */ @@ -30,6 +31,7 @@ static const unsigned short keymap_button[INPUT_BUTTON__MAX] = { [INPUT_BUTTON_WHEEL_DOWN] = BTN_GEAR_DOWN, [INPUT_BUTTON_SIDE] = BTN_SIDE, [INPUT_BUTTON_EXTRA] = BTN_EXTRA, + [INPUT_BUTTON_TOUCH] = BTN_TOUCH, }; static const unsigned short axismap_rel[INPUT_AXIS__MAX] = { @@ -42,32 +44,38 @@ static const unsigned short axismap_abs[INPUT_AXIS__MAX] = { [INPUT_AXIS_Y] = ABS_Y, }; +static const unsigned short axismap_tch[INPUT_AXIS__MAX] = { + [INPUT_AXIS_X] = ABS_MT_POSITION_X, + [INPUT_AXIS_Y] = ABS_MT_POSITION_Y, +}; + /* ----------------------------------------------------------------- */ -static void virtio_input_key_config(VirtIOInput *vinput, - const unsigned short *keymap, - size_t mapsize) +static void virtio_input_extend_config(VirtIOInput *vinput, + const unsigned short *map, + size_t mapsize, + uint8_t select, uint8_t subsel) { - virtio_input_config keys; + virtio_input_config ext; int i, bit, byte, bmax = 0; - memset(&keys, 0, sizeof(keys)); + memset(&ext, 0, sizeof(ext)); for (i = 0; i < mapsize; i++) { - bit = keymap[i]; + bit = map[i]; if (!bit) { continue; } byte = bit / 8; bit = bit % 8; - keys.u.bitmap[byte] |= (1 << bit); + ext.u.bitmap[byte] |= (1 << bit); if (bmax < byte+1) { bmax = byte+1; } } - keys.select = VIRTIO_INPUT_CFG_EV_BITS; - keys.subsel = EV_KEY; - keys.size = bmax; - virtio_input_add_config(vinput, &keys); + ext.select = select; + ext.subsel = 
subsel; + ext.size = bmax; + virtio_input_add_config(vinput, &ext); } static void virtio_input_handle_event(DeviceState *dev, QemuConsole *src, @@ -80,6 +88,7 @@ static void virtio_input_handle_event(DeviceState *dev, QemuConsole *src, InputKeyEvent *key; InputMoveEvent *move; InputBtnEvent *btn; + InputMultiTouchEvent *mtt; switch (evt->type) { case INPUT_EVENT_KIND_KEY: @@ -136,6 +145,24 @@ static void virtio_input_handle_event(DeviceState *dev, QemuConsole *src, event.value = cpu_to_le32(move->value); virtio_input_send(vinput, &event); break; + case INPUT_EVENT_KIND_MTT: + mtt = evt->u.mtt.data; + if (mtt->type == INPUT_MULTI_TOUCH_TYPE_DATA) { + event.type = cpu_to_le16(EV_ABS); + event.code = cpu_to_le16(axismap_tch[mtt->axis]); + event.value = cpu_to_le32(mtt->value); + virtio_input_send(vinput, &event); + } else { + event.type = cpu_to_le16(EV_ABS); + event.code = cpu_to_le16(ABS_MT_SLOT); + event.value = cpu_to_le32(mtt->slot); + virtio_input_send(vinput, &event); + event.type = cpu_to_le16(EV_ABS); + event.code = cpu_to_le16(ABS_MT_TRACKING_ID); + event.value = cpu_to_le32(mtt->tracking_id); + virtio_input_send(vinput, &event); + } + break; default: /* keep gcc happy */ break; @@ -281,8 +308,9 @@ static void virtio_keyboard_init(Object *obj) vhid->handler = &virtio_keyboard_handler; virtio_input_init_config(vinput, virtio_keyboard_config); - virtio_input_key_config(vinput, qemu_input_map_qcode_to_linux, - qemu_input_map_qcode_to_linux_len); + virtio_input_extend_config(vinput, qemu_input_map_qcode_to_linux, + qemu_input_map_qcode_to_linux_len, + VIRTIO_INPUT_CFG_EV_BITS, EV_KEY); } static const TypeInfo virtio_keyboard_info = { @@ -373,8 +401,9 @@ static void virtio_mouse_init(Object *obj) virtio_input_init_config(vinput, vhid->wheel_axis ? virtio_mouse_config_v2 : virtio_mouse_config_v1); - virtio_input_key_config(vinput, keymap_button, - ARRAY_SIZE(keymap_button)); + virtio_input_extend_config(vinput, keymap_button, + ARRAY_SIZE(keymap_button), + VIRTIO_INPUT_CFG_EV_BITS, EV_KEY); } static const TypeInfo virtio_mouse_info = { @@ -497,8 +526,9 @@ static void virtio_tablet_init(Object *obj) virtio_input_init_config(vinput, vhid->wheel_axis ? 
virtio_tablet_config_v2 : virtio_tablet_config_v1); - virtio_input_key_config(vinput, keymap_button, - ARRAY_SIZE(keymap_button)); + virtio_input_extend_config(vinput, keymap_button, + ARRAY_SIZE(keymap_button), + VIRTIO_INPUT_CFG_EV_BITS, EV_KEY); } static const TypeInfo virtio_tablet_info = { @@ -511,12 +541,98 @@ static const TypeInfo virtio_tablet_info = { /* ----------------------------------------------------------------- */ +static QemuInputHandler virtio_multitouch_handler = { + .name = VIRTIO_ID_NAME_MULTITOUCH, + .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_MTT, + .event = virtio_input_handle_event, + .sync = virtio_input_handle_sync, +}; + +static struct virtio_input_config virtio_multitouch_config[] = { + { + .select = VIRTIO_INPUT_CFG_ID_NAME, + .size = sizeof(VIRTIO_ID_NAME_MULTITOUCH), + .u.string = VIRTIO_ID_NAME_MULTITOUCH, + },{ + .select = VIRTIO_INPUT_CFG_ID_DEVIDS, + .size = sizeof(struct virtio_input_devids), + .u.ids = { + .bustype = const_le16(BUS_VIRTUAL), + .vendor = const_le16(0x0627), /* same we use for usb hid devices */ + .product = const_le16(0x0003), + .version = const_le16(0x0001), + }, + },{ + .select = VIRTIO_INPUT_CFG_ABS_INFO, + .subsel = ABS_MT_SLOT, + .size = sizeof(virtio_input_absinfo), + .u.abs.min = const_le32(INPUT_EVENT_SLOTS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_SLOTS_MAX), + },{ + .select = VIRTIO_INPUT_CFG_ABS_INFO, + .subsel = ABS_MT_TRACKING_ID, + .size = sizeof(virtio_input_absinfo), + .u.abs.min = const_le32(INPUT_EVENT_SLOTS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_SLOTS_MAX), + },{ + .select = VIRTIO_INPUT_CFG_ABS_INFO, + .subsel = ABS_MT_POSITION_X, + .size = sizeof(virtio_input_absinfo), + .u.abs.min = const_le32(INPUT_EVENT_ABS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_ABS_MAX), + },{ + .select = VIRTIO_INPUT_CFG_ABS_INFO, + .subsel = ABS_MT_POSITION_Y, + .size = sizeof(virtio_input_absinfo), + .u.abs.min = const_le32(INPUT_EVENT_ABS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_ABS_MAX), + }, + { /* end of list */ }, +}; + +static void virtio_multitouch_init(Object *obj) +{ + VirtIOInputHID *vhid = VIRTIO_INPUT_HID(obj); + VirtIOInput *vinput = VIRTIO_INPUT(obj); + unsigned short abs_props[] = { + INPUT_PROP_DIRECT, + }; + unsigned short abs_bits[] = { + ABS_MT_SLOT, + ABS_MT_TRACKING_ID, + ABS_MT_POSITION_X, + ABS_MT_POSITION_Y, + }; + + vhid->handler = &virtio_multitouch_handler; + virtio_input_init_config(vinput, virtio_multitouch_config); + virtio_input_extend_config(vinput, keymap_button, + ARRAY_SIZE(keymap_button), + VIRTIO_INPUT_CFG_EV_BITS, EV_KEY); + virtio_input_extend_config(vinput, abs_props, + ARRAY_SIZE(abs_props), + VIRTIO_INPUT_CFG_PROP_BITS, 0); + virtio_input_extend_config(vinput, abs_bits, + ARRAY_SIZE(abs_bits), + VIRTIO_INPUT_CFG_EV_BITS, EV_ABS); +} + +static const TypeInfo virtio_multitouch_info = { + .name = TYPE_VIRTIO_MULTITOUCH, + .parent = TYPE_VIRTIO_INPUT_HID, + .instance_size = sizeof(VirtIOInputHID), + .instance_init = virtio_multitouch_init, +}; + +/* ----------------------------------------------------------------- */ + static void virtio_register_types(void) { type_register_static(&virtio_input_hid_info); type_register_static(&virtio_keyboard_info); type_register_static(&virtio_mouse_info); type_register_static(&virtio_tablet_info); + type_register_static(&virtio_multitouch_info); } type_init(virtio_register_types) diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c index 8cca124807..d0bf8d545b 100644 --- a/hw/intc/allwinner-a10-pic.c +++ 
b/hw/intc/allwinner-a10-pic.c @@ -49,12 +49,9 @@ static void aw_a10_pic_update(AwA10PICState *s) static void aw_a10_pic_set_irq(void *opaque, int irq, int level) { AwA10PICState *s = opaque; + uint32_t *pending_reg = &s->irq_pending[irq / 32]; - if (level) { - set_bit(irq % 32, (void *)&s->irq_pending[irq / 32]); - } else { - clear_bit(irq % 32, (void *)&s->irq_pending[irq / 32]); - } + *pending_reg = deposit32(*pending_reg, irq % 32, 1, !!level); aw_a10_pic_update(s); } diff --git a/hw/intc/apic.c b/hw/intc/apic.c index 20b5a94073..ac3d47d231 100644 --- a/hw/intc/apic.c +++ b/hw/intc/apic.c @@ -885,6 +885,13 @@ static void apic_realize(DeviceState *dev, Error **errp) memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi", APIC_SPACE_SIZE); + /* + * apic-msi's apic_mem_write can call into ioapic_eoi_broadcast, which can + * write back to apic-msi. As such mark the apic-msi region re-entrancy + * safe. + */ + s->io_memory.disable_reentrancy_guard = true; + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s); local_apics[s->id] = s; diff --git a/hw/intc/loongarch_extioi.c b/hw/intc/loongarch_extioi.c index 4b8ec3f28a..af75460643 100644 --- a/hw/intc/loongarch_extioi.c +++ b/hw/intc/loongarch_extioi.c @@ -254,7 +254,7 @@ static const VMStateDescription vmstate_loongarch_extioi = { .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_UINT32_ARRAY(bounce, LoongArchExtIOI, EXTIOI_IRQS_GROUP_COUNT), - VMSTATE_UINT32_2DARRAY(coreisr, LoongArchExtIOI, LOONGARCH_MAX_VCPUS, + VMSTATE_UINT32_2DARRAY(coreisr, LoongArchExtIOI, EXTIOI_CPUS, EXTIOI_IRQS_GROUP_COUNT), VMSTATE_UINT32_ARRAY(nodetype, LoongArchExtIOI, EXTIOI_IRQS_NODETYPE_COUNT / 2), @@ -276,22 +276,22 @@ static void loongarch_extioi_instance_init(Object *obj) int i, cpu, pin; for (i = 0; i < EXTIOI_IRQS; i++) { - sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]); + sysbus_init_irq(dev, &s->irq[i]); } qdev_init_gpio_in(DEVICE(obj), extioi_setirq, EXTIOI_IRQS); - for (cpu = 0; cpu < LOONGARCH_MAX_VCPUS; cpu++) { + for (cpu = 0; cpu < EXTIOI_CPUS; cpu++) { memory_region_init_io(&s->extioi_iocsr_mem[cpu], OBJECT(s), &extioi_ops, s, "extioi_iocsr", 0x900); - sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->extioi_iocsr_mem[cpu]); + sysbus_init_mmio(dev, &s->extioi_iocsr_mem[cpu]); for (pin = 0; pin < LS3A_INTC_IP; pin++) { qdev_init_gpio_out(DEVICE(obj), &s->parent_irq[cpu][pin], 1); } } memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops, s, "extioi_system_mem", 0x900); - sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->extioi_system_mem); + sysbus_init_mmio(dev, &s->extioi_system_mem); } static void loongarch_extioi_class_init(ObjectClass *klass, void *data) diff --git a/hw/intc/loongarch_ipi.c b/hw/intc/loongarch_ipi.c index aa4bf9eb74..67858b521c 100644 --- a/hw/intc/loongarch_ipi.c +++ b/hw/intc/loongarch_ipi.c @@ -17,6 +17,8 @@ #include "target/loongarch/internals.h" #include "trace.h" +static void loongarch_ipi_writel(void *, hwaddr, uint64_t, unsigned); + static uint64_t loongarch_ipi_readl(void *opaque, hwaddr addr, unsigned size) { IPICore *s = opaque; @@ -50,7 +52,7 @@ static uint64_t loongarch_ipi_readl(void *opaque, hwaddr addr, unsigned size) return ret; } -static void send_ipi_data(CPULoongArchState *env, target_ulong val, target_ulong addr) +static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr) { int i, mask = 0, data = 0; @@ -75,35 +77,74 @@ static void send_ipi_data(CPULoongArchState *env, target_ulong val, target_ulong data, MEMTXATTRS_UNSPECIFIED, NULL); } +static 
int archid_cmp(const void *a, const void *b) +{ + CPUArchId *archid_a = (CPUArchId *)a; + CPUArchId *archid_b = (CPUArchId *)b; + + return archid_a->arch_id - archid_b->arch_id; +} + +static CPUArchId *find_cpu_by_archid(MachineState *ms, uint32_t id) +{ + CPUArchId apic_id, *found_cpu; + + apic_id.arch_id = id; + found_cpu = bsearch(&apic_id, ms->possible_cpus->cpus, + ms->possible_cpus->len, sizeof(*ms->possible_cpus->cpus), + archid_cmp); + + return found_cpu; +} + +static CPUState *ipi_getcpu(int arch_id) +{ + MachineState *machine = MACHINE(qdev_get_machine()); + CPUArchId *archid; + + archid = find_cpu_by_archid(machine, arch_id); + return CPU(archid->cpu); +} + static void ipi_send(uint64_t val) { - int cpuid, data; - CPULoongArchState *env; + uint32_t cpuid; + uint8_t vector; CPUState *cs; LoongArchCPU *cpu; + LoongArchIPI *s; + + cpuid = extract32(val, 16, 10); + if (cpuid >= LOONGARCH_MAX_CPUS) { + trace_loongarch_ipi_unsupported_cpuid("IOCSR_IPI_SEND", cpuid); + return; + } - cpuid = (val >> 16) & 0x3ff; /* IPI status vector */ - data = 1 << (val & 0x1f); - cs = qemu_get_cpu(cpuid); - cpu = LOONGARCH_CPU(cs); - env = &cpu->env; - address_space_stl(&env->address_space_iocsr, 0x1008, - data, MEMTXATTRS_UNSPECIFIED, NULL); + vector = extract8(val, 0, 5); + cs = ipi_getcpu(cpuid); + cpu = LOONGARCH_CPU(cs); + s = LOONGARCH_IPI(cpu->env.ipistate); + loongarch_ipi_writel(&s->ipi_core, CORE_SET_OFF, BIT(vector), 4); } static void mail_send(uint64_t val) { - int cpuid; + uint32_t cpuid; hwaddr addr; CPULoongArchState *env; CPUState *cs; LoongArchCPU *cpu; - cpuid = (val >> 16) & 0x3ff; + cpuid = extract32(val, 16, 10); + if (cpuid >= LOONGARCH_MAX_CPUS) { + trace_loongarch_ipi_unsupported_cpuid("IOCSR_MAIL_SEND", cpuid); + return; + } + addr = 0x1020 + (val & 0x1c); - cs = qemu_get_cpu(cpuid); + cs = ipi_getcpu(cpuid); cpu = LOONGARCH_CPU(cs); env = &cpu->env; send_ipi_data(env, val, addr); @@ -111,14 +152,21 @@ static void mail_send(uint64_t val) static void any_send(uint64_t val) { - int cpuid; + uint32_t cpuid; hwaddr addr; CPULoongArchState *env; + CPUState *cs; + LoongArchCPU *cpu; + + cpuid = extract32(val, 16, 10); + if (cpuid >= LOONGARCH_MAX_CPUS) { + trace_loongarch_ipi_unsupported_cpuid("IOCSR_ANY_SEND", cpuid); + return; + } - cpuid = (val >> 16) & 0x3ff; addr = val & 0xffff; - CPUState *cs = qemu_get_cpu(cpuid); - LoongArchCPU *cpu = LOONGARCH_CPU(cs); + cs = ipi_getcpu(cpuid); + cpu = LOONGARCH_CPU(cs); env = &cpu->env; send_ipi_data(env, val, addr); } @@ -201,51 +249,43 @@ static const MemoryRegionOps loongarch_ipi64_ops = { static void loongarch_ipi_init(Object *obj) { - int cpu; - LoongArchMachineState *lams; LoongArchIPI *s = LOONGARCH_IPI(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - Object *machine = qdev_get_machine(); - ObjectClass *mc = object_get_class(machine); - /* 'lams' should be initialized */ - if (!strcmp(MACHINE_CLASS(mc)->name, "none")) { - return; - } - lams = LOONGARCH_MACHINE(machine); - for (cpu = 0; cpu < MAX_IPI_CORE_NUM; cpu++) { - memory_region_init_io(&s->ipi_iocsr_mem[cpu], obj, &loongarch_ipi_ops, - &lams->ipi_core[cpu], "loongarch_ipi_iocsr", 0x48); - sysbus_init_mmio(sbd, &s->ipi_iocsr_mem[cpu]); - - memory_region_init_io(&s->ipi64_iocsr_mem[cpu], obj, &loongarch_ipi64_ops, - &lams->ipi_core[cpu], "loongarch_ipi64_iocsr", 0x118); - sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem[cpu]); - qdev_init_gpio_out(DEVICE(obj), &lams->ipi_core[cpu].irq, 1); - } + + memory_region_init_io(&s->ipi_iocsr_mem, obj, &loongarch_ipi_ops, + &s->ipi_core, 
"loongarch_ipi_iocsr", 0x48); + + /* loongarch_ipi_iocsr performs re-entrant IO through ipi_send */ + s->ipi_iocsr_mem.disable_reentrancy_guard = true; + + sysbus_init_mmio(sbd, &s->ipi_iocsr_mem); + + memory_region_init_io(&s->ipi64_iocsr_mem, obj, &loongarch_ipi64_ops, + &s->ipi_core, "loongarch_ipi64_iocsr", 0x118); + sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem); + qdev_init_gpio_out(DEVICE(obj), &s->ipi_core.irq, 1); } static const VMStateDescription vmstate_ipi_core = { .name = "ipi-single", - .version_id = 0, - .minimum_version_id = 0, + .version_id = 2, + .minimum_version_id = 2, .fields = (VMStateField[]) { VMSTATE_UINT32(status, IPICore), VMSTATE_UINT32(en, IPICore), VMSTATE_UINT32(set, IPICore), VMSTATE_UINT32(clear, IPICore), - VMSTATE_UINT32_ARRAY(buf, IPICore, MAX_IPI_MBX_NUM * 2), + VMSTATE_UINT32_ARRAY(buf, IPICore, IPI_MBX_NUM * 2), VMSTATE_END_OF_LIST() } }; static const VMStateDescription vmstate_loongarch_ipi = { .name = TYPE_LOONGARCH_IPI, - .version_id = 0, - .minimum_version_id = 0, + .version_id = 1, + .minimum_version_id = 1, .fields = (VMStateField[]) { - VMSTATE_STRUCT_ARRAY(ipi_core, LoongArchMachineState, - MAX_IPI_CORE_NUM, 0, - vmstate_ipi_core, IPICore), + VMSTATE_STRUCT(ipi_core, LoongArchIPI, 0, vmstate_ipi_core, IPICore), VMSTATE_END_OF_LIST() } }; diff --git a/hw/intc/meson.build b/hw/intc/meson.build index 8be459b41c..ed355941d1 100644 --- a/hw/intc/meson.build +++ b/hw/intc/meson.build @@ -1,40 +1,40 @@ -softmmu_ss.add(files('intc.c')) -softmmu_ss.add(when: 'CONFIG_ARM_GIC', if_true: files( +system_ss.add(files('intc.c')) +system_ss.add(when: 'CONFIG_ARM_GIC', if_true: files( 'arm_gic.c', 'arm_gic_common.c', 'arm_gicv2m.c', 'arm_gicv3_common.c', 'arm_gicv3_its_common.c', )) -softmmu_ss.add(when: 'CONFIG_ARM_GICV3_TCG', if_true: files( +system_ss.add(when: 'CONFIG_ARM_GICV3_TCG', if_true: files( 'arm_gicv3.c', 'arm_gicv3_dist.c', 'arm_gicv3_its.c', 'arm_gicv3_redist.c', )) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_A10_PIC', if_true: files('allwinner-a10-pic.c')) -softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_vic.c')) -softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_pic.c')) -softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_gic.c', 'exynos4210_combiner.c')) -softmmu_ss.add(when: 'CONFIG_GOLDFISH_PIC', if_true: files('goldfish_pic.c')) -softmmu_ss.add(when: 'CONFIG_HEATHROW_PIC', if_true: files('heathrow_pic.c')) -softmmu_ss.add(when: 'CONFIG_I8259', if_true: files('i8259_common.c', 'i8259.c')) -softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_avic.c', 'imx_gpcv2.c')) -softmmu_ss.add(when: 'CONFIG_IOAPIC', if_true: files('ioapic_common.c')) -softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_intc.c')) -softmmu_ss.add(when: 'CONFIG_OPENPIC', if_true: files('openpic.c')) -softmmu_ss.add(when: 'CONFIG_PL190', if_true: files('pl190.c')) -softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_ic.c', 'bcm2836_control.c')) -softmmu_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview_gic.c')) -softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_intctl.c')) -softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_intc.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-ipi.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_PMU', if_true: files('xlnx-pmu-iomod-intc.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_A10_PIC', if_true: files('allwinner-a10-pic.c')) +system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_vic.c')) +system_ss.add(when: 
'CONFIG_ETRAXFS', if_true: files('etraxfs_pic.c')) +system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_gic.c', 'exynos4210_combiner.c')) +system_ss.add(when: 'CONFIG_GOLDFISH_PIC', if_true: files('goldfish_pic.c')) +system_ss.add(when: 'CONFIG_HEATHROW_PIC', if_true: files('heathrow_pic.c')) +system_ss.add(when: 'CONFIG_I8259', if_true: files('i8259_common.c', 'i8259.c')) +system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_avic.c', 'imx_gpcv2.c')) +system_ss.add(when: 'CONFIG_IOAPIC', if_true: files('ioapic_common.c')) +system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_intc.c')) +system_ss.add(when: 'CONFIG_OPENPIC', if_true: files('openpic.c')) +system_ss.add(when: 'CONFIG_PL190', if_true: files('pl190.c')) +system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_ic.c', 'bcm2836_control.c')) +system_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview_gic.c')) +system_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_intctl.c')) +system_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_intc.c')) +system_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-ipi.c')) +system_ss.add(when: 'CONFIG_XLNX_ZYNQMP_PMU', if_true: files('xlnx-pmu-iomod-intc.c')) if config_all_devices.has_key('CONFIG_APIC') or \ config_all_devices.has_key('CONFIG_I8259') or \ config_all_devices.has_key('CONFIG_MC146818RTC') - softmmu_ss.add(files('kvm_irqcount.c')) + system_ss.add(files('kvm_irqcount.c')) endif specific_ss.add(when: 'CONFIG_APIC', if_true: files('apic.c', 'apic_common.c')) diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c index 7176d70234..ec1edeb385 100644 --- a/hw/intc/pnv_xive2.c +++ b/hw/intc/pnv_xive2.c @@ -163,7 +163,9 @@ static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type, ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED); if (!(vsd & VSD_ADDRESS_MASK)) { +#ifdef XIVE2_DEBUG xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx); +#endif return 0; } @@ -185,7 +187,9 @@ static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type, MEMTXATTRS_UNSPECIFIED); if (!(vsd & VSD_ADDRESS_MASK)) { +#ifdef XIVE2_DEBUG xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx); +#endif return 0; } @@ -955,6 +959,10 @@ static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset, val = xive->vc_regs[reg]; break; + case VC_ESBC_CFG: + val = xive->vc_regs[reg]; + break; + /* * EAS cache updates (not modeled) */ @@ -1046,6 +1054,9 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, /* ESB update */ break; + case VC_ESBC_CFG: + break; + /* * EAS cache updates (not modeled) */ @@ -1265,6 +1276,9 @@ static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset, case TCTXT_EN1_RESET: val = xive->tctxt_regs[TCTXT_EN1 >> 3]; break; + case TCTXT_CFG: + val = xive->tctxt_regs[reg]; + break; default: xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset); } @@ -1276,6 +1290,7 @@ static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) { PnvXive2 *xive = PNV_XIVE2(opaque); + uint32_t reg = offset >> 3; switch (offset) { /* @@ -1283,6 +1298,7 @@ static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset, */ case TCTXT_EN0: /* Physical Thread Enable */ case TCTXT_EN1: /* Physical Thread Enable (fused core) */ + xive->tctxt_regs[reg] = val; break; case TCTXT_EN0_SET: @@ -1297,7 +1313,9 @@ static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset, case TCTXT_EN1_RESET: xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val; break; 
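                 /*
                  * Note the register-update conventions visible above: TCTXT_EN0
                  * and TCTXT_EN1 now latch the full written value, the *_SET cases
                  * OR the written bits in (write-1-to-set) and the *_RESET cases
                  * mask them out (write-1-to-clear). The TCTXT_CFG case added just
                  * below simply latches the value, matching the new read-side case
                  * that returns xive->tctxt_regs[reg].
                  */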
- + case TCTXT_CFG: + xive->tctxt_regs[reg] = val; + break; default: xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset); return; @@ -1648,6 +1666,8 @@ static void pnv_xive2_tm_write(void *opaque, hwaddr offset, bool gen1_tima_os = xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS; + offset &= TM_ADDRESS_MASK; + /* TODO: should we switch the TM ops table instead ? */ if (!gen1_tima_os && offset == HV_PUSH_OS_CTX_OFFSET) { xive2_tm_push_os_ctx(xptr, tctx, offset, value, size); @@ -1667,6 +1687,8 @@ static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size) bool gen1_tima_os = xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS; + offset &= TM_ADDRESS_MASK; + /* TODO: should we switch the TM ops table instead ? */ if (!gen1_tima_os && offset == HV_PULL_OS_CTX_OFFSET) { return xive2_tm_pull_os_ctx(xptr, tctx, offset, size); diff --git a/hw/intc/pnv_xive2_regs.h b/hw/intc/pnv_xive2_regs.h index 0c096e4adb..7165dc8704 100644 --- a/hw/intc/pnv_xive2_regs.h +++ b/hw/intc/pnv_xive2_regs.h @@ -232,6 +232,10 @@ #define VC_ESBC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(32, 35) #define VC_ESBC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(36, 63) /* 28-bit */ +/* ESBC configuration */ +#define X_VC_ESBC_CFG 0x148 +#define VC_ESBC_CFG 0x240 + /* EASC flush control register */ #define X_VC_EASC_FLUSH_CTRL 0x160 #define VC_EASC_FLUSH_CTRL 0x300 @@ -405,6 +409,10 @@ #define X_TCTXT_EN1_RESET 0x307 #define TCTXT_EN1_RESET 0x038 +/* TCTXT Config register */ +#define X_TCTXT_CFG 0x328 +#define TCTXT_CFG 0x140 + /* * VSD Tables */ diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c index cd7efc4ad4..4bdc6a5d1a 100644 --- a/hw/intc/riscv_aplic.c +++ b/hw/intc/riscv_aplic.c @@ -688,13 +688,13 @@ static void riscv_aplic_write(void *opaque, hwaddr addr, uint64_t value, * domains). 
*/ if (aplic->num_children && - !(aplic->smsicfgaddrH & APLIC_xMSICFGADDRH_L)) { + !(aplic->mmsicfgaddrH & APLIC_xMSICFGADDRH_L)) { aplic->smsicfgaddr = value; } } else if (aplic->mmode && aplic->msimode && (addr == APLIC_SMSICFGADDRH)) { if (aplic->num_children && - !(aplic->smsicfgaddrH & APLIC_xMSICFGADDRH_L)) { + !(aplic->mmsicfgaddrH & APLIC_xMSICFGADDRH_L)) { aplic->smsicfgaddrH = value & APLIC_xMSICFGADDRH_VALID_MASK; } } else if ((APLIC_SETIP_BASE <= addr) && @@ -803,7 +803,7 @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp) aplic->bitfield_words = (aplic->num_irqs + 31) >> 5; aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs); - aplic->state = g_new(uint32_t, aplic->num_irqs); + aplic->state = g_new0(uint32_t, aplic->num_irqs); aplic->target = g_new0(uint32_t, aplic->num_irqs); if (!aplic->msimode) { for (i = 0; i < aplic->num_irqs; i++) { diff --git a/hw/intc/trace-events b/hw/intc/trace-events index 50cadfb996..5c6094c457 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -292,6 +292,7 @@ sh_intc_set(int id, int enable) "setting interrupt group %d to %d" # loongarch_ipi.c loongarch_ipi_read(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%"PRIx64 loongarch_ipi_write(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%"PRIx64 +loongarch_ipi_unsupported_cpuid(const char *s, uint32_t cpuid) "%s unsupported cpuid 0x%" PRIx32 # loongarch_pch_pic.c loongarch_pch_pic_irq_handler(int irq, int level) "irq %d level %d" diff --git a/hw/intc/xive.c b/hw/intc/xive.c index a986b96843..5204c14b87 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -249,7 +249,7 @@ static const uint8_t *xive_tm_views[] = { static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write) { uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; - uint8_t reg_offset = offset & 0x3F; + uint8_t reg_offset = offset & TM_REG_OFFSET; uint8_t reg_mask = write ? 
0x1 : 0x2; uint64_t mask = 0x0; int i; @@ -266,8 +266,8 @@ static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write) static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { - uint8_t ring_offset = offset & 0x30; - uint8_t reg_offset = offset & 0x3F; + uint8_t ring_offset = offset & TM_RING_OFFSET; + uint8_t reg_offset = offset & TM_REG_OFFSET; uint64_t mask = xive_tm_mask(offset, size, true); int i; @@ -296,8 +296,8 @@ static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size) { - uint8_t ring_offset = offset & 0x30; - uint8_t reg_offset = offset & 0x3F; + uint8_t ring_offset = offset & TM_RING_OFFSET; + uint8_t reg_offset = offset & TM_REG_OFFSET; uint64_t mask = xive_tm_mask(offset, size, false); uint64_t ret; int i; @@ -500,7 +500,7 @@ static const XiveTmOp xive_tm_operations[] = { static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write) { uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; - uint32_t op_offset = offset & 0xFFF; + uint32_t op_offset = offset & TM_ADDRESS_MASK; int i; for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) { @@ -534,7 +534,7 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, /* * First, check for special operations in the 2K region */ - if (offset & 0x800) { + if (offset & TM_SPECIAL_OP) { xto = xive_tm_find_op(offset, size, true); if (!xto) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA " @@ -573,7 +573,7 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, /* * First, check for special operations in the 2K region */ - if (offset & 0x800) { + if (offset & TM_SPECIAL_OP) { xto = xive_tm_find_op(offset, size, false); if (!xto) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA" diff --git a/hw/ipack/meson.build b/hw/ipack/meson.build index 3f8138b6f2..26567f1068 100644 --- a/hw/ipack/meson.build +++ b/hw/ipack/meson.build @@ -1 +1 @@ -softmmu_ss.add(when: 'CONFIG_IPACK', if_true: files('ipack.c', 'tpci200.c')) +system_ss.add(when: 'CONFIG_IPACK', if_true: files('ipack.c', 'tpci200.c')) diff --git a/hw/ipmi/meson.build b/hw/ipmi/meson.build index 9622ea2a2c..07f109d365 100644 --- a/hw/ipmi/meson.build +++ b/hw/ipmi/meson.build @@ -8,4 +8,4 @@ ipmi_ss.add(when: 'CONFIG_ISA_IPMI_BT', if_true: files('isa_ipmi_bt.c')) ipmi_ss.add(when: 'CONFIG_PCI_IPMI_BT', if_true: files('pci_ipmi_bt.c')) ipmi_ss.add(when: 'CONFIG_IPMI_SSIF', if_true: files('smbus_ipmi.c')) -softmmu_ss.add_all(when: 'CONFIG_IPMI', if_true: ipmi_ss) +system_ss.add_all(when: 'CONFIG_IPMI', if_true: ipmi_ss) diff --git a/hw/isa/Kconfig b/hw/isa/Kconfig index 0156a66889..c10cbc5fc1 100644 --- a/hw/isa/Kconfig +++ b/hw/isa/Kconfig @@ -35,6 +35,7 @@ config PIIX3 bool select I8257 select ISA_BUS + select MC146818RTC config PIIX4 bool @@ -79,3 +80,4 @@ config LPC_ICH9 select I8257 select ISA_BUS select ACPI_ICH9 + select MC146818RTC diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c index 5432ab5065..63e0857208 100644 --- a/hw/isa/i82378.c +++ b/hw/isa/i82378.c @@ -34,7 +34,6 @@ struct I82378State { qemu_irq cpu_intr; qemu_irq *isa_irqs_in; - MemoryRegion io; }; static const VMStateDescription vmstate_i82378 = { diff --git a/hw/isa/isa-superio.c b/hw/isa/isa-superio.c index c81bfe58ef..7dbfc374da 100644 --- a/hw/isa/isa-superio.c +++ b/hw/isa/isa-superio.c @@ -16,10 +16,12 @@ #include "qapi/error.h" #include "sysemu/blockdev.h" #include 
"chardev/char.h" +#include "hw/char/parallel.h" #include "hw/block/fdc.h" #include "hw/isa/superio.h" #include "hw/qdev-properties.h" #include "hw/input/i8042.h" +#include "hw/char/parallel-isa.h" #include "hw/char/serial.h" #include "trace.h" @@ -51,7 +53,7 @@ static void isa_superio_realize(DeviceState *dev, Error **errp) } else { name = g_strdup_printf("parallel%d", i); } - isa = isa_new("isa-parallel"); + isa = isa_new(TYPE_ISA_PARALLEL); d = DEVICE(isa); qdev_prop_set_uint32(d, "index", i); if (k->parallel.get_iobase) { diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c index 9714b0001e..9c47a2f6c7 100644 --- a/hw/isa/lpc_ich9.c +++ b/hw/isa/lpc_ich9.c @@ -658,6 +658,8 @@ static void ich9_lpc_initfn(Object *obj) static const uint8_t acpi_enable_cmd = ICH9_APM_ACPI_ENABLE; static const uint8_t acpi_disable_cmd = ICH9_APM_ACPI_DISABLE; + object_initialize_child(obj, "rtc", &lpc->rtc, TYPE_MC146818_RTC); + object_property_add_uint8_ptr(obj, ACPI_PM_PROP_SCI_INT, &lpc->sci_gsi, OBJ_PROP_FLAG_READ); object_property_add_uint8_ptr(OBJECT(lpc), ACPI_PM_PROP_ACPI_ENABLE_CMD, @@ -723,6 +725,12 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp) i8257_dma_init(isa_bus, 0); + /* RTC */ + qdev_prop_set_int32(DEVICE(&lpc->rtc), "base_year", 2000); + if (!qdev_realize(DEVICE(&lpc->rtc), BUS(isa_bus), errp)) { + return; + } + pci_bus_irqs(pci_bus, ich9_lpc_set_irq, d, ICH9_LPC_NB_PIRQS); pci_bus_map_irqs(pci_bus, ich9_lpc_map_irq); pci_bus_set_route_irq_fn(pci_bus, ich9_route_intx_pin_to_irq); diff --git a/hw/isa/meson.build b/hw/isa/meson.build index 8bf678ca0a..b855e81276 100644 --- a/hw/isa/meson.build +++ b/hw/isa/meson.build @@ -1,11 +1,11 @@ -softmmu_ss.add(when: 'CONFIG_APM', if_true: files('apm.c')) -softmmu_ss.add(when: 'CONFIG_I82378', if_true: files('i82378.c')) -softmmu_ss.add(when: 'CONFIG_ISA_BUS', if_true: files('isa-bus.c')) -softmmu_ss.add(when: 'CONFIG_ISA_SUPERIO', if_true: files('isa-superio.c')) -softmmu_ss.add(when: 'CONFIG_PC87312', if_true: files('pc87312.c')) -softmmu_ss.add(when: 'CONFIG_PIIX3', if_true: files('piix3.c')) -softmmu_ss.add(when: 'CONFIG_PIIX4', if_true: files('piix4.c')) -softmmu_ss.add(when: 'CONFIG_SMC37C669', if_true: files('smc37c669-superio.c')) -softmmu_ss.add(when: 'CONFIG_VT82C686', if_true: files('vt82c686.c')) +system_ss.add(when: 'CONFIG_APM', if_true: files('apm.c')) +system_ss.add(when: 'CONFIG_I82378', if_true: files('i82378.c')) +system_ss.add(when: 'CONFIG_ISA_BUS', if_true: files('isa-bus.c')) +system_ss.add(when: 'CONFIG_ISA_SUPERIO', if_true: files('isa-superio.c')) +system_ss.add(when: 'CONFIG_PC87312', if_true: files('pc87312.c')) +system_ss.add(when: 'CONFIG_PIIX3', if_true: files('piix3.c')) +system_ss.add(when: 'CONFIG_PIIX4', if_true: files('piix4.c')) +system_ss.add(when: 'CONFIG_SMC37C669', if_true: files('smc37c669-superio.c')) +system_ss.add(when: 'CONFIG_VT82C686', if_true: files('vt82c686.c')) specific_ss.add(when: 'CONFIG_LPC_ICH9', if_true: files('lpc_ich9.c')) diff --git a/hw/isa/piix3.c b/hw/isa/piix3.c index a9cb39bf21..117024e450 100644 --- a/hw/isa/piix3.c +++ b/hw/isa/piix3.c @@ -28,14 +28,12 @@ #include "hw/dma/i8257.h" #include "hw/southbridge/piix.h" #include "hw/irq.h" +#include "hw/qdev-properties.h" #include "hw/isa/isa.h" -#include "hw/xen/xen.h" #include "sysemu/runstate.h" #include "migration/vmstate.h" #include "hw/acpi/acpi_aml_interface.h" -#define XEN_PIIX_NUM_PIRQS 128ULL - static void piix3_set_irq_pic(PIIX3State *piix3, int pic_irq) { qemu_set_irq(piix3->pic[pic_irq], @@ -123,26 +121,6 @@ static 
void piix3_write_config(PCIDevice *dev, } } -static void piix3_write_config_xen(PCIDevice *dev, - uint32_t address, uint32_t val, int len) -{ - int i; - - /* Scan for updates to PCI link routes (0x60-0x63). */ - for (i = 0; i < len; i++) { - uint8_t v = (val >> (8 * i)) & 0xff; - if (v & 0x80) { - v = 0; - } - v &= 0xf; - if (((address + i) >= PIIX_PIRQCA) && ((address + i) <= PIIX_PIRQCD)) { - xen_set_pci_link_route(address + i - PIIX_PIRQCA, v); - } - } - - piix3_write_config(dev, address, val, len); -} - static void piix3_reset(DeviceState *dev) { PIIX3State *d = PIIX3_PCI_DEVICE(dev); @@ -301,6 +279,12 @@ static void pci_piix3_realize(PCIDevice *dev, Error **errp) PIIX_RCR_IOPORT, &d->rcr_mem, 1); i8257_dma_init(isa_bus, 0); + + /* RTC */ + qdev_prop_set_int32(DEVICE(&d->rtc), "base_year", 2000); + if (!qdev_realize(DEVICE(&d->rtc), BUS(isa_bus), errp)) { + return; + } } static void build_pci_isa_aml(AcpiDevAmlIf *adev, Aml *scope) @@ -324,12 +308,20 @@ static void build_pci_isa_aml(AcpiDevAmlIf *adev, Aml *scope) qbus_build_aml(bus, scope); } +static void pci_piix3_init(Object *obj) +{ + PIIX3State *d = PIIX3_PCI_DEVICE(obj); + + object_initialize_child(obj, "rtc", &d->rtc, TYPE_MC146818_RTC); +} + static void pci_piix3_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); + k->config_write = piix3_write_config; dc->reset = piix3_reset; dc->desc = "ISA bridge"; dc->vmsd = &vmstate_piix3; @@ -350,6 +342,7 @@ static const TypeInfo piix3_pci_type_info = { .name = TYPE_PIIX3_PCI_DEVICE, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PIIX3State), + .instance_init = pci_piix3_init, .abstract = true, .class_init = pci_piix3_class_init, .interfaces = (InterfaceInfo[]) { @@ -378,7 +371,6 @@ static void piix3_class_init(ObjectClass *klass, void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - k->config_write = piix3_write_config; k->realize = piix3_realize; } @@ -388,45 +380,10 @@ static const TypeInfo piix3_info = { .class_init = piix3_class_init, }; -static void piix3_xen_realize(PCIDevice *dev, Error **errp) -{ - ERRP_GUARD(); - PIIX3State *piix3 = PIIX3_PCI_DEVICE(dev); - PCIBus *pci_bus = pci_get_bus(dev); - - pci_piix3_realize(dev, errp); - if (*errp) { - return; - } - - /* - * Xen supports additional interrupt routes from the PCI devices to - * the IOAPIC: the four pins of each PCI device on the bus are also - * connected to the IOAPIC directly. - * These additional routes can be discovered through ACPI. 
- */ - pci_bus_irqs(pci_bus, xen_piix3_set_irq, piix3, XEN_PIIX_NUM_PIRQS); -} - -static void piix3_xen_class_init(ObjectClass *klass, void *data) -{ - PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - - k->config_write = piix3_write_config_xen; - k->realize = piix3_xen_realize; -} - -static const TypeInfo piix3_xen_info = { - .name = TYPE_PIIX3_XEN_DEVICE, - .parent = TYPE_PIIX3_PCI_DEVICE, - .class_init = piix3_xen_class_init, -}; - static void piix3_register_types(void) { type_register_static(&piix3_pci_type_info); type_register_static(&piix3_info); - type_register_static(&piix3_xen_info); } type_init(piix3_register_types) diff --git a/hw/loongarch/Kconfig b/hw/loongarch/Kconfig index eb112af990..1e7c5b43c5 100644 --- a/hw/loongarch/Kconfig +++ b/hw/loongarch/Kconfig @@ -21,3 +21,4 @@ config LOONGARCH_VIRT select FW_CFG_DMA select DIMM select PFLASH_CFI01 + select ACPI_HMAT diff --git a/hw/loongarch/acpi-build.c b/hw/loongarch/acpi-build.c index 8e3ce07367..0b62c3a2f7 100644 --- a/hw/loongarch/acpi-build.c +++ b/hw/loongarch/acpi-build.c @@ -34,6 +34,7 @@ #include "sysemu/tpm.h" #include "hw/platform-bus.h" #include "hw/acpi/aml-build.h" +#include "hw/acpi/hmat.h" #define ACPI_BUILD_ALIGN_SIZE 0x1000 #define ACPI_BUILD_TABLE_SIZE 0x20000 @@ -107,7 +108,9 @@ static void build_madt(GArray *table_data, BIOSLinker *linker, LoongArchMachineState *lams) { MachineState *ms = MACHINE(lams); - int i; + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); + int i, arch_id; AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = lams->oem_id, .oem_table_id = lams->oem_table_id }; @@ -117,13 +120,15 @@ build_madt(GArray *table_data, BIOSLinker *linker, LoongArchMachineState *lams) build_append_int_noprefix(table_data, 0, 4); build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */ - for (i = 0; i < ms->smp.cpus; i++) { + for (i = 0; i < arch_ids->len; i++) { /* Processor Core Interrupt Controller Structure */ + arch_id = arch_ids->cpus[i].arch_id; + build_append_int_noprefix(table_data, 17, 1); /* Type */ build_append_int_noprefix(table_data, 15, 1); /* Length */ build_append_int_noprefix(table_data, 1, 1); /* Version */ build_append_int_noprefix(table_data, i + 1, 4); /* ACPI Processor ID */ - build_append_int_noprefix(table_data, i, 4); /* Core ID */ + build_append_int_noprefix(table_data, arch_id, 4); /* Core ID */ build_append_int_noprefix(table_data, 1, 4); /* Flags */ } @@ -159,9 +164,12 @@ build_madt(GArray *table_data, BIOSLinker *linker, LoongArchMachineState *lams) static void build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) { - uint64_t i; + int i, arch_id, node_id; + uint64_t mem_len, mem_base; + int nb_numa_nodes = machine->numa_state->num_nodes; LoongArchMachineState *lams = LOONGARCH_MACHINE(machine); - MachineState *ms = MACHINE(lams); + MachineClass *mc = MACHINE_GET_CLASS(lams); + const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine); AcpiTable table = { .sig = "SRAT", .rev = 1, .oem_id = lams->oem_id, .oem_table_id = lams->oem_table_id }; @@ -169,13 +177,16 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) build_append_int_noprefix(table_data, 1, 4); /* Reserved */ build_append_int_noprefix(table_data, 0, 8); /* Reserved */ - for (i = 0; i < ms->smp.cpus; ++i) { + for (i = 0; i < arch_ids->len; ++i) { + arch_id = arch_ids->cpus[i].arch_id; + node_id = arch_ids->cpus[i].props.node_id; + /* Processor Local APIC/SAPIC Affinity Structure */ 
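             /*
              * The appends that follow emit this 16-byte SRAT entry field by
              * field: Type (0), Length (16), the low byte of the Proximity
              * Domain (which with this change carries the CPU's node_id instead
              * of a hard-coded 0), the APIC ID (now the arch_id taken from
              * possible_cpus rather than the loop index), the Flags word
              * (1 = enabled), the Local SAPIC EID and the remaining reserved
              * bytes.
              */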
build_append_int_noprefix(table_data, 0, 1); /* Type */ build_append_int_noprefix(table_data, 16, 1); /* Length */ /* Proximity Domain [7:0] */ - build_append_int_noprefix(table_data, 0, 1); - build_append_int_noprefix(table_data, i, 1); /* APIC ID */ + build_append_int_noprefix(table_data, node_id, 1); + build_append_int_noprefix(table_data, arch_id, 1); /* APIC ID */ /* Flags, Table 5-36 */ build_append_int_noprefix(table_data, 1, 4); build_append_int_noprefix(table_data, 0, 1); /* Local SAPIC EID */ @@ -184,16 +195,36 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) build_append_int_noprefix(table_data, 0, 4); /* Reserved */ } + /* Node0 */ build_srat_memory(table_data, VIRT_LOWMEM_BASE, VIRT_LOWMEM_SIZE, 0, MEM_AFFINITY_ENABLED); + mem_base = VIRT_HIGHMEM_BASE; + if (!nb_numa_nodes) { + mem_len = machine->ram_size - VIRT_LOWMEM_SIZE; + } else { + mem_len = machine->numa_state->nodes[0].node_mem - VIRT_LOWMEM_SIZE; + } + if (mem_len) + build_srat_memory(table_data, mem_base, mem_len, 0, MEM_AFFINITY_ENABLED); + + /* Node1 - Nodemax */ + if (nb_numa_nodes) { + mem_base += mem_len; + for (i = 1; i < nb_numa_nodes; ++i) { + if (machine->numa_state->nodes[i].node_mem > 0) { + build_srat_memory(table_data, mem_base, + machine->numa_state->nodes[i].node_mem, i, + MEM_AFFINITY_ENABLED); + mem_base += machine->numa_state->nodes[i].node_mem; + } + } + } - build_srat_memory(table_data, VIRT_HIGHMEM_BASE, machine->ram_size - VIRT_LOWMEM_SIZE, - 0, MEM_AFFINITY_ENABLED); - - if (ms->device_memory) { - build_srat_memory(table_data, ms->device_memory->base, - memory_region_size(&ms->device_memory->mr), - 0, MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); + if (machine->device_memory) { + build_srat_memory(table_data, machine->device_memory->base, + memory_region_size(&machine->device_memory->mr), + nb_numa_nodes - 1, + MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); } acpi_table_end(linker, &table); @@ -407,8 +438,25 @@ static void acpi_build(AcpiBuildTables *tables, MachineState *machine) build_madt(tables_blob, tables->linker, lams); acpi_add_table(table_offsets, tables_blob); + build_pptt(tables_blob, tables->linker, machine, + lams->oem_id, lams->oem_table_id); + + acpi_add_table(table_offsets, tables_blob); build_srat(tables_blob, tables->linker, machine); + if (machine->numa_state->num_nodes) { + if (machine->numa_state->have_numa_distance) { + acpi_add_table(table_offsets, tables_blob); + build_slit(tables_blob, tables->linker, machine, lams->oem_id, + lams->oem_table_id); + } + if (machine->numa_state->hmat_enabled) { + acpi_add_table(table_offsets, tables_blob); + build_hmat(tables_blob, tables->linker, machine->numa_state, + lams->oem_id, lams->oem_table_id); + } + } + acpi_add_table(table_offsets, tables_blob); { AcpiMcfgInfo mcfg = { diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c index f4bf14c1c8..ca8824b6ef 100644 --- a/hw/loongarch/virt.c +++ b/hw/loongarch/virt.c @@ -164,11 +164,16 @@ static void fdt_add_cpu_nodes(const LoongArchMachineState *lams) for (num = smp_cpus - 1; num >= 0; num--) { char *nodename = g_strdup_printf("/cpus/cpu@%d", num); LoongArchCPU *cpu = LOONGARCH_CPU(qemu_get_cpu(num)); + CPUState *cs = CPU(cpu); qemu_fdt_add_subnode(ms->fdt, nodename); qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu"); qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", cpu->dtb_compatible); + if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) { + qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", + 
ms->possible_cpus->cpus[cs->cpu_index].props.node_id); + } qemu_fdt_setprop_cell(ms->fdt, nodename, "reg", num); qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", qemu_fdt_alloc_phandle(ms->fdt)); @@ -280,6 +285,22 @@ static void fdt_add_irqchip_node(LoongArchMachineState *lams) g_free(nodename); } +static void fdt_add_memory_node(MachineState *ms, + uint64_t base, uint64_t size, int node_id) +{ + char *nodename = g_strdup_printf("/memory@%" PRIx64, base); + + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 2, base, 2, size); + qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "memory"); + + if (ms->numa_state && ms->numa_state->num_nodes) { + qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", node_id); + } + + g_free(nodename); +} + #define PM_BASE 0x10080000 #define PM_SIZE 0x100 #define PM_CTRL 0x10 @@ -474,6 +495,7 @@ static DeviceState *create_platform_bus(DeviceState *pch_pic) static void loongarch_devices_init(DeviceState *pch_pic, LoongArchMachineState *lams) { + MachineClass *mc = MACHINE_GET_CLASS(lams); DeviceState *gpex_dev; SysBusDevice *d; PCIBus *pci_bus; @@ -528,7 +550,7 @@ static void loongarch_devices_init(DeviceState *pch_pic, LoongArchMachineState * NICInfo *nd = &nd_table[i]; if (!nd->model) { - nd->model = g_strdup("virtio"); + nd->model = g_strdup(mc->default_nic); } pci_nic_init_nofail(nd, pci_bus, nd->model, NULL); @@ -565,9 +587,6 @@ static void loongarch_irq_init(LoongArchMachineState *lams) CPUState *cpu_state; int cpu, pin, i, start, num; - ipi = qdev_new(TYPE_LOONGARCH_IPI); - sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); - extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal); @@ -598,26 +617,35 @@ static void loongarch_irq_init(LoongArchMachineState *lams) lacpu = LOONGARCH_CPU(cpu_state); env = &(lacpu->env); + ipi = qdev_new(TYPE_LOONGARCH_IPI); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); + /* connect ipi irq to cpu irq */ - qdev_connect_gpio_out(ipi, cpu, qdev_get_gpio_in(cpudev, IRQ_IPI)); + qdev_connect_gpio_out(ipi, 0, qdev_get_gpio_in(cpudev, IRQ_IPI)); /* IPI iocsr memory region */ memory_region_add_subregion(&env->system_iocsr, SMP_IPI_MAILBOX, sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), - cpu * 2)); + 0)); memory_region_add_subregion(&env->system_iocsr, MAIL_SEND_ADDR, sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), - cpu * 2 + 1)); - /* extioi iocsr memory region */ - memory_region_add_subregion(&env->system_iocsr, APIC_BASE, + 1)); + /* + * extioi iocsr memory region + * only one extioi is added on loongarch virt machine + * external device interrupt can only be routed to cpu 0-3 + */ + if (cpu < EXTIOI_CPUS) + memory_region_add_subregion(&env->system_iocsr, APIC_BASE, sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), cpu)); + env->ipistate = ipi; } /* * connect ext irq to the cpu irq * cpu_pin[9:2] <= intc_pin[7:0] */ - for (cpu = 0; cpu < ms->smp.cpus; cpu++) { + for (cpu = 0; cpu < MIN(ms->smp.cpus, EXTIOI_CPUS); cpu++) { cpudev = DEVICE(qemu_get_cpu(cpu)); for (pin = 0; pin < LS3A_INTC_IP; pin++) { qdev_connect_gpio_out(extioi, (cpu * 8 + pin), @@ -760,11 +788,17 @@ static void loongarch_init(MachineState *machine) const char *cpu_model = machine->cpu_type; ram_addr_t offset = 0; ram_addr_t ram_size = machine->ram_size; - uint64_t highram_size = 0; + uint64_t highram_size = 0, phyAddr = 0; MemoryRegion *address_space_mem = get_system_memory(); LoongArchMachineState *lams = LOONGARCH_MACHINE(machine); + int 
nb_numa_nodes = machine->numa_state->num_nodes; + NodeInfo *numa_info = machine->numa_state->nodes; int i; hwaddr fdt_base; + const CPUArchIdList *possible_cpus; + MachineClass *mc = MACHINE_GET_CLASS(machine); + CPUState *cpu; + char *ramName = NULL; if (!cpu_model) { cpu_model = LOONGARCH_CPU_TYPE_NAME("la464"); @@ -781,21 +815,51 @@ static void loongarch_init(MachineState *machine) } create_fdt(lams); /* Init CPUs */ - for (i = 0; i < machine->smp.cpus; i++) { - cpu_create(machine->cpu_type); + + possible_cpus = mc->possible_cpu_arch_ids(machine); + for (i = 0; i < possible_cpus->len; i++) { + cpu = cpu_create(machine->cpu_type); + cpu->cpu_index = i; + machine->possible_cpus->cpus[i].cpu = OBJECT(cpu); } fdt_add_cpu_nodes(lams); - /* Add memory region */ - memory_region_init_alias(&lams->lowmem, NULL, "loongarch.lowram", - machine->ram, 0, 256 * MiB); - memory_region_add_subregion(address_space_mem, offset, &lams->lowmem); - offset += 256 * MiB; - memmap_add_entry(0, 256 * MiB, 1); - highram_size = ram_size - 256 * MiB; - memory_region_init_alias(&lams->highmem, NULL, "loongarch.highmem", - machine->ram, offset, highram_size); - memory_region_add_subregion(address_space_mem, 0x90000000, &lams->highmem); - memmap_add_entry(0x90000000, highram_size, 1); + + /* Node0 memory */ + memmap_add_entry(VIRT_LOWMEM_BASE, VIRT_LOWMEM_SIZE, 1); + fdt_add_memory_node(machine, VIRT_LOWMEM_BASE, VIRT_LOWMEM_SIZE, 0); + memory_region_init_alias(&lams->lowmem, NULL, "loongarch.node0.lowram", + machine->ram, offset, VIRT_LOWMEM_SIZE); + memory_region_add_subregion(address_space_mem, phyAddr, &lams->lowmem); + + offset += VIRT_LOWMEM_SIZE; + if (nb_numa_nodes > 0) { + assert(numa_info[0].node_mem > VIRT_LOWMEM_SIZE); + highram_size = numa_info[0].node_mem - VIRT_LOWMEM_SIZE; + } else { + highram_size = ram_size - VIRT_LOWMEM_SIZE; + } + phyAddr = VIRT_HIGHMEM_BASE; + memmap_add_entry(phyAddr, highram_size, 1); + fdt_add_memory_node(machine, phyAddr, highram_size, 0); + memory_region_init_alias(&lams->highmem, NULL, "loongarch.node0.highram", + machine->ram, offset, highram_size); + memory_region_add_subregion(address_space_mem, phyAddr, &lams->highmem); + + /* Node1 - Nodemax memory */ + offset += highram_size; + phyAddr += highram_size; + + for (i = 1; i < nb_numa_nodes; i++) { + MemoryRegion *nodemem = g_new(MemoryRegion, 1); + ramName = g_strdup_printf("loongarch.node%d.ram", i); + memory_region_init_alias(nodemem, NULL, ramName, machine->ram, + offset, numa_info[i].node_mem); + memory_region_add_subregion(address_space_mem, phyAddr, nodemem); + memmap_add_entry(phyAddr, numa_info[i].node_mem, 1); + fdt_add_memory_node(machine, phyAddr, numa_info[i].node_mem, i); + offset += numa_info[i].node_mem; + phyAddr += numa_info[i].node_mem; + } /* initialize device memory address space */ if (machine->ram_size < machine->maxram_size) { @@ -1016,6 +1080,58 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, return NULL; } +static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) +{ + int n; + unsigned int max_cpus = ms->smp.max_cpus; + + if (ms->possible_cpus) { + assert(ms->possible_cpus->len == max_cpus); + return ms->possible_cpus; + } + + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + + sizeof(CPUArchId) * max_cpus); + ms->possible_cpus->len = max_cpus; + for (n = 0; n < ms->possible_cpus->len; n++) { + ms->possible_cpus->cpus[n].type = ms->cpu_type; + ms->possible_cpus->cpus[n].arch_id = n; + + ms->possible_cpus->cpus[n].props.has_socket_id = true; + 
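        /*
         * The topology ids assigned below are decoded from the flat index n:
         * socket_id = n / (cores * threads), core_id = (n / threads) % cores,
         * thread_id = n % threads. As an illustration (not taken from the
         * patch): with sockets=2, cores=2, threads=2, index n = 5 yields
         * socket 1, core 0, thread 1.
         */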
ms->possible_cpus->cpus[n].props.socket_id = + n / (ms->smp.cores * ms->smp.threads); + ms->possible_cpus->cpus[n].props.has_core_id = true; + ms->possible_cpus->cpus[n].props.core_id = + n / ms->smp.threads % ms->smp.cores; + ms->possible_cpus->cpus[n].props.has_thread_id = true; + ms->possible_cpus->cpus[n].props.thread_id = n % ms->smp.threads; + } + return ms->possible_cpus; +} + +static CpuInstanceProperties +virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; +} + +static int64_t virt_get_default_cpu_node_id(const MachineState *ms, int idx) +{ + int64_t nidx = 0; + + if (ms->numa_state->num_nodes) { + nidx = idx / (ms->smp.cpus / ms->numa_state->num_nodes); + if (ms->numa_state->num_nodes <= nidx) { + nidx = ms->numa_state->num_nodes - 1; + } + } + return nidx; +} + static void loongarch_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1026,13 +1142,20 @@ static void loongarch_class_init(ObjectClass *oc, void *data) mc->default_ram_size = 1 * GiB; mc->default_cpu_type = LOONGARCH_CPU_TYPE_NAME("la464"); mc->default_ram_id = "loongarch.ram"; - mc->max_cpus = LOONGARCH_MAX_VCPUS; + mc->max_cpus = LOONGARCH_MAX_CPUS; mc->is_default = 1; mc->default_kernel_irqchip_split = false; mc->block_default_type = IF_VIRTIO; mc->default_boot_order = "c"; mc->no_cdrom = 1; + mc->possible_cpu_arch_ids = virt_possible_cpu_arch_ids; + mc->cpu_index_to_instance_props = virt_cpu_index_to_props; + mc->get_default_cpu_node_id = virt_get_default_cpu_node_id; + mc->numa_mem_supported = true; + mc->auto_enable_numa_with_memhp = true; + mc->auto_enable_numa_with_memdev = true; mc->get_hotplug_handler = virt_machine_get_hotplug_handler; + mc->default_nic = "virtio-net-pci"; hc->plug = loongarch_machine_device_plug_cb; hc->pre_plug = virt_machine_device_pre_plug; hc->unplug_request = virt_machine_device_unplug_request; diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c index b35ecafbc7..68f0cd8cac 100644 --- a/hw/m68k/q800.c +++ b/hw/m68k/q800.c @@ -525,7 +525,7 @@ static void q800_init(MachineState *machine) qdev_realize_and_unref(escc_orgate, NULL, &error_fatal); sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(escc_orgate, 0)); sysbus_connect_irq(sysbus, 1, qdev_get_gpio_in(escc_orgate, 1)); - qdev_connect_gpio_out(DEVICE(escc_orgate), 0, + qdev_connect_gpio_out(escc_orgate, 0, qdev_get_gpio_in(glue, GLUE_IRQ_IN_ESCC)); sysbus_mmio_map(sysbus, 0, SCC_BASE); diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c index abe60b362c..2adacbd01b 100644 --- a/hw/mem/cxl_type3.c +++ b/hw/mem/cxl_type3.c @@ -31,7 +31,8 @@ enum { }; static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, - int dsmad_handle, MemoryRegion *mr) + int dsmad_handle, MemoryRegion *mr, + bool is_pmem, uint64_t dpa_base) { g_autofree CDATDsmas *dsmas = NULL; g_autofree CDATDslbis *dslbis0 = NULL; @@ -50,9 +51,9 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, .length = sizeof(*dsmas), }, .DSMADhandle = dsmad_handle, - .flags = CDAT_DSMAS_FLAG_NV, - .DPA_base = 0, - .DPA_length = int128_get64(mr->size), + .flags = is_pmem ? 
CDAT_DSMAS_FLAG_NV : 0, + .DPA_base = dpa_base, + .DPA_length = memory_region_size(mr), }; /* For now, no memory side cache, plausiblish numbers */ @@ -130,10 +131,13 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, .length = sizeof(*dsemts), }, .DSMAS_handle = dsmad_handle, - /* Reserved - the non volatile from DSMAS matters */ - .EFI_memory_type_attr = 2, + /* + * NV: Reserved - the non volatile from DSMAS matters + * V: EFI_MEMORY_SP + */ + .EFI_memory_type_attr = is_pmem ? 2 : 1, .DPA_offset = 0, - .DPA_length = int128_get64(mr->size), + .DPA_length = memory_region_size(mr), }; /* Header always at start of structure */ @@ -150,33 +154,68 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv) { g_autofree CDATSubHeader **table = NULL; - MemoryRegion *nonvolatile_mr; CXLType3Dev *ct3d = priv; + MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL; int dsmad_handle = 0; - int rc; + int cur_ent = 0; + int len = 0; + int rc, i; - if (!ct3d->hostmem) { + if (!ct3d->hostpmem && !ct3d->hostvmem) { return 0; } - nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostmem); - if (!nonvolatile_mr) { - return -EINVAL; + if (ct3d->hostvmem) { + volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem); + if (!volatile_mr) { + return -EINVAL; + } + len += CT3_CDAT_NUM_ENTRIES; } - table = g_malloc0(CT3_CDAT_NUM_ENTRIES * sizeof(*table)); + if (ct3d->hostpmem) { + nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem); + if (!nonvolatile_mr) { + return -EINVAL; + } + len += CT3_CDAT_NUM_ENTRIES; + } + + table = g_malloc0(len * sizeof(*table)); if (!table) { return -ENOMEM; } - rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, nonvolatile_mr); - if (rc < 0) { - return rc; + /* Now fill them in */ + if (volatile_mr) { + rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr, + false, 0); + if (rc < 0) { + return rc; + } + cur_ent = CT3_CDAT_NUM_ENTRIES; + } + + if (nonvolatile_mr) { + rc = ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++, + nonvolatile_mr, true, + (volatile_mr ? 
+ memory_region_size(volatile_mr) : 0)); + if (rc < 0) { + goto error_cleanup; + } + cur_ent += CT3_CDAT_NUM_ENTRIES; } + assert(len == cur_ent); *cdat_table = g_steal_pointer(&table); - return CT3_CDAT_NUM_ENTRIES; + return len; +error_cleanup: + for (i = 0; i < cur_ent; i++) { + g_free(table[i]); + } + return rc; } static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv) @@ -264,16 +303,42 @@ static void build_dvsecs(CXLType3Dev *ct3d) { CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; uint8_t *dvsec; + uint32_t range1_size_hi, range1_size_lo, + range1_base_hi = 0, range1_base_lo = 0, + range2_size_hi = 0, range2_size_lo = 0, + range2_base_hi = 0, range2_base_lo = 0; + + /* + * Volatile memory is mapped as (0x0) + * Persistent memory is mapped at (volatile->size) + */ + if (ct3d->hostvmem) { + range1_size_hi = ct3d->hostvmem->size >> 32; + range1_size_lo = (2 << 5) | (2 << 2) | 0x3 | + (ct3d->hostvmem->size & 0xF0000000); + if (ct3d->hostpmem) { + range2_size_hi = ct3d->hostpmem->size >> 32; + range2_size_lo = (2 << 5) | (2 << 2) | 0x3 | + (ct3d->hostpmem->size & 0xF0000000); + } + } else { + range1_size_hi = ct3d->hostpmem->size >> 32; + range1_size_lo = (2 << 5) | (2 << 2) | 0x3 | + (ct3d->hostpmem->size & 0xF0000000); + } dvsec = (uint8_t *)&(CXLDVSECDevice){ .cap = 0x1e, .ctrl = 0x2, .status2 = 0x2, - .range1_size_hi = ct3d->hostmem->size >> 32, - .range1_size_lo = (2 << 5) | (2 << 2) | 0x3 | - (ct3d->hostmem->size & 0xF0000000), - .range1_base_hi = 0, - .range1_base_lo = 0, + .range1_size_hi = range1_size_hi, + .range1_size_lo = range1_size_lo, + .range1_base_hi = range1_base_hi, + .range1_base_lo = range1_base_lo, + .range2_size_hi = range2_size_hi, + .range2_size_lo = range2_size_lo, + .range2_base_hi = range2_base_hi, + .range2_base_lo = range2_base_lo, }; cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, PCIE_CXL_DEVICE_DVSEC_LENGTH, @@ -314,14 +379,32 @@ static void hdm_decoder_commit(CXLType3Dev *ct3d, int which) { ComponentRegisters *cregs = &ct3d->cxl_cstate.crb; uint32_t *cache_mem = cregs->cache_mem_registers; + uint32_t ctrl; assert(which == 0); + ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL); /* TODO: Sanity checks that the decoder is possible */ - ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0); - ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0); + ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0); + ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1); + + stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL, ctrl); +} + +static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which) +{ + ComponentRegisters *cregs = &ct3d->cxl_cstate.crb; + uint32_t *cache_mem = cregs->cache_mem_registers; + uint32_t ctrl; + + assert(which == 0); + + ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL); - ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1); + ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0); + ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0); + + stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL, ctrl); } static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err) @@ -392,6 +475,7 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value, CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate); uint32_t *cache_mem = cregs->cache_mem_registers; bool should_commit = false; + bool should_uncommit = false; int which_hdm = -1; assert(size == 4); @@ -400,6 +484,7 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value, switch 
(offset) { case A_CXL_HDM_DECODER0_CTRL: should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT); + should_uncommit = !should_commit; which_hdm = 0; break; case A_CXL_RAS_UNC_ERR_STATUS: @@ -486,42 +571,77 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value, stl_le_p((uint8_t *)cache_mem + offset, value); if (should_commit) { hdm_decoder_commit(ct3d, which_hdm); + } else if (should_uncommit) { + hdm_decoder_uncommit(ct3d, which_hdm); } } static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp) { DeviceState *ds = DEVICE(ct3d); - MemoryRegion *mr; - char *name; - if (!ct3d->hostmem) { - error_setg(errp, "memdev property must be set"); + if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) { + error_setg(errp, "at least one memdev property must be set"); + return false; + } else if (ct3d->hostmem && ct3d->hostpmem) { + error_setg(errp, "[memdev] cannot be used with new " + "[persistent-memdev] property"); return false; + } else if (ct3d->hostmem) { + /* Use of hostmem property implies pmem */ + ct3d->hostpmem = ct3d->hostmem; + ct3d->hostmem = NULL; } - mr = host_memory_backend_get_memory(ct3d->hostmem); - if (!mr) { - error_setg(errp, "memdev property must be set"); + if (ct3d->hostpmem && !ct3d->lsa) { + error_setg(errp, "lsa property must be set for persistent devices"); return false; } - memory_region_set_nonvolatile(mr, true); - memory_region_set_enabled(mr, true); - host_memory_backend_set_mapped(ct3d->hostmem, true); - if (ds->id) { - name = g_strdup_printf("cxl-type3-dpa-space:%s", ds->id); - } else { - name = g_strdup("cxl-type3-dpa-space"); + if (ct3d->hostvmem) { + MemoryRegion *vmr; + char *v_name; + + vmr = host_memory_backend_get_memory(ct3d->hostvmem); + if (!vmr) { + error_setg(errp, "volatile memdev must have backing device"); + return false; + } + memory_region_set_nonvolatile(vmr, false); + memory_region_set_enabled(vmr, true); + host_memory_backend_set_mapped(ct3d->hostvmem, true); + if (ds->id) { + v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id); + } else { + v_name = g_strdup("cxl-type3-dpa-vmem-space"); + } + address_space_init(&ct3d->hostvmem_as, vmr, v_name); + ct3d->cxl_dstate.vmem_size = memory_region_size(vmr); + ct3d->cxl_dstate.mem_size += memory_region_size(vmr); + g_free(v_name); } - address_space_init(&ct3d->hostmem_as, mr, name); - g_free(name); - ct3d->cxl_dstate.pmem_size = ct3d->hostmem->size; + if (ct3d->hostpmem) { + MemoryRegion *pmr; + char *p_name; - if (!ct3d->lsa) { - error_setg(errp, "lsa property must be set"); - return false; + pmr = host_memory_backend_get_memory(ct3d->hostpmem); + if (!pmr) { + error_setg(errp, "persistent memdev must have backing device"); + return false; + } + memory_region_set_nonvolatile(pmr, true); + memory_region_set_enabled(pmr, true); + host_memory_backend_set_mapped(ct3d->hostpmem, true); + if (ds->id) { + p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id); + } else { + p_name = g_strdup("cxl-type3-dpa-pmem-space"); + } + address_space_init(&ct3d->hostpmem_as, pmr, p_name); + ct3d->cxl_dstate.pmem_size = memory_region_size(pmr); + ct3d->cxl_dstate.mem_size += memory_region_size(pmr); + g_free(p_name); } return true; @@ -593,6 +713,9 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table; cxl_cstate->cdat.private = ct3d; cxl_doe_cdat_init(cxl_cstate, errp); + if (*errp) { + goto err_free_special_ops; + } pcie_cap_deverr_init(pci_dev); /* Leave a bit of room for expansion */ @@ -605,9 
+728,15 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) err_release_cdat: cxl_doe_cdat_release(cxl_cstate); +err_free_special_ops: g_free(regs->special_ops); err_address_space_free: - address_space_destroy(&ct3d->hostmem_as); + if (ct3d->hostpmem) { + address_space_destroy(&ct3d->hostpmem_as); + } + if (ct3d->hostvmem) { + address_space_destroy(&ct3d->hostvmem_as); + } return; } @@ -620,7 +749,12 @@ static void ct3_exit(PCIDevice *pci_dev) pcie_aer_exit(pci_dev); cxl_doe_cdat_release(cxl_cstate); g_free(regs->special_ops); - address_space_destroy(&ct3d->hostmem_as); + if (ct3d->hostpmem) { + address_space_destroy(&ct3d->hostpmem_as); + } + if (ct3d->hostvmem) { + address_space_destroy(&ct3d->hostvmem_as); + } } /* TODO: Support multiple HDM decoders and DPA skip */ @@ -655,51 +789,77 @@ static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa) return true; } -MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data, - unsigned size, MemTxAttrs attrs) +static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d, + hwaddr host_addr, + unsigned int size, + AddressSpace **as, + uint64_t *dpa_offset) { - CXLType3Dev *ct3d = CXL_TYPE3(d); - uint64_t dpa_offset; - MemoryRegion *mr; + MemoryRegion *vmr = NULL, *pmr = NULL; - /* TODO support volatile region */ - mr = host_memory_backend_get_memory(ct3d->hostmem); - if (!mr) { - return MEMTX_ERROR; + if (ct3d->hostvmem) { + vmr = host_memory_backend_get_memory(ct3d->hostvmem); + } + if (ct3d->hostpmem) { + pmr = host_memory_backend_get_memory(ct3d->hostpmem); } - if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) { - return MEMTX_ERROR; + if (!vmr && !pmr) { + return -ENODEV; } - if (dpa_offset > int128_get64(mr->size)) { + if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) { + return -EINVAL; + } + + if (*dpa_offset > ct3d->cxl_dstate.mem_size) { + return -EINVAL; + } + + if (vmr) { + if (*dpa_offset < memory_region_size(vmr)) { + *as = &ct3d->hostvmem_as; + } else { + *as = &ct3d->hostpmem_as; + *dpa_offset -= memory_region_size(vmr); + } + } else { + *as = &ct3d->hostpmem_as; + } + + return 0; +} + +MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + uint64_t dpa_offset = 0; + AddressSpace *as = NULL; + int res; + + res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size, + &as, &dpa_offset); + if (res) { return MEMTX_ERROR; } - return address_space_read(&ct3d->hostmem_as, dpa_offset, attrs, data, size); + return address_space_read(as, dpa_offset, attrs, data, size); } MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data, unsigned size, MemTxAttrs attrs) { - CXLType3Dev *ct3d = CXL_TYPE3(d); - uint64_t dpa_offset; - MemoryRegion *mr; - - mr = host_memory_backend_get_memory(ct3d->hostmem); - if (!mr) { - return MEMTX_OK; - } + uint64_t dpa_offset = 0; + AddressSpace *as = NULL; + int res; - if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) { - return MEMTX_OK; + res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size, + &as, &dpa_offset); + if (res) { + return MEMTX_ERROR; } - if (dpa_offset > int128_get64(mr->size)) { - return MEMTX_OK; - } - return address_space_write(&ct3d->hostmem_as, dpa_offset, attrs, - &data, size); + return address_space_write(as, dpa_offset, attrs, &data, size); } static void ct3d_reset(DeviceState *dev) @@ -714,7 +874,11 @@ static void ct3d_reset(DeviceState *dev) static Property ct3_props[] = { DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND, - 
HostMemoryBackend *), + HostMemoryBackend *), /* for backward compatibility */ + DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem, + TYPE_MEMORY_BACKEND, HostMemoryBackend *), + DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem, + TYPE_MEMORY_BACKEND, HostMemoryBackend *), DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND, HostMemoryBackend *), DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL), @@ -726,6 +890,10 @@ static uint64_t get_lsa_size(CXLType3Dev *ct3d) { MemoryRegion *mr; + if (!ct3d->lsa) { + return 0; + } + mr = host_memory_backend_get_memory(ct3d->lsa); return memory_region_size(mr); } @@ -743,6 +911,10 @@ static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size, MemoryRegion *mr; void *lsa; + if (!ct3d->lsa) { + return 0; + } + mr = host_memory_backend_get_memory(ct3d->lsa); validate_lsa_access(mr, size, offset); @@ -758,6 +930,10 @@ static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size, MemoryRegion *mr; void *lsa; + if (!ct3d->lsa) { + return; + } + mr = host_memory_backend_get_memory(ct3d->lsa); validate_lsa_access(mr, size, offset); @@ -929,7 +1105,7 @@ static void ct3_class_init(ObjectClass *oc, void *data) pc->config_read = ct3d_config_read; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); - dc->desc = "CXL PMEM Device (Type 3)"; + dc->desc = "CXL Memory Device (Type 3)"; dc->reset = ct3d_reset; device_class_set_props(dc, ct3_props); diff --git a/hw/mem/meson.build b/hw/mem/meson.build index 56c2618b84..ec26ef5544 100644 --- a/hw/mem/meson.build +++ b/hw/mem/meson.build @@ -4,9 +4,9 @@ mem_ss.add(when: 'CONFIG_DIMM', if_true: files('pc-dimm.c')) mem_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_mc.c')) mem_ss.add(when: 'CONFIG_NVDIMM', if_true: files('nvdimm.c')) mem_ss.add(when: 'CONFIG_CXL_MEM_DEVICE', if_true: files('cxl_type3.c')) -softmmu_ss.add(when: 'CONFIG_CXL_MEM_DEVICE', if_false: files('cxl_type3_stubs.c')) -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('cxl_type3_stubs.c')) +system_ss.add(when: 'CONFIG_CXL_MEM_DEVICE', if_false: files('cxl_type3_stubs.c')) +system_ss.add(when: 'CONFIG_ALL', if_true: files('cxl_type3_stubs.c')) -softmmu_ss.add_all(when: 'CONFIG_MEM_DEVICE', if_true: mem_ss) +system_ss.add_all(when: 'CONFIG_MEM_DEVICE', if_true: mem_ss) -softmmu_ss.add(when: 'CONFIG_SPARSE_MEM', if_true: files('sparse-mem.c')) +system_ss.add(when: 'CONFIG_SPARSE_MEM', if_true: files('sparse-mem.c')) diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c index 50ef83215c..37f1f4ccfd 100644 --- a/hw/mem/pc-dimm.c +++ b/hw/mem/pc-dimm.c @@ -81,6 +81,10 @@ void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine) memory_device_plug(MEMORY_DEVICE(dimm), machine); vmstate_register_ram(vmstate_mr, DEVICE(dimm)); + /* count only "real" DIMMs, not NVDIMMs */ + if (!object_dynamic_cast(OBJECT(dimm), TYPE_NVDIMM)) { + machine->device_memory->dimm_size += memory_region_size(vmstate_mr); + } } void pc_dimm_unplug(PCDIMMDevice *dimm, MachineState *machine) @@ -90,6 +94,9 @@ void pc_dimm_unplug(PCDIMMDevice *dimm, MachineState *machine) memory_device_unplug(MEMORY_DEVICE(dimm), machine); vmstate_unregister_ram(vmstate_mr, DEVICE(dimm)); + if (!object_dynamic_cast(OBJECT(dimm), TYPE_NVDIMM)) { + machine->device_memory->dimm_size -= memory_region_size(vmstate_mr); + } } static int pc_dimm_slot2bitmap(Object *obj, void *opaque) diff --git a/hw/mips/loongson3_virt.c b/hw/mips/loongson3_virt.c index 25534288dd..216812f660 100644 --- a/hw/mips/loongson3_virt.c +++ b/hw/mips/loongson3_virt.c @@ -406,6 +406,7 @@ 
static inline void loongson3_virt_devices_init(MachineState *machine, PCIBus *pci_bus; DeviceState *dev; MemoryRegion *mmio_reg, *ecam_reg; + MachineClass *mc = MACHINE_GET_CLASS(machine); LoongsonMachineState *s = LOONGSON_MACHINE(machine); dev = qdev_new(TYPE_GPEX_HOST); @@ -456,7 +457,7 @@ static inline void loongson3_virt_devices_init(MachineState *machine, NICInfo *nd = &nd_table[i]; if (!nd->model) { - nd->model = g_strdup("virtio"); + nd->model = g_strdup(mc->default_nic); } pci_nic_init_nofail(nd, pci_bus, nd->model, NULL); @@ -619,6 +620,7 @@ static void loongson3v_machine_class_init(ObjectClass *oc, void *data) mc->default_ram_size = 1600 * MiB; mc->kvm_type = mips_kvm_type; mc->minimum_page_bits = 14; + mc->default_nic = "virtio-net-pci"; } static const TypeInfo loongson3_machine_types[] = { diff --git a/hw/mips/malta.c b/hw/mips/malta.c index af9021316d..47cb49f691 100644 --- a/hw/mips/malta.c +++ b/hw/mips/malta.c @@ -629,9 +629,9 @@ static void bl_setup_gt64120_jump_kernel(void **p, uint64_t run_addr, /* Bus endianess is always reversed */ #if TARGET_BIG_ENDIAN -#define cpu_to_gt32 cpu_to_le32 +#define cpu_to_gt32(x) (x) #else -#define cpu_to_gt32 cpu_to_be32 +#define cpu_to_gt32(x) bswap32(x) #endif /* setup MEM-to-PCI0 mapping as done by YAMON */ @@ -748,7 +748,6 @@ static void write_bootloader(uint8_t *base, uint64_t run_addr, uint64_t kernel_entry) { uint32_t *p; - void *v; /* Small bootloader */ p = (uint32_t *)base; @@ -785,9 +784,7 @@ static void write_bootloader(uint8_t *base, uint64_t run_addr, * */ - v = p; - bl_setup_gt64120_jump_kernel(&v, run_addr, kernel_entry); - p = v; + bl_setup_gt64120_jump_kernel((void **)&p, run_addr, kernel_entry); /* YAMON subroutines */ p = (uint32_t *) (base + 0x800); diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig index 2ef5781ef8..e4c2149175 100644 --- a/hw/misc/Kconfig +++ b/hw/misc/Kconfig @@ -170,13 +170,16 @@ config VIRT_CTRL config LASI bool +config ALLWINNER_SRAMC + bool + config ALLWINNER_A10_CCM bool config ALLWINNER_A10_DRAMC bool -config AXP209_PMU +config AXP2XX_PMU bool depends on I2C diff --git a/hw/misc/allwinner-r40-ccu.c b/hw/misc/allwinner-r40-ccu.c new file mode 100644 index 0000000000..d82fee12db --- /dev/null +++ b/hw/misc/allwinner-r40-ccu.c @@ -0,0 +1,209 @@ +/* + * Allwinner R40 Clock Control Unit emulation + * + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/misc/allwinner-r40-ccu.h" + +/* CCU register offsets */ +enum { + REG_PLL_CPUX_CTRL = 0x0000, + REG_PLL_AUDIO_CTRL = 0x0008, + REG_PLL_VIDEO0_CTRL = 0x0010, + REG_PLL_VE_CTRL = 0x0018, + REG_PLL_DDR0_CTRL = 0x0020, + REG_PLL_PERIPH0_CTRL = 0x0028, + REG_PLL_PERIPH1_CTRL = 0x002c, + REG_PLL_VIDEO1_CTRL = 0x0030, + REG_PLL_SATA_CTRL = 0x0034, + REG_PLL_GPU_CTRL = 0x0038, + REG_PLL_MIPI_CTRL = 0x0040, + REG_PLL_DE_CTRL = 0x0048, + REG_PLL_DDR1_CTRL = 0x004c, + REG_AHB1_APB1_CFG = 0x0054, + REG_APB2_CFG = 0x0058, + REG_MMC0_CLK = 0x0088, + REG_MMC1_CLK = 0x008c, + REG_MMC2_CLK = 0x0090, + REG_MMC3_CLK = 0x0094, + REG_USBPHY_CFG = 0x00cc, + REG_PLL_DDR_AUX = 0x00f0, + REG_DRAM_CFG = 0x00f4, + REG_PLL_DDR1_CFG = 0x00f8, + REG_DRAM_CLK_GATING = 0x0100, + REG_GMAC_CLK = 0x0164, + REG_SYS_32K_CLK = 0x0310, + REG_PLL_LOCK_CTRL = 0x0320, +}; + +#define REG_INDEX(offset) (offset / sizeof(uint32_t)) + +/* CCU register flags */ +enum { + REG_PLL_ENABLE = (1 << 31), + REG_PLL_LOCK = (1 << 28), +}; + +static uint64_t allwinner_r40_ccu_read(void *opaque, hwaddr offset, + unsigned size) +{ + const AwR40ClockCtlState *s = AW_R40_CCU(opaque); + const uint32_t idx = REG_INDEX(offset); + + switch (offset) { + case 0x324 ... AW_R40_CCU_IOSIZE: + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + return s->regs[idx]; +} + +static void allwinner_r40_ccu_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwR40ClockCtlState *s = AW_R40_CCU(opaque); + + switch (offset) { + case REG_DRAM_CFG: /* DRAM Configuration(for DDR0) */ + /* bit16: SDRCLK_UPD (SDRCLK configuration 0 update) */ + val &= ~(1 << 16); + break; + case REG_PLL_DDR1_CTRL: /* DDR1 Control register */ + /* bit30: SDRPLL_UPD */ + val &= ~(1 << 30); + if (val & REG_PLL_ENABLE) { + val |= REG_PLL_LOCK; + } + break; + case REG_PLL_CPUX_CTRL: + case REG_PLL_AUDIO_CTRL: + case REG_PLL_VE_CTRL: + case REG_PLL_VIDEO0_CTRL: + case REG_PLL_DDR0_CTRL: + case REG_PLL_PERIPH0_CTRL: + case REG_PLL_PERIPH1_CTRL: + case REG_PLL_VIDEO1_CTRL: + case REG_PLL_SATA_CTRL: + case REG_PLL_GPU_CTRL: + case REG_PLL_MIPI_CTRL: + case REG_PLL_DE_CTRL: + if (val & REG_PLL_ENABLE) { + val |= REG_PLL_LOCK; + } + break; + case 0x324 ... 
AW_R40_CCU_IOSIZE: + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented write offset 0x%04x\n", + __func__, (uint32_t)offset); + break; + } + + s->regs[REG_INDEX(offset)] = (uint32_t) val; +} + +static const MemoryRegionOps allwinner_r40_ccu_ops = { + .read = allwinner_r40_ccu_read, + .write = allwinner_r40_ccu_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static void allwinner_r40_ccu_reset(DeviceState *dev) +{ + AwR40ClockCtlState *s = AW_R40_CCU(dev); + + memset(s->regs, 0, sizeof(s->regs)); + + /* Set default values for registers */ + s->regs[REG_INDEX(REG_PLL_CPUX_CTRL)] = 0x00001000; + s->regs[REG_INDEX(REG_PLL_AUDIO_CTRL)] = 0x00035514; + s->regs[REG_INDEX(REG_PLL_VIDEO0_CTRL)] = 0x03006207; + s->regs[REG_INDEX(REG_PLL_VE_CTRL)] = 0x03006207; + s->regs[REG_INDEX(REG_PLL_DDR0_CTRL)] = 0x00001000, + s->regs[REG_INDEX(REG_PLL_PERIPH0_CTRL)] = 0x00041811; + s->regs[REG_INDEX(REG_PLL_PERIPH1_CTRL)] = 0x00041811; + s->regs[REG_INDEX(REG_PLL_VIDEO1_CTRL)] = 0x03006207; + s->regs[REG_INDEX(REG_PLL_SATA_CTRL)] = 0x00001811; + s->regs[REG_INDEX(REG_PLL_GPU_CTRL)] = 0x03006207; + s->regs[REG_INDEX(REG_PLL_MIPI_CTRL)] = 0x00000515; + s->regs[REG_INDEX(REG_PLL_DE_CTRL)] = 0x03006207; + s->regs[REG_INDEX(REG_PLL_DDR1_CTRL)] = 0x00001800; + s->regs[REG_INDEX(REG_AHB1_APB1_CFG)] = 0x00001010; + s->regs[REG_INDEX(REG_APB2_CFG)] = 0x01000000; + s->regs[REG_INDEX(REG_PLL_DDR_AUX)] = 0x00000001; + s->regs[REG_INDEX(REG_PLL_DDR1_CFG)] = 0x0ccca000; + s->regs[REG_INDEX(REG_SYS_32K_CLK)] = 0x0000000f; +} + +static void allwinner_r40_ccu_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + AwR40ClockCtlState *s = AW_R40_CCU(obj); + + /* Memory mapping */ + memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_r40_ccu_ops, s, + TYPE_AW_R40_CCU, AW_R40_CCU_IOSIZE); + sysbus_init_mmio(sbd, &s->iomem); +} + +static const VMStateDescription allwinner_r40_ccu_vmstate = { + .name = "allwinner-r40-ccu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AwR40ClockCtlState, AW_R40_CCU_REGS_NUM), + VMSTATE_END_OF_LIST() + } +}; + +static void allwinner_r40_ccu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = allwinner_r40_ccu_reset; + dc->vmsd = &allwinner_r40_ccu_vmstate; +} + +static const TypeInfo allwinner_r40_ccu_info = { + .name = TYPE_AW_R40_CCU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = allwinner_r40_ccu_init, + .instance_size = sizeof(AwR40ClockCtlState), + .class_init = allwinner_r40_ccu_class_init, +}; + +static void allwinner_r40_ccu_register(void) +{ + type_register_static(&allwinner_r40_ccu_info); +} + +type_init(allwinner_r40_ccu_register) diff --git a/hw/misc/allwinner-r40-dramc.c b/hw/misc/allwinner-r40-dramc.c new file mode 100644 index 0000000000..ea6124744f --- /dev/null +++ b/hw/misc/allwinner-r40-dramc.c @@ -0,0 +1,513 @@ +/* + * Allwinner R40 SDRAM Controller emulation + * + * CCopyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "exec/address-spaces.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/bitops.h" +#include "hw/misc/allwinner-r40-dramc.h" +#include "trace.h" + +#define REG_INDEX(offset) (offset / sizeof(uint32_t)) + +/* DRAMCOM register offsets */ +enum { + REG_DRAMCOM_CR = 0x0000, /* Control Register */ +}; + +/* DRAMCOMM register flags */ +enum { + REG_DRAMCOM_CR_DUAL_RANK = (1 << 0), +}; + +/* DRAMCTL register offsets */ +enum { + REG_DRAMCTL_PIR = 0x0000, /* PHY Initialization Register */ + REG_DRAMCTL_PGSR = 0x0010, /* PHY General Status Register */ + REG_DRAMCTL_STATR = 0x0018, /* Status Register */ + REG_DRAMCTL_PGCR = 0x0100, /* PHY general configuration registers */ +}; + +/* DRAMCTL register flags */ +enum { + REG_DRAMCTL_PGSR_INITDONE = (1 << 0), + REG_DRAMCTL_PGSR_READ_TIMEOUT = (1 << 13), + REG_DRAMCTL_PGCR_ENABLE_READ_TIMEOUT = (1 << 25), +}; + +enum { + REG_DRAMCTL_STATR_ACTIVE = (1 << 0), +}; + +#define DRAM_MAX_ROW_BITS 16 +#define DRAM_MAX_COL_BITS 13 /* 8192 */ +#define DRAM_MAX_BANK 3 + +static uint64_t dram_autodetect_cells[DRAM_MAX_ROW_BITS] + [DRAM_MAX_BANK] + [DRAM_MAX_COL_BITS]; +struct VirtualDDRChip { + uint32_t ram_size; + uint8_t bank_bits; + uint8_t row_bits; + uint8_t col_bits; +}; + +/* + * Only power of 2 RAM sizes from 256MiB up to 2048MiB are supported, + * 2GiB memory is not supported due to dual rank feature. 
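 * (Each geometry in the table below satisfies
 *  ram_size * MiB == 1 << (bank_bits + row_bits + col_bits):
 *  3 + 12 + 13 = 28 -> 256 MiB, 3 + 13 + 13 = 29 -> 512 MiB,
 *  3 + 14 + 13 = 30 -> 1024 MiB.)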
+ */ +static const struct VirtualDDRChip dummy_ddr_chips[] = { + { + .ram_size = 256, + .bank_bits = 3, + .row_bits = 12, + .col_bits = 13, + }, { + .ram_size = 512, + .bank_bits = 3, + .row_bits = 13, + .col_bits = 13, + }, { + .ram_size = 1024, + .bank_bits = 3, + .row_bits = 14, + .col_bits = 13, + }, { + 0 + } +}; + +static const struct VirtualDDRChip *get_match_ddr(uint32_t ram_size) +{ + const struct VirtualDDRChip *ddr; + + for (ddr = &dummy_ddr_chips[0]; ddr->ram_size; ddr++) { + if (ddr->ram_size == ram_size) { + return ddr; + } + } + + return NULL; +} + +static uint64_t *address_to_autodetect_cells(AwR40DramCtlState *s, + const struct VirtualDDRChip *ddr, + uint32_t offset) +{ + int row_index = 0, bank_index = 0, col_index = 0; + uint32_t row_addr, bank_addr, col_addr; + + row_addr = extract32(offset, s->set_col_bits + s->set_bank_bits, + s->set_row_bits); + bank_addr = extract32(offset, s->set_col_bits, s->set_bank_bits); + col_addr = extract32(offset, 0, s->set_col_bits); + + for (int i = 0; i < ddr->row_bits; i++) { + if (row_addr & BIT(i)) { + row_index = i; + } + } + + for (int i = 0; i < ddr->bank_bits; i++) { + if (bank_addr & BIT(i)) { + bank_index = i; + } + } + + for (int i = 0; i < ddr->col_bits; i++) { + if (col_addr & BIT(i)) { + col_index = i; + } + } + + trace_allwinner_r40_dramc_offset_to_cell(offset, row_index, bank_index, + col_index); + return &dram_autodetect_cells[row_index][bank_index][col_index]; +} + +static void allwinner_r40_dramc_map_rows(AwR40DramCtlState *s, uint8_t row_bits, + uint8_t bank_bits, uint8_t col_bits) +{ + const struct VirtualDDRChip *ddr = get_match_ddr(s->ram_size); + bool enable_detect_cells; + + trace_allwinner_r40_dramc_map_rows(row_bits, bank_bits, col_bits); + + if (!ddr) { + return; + } + + s->set_row_bits = row_bits; + s->set_bank_bits = bank_bits; + s->set_col_bits = col_bits; + + enable_detect_cells = ddr->bank_bits != bank_bits + || ddr->row_bits != row_bits + || ddr->col_bits != col_bits; + + if (enable_detect_cells) { + trace_allwinner_r40_dramc_detect_cells_enable(); + } else { + trace_allwinner_r40_dramc_detect_cells_disable(); + } + + memory_region_set_enabled(&s->detect_cells, enable_detect_cells); +} + +static uint64_t allwinner_r40_dramcom_read(void *opaque, hwaddr offset, + unsigned size) +{ + const AwR40DramCtlState *s = AW_R40_DRAMC(opaque); + const uint32_t idx = REG_INDEX(offset); + + if (idx >= AW_R40_DRAMCOM_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + trace_allwinner_r40_dramcom_read(offset, s->dramcom[idx], size); + return s->dramcom[idx]; +} + +static void allwinner_r40_dramcom_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwR40DramCtlState *s = AW_R40_DRAMC(opaque); + const uint32_t idx = REG_INDEX(offset); + + trace_allwinner_r40_dramcom_write(offset, val, size); + + if (idx >= AW_R40_DRAMCOM_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return; + } + + switch (offset) { + case REG_DRAMCOM_CR: /* Control Register */ + if (!(val & REG_DRAMCOM_CR_DUAL_RANK)) { + allwinner_r40_dramc_map_rows(s, ((val >> 4) & 0xf) + 1, + ((val >> 2) & 0x1) + 2, + (((val >> 8) & 0xf) + 3)); + } + break; + }; + + s->dramcom[idx] = (uint32_t) val; +} + +static uint64_t allwinner_r40_dramctl_read(void *opaque, hwaddr offset, + unsigned size) +{ + const AwR40DramCtlState *s = AW_R40_DRAMC(opaque); + const uint32_t idx = REG_INDEX(offset); + + if (idx >= 
AW_R40_DRAMCTL_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + trace_allwinner_r40_dramctl_read(offset, s->dramctl[idx], size); + return s->dramctl[idx]; +} + +static void allwinner_r40_dramctl_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwR40DramCtlState *s = AW_R40_DRAMC(opaque); + const uint32_t idx = REG_INDEX(offset); + + trace_allwinner_r40_dramctl_write(offset, val, size); + + if (idx >= AW_R40_DRAMCTL_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return; + } + + switch (offset) { + case REG_DRAMCTL_PIR: /* PHY Initialization Register */ + s->dramctl[REG_INDEX(REG_DRAMCTL_PGSR)] |= REG_DRAMCTL_PGSR_INITDONE; + s->dramctl[REG_INDEX(REG_DRAMCTL_STATR)] |= REG_DRAMCTL_STATR_ACTIVE; + break; + } + + s->dramctl[idx] = (uint32_t) val; +} + +static uint64_t allwinner_r40_dramphy_read(void *opaque, hwaddr offset, + unsigned size) +{ + const AwR40DramCtlState *s = AW_R40_DRAMC(opaque); + const uint32_t idx = REG_INDEX(offset); + + if (idx >= AW_R40_DRAMPHY_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + trace_allwinner_r40_dramphy_read(offset, s->dramphy[idx], size); + return s->dramphy[idx]; +} + +static void allwinner_r40_dramphy_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwR40DramCtlState *s = AW_R40_DRAMC(opaque); + const uint32_t idx = REG_INDEX(offset); + + trace_allwinner_r40_dramphy_write(offset, val, size); + + if (idx >= AW_R40_DRAMPHY_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return; + } + + s->dramphy[idx] = (uint32_t) val; +} + +static const MemoryRegionOps allwinner_r40_dramcom_ops = { + .read = allwinner_r40_dramcom_read, + .write = allwinner_r40_dramcom_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static const MemoryRegionOps allwinner_r40_dramctl_ops = { + .read = allwinner_r40_dramctl_read, + .write = allwinner_r40_dramctl_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static const MemoryRegionOps allwinner_r40_dramphy_ops = { + .read = allwinner_r40_dramphy_read, + .write = allwinner_r40_dramphy_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static uint64_t allwinner_r40_detect_read(void *opaque, hwaddr offset, + unsigned size) +{ + AwR40DramCtlState *s = AW_R40_DRAMC(opaque); + const struct VirtualDDRChip *ddr = get_match_ddr(s->ram_size); + uint64_t data = 0; + + if (ddr) { + data = *address_to_autodetect_cells(s, ddr, (uint32_t)offset); + } + + trace_allwinner_r40_dramc_detect_cell_read(offset, data); + return data; +} + +static void allwinner_r40_detect_write(void *opaque, hwaddr offset, + uint64_t data, unsigned size) +{ + AwR40DramCtlState *s = AW_R40_DRAMC(opaque); + const struct VirtualDDRChip *ddr = get_match_ddr(s->ram_size); + + if (ddr) { + uint64_t *cell = address_to_autodetect_cells(s, ddr, (uint32_t)offset); + trace_allwinner_r40_dramc_detect_cell_write(offset, data); + *cell = data; + } +} + +static const MemoryRegionOps allwinner_r40_detect_ops = { + .read = allwinner_r40_detect_read, + .write = 
allwinner_r40_detect_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +/* + * mctl_r40_detect_rank_count in u-boot will write the high 1G of DDR + * to detect wether the board support dual_rank or not. Create a virtual memory + * if the board's ram_size less or equal than 1G, and set read time out flag of + * REG_DRAMCTL_PGSR when the user touch this high dram. + */ +static uint64_t allwinner_r40_dualrank_detect_read(void *opaque, hwaddr offset, + unsigned size) +{ + AwR40DramCtlState *s = AW_R40_DRAMC(opaque); + uint32_t reg; + + reg = s->dramctl[REG_INDEX(REG_DRAMCTL_PGCR)]; + if (reg & REG_DRAMCTL_PGCR_ENABLE_READ_TIMEOUT) { /* Enable read time out */ + /* + * this driver only support one rank, mark READ_TIMEOUT when try + * read the second rank. + */ + s->dramctl[REG_INDEX(REG_DRAMCTL_PGSR)] + |= REG_DRAMCTL_PGSR_READ_TIMEOUT; + } + + return 0; +} + +static const MemoryRegionOps allwinner_r40_dualrank_detect_ops = { + .read = allwinner_r40_dualrank_detect_read, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static void allwinner_r40_dramc_reset(DeviceState *dev) +{ + AwR40DramCtlState *s = AW_R40_DRAMC(dev); + + /* Set default values for registers */ + memset(&s->dramcom, 0, sizeof(s->dramcom)); + memset(&s->dramctl, 0, sizeof(s->dramctl)); + memset(&s->dramphy, 0, sizeof(s->dramphy)); +} + +static void allwinner_r40_dramc_realize(DeviceState *dev, Error **errp) +{ + AwR40DramCtlState *s = AW_R40_DRAMC(dev); + + if (!get_match_ddr(s->ram_size)) { + error_report("%s: ram-size %u MiB is not supported", + __func__, s->ram_size); + exit(1); + } + + /* detect_cells */ + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(s), 3, s->ram_addr, 10); + memory_region_set_enabled(&s->detect_cells, false); + + /* + * We only support DRAM size up to 1G now, so prepare a high memory page + * after 1G for dualrank detect. index = 4 + */ + memory_region_init_io(&s->dram_high, OBJECT(s), + &allwinner_r40_dualrank_detect_ops, s, + "DRAMHIGH", KiB); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->dram_high); + sysbus_mmio_map(SYS_BUS_DEVICE(s), 4, s->ram_addr + GiB); +} + +static void allwinner_r40_dramc_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + AwR40DramCtlState *s = AW_R40_DRAMC(obj); + + /* DRAMCOM registers, index 0 */ + memory_region_init_io(&s->dramcom_iomem, OBJECT(s), + &allwinner_r40_dramcom_ops, s, + "DRAMCOM", 4 * KiB); + sysbus_init_mmio(sbd, &s->dramcom_iomem); + + /* DRAMCTL registers, index 1 */ + memory_region_init_io(&s->dramctl_iomem, OBJECT(s), + &allwinner_r40_dramctl_ops, s, + "DRAMCTL", 4 * KiB); + sysbus_init_mmio(sbd, &s->dramctl_iomem); + + /* DRAMPHY registers. index 2 */ + memory_region_init_io(&s->dramphy_iomem, OBJECT(s), + &allwinner_r40_dramphy_ops, s, + "DRAMPHY", 4 * KiB); + sysbus_init_mmio(sbd, &s->dramphy_iomem); + + /* R40 support max 2G memory but we only support up to 1G now. 
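 * (Sysbus MMIO regions are registered in order: 0 = DRAMCOM, 1 = DRAMCTL,
 *  2 = DRAMPHY, 3 = the 1 GiB detect-cells overlay mapped at ram_addr, and
 *  4 = the dual-rank probe page at ram_addr + 1 GiB.)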
index 3 */ + memory_region_init_io(&s->detect_cells, OBJECT(s), + &allwinner_r40_detect_ops, s, + "DRAMCELLS", 1 * GiB); + sysbus_init_mmio(sbd, &s->detect_cells); +} + +static Property allwinner_r40_dramc_properties[] = { + DEFINE_PROP_UINT64("ram-addr", AwR40DramCtlState, ram_addr, 0x0), + DEFINE_PROP_UINT32("ram-size", AwR40DramCtlState, ram_size, 256), /* MiB */ + DEFINE_PROP_END_OF_LIST() +}; + +static const VMStateDescription allwinner_r40_dramc_vmstate = { + .name = "allwinner-r40-dramc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(dramcom, AwR40DramCtlState, + AW_R40_DRAMCOM_REGS_NUM), + VMSTATE_UINT32_ARRAY(dramctl, AwR40DramCtlState, + AW_R40_DRAMCTL_REGS_NUM), + VMSTATE_UINT32_ARRAY(dramphy, AwR40DramCtlState, + AW_R40_DRAMPHY_REGS_NUM), + VMSTATE_END_OF_LIST() + } +}; + +static void allwinner_r40_dramc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = allwinner_r40_dramc_reset; + dc->vmsd = &allwinner_r40_dramc_vmstate; + dc->realize = allwinner_r40_dramc_realize; + device_class_set_props(dc, allwinner_r40_dramc_properties); +} + +static const TypeInfo allwinner_r40_dramc_info = { + .name = TYPE_AW_R40_DRAMC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = allwinner_r40_dramc_init, + .instance_size = sizeof(AwR40DramCtlState), + .class_init = allwinner_r40_dramc_class_init, +}; + +static void allwinner_r40_dramc_register(void) +{ + type_register_static(&allwinner_r40_dramc_info); +} + +type_init(allwinner_r40_dramc_register) diff --git a/hw/misc/allwinner-sramc.c b/hw/misc/allwinner-sramc.c new file mode 100644 index 0000000000..a8b731f8f2 --- /dev/null +++ b/hw/misc/allwinner-sramc.c @@ -0,0 +1,184 @@ +/* + * Allwinner R40 SRAM controller emulation + * + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
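/*
 * How the DRAM controller model above gets exercised: when the guest writes
 * REG_DRAMCOM_CR with the dual-rank bit clear, allwinner_r40_dramcom_write()
 * decodes row_bits = ((cr >> 4) & 0xf) + 1, bank_bits = ((cr >> 2) & 0x1) + 2
 * and col_bits = ((cr >> 8) & 0xf) + 3, enabling the detect-cells overlay
 * whenever that geometry differs from the emulated chip; a subsequent write
 * to REG_DRAMCTL_PIR then reports REG_DRAMCTL_PGSR_INITDONE immediately.
 * The helper below is a hypothetical sketch (not part of the patch) of the
 * inverse encoding, covering only the bits this model reads:
 */
static uint32_t r40_dramcom_cr_encode(unsigned row_bits, unsigned bank_bits,
                                      unsigned col_bits)
{
    return (((row_bits - 1) & 0xf) << 4) |
           (((bank_bits - 2) & 0x1) << 2) |
           (((col_bits - 3) & 0xf) << 8);
}
/* r40_dramcom_cr_encode(14, 3, 13) == 0xad4 selects the 1 GiB layout above. */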
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/misc/allwinner-sramc.h" +#include "trace.h" + +/* + * register offsets + * https://linux-sunxi.org/SRAM_Controller_Register_Guide + */ +enum { + REG_SRAM_CTL1_CFG = 0x04, /* SRAM Control register 1 */ + REG_SRAM_VER = 0x24, /* SRAM Version register */ + REG_SRAM_R40_SOFT_ENTRY_REG0 = 0xbc, +}; + +/* REG_SRAMC_VERSION bit defines */ +#define SRAM_VER_READ_ENABLE (1 << 15) +#define SRAM_VER_VERSION_SHIFT 16 +#define SRAM_VERSION_SUN8I_R40 0x1701 + +static uint64_t allwinner_sramc_read(void *opaque, hwaddr offset, + unsigned size) +{ + AwSRAMCState *s = AW_SRAMC(opaque); + AwSRAMCClass *sc = AW_SRAMC_GET_CLASS(s); + uint64_t val = 0; + + switch (offset) { + case REG_SRAM_CTL1_CFG: + val = s->sram_ctl1; + break; + case REG_SRAM_VER: + /* bit15: lock bit, set this bit before reading this register */ + if (s->sram_ver & SRAM_VER_READ_ENABLE) { + val = SRAM_VER_READ_ENABLE | + (sc->sram_version_code << SRAM_VER_VERSION_SHIFT); + } + break; + case REG_SRAM_R40_SOFT_ENTRY_REG0: + val = s->sram_soft_entry_reg0; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + trace_allwinner_sramc_read(offset, val); + + return val; +} + +static void allwinner_sramc_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwSRAMCState *s = AW_SRAMC(opaque); + + trace_allwinner_sramc_write(offset, val); + + switch (offset) { + case REG_SRAM_CTL1_CFG: + s->sram_ctl1 = val; + break; + case REG_SRAM_VER: + /* Only the READ_ENABLE bit is writeable */ + s->sram_ver = val & SRAM_VER_READ_ENABLE; + break; + case REG_SRAM_R40_SOFT_ENTRY_REG0: + s->sram_soft_entry_reg0 = val; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + break; + } +} + +static const MemoryRegionOps allwinner_sramc_ops = { + .read = allwinner_sramc_read, + .write = allwinner_sramc_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static const VMStateDescription allwinner_sramc_vmstate = { + .name = "allwinner-sramc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(sram_ver, AwSRAMCState), + VMSTATE_UINT32(sram_soft_entry_reg0, AwSRAMCState), + VMSTATE_END_OF_LIST() + } +}; + +static void allwinner_sramc_reset(DeviceState *dev) +{ + AwSRAMCState *s = AW_SRAMC(dev); + AwSRAMCClass *sc = AW_SRAMC_GET_CLASS(s); + + switch (sc->sram_version_code) { + case SRAM_VERSION_SUN8I_R40: + s->sram_ctl1 = 0x1300; + break; + } +} + +static void allwinner_sramc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = allwinner_sramc_reset; + dc->vmsd = &allwinner_sramc_vmstate; +} + +static void allwinner_sramc_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + AwSRAMCState *s = AW_SRAMC(obj); + + /* Memory mapping */ + memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_sramc_ops, s, + TYPE_AW_SRAMC, 1 * KiB); + sysbus_init_mmio(sbd, &s->iomem); +} + +static const TypeInfo allwinner_sramc_info = { + .name = TYPE_AW_SRAMC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = allwinner_sramc_init, + .instance_size = 
sizeof(AwSRAMCState), + .class_init = allwinner_sramc_class_init, +}; + +static void allwinner_r40_sramc_class_init(ObjectClass *klass, void *data) +{ + AwSRAMCClass *sc = AW_SRAMC_CLASS(klass); + + sc->sram_version_code = SRAM_VERSION_SUN8I_R40; +} + +static const TypeInfo allwinner_r40_sramc_info = { + .name = TYPE_AW_SRAMC_SUN8I_R40, + .parent = TYPE_AW_SRAMC, + .class_init = allwinner_r40_sramc_class_init, +}; + +static void allwinner_sramc_register(void) +{ + type_register_static(&allwinner_sramc_info); + type_register_static(&allwinner_r40_sramc_info); +} + +type_init(allwinner_sramc_register) diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c index 12a761f1f5..b07506ec04 100644 --- a/hw/misc/aspeed_hace.c +++ b/hw/misc/aspeed_hace.c @@ -189,7 +189,7 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode, bool acc_mode) { struct iovec iov[ASPEED_HACE_MAX_SG]; - g_autofree uint8_t *digest_buf; + g_autofree uint8_t *digest_buf = NULL; size_t digest_len = 0; int niov = 0; int i; diff --git a/hw/misc/axp209.c b/hw/misc/axp209.c deleted file mode 100644 index 2908ed99a6..0000000000 --- a/hw/misc/axp209.c +++ /dev/null @@ -1,238 +0,0 @@ -/* - * AXP-209 PMU Emulation - * - * Copyright (C) 2022 Strahinja Jankovic <strahinja.p.jankovic@gmail.com> - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
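/*
 * The SRAM controller model above implements the version handshake that
 * Allwinner boot code uses to identify the SoC: the guest must first set
 * bit 15 (SRAM_VER_READ_ENABLE) in REG_SRAM_VER, after which reads return
 * the version code in bits 31:16 (0x1701 for the R40 class registered
 * above). A minimal guest-side sketch, with sramc_readl()/sramc_writel()
 * as hypothetical 32-bit MMIO accessors for the SRAMC base address:
 */
static uint32_t r40_sramc_read_version(void)
{
    sramc_writel(REG_SRAM_VER, SRAM_VER_READ_ENABLE);        /* unlock */
    return sramc_readl(REG_SRAM_VER) >> SRAM_VER_VERSION_SHIFT;
}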
- * - * SPDX-License-Identifier: MIT - */ - -#include "qemu/osdep.h" -#include "qemu/log.h" -#include "trace.h" -#include "hw/i2c/i2c.h" -#include "migration/vmstate.h" - -#define TYPE_AXP209_PMU "axp209_pmu" - -#define AXP209(obj) \ - OBJECT_CHECK(AXP209I2CState, (obj), TYPE_AXP209_PMU) - -/* registers */ -enum { - REG_POWER_STATUS = 0x0u, - REG_OPERATING_MODE, - REG_OTG_VBUS_STATUS, - REG_CHIP_VERSION, - REG_DATA_CACHE_0, - REG_DATA_CACHE_1, - REG_DATA_CACHE_2, - REG_DATA_CACHE_3, - REG_DATA_CACHE_4, - REG_DATA_CACHE_5, - REG_DATA_CACHE_6, - REG_DATA_CACHE_7, - REG_DATA_CACHE_8, - REG_DATA_CACHE_9, - REG_DATA_CACHE_A, - REG_DATA_CACHE_B, - REG_POWER_OUTPUT_CTRL = 0x12u, - REG_DC_DC2_OUT_V_CTRL = 0x23u, - REG_DC_DC2_DVS_CTRL = 0x25u, - REG_DC_DC3_OUT_V_CTRL = 0x27u, - REG_LDO2_4_OUT_V_CTRL, - REG_LDO3_OUT_V_CTRL, - REG_VBUS_CH_MGMT = 0x30u, - REG_SHUTDOWN_V_CTRL, - REG_SHUTDOWN_CTRL, - REG_CHARGE_CTRL_1, - REG_CHARGE_CTRL_2, - REG_SPARE_CHARGE_CTRL, - REG_PEK_KEY_CTRL, - REG_DC_DC_FREQ_SET, - REG_CHR_TEMP_TH_SET, - REG_CHR_HIGH_TEMP_TH_CTRL, - REG_IPSOUT_WARN_L1, - REG_IPSOUT_WARN_L2, - REG_DISCHR_TEMP_TH_SET, - REG_DISCHR_HIGH_TEMP_TH_CTRL, - REG_IRQ_BANK_1_CTRL = 0x40u, - REG_IRQ_BANK_2_CTRL, - REG_IRQ_BANK_3_CTRL, - REG_IRQ_BANK_4_CTRL, - REG_IRQ_BANK_5_CTRL, - REG_IRQ_BANK_1_STAT = 0x48u, - REG_IRQ_BANK_2_STAT, - REG_IRQ_BANK_3_STAT, - REG_IRQ_BANK_4_STAT, - REG_IRQ_BANK_5_STAT, - REG_ADC_ACIN_V_H = 0x56u, - REG_ADC_ACIN_V_L, - REG_ADC_ACIN_CURR_H, - REG_ADC_ACIN_CURR_L, - REG_ADC_VBUS_V_H, - REG_ADC_VBUS_V_L, - REG_ADC_VBUS_CURR_H, - REG_ADC_VBUS_CURR_L, - REG_ADC_INT_TEMP_H, - REG_ADC_INT_TEMP_L, - REG_ADC_TEMP_SENS_V_H = 0x62u, - REG_ADC_TEMP_SENS_V_L, - REG_ADC_BAT_V_H = 0x78u, - REG_ADC_BAT_V_L, - REG_ADC_BAT_DISCHR_CURR_H, - REG_ADC_BAT_DISCHR_CURR_L, - REG_ADC_BAT_CHR_CURR_H, - REG_ADC_BAT_CHR_CURR_L, - REG_ADC_IPSOUT_V_H, - REG_ADC_IPSOUT_V_L, - REG_DC_DC_MOD_SEL = 0x80u, - REG_ADC_EN_1, - REG_ADC_EN_2, - REG_ADC_SR_CTRL, - REG_ADC_IN_RANGE, - REG_GPIO1_ADC_IRQ_RISING_TH, - REG_GPIO1_ADC_IRQ_FALLING_TH, - REG_TIMER_CTRL = 0x8au, - REG_VBUS_CTRL_MON_SRP, - REG_OVER_TEMP_SHUTDOWN = 0x8fu, - REG_GPIO0_FEAT_SET, - REG_GPIO_OUT_HIGH_SET, - REG_GPIO1_FEAT_SET, - REG_GPIO2_FEAT_SET, - REG_GPIO_SIG_STATE_SET_MON, - REG_GPIO3_SET, - REG_COULOMB_CNTR_CTRL = 0xb8u, - REG_POWER_MEAS_RES, - NR_REGS -}; - -#define AXP209_CHIP_VERSION_ID (0x01) -#define AXP209_DC_DC2_OUT_V_CTRL_RESET (0x16) -#define AXP209_IRQ_BANK_1_CTRL_RESET (0xd8) - -/* A simple I2C slave which returns values of ID or CNT register. */ -typedef struct AXP209I2CState { - /*< private >*/ - I2CSlave i2c; - /*< public >*/ - uint8_t regs[NR_REGS]; /* peripheral registers */ - uint8_t ptr; /* current register index */ - uint8_t count; /* counter used for tx/rx */ -} AXP209I2CState; - -/* Reset all counters and load ID register */ -static void axp209_reset_enter(Object *obj, ResetType type) -{ - AXP209I2CState *s = AXP209(obj); - - memset(s->regs, 0, NR_REGS); - s->ptr = 0; - s->count = 0; - s->regs[REG_CHIP_VERSION] = AXP209_CHIP_VERSION_ID; - s->regs[REG_DC_DC2_OUT_V_CTRL] = AXP209_DC_DC2_OUT_V_CTRL_RESET; - s->regs[REG_IRQ_BANK_1_CTRL] = AXP209_IRQ_BANK_1_CTRL_RESET; -} - -/* Handle events from master. 
*/ -static int axp209_event(I2CSlave *i2c, enum i2c_event event) -{ - AXP209I2CState *s = AXP209(i2c); - - s->count = 0; - - return 0; -} - -/* Called when master requests read */ -static uint8_t axp209_rx(I2CSlave *i2c) -{ - AXP209I2CState *s = AXP209(i2c); - uint8_t ret = 0xff; - - if (s->ptr < NR_REGS) { - ret = s->regs[s->ptr++]; - } - - trace_axp209_rx(s->ptr - 1, ret); - - return ret; -} - -/* - * Called when master sends write. - * Update ptr with byte 0, then perform write with second byte. - */ -static int axp209_tx(I2CSlave *i2c, uint8_t data) -{ - AXP209I2CState *s = AXP209(i2c); - - if (s->count == 0) { - /* Store register address */ - s->ptr = data; - s->count++; - trace_axp209_select(data); - } else { - trace_axp209_tx(s->ptr, data); - if (s->ptr == REG_DC_DC2_OUT_V_CTRL) { - s->regs[s->ptr++] = data; - } - } - - return 0; -} - -static const VMStateDescription vmstate_axp209 = { - .name = TYPE_AXP209_PMU, - .version_id = 1, - .fields = (VMStateField[]) { - VMSTATE_UINT8_ARRAY(regs, AXP209I2CState, NR_REGS), - VMSTATE_UINT8(count, AXP209I2CState), - VMSTATE_UINT8(ptr, AXP209I2CState), - VMSTATE_END_OF_LIST() - } -}; - -static void axp209_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - I2CSlaveClass *isc = I2C_SLAVE_CLASS(oc); - ResettableClass *rc = RESETTABLE_CLASS(oc); - - rc->phases.enter = axp209_reset_enter; - dc->vmsd = &vmstate_axp209; - isc->event = axp209_event; - isc->recv = axp209_rx; - isc->send = axp209_tx; -} - -static const TypeInfo axp209_info = { - .name = TYPE_AXP209_PMU, - .parent = TYPE_I2C_SLAVE, - .instance_size = sizeof(AXP209I2CState), - .class_init = axp209_class_init -}; - -static void axp209_register_devices(void) -{ - type_register_static(&axp209_info); -} - -type_init(axp209_register_devices); diff --git a/hw/misc/axp2xx.c b/hw/misc/axp2xx.c new file mode 100644 index 0000000000..41538c1cd7 --- /dev/null +++ b/hw/misc/axp2xx.c @@ -0,0 +1,283 @@ +/* + * AXP-2XX PMU Emulation, supported lists: + * AXP209 + * AXP221 + * + * Copyright (C) 2022 Strahinja Jankovic <strahinja.p.jankovic@gmail.com> + * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * SPDX-License-Identifier: MIT + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qom/object.h" +#include "trace.h" +#include "hw/i2c/i2c.h" +#include "migration/vmstate.h" + +#define TYPE_AXP2XX "axp2xx_pmu" +#define TYPE_AXP209_PMU "axp209_pmu" +#define TYPE_AXP221_PMU "axp221_pmu" + +OBJECT_DECLARE_TYPE(AXP2xxI2CState, AXP2xxClass, AXP2XX) + +#define NR_REGS (0xff) + +/* A simple I2C slave which returns values of ID or CNT register. */ +typedef struct AXP2xxI2CState { + /*< private >*/ + I2CSlave i2c; + /*< public >*/ + uint8_t regs[NR_REGS]; /* peripheral registers */ + uint8_t ptr; /* current register index */ + uint8_t count; /* counter used for tx/rx */ +} AXP2xxI2CState; + +typedef struct AXP2xxClass { + /*< private >*/ + I2CSlaveClass parent_class; + /*< public >*/ + void (*reset_enter)(AXP2xxI2CState *s, ResetType type); +} AXP2xxClass; + +#define AXP209_CHIP_VERSION_ID (0x01) +#define AXP209_DC_DC2_OUT_V_CTRL_RESET (0x16) + +/* Reset all counters and load ID register */ +static void axp209_reset_enter(AXP2xxI2CState *s, ResetType type) +{ + memset(s->regs, 0, NR_REGS); + s->ptr = 0; + s->count = 0; + + s->regs[0x03] = AXP209_CHIP_VERSION_ID; + s->regs[0x23] = AXP209_DC_DC2_OUT_V_CTRL_RESET; + + s->regs[0x30] = 0x60; + s->regs[0x32] = 0x46; + s->regs[0x34] = 0x41; + s->regs[0x35] = 0x22; + s->regs[0x36] = 0x5d; + s->regs[0x37] = 0x08; + s->regs[0x38] = 0xa5; + s->regs[0x39] = 0x1f; + s->regs[0x3a] = 0x68; + s->regs[0x3b] = 0x5f; + s->regs[0x3c] = 0xfc; + s->regs[0x3d] = 0x16; + s->regs[0x40] = 0xd8; + s->regs[0x42] = 0xff; + s->regs[0x43] = 0x3b; + s->regs[0x80] = 0xe0; + s->regs[0x82] = 0x83; + s->regs[0x83] = 0x80; + s->regs[0x84] = 0x32; + s->regs[0x86] = 0xff; + s->regs[0x90] = 0x07; + s->regs[0x91] = 0xa0; + s->regs[0x92] = 0x07; + s->regs[0x93] = 0x07; +} + +#define AXP221_PWR_STATUS_ACIN_PRESENT BIT(7) +#define AXP221_PWR_STATUS_ACIN_AVAIL BIT(6) +#define AXP221_PWR_STATUS_VBUS_PRESENT BIT(5) +#define AXP221_PWR_STATUS_VBUS_USED BIT(4) +#define AXP221_PWR_STATUS_BAT_CHARGING BIT(2) +#define AXP221_PWR_STATUS_ACIN_VBUS_POWERED BIT(1) + +/* Reset all counters and load ID register */ +static void axp221_reset_enter(AXP2xxI2CState *s, ResetType type) +{ + memset(s->regs, 0, NR_REGS); + s->ptr = 0; + s->count = 0; + + /* input power status register */ + s->regs[0x00] = AXP221_PWR_STATUS_ACIN_PRESENT + | AXP221_PWR_STATUS_ACIN_AVAIL + | AXP221_PWR_STATUS_ACIN_VBUS_POWERED; + + s->regs[0x01] = 0x00; /* no battery is connected */ + + /* + * CHIPID register, no documented on datasheet, but it is checked in + * u-boot spl. I had read it from AXP221s and got 0x06 value. + * So leave 06h here. 
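 * (Any register not explicitly initialised below reads back as zero;
 *  axp2xx_rx() simply returns regs[ptr] for every in-range index, so this
 *  one placeholder value is enough to satisfy the SPL check.)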
+ */ + s->regs[0x03] = 0x06; + + s->regs[0x10] = 0xbf; + s->regs[0x13] = 0x01; + s->regs[0x30] = 0x60; + s->regs[0x31] = 0x03; + s->regs[0x32] = 0x43; + s->regs[0x33] = 0xc6; + s->regs[0x34] = 0x45; + s->regs[0x35] = 0x0e; + s->regs[0x36] = 0x5d; + s->regs[0x37] = 0x08; + s->regs[0x38] = 0xa5; + s->regs[0x39] = 0x1f; + s->regs[0x3c] = 0xfc; + s->regs[0x3d] = 0x16; + s->regs[0x80] = 0x80; + s->regs[0x82] = 0xe0; + s->regs[0x84] = 0x32; + s->regs[0x8f] = 0x01; + + s->regs[0x90] = 0x07; + s->regs[0x91] = 0x1f; + s->regs[0x92] = 0x07; + s->regs[0x93] = 0x1f; + + s->regs[0x40] = 0xd8; + s->regs[0x41] = 0xff; + s->regs[0x42] = 0x03; + s->regs[0x43] = 0x03; + + s->regs[0xb8] = 0xc0; + s->regs[0xb9] = 0x64; + s->regs[0xe6] = 0xa0; +} + +static void axp2xx_reset_enter(Object *obj, ResetType type) +{ + AXP2xxI2CState *s = AXP2XX(obj); + AXP2xxClass *sc = AXP2XX_GET_CLASS(s); + + sc->reset_enter(s, type); +} + +/* Handle events from master. */ +static int axp2xx_event(I2CSlave *i2c, enum i2c_event event) +{ + AXP2xxI2CState *s = AXP2XX(i2c); + + s->count = 0; + + return 0; +} + +/* Called when master requests read */ +static uint8_t axp2xx_rx(I2CSlave *i2c) +{ + AXP2xxI2CState *s = AXP2XX(i2c); + uint8_t ret = 0xff; + + if (s->ptr < NR_REGS) { + ret = s->regs[s->ptr++]; + } + + trace_axp2xx_rx(s->ptr - 1, ret); + + return ret; +} + +/* + * Called when master sends write. + * Update ptr with byte 0, then perform write with second byte. + */ +static int axp2xx_tx(I2CSlave *i2c, uint8_t data) +{ + AXP2xxI2CState *s = AXP2XX(i2c); + + if (s->count == 0) { + /* Store register address */ + s->ptr = data; + s->count++; + trace_axp2xx_select(data); + } else { + trace_axp2xx_tx(s->ptr, data); + s->regs[s->ptr++] = data; + } + + return 0; +} + +static const VMStateDescription vmstate_axp2xx = { + .name = TYPE_AXP2XX, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(regs, AXP2xxI2CState, NR_REGS), + VMSTATE_UINT8(ptr, AXP2xxI2CState), + VMSTATE_UINT8(count, AXP2xxI2CState), + VMSTATE_END_OF_LIST() + } +}; + +static void axp2xx_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + I2CSlaveClass *isc = I2C_SLAVE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); + + rc->phases.enter = axp2xx_reset_enter; + dc->vmsd = &vmstate_axp2xx; + isc->event = axp2xx_event; + isc->recv = axp2xx_rx; + isc->send = axp2xx_tx; +} + +static const TypeInfo axp2xx_info = { + .name = TYPE_AXP2XX, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(AXP2xxI2CState), + .class_size = sizeof(AXP2xxClass), + .class_init = axp2xx_class_init, + .abstract = true, +}; + +static void axp209_class_init(ObjectClass *oc, void *data) +{ + AXP2xxClass *sc = AXP2XX_CLASS(oc); + + sc->reset_enter = axp209_reset_enter; +} + +static const TypeInfo axp209_info = { + .name = TYPE_AXP209_PMU, + .parent = TYPE_AXP2XX, + .class_init = axp209_class_init +}; + +static void axp221_class_init(ObjectClass *oc, void *data) +{ + AXP2xxClass *sc = AXP2XX_CLASS(oc); + + sc->reset_enter = axp221_reset_enter; +} + +static const TypeInfo axp221_info = { + .name = TYPE_AXP221_PMU, + .parent = TYPE_AXP2XX, + .class_init = axp221_class_init, +}; + +static void axp2xx_register_devices(void) +{ + type_register_static(&axp2xx_info); + type_register_static(&axp209_info); + type_register_static(&axp221_info); +} + +type_init(axp2xx_register_devices); diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c index 890ae7bae5..4ed9faa54a 100644 --- a/hw/misc/bcm2835_property.c +++ b/hw/misc/bcm2835_property.c 
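The AXP2xx model above follows the usual PMU convention on I2C: the first byte of a write selects the register pointer, later bytes are written through it, and reads return consecutive registers while the pointer auto-increments. A minimal sketch of reading one register through QEMU's generic I2C helpers, assuming the device has already been attached to an I2CBus; the helper name and the omitted error handling are illustrative only:

#include "hw/i2c/i2c.h"

/* Illustrative only: read one AXP2xx register, e.g. reg 0x03 (chip ID). */
static uint8_t axp2xx_read_reg(I2CBus *bus, uint8_t addr, uint8_t reg)
{
    uint8_t val;

    i2c_start_send(bus, addr);     /* axp2xx_event() resets the byte count */
    i2c_send(bus, reg);            /* first byte: set the register pointer */
    i2c_start_recv(bus, addr);     /* repeated start for the read phase */
    val = i2c_recv(bus);           /* returns regs[reg]; pointer advances */
    i2c_end_transfer(bus);
    return val;
}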
@@ -12,10 +12,12 @@ #include "migration/vmstate.h" #include "hw/irq.h" #include "hw/misc/bcm2835_mbox_defs.h" +#include "hw/misc/raspberrypi-fw-defs.h" #include "sysemu/dma.h" #include "qemu/log.h" #include "qemu/module.h" #include "trace.h" +#include "hw/arm/raspi_platform.h" /* https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface */ @@ -51,48 +53,48 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) /* @(value + 8) : Request/response indicator */ resplen = 0; switch (tag) { - case 0x00000000: /* End tag */ + case RPI_FWREQ_PROPERTY_END: break; - case 0x00000001: /* Get firmware revision */ + case RPI_FWREQ_GET_FIRMWARE_REVISION: stl_le_phys(&s->dma_as, value + 12, 346337); resplen = 4; break; - case 0x00010001: /* Get board model */ + case RPI_FWREQ_GET_BOARD_MODEL: qemu_log_mask(LOG_UNIMP, "bcm2835_property: 0x%08x get board model NYI\n", tag); resplen = 4; break; - case 0x00010002: /* Get board revision */ + case RPI_FWREQ_GET_BOARD_REVISION: stl_le_phys(&s->dma_as, value + 12, s->board_rev); resplen = 4; break; - case 0x00010003: /* Get board MAC address */ + case RPI_FWREQ_GET_BOARD_MAC_ADDRESS: resplen = sizeof(s->macaddr.a); dma_memory_write(&s->dma_as, value + 12, s->macaddr.a, resplen, MEMTXATTRS_UNSPECIFIED); break; - case 0x00010004: /* Get board serial */ + case RPI_FWREQ_GET_BOARD_SERIAL: qemu_log_mask(LOG_UNIMP, "bcm2835_property: 0x%08x get board serial NYI\n", tag); resplen = 8; break; - case 0x00010005: /* Get ARM memory */ + case RPI_FWREQ_GET_ARM_MEMORY: /* base */ stl_le_phys(&s->dma_as, value + 12, 0); /* size */ stl_le_phys(&s->dma_as, value + 16, s->fbdev->vcram_base); resplen = 8; break; - case 0x00010006: /* Get VC memory */ + case RPI_FWREQ_GET_VC_MEMORY: /* base */ stl_le_phys(&s->dma_as, value + 12, s->fbdev->vcram_base); /* size */ stl_le_phys(&s->dma_as, value + 16, s->fbdev->vcram_size); resplen = 8; break; - case 0x00028001: /* Set power state */ + case RPI_FWREQ_SET_POWER_STATE: /* Assume that whatever device they asked for exists, * and we'll just claim we set it to the desired state */ @@ -103,38 +105,42 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) /* Clocks */ - case 0x00030001: /* Get clock state */ + case RPI_FWREQ_GET_CLOCK_STATE: stl_le_phys(&s->dma_as, value + 16, 0x1); resplen = 8; break; - case 0x00038001: /* Set clock state */ + case RPI_FWREQ_SET_CLOCK_STATE: qemu_log_mask(LOG_UNIMP, "bcm2835_property: 0x%08x set clock state NYI\n", tag); resplen = 8; break; - case 0x00030002: /* Get clock rate */ - case 0x00030004: /* Get max clock rate */ - case 0x00030007: /* Get min clock rate */ + case RPI_FWREQ_GET_CLOCK_RATE: + case RPI_FWREQ_GET_MAX_CLOCK_RATE: + case RPI_FWREQ_GET_MIN_CLOCK_RATE: switch (ldl_le_phys(&s->dma_as, value + 12)) { - case 1: /* EMMC */ - stl_le_phys(&s->dma_as, value + 16, 50000000); + case RPI_FIRMWARE_EMMC_CLK_ID: + stl_le_phys(&s->dma_as, value + 16, RPI_FIRMWARE_EMMC_CLK_RATE); break; - case 2: /* UART */ - stl_le_phys(&s->dma_as, value + 16, 3000000); + case RPI_FIRMWARE_UART_CLK_ID: + stl_le_phys(&s->dma_as, value + 16, RPI_FIRMWARE_UART_CLK_RATE); + break; + case RPI_FIRMWARE_CORE_CLK_ID: + stl_le_phys(&s->dma_as, value + 16, RPI_FIRMWARE_CORE_CLK_RATE); break; default: - stl_le_phys(&s->dma_as, value + 16, 700000000); + stl_le_phys(&s->dma_as, value + 16, + RPI_FIRMWARE_DEFAULT_CLK_RATE); break; } resplen = 8; break; - case 0x00038002: /* Set clock rate */ - case 0x00038004: /* Set max clock rate */ - case 0x00038007: /* Set min 
clock rate */ + case RPI_FWREQ_SET_CLOCK_RATE: + case RPI_FWREQ_SET_MAX_CLOCK_RATE: + case RPI_FWREQ_SET_MIN_CLOCK_RATE: qemu_log_mask(LOG_UNIMP, "bcm2835_property: 0x%08x set clock rate NYI\n", tag); @@ -143,121 +149,121 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) /* Temperature */ - case 0x00030006: /* Get temperature */ + case RPI_FWREQ_GET_TEMPERATURE: stl_le_phys(&s->dma_as, value + 16, 25000); resplen = 8; break; - case 0x0003000A: /* Get max temperature */ + case RPI_FWREQ_GET_MAX_TEMPERATURE: stl_le_phys(&s->dma_as, value + 16, 99000); resplen = 8; break; /* Frame buffer */ - case 0x00040001: /* Allocate buffer */ + case RPI_FWREQ_FRAMEBUFFER_ALLOCATE: stl_le_phys(&s->dma_as, value + 12, fbconfig.base); stl_le_phys(&s->dma_as, value + 16, bcm2835_fb_get_size(&fbconfig)); resplen = 8; break; - case 0x00048001: /* Release buffer */ + case RPI_FWREQ_FRAMEBUFFER_RELEASE: resplen = 0; break; - case 0x00040002: /* Blank screen */ + case RPI_FWREQ_FRAMEBUFFER_BLANK: resplen = 4; break; - case 0x00044003: /* Test physical display width/height */ - case 0x00044004: /* Test virtual display width/height */ + case RPI_FWREQ_FRAMEBUFFER_TEST_PHYSICAL_WIDTH_HEIGHT: + case RPI_FWREQ_FRAMEBUFFER_TEST_VIRTUAL_WIDTH_HEIGHT: resplen = 8; break; - case 0x00048003: /* Set physical display width/height */ + case RPI_FWREQ_FRAMEBUFFER_SET_PHYSICAL_WIDTH_HEIGHT: fbconfig.xres = ldl_le_phys(&s->dma_as, value + 12); fbconfig.yres = ldl_le_phys(&s->dma_as, value + 16); bcm2835_fb_validate_config(&fbconfig); fbconfig_updated = true; /* fall through */ - case 0x00040003: /* Get physical display width/height */ + case RPI_FWREQ_FRAMEBUFFER_GET_PHYSICAL_WIDTH_HEIGHT: stl_le_phys(&s->dma_as, value + 12, fbconfig.xres); stl_le_phys(&s->dma_as, value + 16, fbconfig.yres); resplen = 8; break; - case 0x00048004: /* Set virtual display width/height */ + case RPI_FWREQ_FRAMEBUFFER_SET_VIRTUAL_WIDTH_HEIGHT: fbconfig.xres_virtual = ldl_le_phys(&s->dma_as, value + 12); fbconfig.yres_virtual = ldl_le_phys(&s->dma_as, value + 16); bcm2835_fb_validate_config(&fbconfig); fbconfig_updated = true; /* fall through */ - case 0x00040004: /* Get virtual display width/height */ + case RPI_FWREQ_FRAMEBUFFER_GET_VIRTUAL_WIDTH_HEIGHT: stl_le_phys(&s->dma_as, value + 12, fbconfig.xres_virtual); stl_le_phys(&s->dma_as, value + 16, fbconfig.yres_virtual); resplen = 8; break; - case 0x00044005: /* Test depth */ + case RPI_FWREQ_FRAMEBUFFER_TEST_DEPTH: resplen = 4; break; - case 0x00048005: /* Set depth */ + case RPI_FWREQ_FRAMEBUFFER_SET_DEPTH: fbconfig.bpp = ldl_le_phys(&s->dma_as, value + 12); bcm2835_fb_validate_config(&fbconfig); fbconfig_updated = true; /* fall through */ - case 0x00040005: /* Get depth */ + case RPI_FWREQ_FRAMEBUFFER_GET_DEPTH: stl_le_phys(&s->dma_as, value + 12, fbconfig.bpp); resplen = 4; break; - case 0x00044006: /* Test pixel order */ + case RPI_FWREQ_FRAMEBUFFER_TEST_PIXEL_ORDER: resplen = 4; break; - case 0x00048006: /* Set pixel order */ + case RPI_FWREQ_FRAMEBUFFER_SET_PIXEL_ORDER: fbconfig.pixo = ldl_le_phys(&s->dma_as, value + 12); bcm2835_fb_validate_config(&fbconfig); fbconfig_updated = true; /* fall through */ - case 0x00040006: /* Get pixel order */ + case RPI_FWREQ_FRAMEBUFFER_GET_PIXEL_ORDER: stl_le_phys(&s->dma_as, value + 12, fbconfig.pixo); resplen = 4; break; - case 0x00044007: /* Test pixel alpha */ + case RPI_FWREQ_FRAMEBUFFER_TEST_ALPHA_MODE: resplen = 4; break; - case 0x00048007: /* Set alpha */ + case RPI_FWREQ_FRAMEBUFFER_SET_ALPHA_MODE: fbconfig.alpha 
= ldl_le_phys(&s->dma_as, value + 12); bcm2835_fb_validate_config(&fbconfig); fbconfig_updated = true; /* fall through */ - case 0x00040007: /* Get alpha */ + case RPI_FWREQ_FRAMEBUFFER_GET_ALPHA_MODE: stl_le_phys(&s->dma_as, value + 12, fbconfig.alpha); resplen = 4; break; - case 0x00040008: /* Get pitch */ + case RPI_FWREQ_FRAMEBUFFER_GET_PITCH: stl_le_phys(&s->dma_as, value + 12, bcm2835_fb_get_pitch(&fbconfig)); resplen = 4; break; - case 0x00044009: /* Test virtual offset */ + case RPI_FWREQ_FRAMEBUFFER_TEST_VIRTUAL_OFFSET: resplen = 8; break; - case 0x00048009: /* Set virtual offset */ + case RPI_FWREQ_FRAMEBUFFER_SET_VIRTUAL_OFFSET: fbconfig.xoffset = ldl_le_phys(&s->dma_as, value + 12); fbconfig.yoffset = ldl_le_phys(&s->dma_as, value + 16); bcm2835_fb_validate_config(&fbconfig); fbconfig_updated = true; /* fall through */ - case 0x00040009: /* Get virtual offset */ + case RPI_FWREQ_FRAMEBUFFER_GET_VIRTUAL_OFFSET: stl_le_phys(&s->dma_as, value + 12, fbconfig.xoffset); stl_le_phys(&s->dma_as, value + 16, fbconfig.yoffset); resplen = 8; break; - case 0x0004000a: /* Get/Test/Set overscan */ - case 0x0004400a: - case 0x0004800a: + case RPI_FWREQ_FRAMEBUFFER_GET_OVERSCAN: + case RPI_FWREQ_FRAMEBUFFER_TEST_OVERSCAN: + case RPI_FWREQ_FRAMEBUFFER_SET_OVERSCAN: stl_le_phys(&s->dma_as, value + 12, 0); stl_le_phys(&s->dma_as, value + 16, 0); stl_le_phys(&s->dma_as, value + 20, 0); stl_le_phys(&s->dma_as, value + 24, 0); resplen = 16; break; - case 0x0004800b: /* Set palette */ + case RPI_FWREQ_FRAMEBUFFER_SET_PALETTE: offset = ldl_le_phys(&s->dma_as, value + 12); length = ldl_le_phys(&s->dma_as, value + 16); n = 0; @@ -270,19 +276,29 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) stl_le_phys(&s->dma_as, value + 12, 0); resplen = 4; break; - case 0x00040013: /* Get number of displays */ + case RPI_FWREQ_FRAMEBUFFER_GET_NUM_DISPLAYS: stl_le_phys(&s->dma_as, value + 12, 1); resplen = 4; break; - case 0x00060001: /* Get DMA channels */ + case RPI_FWREQ_GET_DMA_CHANNELS: /* channels 2-5 */ stl_le_phys(&s->dma_as, value + 12, 0x003C); resplen = 4; break; - case 0x00050001: /* Get command line */ - resplen = 0; + case RPI_FWREQ_GET_COMMAND_LINE: + /* + * We follow the firmware behaviour: no NUL terminator is + * written to the buffer, and if the buffer is too short + * we report the required length in the response header + * and copy nothing to the buffer. + */ + resplen = strlen(s->command_line); + if (bufsize >= resplen) + address_space_write(&s->dma_as, value + 12, + MEMTXATTRS_UNSPECIFIED, s->command_line, + resplen); break; default: @@ -382,6 +398,13 @@ static void bcm2835_property_init(Object *obj) memory_region_init_io(&s->iomem, OBJECT(s), &bcm2835_property_ops, s, TYPE_BCM2835_PROPERTY, 0x10); + + /* + * bcm2835_property_ops call into bcm2835_mbox, which in-turn reads from + * iomem. As such, mark iomem as re-entracy safe. 
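 * (Concretely, disable_reentrancy_guard is a per-MemoryRegion flag: setting
 *  it below tells the memory core's re-entrancy check to allow this nested
 *  dispatch instead of blocking the access and warning about it.)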
+ */ + s->iomem.disable_reentrancy_guard = true; + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); sysbus_init_irq(SYS_BUS_DEVICE(s), &s->mbox_irq); } @@ -413,6 +436,7 @@ static void bcm2835_property_realize(DeviceState *dev, Error **errp) static Property bcm2835_property_props[] = { DEFINE_PROP_UINT32("board-rev", BCM2835PropertyState, board_rev, 0), + DEFINE_PROP_STRING("command-line", BCM2835PropertyState, command_line), DEFINE_PROP_END_OF_LIST() }; diff --git a/hw/misc/imx_rngc.c b/hw/misc/imx_rngc.c index 632c03779c..082c6980ad 100644 --- a/hw/misc/imx_rngc.c +++ b/hw/misc/imx_rngc.c @@ -228,8 +228,10 @@ static void imx_rngc_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->iomem); sysbus_init_irq(sbd, &s->irq); - s->self_test_bh = qemu_bh_new(imx_rngc_self_test, s); - s->seed_bh = qemu_bh_new(imx_rngc_seed, s); + s->self_test_bh = qemu_bh_new_guarded(imx_rngc_self_test, s, + &dev->mem_reentrancy_guard); + s->seed_bh = qemu_bh_new_guarded(imx_rngc_seed, s, + &dev->mem_reentrancy_guard); } static void imx_rngc_reset(DeviceState *dev) diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c index 43bb1f56ba..80a789f32b 100644 --- a/hw/misc/macio/mac_dbdma.c +++ b/hw/misc/macio/mac_dbdma.c @@ -914,7 +914,7 @@ static void mac_dbdma_realize(DeviceState *dev, Error **errp) { DBDMAState *s = MAC_DBDMA(dev); - s->bh = qemu_bh_new(DBDMA_run_bh, s); + s->bh = qemu_bh_new_guarded(DBDMA_run_bh, s, &dev->mem_reentrancy_guard); } static void mac_dbdma_class_init(ObjectClass *oc, void *data) diff --git a/hw/misc/macio/meson.build b/hw/misc/macio/meson.build index 17282da20a..8984b818b0 100644 --- a/hw/misc/macio/meson.build +++ b/hw/misc/macio/meson.build @@ -5,4 +5,4 @@ macio_ss.add(when: 'CONFIG_MACIO_GPIO', if_true: files('gpio.c')) macio_ss.add(when: 'CONFIG_MAC_DBDMA', if_true: files('mac_dbdma.c')) macio_ss.add(when: 'CONFIG_MAC_PMU', if_true: files('pmu.c')) -softmmu_ss.add_all(when: 'CONFIG_MACIO', if_true: macio_ss) +system_ss.add_all(when: 'CONFIG_MACIO', if_true: macio_ss) diff --git a/hw/misc/meson.build b/hw/misc/meson.build index a40245ad44..05877f61cc 100644 --- a/hw/misc/meson.build +++ b/hw/misc/meson.build @@ -1,55 +1,58 @@ -softmmu_ss.add(when: 'CONFIG_APPLESMC', if_true: files('applesmc.c')) -softmmu_ss.add(when: 'CONFIG_EDU', if_true: files('edu.c')) -softmmu_ss.add(when: 'CONFIG_FW_CFG_DMA', if_true: files('vmcoreinfo.c')) -softmmu_ss.add(when: 'CONFIG_ISA_DEBUG', if_true: files('debugexit.c')) -softmmu_ss.add(when: 'CONFIG_ISA_TESTDEV', if_true: files('pc-testdev.c')) -softmmu_ss.add(when: 'CONFIG_PCA9552', if_true: files('pca9552.c')) -softmmu_ss.add(when: 'CONFIG_PCI_TESTDEV', if_true: files('pci-testdev.c')) -softmmu_ss.add(when: 'CONFIG_UNIMP', if_true: files('unimp.c')) -softmmu_ss.add(when: 'CONFIG_EMPTY_SLOT', if_true: files('empty_slot.c')) -softmmu_ss.add(when: 'CONFIG_LED', if_true: files('led.c')) -softmmu_ss.add(when: 'CONFIG_PVPANIC_COMMON', if_true: files('pvpanic.c')) +system_ss.add(when: 'CONFIG_APPLESMC', if_true: files('applesmc.c')) +system_ss.add(when: 'CONFIG_EDU', if_true: files('edu.c')) +system_ss.add(when: 'CONFIG_FW_CFG_DMA', if_true: files('vmcoreinfo.c')) +system_ss.add(when: 'CONFIG_ISA_DEBUG', if_true: files('debugexit.c')) +system_ss.add(when: 'CONFIG_ISA_TESTDEV', if_true: files('pc-testdev.c')) +system_ss.add(when: 'CONFIG_PCA9552', if_true: files('pca9552.c')) +system_ss.add(when: 'CONFIG_PCI_TESTDEV', if_true: files('pci-testdev.c')) +system_ss.add(when: 'CONFIG_UNIMP', if_true: files('unimp.c')) +system_ss.add(when: 
'CONFIG_EMPTY_SLOT', if_true: files('empty_slot.c')) +system_ss.add(when: 'CONFIG_LED', if_true: files('led.c')) +system_ss.add(when: 'CONFIG_PVPANIC_COMMON', if_true: files('pvpanic.c')) # ARM devices -softmmu_ss.add(when: 'CONFIG_PL310', if_true: files('arm_l2x0.c')) -softmmu_ss.add(when: 'CONFIG_INTEGRATOR_DEBUG', if_true: files('arm_integrator_debug.c')) -softmmu_ss.add(when: 'CONFIG_A9SCU', if_true: files('a9scu.c')) -softmmu_ss.add(when: 'CONFIG_ARM11SCU', if_true: files('arm11scu.c')) +system_ss.add(when: 'CONFIG_PL310', if_true: files('arm_l2x0.c')) +system_ss.add(when: 'CONFIG_INTEGRATOR_DEBUG', if_true: files('arm_integrator_debug.c')) +system_ss.add(when: 'CONFIG_A9SCU', if_true: files('a9scu.c')) +system_ss.add(when: 'CONFIG_ARM11SCU', if_true: files('arm11scu.c')) -softmmu_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_ras.c')) +system_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_ras.c')) # Mac devices -softmmu_ss.add(when: 'CONFIG_MOS6522', if_true: files('mos6522.c')) +system_ss.add(when: 'CONFIG_MOS6522', if_true: files('mos6522.c')) # virt devices -softmmu_ss.add(when: 'CONFIG_VIRT_CTRL', if_true: files('virt_ctrl.c')) +system_ss.add(when: 'CONFIG_VIRT_CTRL', if_true: files('virt_ctrl.c')) # RISC-V devices -softmmu_ss.add(when: 'CONFIG_MCHP_PFSOC_DMC', if_true: files('mchp_pfsoc_dmc.c')) -softmmu_ss.add(when: 'CONFIG_MCHP_PFSOC_IOSCB', if_true: files('mchp_pfsoc_ioscb.c')) -softmmu_ss.add(when: 'CONFIG_MCHP_PFSOC_SYSREG', if_true: files('mchp_pfsoc_sysreg.c')) -softmmu_ss.add(when: 'CONFIG_SIFIVE_TEST', if_true: files('sifive_test.c')) -softmmu_ss.add(when: 'CONFIG_SIFIVE_E_PRCI', if_true: files('sifive_e_prci.c')) -softmmu_ss.add(when: 'CONFIG_SIFIVE_U_OTP', if_true: files('sifive_u_otp.c')) -softmmu_ss.add(when: 'CONFIG_SIFIVE_U_PRCI', if_true: files('sifive_u_prci.c')) +system_ss.add(when: 'CONFIG_MCHP_PFSOC_DMC', if_true: files('mchp_pfsoc_dmc.c')) +system_ss.add(when: 'CONFIG_MCHP_PFSOC_IOSCB', if_true: files('mchp_pfsoc_ioscb.c')) +system_ss.add(when: 'CONFIG_MCHP_PFSOC_SYSREG', if_true: files('mchp_pfsoc_sysreg.c')) +system_ss.add(when: 'CONFIG_SIFIVE_TEST', if_true: files('sifive_test.c')) +system_ss.add(when: 'CONFIG_SIFIVE_E_PRCI', if_true: files('sifive_e_prci.c')) +system_ss.add(when: 'CONFIG_SIFIVE_U_OTP', if_true: files('sifive_u_otp.c')) +system_ss.add(when: 'CONFIG_SIFIVE_U_PRCI', if_true: files('sifive_u_prci.c')) subdir('macio') -softmmu_ss.add(when: 'CONFIG_IVSHMEM_DEVICE', if_true: files('ivshmem.c')) +system_ss.add(when: 'CONFIG_IVSHMEM_DEVICE', if_true: files('ivshmem.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_A10_CCM', if_true: files('allwinner-a10-ccm.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_A10_DRAMC', if_true: files('allwinner-a10-dramc.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-ccu.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_SRAMC', if_true: files('allwinner-sramc.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_A10_CCM', if_true: files('allwinner-a10-ccm.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_A10_DRAMC', if_true: files('allwinner-a10-dramc.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-ccu.c')) specific_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-cpucfg.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-dramc.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-sysctrl.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sid.c')) -softmmu_ss.add(when: 
'CONFIG_AXP209_PMU', if_true: files('axp209.c')) -softmmu_ss.add(when: 'CONFIG_REALVIEW', if_true: files('arm_sysctl.c')) -softmmu_ss.add(when: 'CONFIG_NSERIES', if_true: files('cbus.c')) -softmmu_ss.add(when: 'CONFIG_ECCMEMCTL', if_true: files('eccmemctl.c')) -softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_pmu.c', 'exynos4210_clk.c', 'exynos4210_rng.c')) -softmmu_ss.add(when: 'CONFIG_IMX', if_true: files( +system_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-dramc.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-sysctrl.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sid.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40-ccu.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40-dramc.c')) +system_ss.add(when: 'CONFIG_AXP2XX_PMU', if_true: files('axp2xx.c')) +system_ss.add(when: 'CONFIG_REALVIEW', if_true: files('arm_sysctl.c')) +system_ss.add(when: 'CONFIG_NSERIES', if_true: files('cbus.c')) +system_ss.add(when: 'CONFIG_ECCMEMCTL', if_true: files('eccmemctl.c')) +system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_pmu.c', 'exynos4210_clk.c', 'exynos4210_rng.c')) +system_ss.add(when: 'CONFIG_IMX', if_true: files( 'imx25_ccm.c', 'imx31_ccm.c', 'imx6_ccm.c', @@ -61,22 +64,22 @@ softmmu_ss.add(when: 'CONFIG_IMX', if_true: files( 'imx_ccm.c', 'imx_rngc.c', )) -softmmu_ss.add(when: 'CONFIG_MAINSTONE', if_true: files('mst_fpga.c')) -softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files( +system_ss.add(when: 'CONFIG_MAINSTONE', if_true: files('mst_fpga.c')) +system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files( 'npcm7xx_clk.c', 'npcm7xx_gcr.c', 'npcm7xx_mft.c', 'npcm7xx_pwm.c', 'npcm7xx_rng.c', )) -softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files( +system_ss.add(when: 'CONFIG_OMAP', if_true: files( 'omap_clk.c', 'omap_gpmc.c', 'omap_l4.c', 'omap_sdrc.c', 'omap_tap.c', )) -softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files( +system_ss.add(when: 'CONFIG_RASPI', if_true: files( 'bcm2835_mbox.c', 'bcm2835_mphi.c', 'bcm2835_property.c', @@ -85,35 +88,35 @@ softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files( 'bcm2835_cprman.c', 'bcm2835_powermgt.c', )) -softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c')) -softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp-crf.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp-apu-ctrl.c')) +system_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c')) +system_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c')) +system_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp-crf.c')) +system_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp-apu-ctrl.c')) specific_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-crl.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files( +system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files( 'xlnx-versal-xramc.c', 'xlnx-versal-pmc-iou-slcr.c', )) -softmmu_ss.add(when: 'CONFIG_STM32F2XX_SYSCFG', if_true: files('stm32f2xx_syscfg.c')) -softmmu_ss.add(when: 'CONFIG_STM32F4XX_SYSCFG', if_true: files('stm32f4xx_syscfg.c')) -softmmu_ss.add(when: 'CONFIG_STM32F4XX_EXTI', if_true: files('stm32f4xx_exti.c')) -softmmu_ss.add(when: 'CONFIG_MPS2_FPGAIO', if_true: files('mps2-fpgaio.c')) -softmmu_ss.add(when: 'CONFIG_MPS2_SCC', if_true: files('mps2-scc.c')) - -softmmu_ss.add(when: 
'CONFIG_TZ_MPC', if_true: files('tz-mpc.c')) -softmmu_ss.add(when: 'CONFIG_TZ_MSC', if_true: files('tz-msc.c')) -softmmu_ss.add(when: 'CONFIG_TZ_PPC', if_true: files('tz-ppc.c')) -softmmu_ss.add(when: 'CONFIG_IOTKIT_SECCTL', if_true: files('iotkit-secctl.c')) -softmmu_ss.add(when: 'CONFIG_IOTKIT_SYSCTL', if_true: files('iotkit-sysctl.c')) -softmmu_ss.add(when: 'CONFIG_IOTKIT_SYSINFO', if_true: files('iotkit-sysinfo.c')) -softmmu_ss.add(when: 'CONFIG_ARMSSE_CPU_PWRCTRL', if_true: files('armsse-cpu-pwrctrl.c')) -softmmu_ss.add(when: 'CONFIG_ARMSSE_CPUID', if_true: files('armsse-cpuid.c')) -softmmu_ss.add(when: 'CONFIG_ARMSSE_MHU', if_true: files('armsse-mhu.c')) - -softmmu_ss.add(when: 'CONFIG_PVPANIC_ISA', if_true: files('pvpanic-isa.c')) -softmmu_ss.add(when: 'CONFIG_PVPANIC_PCI', if_true: files('pvpanic-pci.c')) -softmmu_ss.add(when: 'CONFIG_AUX', if_true: files('auxbus.c')) -softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( +system_ss.add(when: 'CONFIG_STM32F2XX_SYSCFG', if_true: files('stm32f2xx_syscfg.c')) +system_ss.add(when: 'CONFIG_STM32F4XX_SYSCFG', if_true: files('stm32f4xx_syscfg.c')) +system_ss.add(when: 'CONFIG_STM32F4XX_EXTI', if_true: files('stm32f4xx_exti.c')) +system_ss.add(when: 'CONFIG_MPS2_FPGAIO', if_true: files('mps2-fpgaio.c')) +system_ss.add(when: 'CONFIG_MPS2_SCC', if_true: files('mps2-scc.c')) + +system_ss.add(when: 'CONFIG_TZ_MPC', if_true: files('tz-mpc.c')) +system_ss.add(when: 'CONFIG_TZ_MSC', if_true: files('tz-msc.c')) +system_ss.add(when: 'CONFIG_TZ_PPC', if_true: files('tz-ppc.c')) +system_ss.add(when: 'CONFIG_IOTKIT_SECCTL', if_true: files('iotkit-secctl.c')) +system_ss.add(when: 'CONFIG_IOTKIT_SYSCTL', if_true: files('iotkit-sysctl.c')) +system_ss.add(when: 'CONFIG_IOTKIT_SYSINFO', if_true: files('iotkit-sysinfo.c')) +system_ss.add(when: 'CONFIG_ARMSSE_CPU_PWRCTRL', if_true: files('armsse-cpu-pwrctrl.c')) +system_ss.add(when: 'CONFIG_ARMSSE_CPUID', if_true: files('armsse-cpuid.c')) +system_ss.add(when: 'CONFIG_ARMSSE_MHU', if_true: files('armsse-mhu.c')) + +system_ss.add(when: 'CONFIG_PVPANIC_ISA', if_true: files('pvpanic-isa.c')) +system_ss.add(when: 'CONFIG_PVPANIC_PCI', if_true: files('pvpanic-pci.c')) +system_ss.add(when: 'CONFIG_AUX', if_true: files('auxbus.c')) +system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( 'aspeed_hace.c', 'aspeed_i3c.c', 'aspeed_lpc.c', @@ -123,12 +126,12 @@ softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( 'aspeed_xdma.c', 'aspeed_peci.c')) -softmmu_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-sysreg.c')) -softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_rng.c')) +system_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-sysreg.c')) +system_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_rng.c')) -softmmu_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_ahb_apb_pnp.c')) +system_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_ahb_apb_pnp.c')) -softmmu_ss.add(when: 'CONFIG_I2C', if_true: files('i2c-echo.c')) +system_ss.add(when: 'CONFIG_I2C', if_true: files('i2c-echo.c')) specific_ss.add(when: 'CONFIG_AVR_POWER', if_true: files('avr_power.c')) @@ -137,7 +140,7 @@ specific_ss.add(when: 'CONFIG_MAC_VIA', if_true: files('mac_via.c')) specific_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_cmgcr.c', 'mips_cpc.c')) specific_ss.add(when: 'CONFIG_MIPS_ITU', if_true: files('mips_itu.c')) -softmmu_ss.add(when: 'CONFIG_SBSA_REF', if_true: files('sbsa_ec.c')) +system_ss.add(when: 'CONFIG_SBSA_REF', if_true: files('sbsa_ec.c')) # HPPA devices -softmmu_ss.add(when: 'CONFIG_LASI', 
if_true: files('lasi.c')) +system_ss.add(when: 'CONFIG_LASI', if_true: files('lasi.c')) diff --git a/hw/misc/trace-events b/hw/misc/trace-events index c47876a902..4d1a0e17af 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -15,18 +15,36 @@ allwinner_h3_dramctl_write(uint64_t offset, uint64_t data, unsigned size) "Write allwinner_h3_dramphy_read(uint64_t offset, uint64_t data, unsigned size) "Read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 allwinner_h3_dramphy_write(uint64_t offset, uint64_t data, unsigned size) "write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +# allwinner-r40-dramc.c +allwinner_r40_dramc_detect_cells_disable(void) "Disable detect cells" +allwinner_r40_dramc_detect_cells_enable(void) "Enable detect cells" +allwinner_r40_dramc_map_rows(uint8_t row_bits, uint8_t bank_bits, uint8_t col_bits) "DRAM layout: row_bits %d, bank_bits %d, col_bits %d" +allwinner_r40_dramc_offset_to_cell(uint64_t offset, int row, int bank, int col) "offset 0x%" PRIx64 " row %d bank %d col %d" +allwinner_r40_dramc_detect_cell_write(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 "" +allwinner_r40_dramc_detect_cell_read(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 "" +allwinner_r40_dramcom_read(uint64_t offset, uint64_t data, unsigned size) "Read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_r40_dramcom_write(uint64_t offset, uint64_t data, unsigned size) "Write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_r40_dramctl_read(uint64_t offset, uint64_t data, unsigned size) "Read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_r40_dramctl_write(uint64_t offset, uint64_t data, unsigned size) "Write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_r40_dramphy_read(uint64_t offset, uint64_t data, unsigned size) "Read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_r40_dramphy_write(uint64_t offset, uint64_t data, unsigned size) "write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 + # allwinner-sid.c allwinner_sid_read(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 allwinner_sid_write(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +# allwinner-sramc.c +allwinner_sramc_read(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 +allwinner_sramc_write(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 + # avr_power.c avr_power_read(uint8_t value) "power_reduc read value:%u" avr_power_write(uint8_t value) "power_reduc write value:%u" -# axp209.c -axp209_rx(uint8_t reg, uint8_t data) "Read reg 0x%" PRIx8 " : 0x%" PRIx8 -axp209_select(uint8_t reg) "Accessing reg 0x%" PRIx8 -axp209_tx(uint8_t reg, uint8_t data) "Write reg 0x%" PRIx8 " : 0x%" PRIx8 +# axp2xx +axp2xx_rx(uint8_t reg, uint8_t data) "Read reg 0x%" PRIx8 " : 0x%" PRIx8 +axp2xx_select(uint8_t reg) "Accessing reg 0x%" PRIx8 +axp2xx_tx(uint8_t reg, uint8_t data) "Write reg 0x%" PRIx8 " : 0x%" PRIx8 # eccmemctl.c ecc_mem_writel_mer(uint32_t val) "Write memory enable 0x%08x" diff --git a/hw/net/Kconfig b/hw/net/Kconfig index 18c7851efe..98e00be4f9 100644 --- a/hw/net/Kconfig +++ b/hw/net/Kconfig @@ -56,7 +56,7 @@ config RTL8139_PCI config VMXNET3_PCI bool - default y if PCI_DEVICES && PC_PCI + default y if PCI_DEVICES depends on PCI config SMC91C111 diff --git a/hw/net/allwinner-sun8i-emac.c 
b/hw/net/allwinner-sun8i-emac.c index b861d8ff35..fac4405f45 100644 --- a/hw/net/allwinner-sun8i-emac.c +++ b/hw/net/allwinner-sun8i-emac.c @@ -350,8 +350,13 @@ static void allwinner_sun8i_emac_get_desc(AwSun8iEmacState *s, FrameDescriptor *desc, uint32_t phys_addr) { - dma_memory_read(&s->dma_as, phys_addr, desc, sizeof(*desc), + uint32_t desc_words[4]; + dma_memory_read(&s->dma_as, phys_addr, &desc_words, sizeof(desc_words), MEMTXATTRS_UNSPECIFIED); + desc->status = le32_to_cpu(desc_words[0]); + desc->status2 = le32_to_cpu(desc_words[1]); + desc->addr = le32_to_cpu(desc_words[2]); + desc->next = le32_to_cpu(desc_words[3]); } static uint32_t allwinner_sun8i_emac_next_desc(AwSun8iEmacState *s, @@ -400,10 +405,15 @@ static uint32_t allwinner_sun8i_emac_tx_desc(AwSun8iEmacState *s, } static void allwinner_sun8i_emac_flush_desc(AwSun8iEmacState *s, - FrameDescriptor *desc, + const FrameDescriptor *desc, uint32_t phys_addr) { - dma_memory_write(&s->dma_as, phys_addr, desc, sizeof(*desc), + uint32_t desc_words[4]; + desc_words[0] = cpu_to_le32(desc->status); + desc_words[1] = cpu_to_le32(desc->status2); + desc_words[2] = cpu_to_le32(desc->addr); + desc_words[3] = cpu_to_le32(desc->next); + dma_memory_write(&s->dma_as, phys_addr, &desc_words, sizeof(desc_words), MEMTXATTRS_UNSPECIFIED); } @@ -638,8 +648,7 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset, break; case REG_TX_CUR_BUF: /* Transmit Current Buffer */ if (s->tx_desc_curr != 0) { - dma_memory_read(&s->dma_as, s->tx_desc_curr, &desc, sizeof(desc), - MEMTXATTRS_UNSPECIFIED); + allwinner_sun8i_emac_get_desc(s, &desc, s->tx_desc_curr); value = desc.addr; } else { value = 0; @@ -652,8 +661,7 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset, break; case REG_RX_CUR_BUF: /* Receive Current Buffer */ if (s->rx_desc_curr != 0) { - dma_memory_read(&s->dma_as, s->rx_desc_curr, &desc, sizeof(desc), - MEMTXATTRS_UNSPECIFIED); + allwinner_sun8i_emac_get_desc(s, &desc, s->rx_desc_curr); value = desc.addr; } else { value = 0; diff --git a/hw/net/can/meson.build b/hw/net/can/meson.build index 8fabbd9ee6..7382344628 100644 --- a/hw/net/can/meson.build +++ b/hw/net/can/meson.build @@ -1,7 +1,8 @@ -softmmu_ss.add(when: 'CONFIG_CAN_SJA1000', if_true: files('can_sja1000.c')) -softmmu_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_kvaser_pci.c')) -softmmu_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_pcm3680_pci.c')) -softmmu_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_mioe3680_pci.c')) -softmmu_ss.add(when: 'CONFIG_CAN_CTUCANFD', if_true: files('ctucan_core.c')) -softmmu_ss.add(when: 'CONFIG_CAN_CTUCANFD_PCI', if_true: files('ctucan_pci.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-can.c')) +system_ss.add(when: 'CONFIG_CAN_SJA1000', if_true: files('can_sja1000.c')) +system_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_kvaser_pci.c')) +system_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_pcm3680_pci.c')) +system_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_mioe3680_pci.c')) +system_ss.add(when: 'CONFIG_CAN_CTUCANFD', if_true: files('ctucan_core.c')) +system_ss.add(when: 'CONFIG_CAN_CTUCANFD_PCI', if_true: files('ctucan_pci.c')) +system_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-can.c')) +system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-canfd.c')) diff --git a/hw/net/can/trace-events b/hw/net/can/trace-events index 8346a98ab5..de64ac1b31 100644 --- a/hw/net/can/trace-events +++ b/hw/net/can/trace-events @@ -7,3 
+7,10 @@ xlnx_can_filter_mask_pre_write(uint8_t filter_num, uint32_t value) "Filter%d MAS xlnx_can_tx_data(uint32_t id, uint8_t dlc, uint8_t db0, uint8_t db1, uint8_t db2, uint8_t db3, uint8_t db4, uint8_t db5, uint8_t db6, uint8_t db7) "Frame: ID: 0x%08x DLC: 0x%02x DATA: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x" xlnx_can_rx_data(uint32_t id, uint32_t dlc, uint8_t db0, uint8_t db1, uint8_t db2, uint8_t db3, uint8_t db4, uint8_t db5, uint8_t db6, uint8_t db7) "Frame: ID: 0x%08x DLC: 0x%02x DATA: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x" xlnx_can_rx_discard(uint32_t status) "Controller is not enabled for bus communication. Status Register: 0x%08x" + +# xlnx-versal-canfd.c +xlnx_canfd_update_irq(char *path, uint32_t isr, uint32_t ier, uint32_t irq) "%s: ISR: 0x%08x IER: 0x%08x IRQ: 0x%08x" +xlnx_canfd_rx_fifo_filter_reject(char *path, uint32_t id, uint8_t dlc) "%s: Frame: ID: 0x%08x DLC: 0x%02x" +xlnx_canfd_rx_data(char *path, uint32_t id, uint8_t dlc, uint8_t flags) "%s: Frame: ID: 0x%08x DLC: 0x%02x CANFD Flag: 0x%02x" +xlnx_canfd_tx_data(char *path, uint32_t id, uint8_t dlc, uint8_t flags) "%s: Frame: ID: 0x%08x DLC: 0x%02x CANFD Flag: 0x%02x" +xlnx_canfd_reset(char *path, uint32_t val) "%s: Resetting controller with value = 0x%08x" diff --git a/hw/net/can/xlnx-versal-canfd.c b/hw/net/can/xlnx-versal-canfd.c new file mode 100644 index 0000000000..5b8ce0a285 --- /dev/null +++ b/hw/net/can/xlnx-versal-canfd.c @@ -0,0 +1,2107 @@ +/* + * QEMU model of the Xilinx Versal CANFD device. + * + * This implementation is based on the following datasheet: + * https://docs.xilinx.com/v/u/2.0-English/pg223-canfd + * + * Copyright (c) 2023 Advanced Micro Devices, Inc. + * + * Written-by: Vikram Garhwal <vikram.garhwal@amd.com> + * + * Based on QEMU CANFD Device emulation implemented by Jin Yang, Deniz Eren and + * Pavel Pisa + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/register.h" +#include "qapi/error.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/cutils.h" +#include "qemu/event_notifier.h" +#include "hw/qdev-properties.h" +#include "qom/object_interfaces.h" +#include "migration/vmstate.h" +#include "hw/net/xlnx-versal-canfd.h" +#include "trace.h" + +REG32(SOFTWARE_RESET_REGISTER, 0x0) + FIELD(SOFTWARE_RESET_REGISTER, CEN, 1, 1) + FIELD(SOFTWARE_RESET_REGISTER, SRST, 0, 1) +REG32(MODE_SELECT_REGISTER, 0x4) + FIELD(MODE_SELECT_REGISTER, ITO, 8, 8) + FIELD(MODE_SELECT_REGISTER, ABR, 7, 1) + FIELD(MODE_SELECT_REGISTER, SBR, 6, 1) + FIELD(MODE_SELECT_REGISTER, DPEE, 5, 1) + FIELD(MODE_SELECT_REGISTER, DAR, 4, 1) + FIELD(MODE_SELECT_REGISTER, BRSD, 3, 1) + FIELD(MODE_SELECT_REGISTER, SNOOP, 2, 1) + FIELD(MODE_SELECT_REGISTER, LBACK, 1, 1) + FIELD(MODE_SELECT_REGISTER, SLEEP, 0, 1) +REG32(ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, 0x8) + FIELD(ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, BRP, 0, 8) +REG32(ARBITRATION_PHASE_BIT_TIMING_REGISTER, 0xc) + FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, SJW, 16, 7) + FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, TS2, 8, 7) + FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, TS1, 0, 8) +REG32(ERROR_COUNTER_REGISTER, 0x10) + FIELD(ERROR_COUNTER_REGISTER, REC, 8, 8) + FIELD(ERROR_COUNTER_REGISTER, TEC, 0, 8) +REG32(ERROR_STATUS_REGISTER, 0x14) + FIELD(ERROR_STATUS_REGISTER, F_BERR, 11, 1) + FIELD(ERROR_STATUS_REGISTER, F_STER, 10, 1) + FIELD(ERROR_STATUS_REGISTER, F_FMER, 9, 1) + FIELD(ERROR_STATUS_REGISTER, F_CRCER, 8, 1) + FIELD(ERROR_STATUS_REGISTER, ACKER, 4, 1) + FIELD(ERROR_STATUS_REGISTER, BERR, 3, 1) + FIELD(ERROR_STATUS_REGISTER, STER, 2, 1) + FIELD(ERROR_STATUS_REGISTER, FMER, 1, 1) + FIELD(ERROR_STATUS_REGISTER, CRCER, 0, 1) +REG32(STATUS_REGISTER, 0x18) + FIELD(STATUS_REGISTER, TDCV, 16, 7) + FIELD(STATUS_REGISTER, SNOOP, 12, 1) + FIELD(STATUS_REGISTER, BSFR_CONFIG, 10, 1) + FIELD(STATUS_REGISTER, PEE_CONFIG, 9, 1) + FIELD(STATUS_REGISTER, ESTAT, 7, 2) + FIELD(STATUS_REGISTER, ERRWRN, 6, 1) + FIELD(STATUS_REGISTER, BBSY, 5, 1) + FIELD(STATUS_REGISTER, BIDLE, 4, 1) + FIELD(STATUS_REGISTER, NORMAL, 3, 1) + FIELD(STATUS_REGISTER, SLEEP, 2, 1) + FIELD(STATUS_REGISTER, LBACK, 1, 1) + FIELD(STATUS_REGISTER, CONFIG, 0, 1) +REG32(INTERRUPT_STATUS_REGISTER, 0x1c) + FIELD(INTERRUPT_STATUS_REGISTER, TXEWMFLL, 31, 1) + FIELD(INTERRUPT_STATUS_REGISTER, TXEOFLW, 30, 1) + FIELD(INTERRUPT_STATUS_REGISTER, RXBOFLW_BI, 24, 6) + FIELD(INTERRUPT_STATUS_REGISTER, RXLRM_BI, 18, 6) + FIELD(INTERRUPT_STATUS_REGISTER, RXMNF, 17, 1) + FIELD(INTERRUPT_STATUS_REGISTER, RXFWMFLL_1, 16, 1) + FIELD(INTERRUPT_STATUS_REGISTER, RXFOFLW_1, 15, 1) + FIELD(INTERRUPT_STATUS_REGISTER, TXCRS, 14, 1) + FIELD(INTERRUPT_STATUS_REGISTER, TXRRS, 13, 1) + FIELD(INTERRUPT_STATUS_REGISTER, RXFWMFLL, 12, 1) + FIELD(INTERRUPT_STATUS_REGISTER, WKUP, 11, 1) + FIELD(INTERRUPT_STATUS_REGISTER, SLP, 10, 1) + FIELD(INTERRUPT_STATUS_REGISTER, BSOFF, 9, 1) + /* + * In the original HW description below bit is named as ERROR but an ERROR + * field name collides with a macro in Windows build. To avoid Windows build + * failures, the bit is renamed to ERROR_BIT. 
+ */ + FIELD(INTERRUPT_STATUS_REGISTER, ERROR_BIT, 8, 1) + FIELD(INTERRUPT_STATUS_REGISTER, RXFOFLW, 6, 1) + FIELD(INTERRUPT_STATUS_REGISTER, TSCNT_OFLW, 5, 1) + FIELD(INTERRUPT_STATUS_REGISTER, RXOK, 4, 1) + FIELD(INTERRUPT_STATUS_REGISTER, BSFRD, 3, 1) + FIELD(INTERRUPT_STATUS_REGISTER, PEE, 2, 1) + FIELD(INTERRUPT_STATUS_REGISTER, TXOK, 1, 1) + FIELD(INTERRUPT_STATUS_REGISTER, ARBLST, 0, 1) +REG32(INTERRUPT_ENABLE_REGISTER, 0x20) + FIELD(INTERRUPT_ENABLE_REGISTER, ETXEWMFLL, 31, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ETXEOFLW, 30, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ERXMNF, 17, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ERXFWMFLL_1, 16, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ERXFOFLW_1, 15, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ETXCRS, 14, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ETXRRS, 13, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ERXFWMFLL, 12, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, EWKUP, 11, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ESLP, 10, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, EBSOFF, 9, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, EERROR, 8, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ERFXOFLW, 6, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ETSCNT_OFLW, 5, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ERXOK, 4, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, EBSFRD, 3, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, EPEE, 2, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ETXOK, 1, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, EARBLOST, 0, 1) +REG32(INTERRUPT_CLEAR_REGISTER, 0x24) + FIELD(INTERRUPT_CLEAR_REGISTER, CTXEWMFLL, 31, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CTXEOFLW, 30, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CRXMNF, 17, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CRXFWMFLL_1, 16, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CRXFOFLW_1, 15, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CTXCRS, 14, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CTXRRS, 13, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CRXFWMFLL, 12, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CWKUP, 11, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CSLP, 10, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CBSOFF, 9, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CERROR, 8, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CRFXOFLW, 6, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CTSCNT_OFLW, 5, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CRXOK, 4, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CBSFRD, 3, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CPEE, 2, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CTXOK, 1, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CARBLOST, 0, 1) +REG32(TIMESTAMP_REGISTER, 0x28) + FIELD(TIMESTAMP_REGISTER, TIMESTAMP_CNT, 16, 16) + FIELD(TIMESTAMP_REGISTER, CTS, 0, 1) +REG32(DATA_PHASE_BAUD_RATE_PRESCALER_REGISTER, 0x88) + FIELD(DATA_PHASE_BAUD_RATE_PRESCALER_REGISTER, TDC, 16, 1) + FIELD(DATA_PHASE_BAUD_RATE_PRESCALER_REGISTER, TDCOFF, 8, 6) + FIELD(DATA_PHASE_BAUD_RATE_PRESCALER_REGISTER, DP_BRP, 0, 8) +REG32(DATA_PHASE_BIT_TIMING_REGISTER, 0x8c) + FIELD(DATA_PHASE_BIT_TIMING_REGISTER, DP_SJW, 16, 4) + FIELD(DATA_PHASE_BIT_TIMING_REGISTER, DP_TS2, 8, 4) + FIELD(DATA_PHASE_BIT_TIMING_REGISTER, DP_TS1, 0, 5) +REG32(TX_BUFFER_READY_REQUEST_REGISTER, 0x90) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR31, 31, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR30, 30, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR29, 29, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR28, 28, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR27, 27, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR26, 26, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR25, 25, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR24, 24, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR23, 23, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR22, 22, 1) + 
FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR21, 21, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR20, 20, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR19, 19, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR18, 18, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR17, 17, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR16, 16, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR15, 15, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR14, 14, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR13, 13, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR12, 12, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR11, 11, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR10, 10, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR9, 9, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR8, 8, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR7, 7, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR6, 6, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR5, 5, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR4, 4, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR3, 3, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR2, 2, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR1, 1, 1) + FIELD(TX_BUFFER_READY_REQUEST_REGISTER, RR0, 0, 1) +REG32(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, 0x94) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS31, 31, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS30, 30, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS29, 29, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS28, 28, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS27, 27, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS26, 26, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS25, 25, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS24, 24, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS23, 23, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS22, 22, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS21, 21, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS20, 20, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS19, 19, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS18, 18, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS17, 17, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS16, 16, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS15, 15, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS14, 14, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS13, 13, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS12, 12, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS11, 11, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS10, 10, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS9, 9, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS8, 8, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS7, 7, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS6, 6, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS5, 5, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS4, 4, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS3, 3, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS2, 2, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS1, 1, 1) + 
FIELD(INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, ERRS0, 0, 1) +REG32(TX_BUFFER_CANCEL_REQUEST_REGISTER, 0x98) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR31, 31, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR30, 30, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR29, 29, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR28, 28, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR27, 27, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR26, 26, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR25, 25, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR24, 24, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR23, 23, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR22, 22, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR21, 21, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR20, 20, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR19, 19, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR18, 18, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR17, 17, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR16, 16, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR15, 15, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR14, 14, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR13, 13, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR12, 12, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR11, 11, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR10, 10, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR9, 9, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR8, 8, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR7, 7, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR6, 6, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR5, 5, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR4, 4, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR3, 3, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR2, 2, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR1, 1, 1) + FIELD(TX_BUFFER_CANCEL_REQUEST_REGISTER, CR0, 0, 1) +REG32(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, 0x9c) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS31, 31, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS30, 30, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS29, 29, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS28, 28, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS27, 27, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS26, 26, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS25, 25, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS24, 24, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS23, 23, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS22, 22, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS21, 21, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS20, 20, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS19, 19, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS18, 18, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS17, 17, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS16, 16, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS15, 15, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS14, 14, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS13, 13, + 1) + 
FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS12, 12, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS11, 11, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS10, 10, + 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS9, 9, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS8, 8, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS7, 7, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS6, 6, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS5, 5, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS4, 4, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS3, 3, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS2, 2, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS1, 1, 1) + FIELD(INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, ECRS0, 0, 1) +REG32(TX_EVENT_FIFO_STATUS_REGISTER, 0xa0) + FIELD(TX_EVENT_FIFO_STATUS_REGISTER, TXE_FL, 8, 6) + FIELD(TX_EVENT_FIFO_STATUS_REGISTER, TXE_IRI, 7, 1) + FIELD(TX_EVENT_FIFO_STATUS_REGISTER, TXE_RI, 0, 5) +REG32(TX_EVENT_FIFO_WATERMARK_REGISTER, 0xa4) + FIELD(TX_EVENT_FIFO_WATERMARK_REGISTER, TXE_FWM, 0, 5) +REG32(ACCEPTANCE_FILTER_CONTROL_REGISTER, 0xe0) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF31, 31, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF30, 30, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF29, 29, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF28, 28, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF27, 27, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF26, 26, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF25, 25, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF24, 24, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF23, 23, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF22, 22, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF21, 21, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF20, 20, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF19, 19, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF18, 18, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF17, 17, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF16, 16, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF15, 15, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF14, 14, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF13, 13, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF12, 12, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF11, 11, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF10, 10, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF9, 9, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF8, 8, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF7, 7, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF6, 6, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF5, 5, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF4, 4, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF3, 3, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF2, 2, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF1, 1, 1) + FIELD(ACCEPTANCE_FILTER_CONTROL_REGISTER, UAF0, 0, 1) +REG32(RX_FIFO_STATUS_REGISTER, 0xe8) + FIELD(RX_FIFO_STATUS_REGISTER, FL_1, 24, 7) + FIELD(RX_FIFO_STATUS_REGISTER, IRI_1, 23, 1) + FIELD(RX_FIFO_STATUS_REGISTER, RI_1, 16, 6) + FIELD(RX_FIFO_STATUS_REGISTER, FL, 8, 7) + FIELD(RX_FIFO_STATUS_REGISTER, IRI, 7, 1) + FIELD(RX_FIFO_STATUS_REGISTER, RI, 0, 6) 
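/*
 * [Editor's sketch, not part of this patch] The REG32()/FIELD() declarations
 * used throughout this new file come from QEMU's "hw/registerfields.h":
 * REG32(NAME, addr) defines A_NAME (byte offset) and R_NAME (word index into
 * s->regs[]), while FIELD(NAME, F, shift, len) feeds the extract/deposit
 * helpers the device code below relies on, roughly like:
 *
 *     uint32_t v = 0;
 *     v = FIELD_DP32(v, RX_FIFO_STATUS_REGISTER, FL, 12);          // set FL
 *     unsigned fl = FIELD_EX32(v, RX_FIFO_STATUS_REGISTER, FL);    // -> 12
 *     fl = ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, FL); // via s->regs[]
 */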
+REG32(RX_FIFO_WATERMARK_REGISTER, 0xec) + FIELD(RX_FIFO_WATERMARK_REGISTER, RXFP, 16, 5) + FIELD(RX_FIFO_WATERMARK_REGISTER, RXFWM_1, 8, 6) + FIELD(RX_FIFO_WATERMARK_REGISTER, RXFWM, 0, 6) +REG32(TB_ID_REGISTER, 0x100) + FIELD(TB_ID_REGISTER, ID, 21, 11) + FIELD(TB_ID_REGISTER, SRR_RTR_RRS, 20, 1) + FIELD(TB_ID_REGISTER, IDE, 19, 1) + FIELD(TB_ID_REGISTER, ID_EXT, 1, 18) + FIELD(TB_ID_REGISTER, RTR_RRS, 0, 1) +REG32(TB0_DLC_REGISTER, 0x104) + FIELD(TB0_DLC_REGISTER, DLC, 28, 4) + FIELD(TB0_DLC_REGISTER, FDF, 27, 1) + FIELD(TB0_DLC_REGISTER, BRS, 26, 1) + FIELD(TB0_DLC_REGISTER, RSVD2, 25, 1) + FIELD(TB0_DLC_REGISTER, EFC, 24, 1) + FIELD(TB0_DLC_REGISTER, MM, 16, 8) + FIELD(TB0_DLC_REGISTER, RSVD1, 0, 16) +REG32(TB_DW0_REGISTER, 0x108) + FIELD(TB_DW0_REGISTER, DATA_BYTES0, 24, 8) + FIELD(TB_DW0_REGISTER, DATA_BYTES1, 16, 8) + FIELD(TB_DW0_REGISTER, DATA_BYTES2, 8, 8) + FIELD(TB_DW0_REGISTER, DATA_BYTES3, 0, 8) +REG32(TB_DW1_REGISTER, 0x10c) + FIELD(TB_DW1_REGISTER, DATA_BYTES4, 24, 8) + FIELD(TB_DW1_REGISTER, DATA_BYTES5, 16, 8) + FIELD(TB_DW1_REGISTER, DATA_BYTES6, 8, 8) + FIELD(TB_DW1_REGISTER, DATA_BYTES7, 0, 8) +REG32(TB_DW2_REGISTER, 0x110) + FIELD(TB_DW2_REGISTER, DATA_BYTES8, 24, 8) + FIELD(TB_DW2_REGISTER, DATA_BYTES9, 16, 8) + FIELD(TB_DW2_REGISTER, DATA_BYTES10, 8, 8) + FIELD(TB_DW2_REGISTER, DATA_BYTES11, 0, 8) +REG32(TB_DW3_REGISTER, 0x114) + FIELD(TB_DW3_REGISTER, DATA_BYTES12, 24, 8) + FIELD(TB_DW3_REGISTER, DATA_BYTES13, 16, 8) + FIELD(TB_DW3_REGISTER, DATA_BYTES14, 8, 8) + FIELD(TB_DW3_REGISTER, DATA_BYTES15, 0, 8) +REG32(TB_DW4_REGISTER, 0x118) + FIELD(TB_DW4_REGISTER, DATA_BYTES16, 24, 8) + FIELD(TB_DW4_REGISTER, DATA_BYTES17, 16, 8) + FIELD(TB_DW4_REGISTER, DATA_BYTES18, 8, 8) + FIELD(TB_DW4_REGISTER, DATA_BYTES19, 0, 8) +REG32(TB_DW5_REGISTER, 0x11c) + FIELD(TB_DW5_REGISTER, DATA_BYTES20, 24, 8) + FIELD(TB_DW5_REGISTER, DATA_BYTES21, 16, 8) + FIELD(TB_DW5_REGISTER, DATA_BYTES22, 8, 8) + FIELD(TB_DW5_REGISTER, DATA_BYTES23, 0, 8) +REG32(TB_DW6_REGISTER, 0x120) + FIELD(TB_DW6_REGISTER, DATA_BYTES24, 24, 8) + FIELD(TB_DW6_REGISTER, DATA_BYTES25, 16, 8) + FIELD(TB_DW6_REGISTER, DATA_BYTES26, 8, 8) + FIELD(TB_DW6_REGISTER, DATA_BYTES27, 0, 8) +REG32(TB_DW7_REGISTER, 0x124) + FIELD(TB_DW7_REGISTER, DATA_BYTES28, 24, 8) + FIELD(TB_DW7_REGISTER, DATA_BYTES29, 16, 8) + FIELD(TB_DW7_REGISTER, DATA_BYTES30, 8, 8) + FIELD(TB_DW7_REGISTER, DATA_BYTES31, 0, 8) +REG32(TB_DW8_REGISTER, 0x128) + FIELD(TB_DW8_REGISTER, DATA_BYTES32, 24, 8) + FIELD(TB_DW8_REGISTER, DATA_BYTES33, 16, 8) + FIELD(TB_DW8_REGISTER, DATA_BYTES34, 8, 8) + FIELD(TB_DW8_REGISTER, DATA_BYTES35, 0, 8) +REG32(TB_DW9_REGISTER, 0x12c) + FIELD(TB_DW9_REGISTER, DATA_BYTES36, 24, 8) + FIELD(TB_DW9_REGISTER, DATA_BYTES37, 16, 8) + FIELD(TB_DW9_REGISTER, DATA_BYTES38, 8, 8) + FIELD(TB_DW9_REGISTER, DATA_BYTES39, 0, 8) +REG32(TB_DW10_REGISTER, 0x130) + FIELD(TB_DW10_REGISTER, DATA_BYTES40, 24, 8) + FIELD(TB_DW10_REGISTER, DATA_BYTES41, 16, 8) + FIELD(TB_DW10_REGISTER, DATA_BYTES42, 8, 8) + FIELD(TB_DW10_REGISTER, DATA_BYTES43, 0, 8) +REG32(TB_DW11_REGISTER, 0x134) + FIELD(TB_DW11_REGISTER, DATA_BYTES44, 24, 8) + FIELD(TB_DW11_REGISTER, DATA_BYTES45, 16, 8) + FIELD(TB_DW11_REGISTER, DATA_BYTES46, 8, 8) + FIELD(TB_DW11_REGISTER, DATA_BYTES47, 0, 8) +REG32(TB_DW12_REGISTER, 0x138) + FIELD(TB_DW12_REGISTER, DATA_BYTES48, 24, 8) + FIELD(TB_DW12_REGISTER, DATA_BYTES49, 16, 8) + FIELD(TB_DW12_REGISTER, DATA_BYTES50, 8, 8) + FIELD(TB_DW12_REGISTER, DATA_BYTES51, 0, 8) +REG32(TB_DW13_REGISTER, 0x13c) + FIELD(TB_DW13_REGISTER, 
DATA_BYTES52, 24, 8) + FIELD(TB_DW13_REGISTER, DATA_BYTES53, 16, 8) + FIELD(TB_DW13_REGISTER, DATA_BYTES54, 8, 8) + FIELD(TB_DW13_REGISTER, DATA_BYTES55, 0, 8) +REG32(TB_DW14_REGISTER, 0x140) + FIELD(TB_DW14_REGISTER, DATA_BYTES56, 24, 8) + FIELD(TB_DW14_REGISTER, DATA_BYTES57, 16, 8) + FIELD(TB_DW14_REGISTER, DATA_BYTES58, 8, 8) + FIELD(TB_DW14_REGISTER, DATA_BYTES59, 0, 8) +REG32(TB_DW15_REGISTER, 0x144) + FIELD(TB_DW15_REGISTER, DATA_BYTES60, 24, 8) + FIELD(TB_DW15_REGISTER, DATA_BYTES61, 16, 8) + FIELD(TB_DW15_REGISTER, DATA_BYTES62, 8, 8) + FIELD(TB_DW15_REGISTER, DATA_BYTES63, 0, 8) +REG32(AFMR_REGISTER, 0xa00) + FIELD(AFMR_REGISTER, AMID, 21, 11) + FIELD(AFMR_REGISTER, AMSRR, 20, 1) + FIELD(AFMR_REGISTER, AMIDE, 19, 1) + FIELD(AFMR_REGISTER, AMID_EXT, 1, 18) + FIELD(AFMR_REGISTER, AMRTR, 0, 1) +REG32(AFIR_REGISTER, 0xa04) + FIELD(AFIR_REGISTER, AIID, 21, 11) + FIELD(AFIR_REGISTER, AISRR, 20, 1) + FIELD(AFIR_REGISTER, AIIDE, 19, 1) + FIELD(AFIR_REGISTER, AIID_EXT, 1, 18) + FIELD(AFIR_REGISTER, AIRTR, 0, 1) +REG32(TXE_FIFO_TB_ID_REGISTER, 0x2000) + FIELD(TXE_FIFO_TB_ID_REGISTER, ID, 21, 11) + FIELD(TXE_FIFO_TB_ID_REGISTER, SRR_RTR_RRS, 20, 1) + FIELD(TXE_FIFO_TB_ID_REGISTER, IDE, 19, 1) + FIELD(TXE_FIFO_TB_ID_REGISTER, ID_EXT, 1, 18) + FIELD(TXE_FIFO_TB_ID_REGISTER, RTR_RRS, 0, 1) +REG32(TXE_FIFO_TB_DLC_REGISTER, 0x2004) + FIELD(TXE_FIFO_TB_DLC_REGISTER, DLC, 28, 4) + FIELD(TXE_FIFO_TB_DLC_REGISTER, FDF, 27, 1) + FIELD(TXE_FIFO_TB_DLC_REGISTER, BRS, 26, 1) + FIELD(TXE_FIFO_TB_DLC_REGISTER, ET, 24, 2) + FIELD(TXE_FIFO_TB_DLC_REGISTER, MM, 16, 8) + FIELD(TXE_FIFO_TB_DLC_REGISTER, TIMESTAMP, 0, 16) +REG32(RB_ID_REGISTER, 0x2100) + FIELD(RB_ID_REGISTER, ID, 21, 11) + FIELD(RB_ID_REGISTER, SRR_RTR_RRS, 20, 1) + FIELD(RB_ID_REGISTER, IDE, 19, 1) + FIELD(RB_ID_REGISTER, ID_EXT, 1, 18) + FIELD(RB_ID_REGISTER, RTR_RRS, 0, 1) +REG32(RB_DLC_REGISTER, 0x2104) + FIELD(RB_DLC_REGISTER, DLC, 28, 4) + FIELD(RB_DLC_REGISTER, FDF, 27, 1) + FIELD(RB_DLC_REGISTER, BRS, 26, 1) + FIELD(RB_DLC_REGISTER, ESI, 25, 1) + FIELD(RB_DLC_REGISTER, MATCHED_FILTER_INDEX, 16, 5) + FIELD(RB_DLC_REGISTER, TIMESTAMP, 0, 16) +REG32(RB_DW0_REGISTER, 0x2108) + FIELD(RB_DW0_REGISTER, DATA_BYTES0, 24, 8) + FIELD(RB_DW0_REGISTER, DATA_BYTES1, 16, 8) + FIELD(RB_DW0_REGISTER, DATA_BYTES2, 8, 8) + FIELD(RB_DW0_REGISTER, DATA_BYTES3, 0, 8) +REG32(RB_DW1_REGISTER, 0x210c) + FIELD(RB_DW1_REGISTER, DATA_BYTES4, 24, 8) + FIELD(RB_DW1_REGISTER, DATA_BYTES5, 16, 8) + FIELD(RB_DW1_REGISTER, DATA_BYTES6, 8, 8) + FIELD(RB_DW1_REGISTER, DATA_BYTES7, 0, 8) +REG32(RB_DW2_REGISTER, 0x2110) + FIELD(RB_DW2_REGISTER, DATA_BYTES8, 24, 8) + FIELD(RB_DW2_REGISTER, DATA_BYTES9, 16, 8) + FIELD(RB_DW2_REGISTER, DATA_BYTES10, 8, 8) + FIELD(RB_DW2_REGISTER, DATA_BYTES11, 0, 8) +REG32(RB_DW3_REGISTER, 0x2114) + FIELD(RB_DW3_REGISTER, DATA_BYTES12, 24, 8) + FIELD(RB_DW3_REGISTER, DATA_BYTES13, 16, 8) + FIELD(RB_DW3_REGISTER, DATA_BYTES14, 8, 8) + FIELD(RB_DW3_REGISTER, DATA_BYTES15, 0, 8) +REG32(RB_DW4_REGISTER, 0x2118) + FIELD(RB_DW4_REGISTER, DATA_BYTES16, 24, 8) + FIELD(RB_DW4_REGISTER, DATA_BYTES17, 16, 8) + FIELD(RB_DW4_REGISTER, DATA_BYTES18, 8, 8) + FIELD(RB_DW4_REGISTER, DATA_BYTES19, 0, 8) +REG32(RB_DW5_REGISTER, 0x211c) + FIELD(RB_DW5_REGISTER, DATA_BYTES20, 24, 8) + FIELD(RB_DW5_REGISTER, DATA_BYTES21, 16, 8) + FIELD(RB_DW5_REGISTER, DATA_BYTES22, 8, 8) + FIELD(RB_DW5_REGISTER, DATA_BYTES23, 0, 8) +REG32(RB_DW6_REGISTER, 0x2120) + FIELD(RB_DW6_REGISTER, DATA_BYTES24, 24, 8) + FIELD(RB_DW6_REGISTER, DATA_BYTES25, 16, 8) + FIELD(RB_DW6_REGISTER, 
DATA_BYTES26, 8, 8) + FIELD(RB_DW6_REGISTER, DATA_BYTES27, 0, 8) +REG32(RB_DW7_REGISTER, 0x2124) + FIELD(RB_DW7_REGISTER, DATA_BYTES28, 24, 8) + FIELD(RB_DW7_REGISTER, DATA_BYTES29, 16, 8) + FIELD(RB_DW7_REGISTER, DATA_BYTES30, 8, 8) + FIELD(RB_DW7_REGISTER, DATA_BYTES31, 0, 8) +REG32(RB_DW8_REGISTER, 0x2128) + FIELD(RB_DW8_REGISTER, DATA_BYTES32, 24, 8) + FIELD(RB_DW8_REGISTER, DATA_BYTES33, 16, 8) + FIELD(RB_DW8_REGISTER, DATA_BYTES34, 8, 8) + FIELD(RB_DW8_REGISTER, DATA_BYTES35, 0, 8) +REG32(RB_DW9_REGISTER, 0x212c) + FIELD(RB_DW9_REGISTER, DATA_BYTES36, 24, 8) + FIELD(RB_DW9_REGISTER, DATA_BYTES37, 16, 8) + FIELD(RB_DW9_REGISTER, DATA_BYTES38, 8, 8) + FIELD(RB_DW9_REGISTER, DATA_BYTES39, 0, 8) +REG32(RB_DW10_REGISTER, 0x2130) + FIELD(RB_DW10_REGISTER, DATA_BYTES40, 24, 8) + FIELD(RB_DW10_REGISTER, DATA_BYTES41, 16, 8) + FIELD(RB_DW10_REGISTER, DATA_BYTES42, 8, 8) + FIELD(RB_DW10_REGISTER, DATA_BYTES43, 0, 8) +REG32(RB_DW11_REGISTER, 0x2134) + FIELD(RB_DW11_REGISTER, DATA_BYTES44, 24, 8) + FIELD(RB_DW11_REGISTER, DATA_BYTES45, 16, 8) + FIELD(RB_DW11_REGISTER, DATA_BYTES46, 8, 8) + FIELD(RB_DW11_REGISTER, DATA_BYTES47, 0, 8) +REG32(RB_DW12_REGISTER, 0x2138) + FIELD(RB_DW12_REGISTER, DATA_BYTES48, 24, 8) + FIELD(RB_DW12_REGISTER, DATA_BYTES49, 16, 8) + FIELD(RB_DW12_REGISTER, DATA_BYTES50, 8, 8) + FIELD(RB_DW12_REGISTER, DATA_BYTES51, 0, 8) +REG32(RB_DW13_REGISTER, 0x213c) + FIELD(RB_DW13_REGISTER, DATA_BYTES52, 24, 8) + FIELD(RB_DW13_REGISTER, DATA_BYTES53, 16, 8) + FIELD(RB_DW13_REGISTER, DATA_BYTES54, 8, 8) + FIELD(RB_DW13_REGISTER, DATA_BYTES55, 0, 8) +REG32(RB_DW14_REGISTER, 0x2140) + FIELD(RB_DW14_REGISTER, DATA_BYTES56, 24, 8) + FIELD(RB_DW14_REGISTER, DATA_BYTES57, 16, 8) + FIELD(RB_DW14_REGISTER, DATA_BYTES58, 8, 8) + FIELD(RB_DW14_REGISTER, DATA_BYTES59, 0, 8) +REG32(RB_DW15_REGISTER, 0x2144) + FIELD(RB_DW15_REGISTER, DATA_BYTES60, 24, 8) + FIELD(RB_DW15_REGISTER, DATA_BYTES61, 16, 8) + FIELD(RB_DW15_REGISTER, DATA_BYTES62, 8, 8) + FIELD(RB_DW15_REGISTER, DATA_BYTES63, 0, 8) +REG32(RB_ID_REGISTER_1, 0x4100) + FIELD(RB_ID_REGISTER_1, ID, 21, 11) + FIELD(RB_ID_REGISTER_1, SRR_RTR_RRS, 20, 1) + FIELD(RB_ID_REGISTER_1, IDE, 19, 1) + FIELD(RB_ID_REGISTER_1, ID_EXT, 1, 18) + FIELD(RB_ID_REGISTER_1, RTR_RRS, 0, 1) +REG32(RB_DLC_REGISTER_1, 0x4104) + FIELD(RB_DLC_REGISTER_1, DLC, 28, 4) + FIELD(RB_DLC_REGISTER_1, FDF, 27, 1) + FIELD(RB_DLC_REGISTER_1, BRS, 26, 1) + FIELD(RB_DLC_REGISTER_1, ESI, 25, 1) + FIELD(RB_DLC_REGISTER_1, MATCHED_FILTER_INDEX, 16, 5) + FIELD(RB_DLC_REGISTER_1, TIMESTAMP, 0, 16) +REG32(RB0_DW0_REGISTER_1, 0x4108) + FIELD(RB0_DW0_REGISTER_1, DATA_BYTES0, 24, 8) + FIELD(RB0_DW0_REGISTER_1, DATA_BYTES1, 16, 8) + FIELD(RB0_DW0_REGISTER_1, DATA_BYTES2, 8, 8) + FIELD(RB0_DW0_REGISTER_1, DATA_BYTES3, 0, 8) +REG32(RB_DW1_REGISTER_1, 0x410c) + FIELD(RB_DW1_REGISTER_1, DATA_BYTES4, 24, 8) + FIELD(RB_DW1_REGISTER_1, DATA_BYTES5, 16, 8) + FIELD(RB_DW1_REGISTER_1, DATA_BYTES6, 8, 8) + FIELD(RB_DW1_REGISTER_1, DATA_BYTES7, 0, 8) +REG32(RB_DW2_REGISTER_1, 0x4110) + FIELD(RB_DW2_REGISTER_1, DATA_BYTES8, 24, 8) + FIELD(RB_DW2_REGISTER_1, DATA_BYTES9, 16, 8) + FIELD(RB_DW2_REGISTER_1, DATA_BYTES10, 8, 8) + FIELD(RB_DW2_REGISTER_1, DATA_BYTES11, 0, 8) +REG32(RB_DW3_REGISTER_1, 0x4114) + FIELD(RB_DW3_REGISTER_1, DATA_BYTES12, 24, 8) + FIELD(RB_DW3_REGISTER_1, DATA_BYTES13, 16, 8) + FIELD(RB_DW3_REGISTER_1, DATA_BYTES14, 8, 8) + FIELD(RB_DW3_REGISTER_1, DATA_BYTES15, 0, 8) +REG32(RB_DW4_REGISTER_1, 0x4118) + FIELD(RB_DW4_REGISTER_1, DATA_BYTES16, 24, 8) + FIELD(RB_DW4_REGISTER_1, 
DATA_BYTES17, 16, 8) + FIELD(RB_DW4_REGISTER_1, DATA_BYTES18, 8, 8) + FIELD(RB_DW4_REGISTER_1, DATA_BYTES19, 0, 8) +REG32(RB_DW5_REGISTER_1, 0x411c) + FIELD(RB_DW5_REGISTER_1, DATA_BYTES20, 24, 8) + FIELD(RB_DW5_REGISTER_1, DATA_BYTES21, 16, 8) + FIELD(RB_DW5_REGISTER_1, DATA_BYTES22, 8, 8) + FIELD(RB_DW5_REGISTER_1, DATA_BYTES23, 0, 8) +REG32(RB_DW6_REGISTER_1, 0x4120) + FIELD(RB_DW6_REGISTER_1, DATA_BYTES24, 24, 8) + FIELD(RB_DW6_REGISTER_1, DATA_BYTES25, 16, 8) + FIELD(RB_DW6_REGISTER_1, DATA_BYTES26, 8, 8) + FIELD(RB_DW6_REGISTER_1, DATA_BYTES27, 0, 8) +REG32(RB_DW7_REGISTER_1, 0x4124) + FIELD(RB_DW7_REGISTER_1, DATA_BYTES28, 24, 8) + FIELD(RB_DW7_REGISTER_1, DATA_BYTES29, 16, 8) + FIELD(RB_DW7_REGISTER_1, DATA_BYTES30, 8, 8) + FIELD(RB_DW7_REGISTER_1, DATA_BYTES31, 0, 8) +REG32(RB_DW8_REGISTER_1, 0x4128) + FIELD(RB_DW8_REGISTER_1, DATA_BYTES32, 24, 8) + FIELD(RB_DW8_REGISTER_1, DATA_BYTES33, 16, 8) + FIELD(RB_DW8_REGISTER_1, DATA_BYTES34, 8, 8) + FIELD(RB_DW8_REGISTER_1, DATA_BYTES35, 0, 8) +REG32(RB_DW9_REGISTER_1, 0x412c) + FIELD(RB_DW9_REGISTER_1, DATA_BYTES36, 24, 8) + FIELD(RB_DW9_REGISTER_1, DATA_BYTES37, 16, 8) + FIELD(RB_DW9_REGISTER_1, DATA_BYTES38, 8, 8) + FIELD(RB_DW9_REGISTER_1, DATA_BYTES39, 0, 8) +REG32(RB_DW10_REGISTER_1, 0x4130) + FIELD(RB_DW10_REGISTER_1, DATA_BYTES40, 24, 8) + FIELD(RB_DW10_REGISTER_1, DATA_BYTES41, 16, 8) + FIELD(RB_DW10_REGISTER_1, DATA_BYTES42, 8, 8) + FIELD(RB_DW10_REGISTER_1, DATA_BYTES43, 0, 8) +REG32(RB_DW11_REGISTER_1, 0x4134) + FIELD(RB_DW11_REGISTER_1, DATA_BYTES44, 24, 8) + FIELD(RB_DW11_REGISTER_1, DATA_BYTES45, 16, 8) + FIELD(RB_DW11_REGISTER_1, DATA_BYTES46, 8, 8) + FIELD(RB_DW11_REGISTER_1, DATA_BYTES47, 0, 8) +REG32(RB_DW12_REGISTER_1, 0x4138) + FIELD(RB_DW12_REGISTER_1, DATA_BYTES48, 24, 8) + FIELD(RB_DW12_REGISTER_1, DATA_BYTES49, 16, 8) + FIELD(RB_DW12_REGISTER_1, DATA_BYTES50, 8, 8) + FIELD(RB_DW12_REGISTER_1, DATA_BYTES51, 0, 8) +REG32(RB_DW13_REGISTER_1, 0x413c) + FIELD(RB_DW13_REGISTER_1, DATA_BYTES52, 24, 8) + FIELD(RB_DW13_REGISTER_1, DATA_BYTES53, 16, 8) + FIELD(RB_DW13_REGISTER_1, DATA_BYTES54, 8, 8) + FIELD(RB_DW13_REGISTER_1, DATA_BYTES55, 0, 8) +REG32(RB_DW14_REGISTER_1, 0x4140) + FIELD(RB_DW14_REGISTER_1, DATA_BYTES56, 24, 8) + FIELD(RB_DW14_REGISTER_1, DATA_BYTES57, 16, 8) + FIELD(RB_DW14_REGISTER_1, DATA_BYTES58, 8, 8) + FIELD(RB_DW14_REGISTER_1, DATA_BYTES59, 0, 8) +REG32(RB_DW15_REGISTER_1, 0x4144) + FIELD(RB_DW15_REGISTER_1, DATA_BYTES60, 24, 8) + FIELD(RB_DW15_REGISTER_1, DATA_BYTES61, 16, 8) + FIELD(RB_DW15_REGISTER_1, DATA_BYTES62, 8, 8) + FIELD(RB_DW15_REGISTER_1, DATA_BYTES63, 0, 8) + +static uint8_t canfd_dlc_array[8] = {8, 12, 16, 20, 24, 32, 48, 64}; + +static void canfd_update_irq(XlnxVersalCANFDState *s) +{ + unsigned int irq = s->regs[R_INTERRUPT_STATUS_REGISTER] & + s->regs[R_INTERRUPT_ENABLE_REGISTER]; + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + /* RX watermark interrupts. */ + if (ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, FL) > + ARRAY_FIELD_EX32(s->regs, RX_FIFO_WATERMARK_REGISTER, RXFWM)) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFWMFLL, 1); + } + + if (ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, FL_1) > + ARRAY_FIELD_EX32(s->regs, RX_FIFO_WATERMARK_REGISTER, RXFWM_1)) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFWMFLL_1, 1); + } + + /* TX watermark interrupt. 
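 * Raised when the TX event FIFO fill level (TXE_FL) exceeds the programmed
 * TXE_FWM watermark, mirroring the two RX FIFO checks above.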
*/ + if (ARRAY_FIELD_EX32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, TXE_FL) > + ARRAY_FIELD_EX32(s->regs, TX_EVENT_FIFO_WATERMARK_REGISTER, TXE_FWM)) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXEWMFLL, 1); + } + + trace_xlnx_canfd_update_irq(path, s->regs[R_INTERRUPT_STATUS_REGISTER], + s->regs[R_INTERRUPT_ENABLE_REGISTER], irq); + + qemu_set_irq(s->irq_canfd_int, irq); +} + +static void canfd_ier_post_write(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + + canfd_update_irq(s); +} + +static uint64_t canfd_icr_pre_write(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + uint32_t val = val64; + + s->regs[R_INTERRUPT_STATUS_REGISTER] &= ~val; + + /* + * RXBOFLW_BI field is automatically cleared to default if RXBOFLW bit is + * cleared in ISR. + */ + if (ARRAY_FIELD_EX32(s->regs, INTERRUPT_STATUS_REGISTER, RXFWMFLL_1)) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXBOFLW_BI, 0); + } + + canfd_update_irq(s); + + return 0; +} + +static void canfd_config_reset(XlnxVersalCANFDState *s) +{ + + unsigned int i; + + /* Reset all the configuration registers. */ + for (i = 0; i < R_RX_FIFO_WATERMARK_REGISTER; ++i) { + register_reset(&s->reg_info[i]); + } + + canfd_update_irq(s); +} + +static void canfd_config_mode(XlnxVersalCANFDState *s) +{ + register_reset(&s->reg_info[R_ERROR_COUNTER_REGISTER]); + register_reset(&s->reg_info[R_ERROR_STATUS_REGISTER]); + register_reset(&s->reg_info[R_STATUS_REGISTER]); + + /* Put XlnxVersalCANFDState in configuration mode. */ + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, CONFIG, 1); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, WKUP, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, SLP, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, BSOFF, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, ERROR_BIT, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFOFLW, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFOFLW_1, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXOK, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, ARBLST, 0); + + /* Clear the time stamp. */ + ptimer_transaction_begin(s->canfd_timer); + ptimer_set_count(s->canfd_timer, 0); + ptimer_transaction_commit(s->canfd_timer); + + canfd_update_irq(s); +} + +static void update_status_register_mode_bits(XlnxVersalCANFDState *s) +{ + bool sleep_status = ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP); + bool sleep_mode = ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SLEEP); + /* Wake up interrupt bit. */ + bool wakeup_irq_val = !sleep_mode && sleep_status; + /* Sleep interrupt bit. */ + bool sleep_irq_val = sleep_mode && !sleep_status; + + /* Clear previous core mode status bits. */ + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, LBACK, 0); + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SLEEP, 0); + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SNOOP, 0); + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, NORMAL, 0); + + /* set current mode bit and generate irqs accordingly. 
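 * Priority when several mode bits are set is LBACK > SLEEP > SNOOP; with
 * none of them set the core runs in normal mode.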
*/ + if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, LBACK)) { + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, LBACK, 1); + } else if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SLEEP)) { + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SLEEP, 1); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, SLP, + sleep_irq_val); + } else if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SNOOP)) { + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SNOOP, 1); + } else { + /* If all bits are zero, XlnxVersalCANFDState is set in normal mode. */ + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, NORMAL, 1); + /* Set wakeup interrupt bit. */ + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, WKUP, + wakeup_irq_val); + } + + /* Put the CANFD in error active state. */ + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, ESTAT, 1); + + canfd_update_irq(s); +} + +static uint64_t canfd_msr_pre_write(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + uint32_t val = val64; + uint8_t multi_mode = 0; + + /* + * Multiple mode set check. This is done to make sure user doesn't set + * multiple modes. + */ + multi_mode = FIELD_EX32(val, MODE_SELECT_REGISTER, LBACK) + + FIELD_EX32(val, MODE_SELECT_REGISTER, SLEEP) + + FIELD_EX32(val, MODE_SELECT_REGISTER, SNOOP); + + if (multi_mode > 1) { + qemu_log_mask(LOG_GUEST_ERROR, "Attempting to configure several modes" + " simultaneously. One mode will be selected according to" + " their priority: LBACK > SLEEP > SNOOP.\n"); + } + + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) { + /* In configuration mode, any mode can be selected. */ + s->regs[R_MODE_SELECT_REGISTER] = val; + } else { + bool sleep_mode_bit = FIELD_EX32(val, MODE_SELECT_REGISTER, SLEEP); + + ARRAY_FIELD_DP32(s->regs, MODE_SELECT_REGISTER, SLEEP, sleep_mode_bit); + + if (FIELD_EX32(val, MODE_SELECT_REGISTER, LBACK)) { + qemu_log_mask(LOG_GUEST_ERROR, "Attempting to set LBACK mode" + " without setting CEN bit as 0\n"); + } else if (FIELD_EX32(val, MODE_SELECT_REGISTER, SNOOP)) { + qemu_log_mask(LOG_GUEST_ERROR, "Attempting to set SNOOP mode" + " without setting CEN bit as 0\n"); + } + + update_status_register_mode_bits(s); + } + + return s->regs[R_MODE_SELECT_REGISTER]; +} + +static void canfd_exit_sleep_mode(XlnxVersalCANFDState *s) +{ + ARRAY_FIELD_DP32(s->regs, MODE_SELECT_REGISTER, SLEEP, 0); + update_status_register_mode_bits(s); +} + +static void regs2frame(XlnxVersalCANFDState *s, qemu_can_frame *frame, + uint32_t reg_num) +{ + uint32_t i = 0; + uint32_t j = 0; + uint32_t val = 0; + uint32_t dlc_reg_val = 0; + uint32_t dlc_value = 0; + + /* Check that reg_num should be within TX register space. */ + assert(reg_num <= R_TB_ID_REGISTER + (NUM_REGS_PER_MSG_SPACE * + s->cfg.tx_fifo)); + + dlc_reg_val = s->regs[reg_num + 1]; + dlc_value = FIELD_EX32(dlc_reg_val, TB0_DLC_REGISTER, DLC); + + frame->can_id = s->regs[reg_num]; + + if (FIELD_EX32(dlc_reg_val, TB0_DLC_REGISTER, FDF)) { + /* + * CANFD frame. + * Converting dlc(0 to 15) 4 Byte data to plain length(i.e. 0 to 64) + * 1 Byte data. This is done to make it work with SocketCAN. + * On actual CANFD frame, this value can't be more than 0xF. 
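 * (For example, a DLC of 13 is expanded here to canfd_dlc_array[13 - 8] = 32
 * data bytes.)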
+ * Conversion table for DLC to plain length: + * + * DLC Plain Length + * 0 - 8 0 - 8 + * 9 9 - 12 + * 10 13 - 16 + * 11 17 - 20 + * 12 21 - 24 + * 13 25 - 32 + * 14 33 - 48 + * 15 49 - 64 + */ + + frame->flags = QEMU_CAN_FRMF_TYPE_FD; + + if (dlc_value < 8) { + frame->can_dlc = dlc_value; + } else { + assert((dlc_value - 8) < ARRAY_SIZE(canfd_dlc_array)); + frame->can_dlc = canfd_dlc_array[dlc_value - 8]; + } + } else { + /* + * FD Format bit not set that means it is a CAN Frame. + * Conversion table for classic CAN: + * + * DLC Plain Length + * 0 - 7 0 - 7 + * 8 - 15 8 + */ + + if (dlc_value > 8) { + frame->can_dlc = 8; + qemu_log_mask(LOG_GUEST_ERROR, "Maximum DLC value for Classic CAN" + " frame is 8. Only 8 byte data will be sent.\n"); + } else { + frame->can_dlc = dlc_value; + } + } + + for (j = 0; j < frame->can_dlc; j++) { + val = 8 * i; + + frame->data[j] = extract32(s->regs[reg_num + 2 + (j / 4)], val, 8); + i++; + + if (i % 4 == 0) { + i = 0; + } + } +} + +static void process_cancellation_requests(XlnxVersalCANFDState *s) +{ + uint32_t clear_mask = s->regs[R_TX_BUFFER_READY_REQUEST_REGISTER] & + s->regs[R_TX_BUFFER_CANCEL_REQUEST_REGISTER]; + + s->regs[R_TX_BUFFER_READY_REQUEST_REGISTER] &= ~clear_mask; + s->regs[R_TX_BUFFER_CANCEL_REQUEST_REGISTER] &= ~clear_mask; + + canfd_update_irq(s); +} + +static void store_rx_sequential(XlnxVersalCANFDState *s, + const qemu_can_frame *frame, + uint32_t fill_level, uint32_t read_index, + uint32_t store_location, uint8_t rx_fifo, + bool rx_fifo_id, uint8_t filter_index) +{ + int i; + bool is_canfd_frame; + uint8_t dlc = frame->can_dlc; + uint8_t rx_reg_num = 0; + uint32_t dlc_reg_val = 0; + uint32_t data_reg_val = 0; + + /* Getting RX0/1 fill level */ + if ((fill_level) > rx_fifo - 1) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: RX%d Buffer is full. Discarding the" + " message\n", path, rx_fifo_id); + + /* Set the corresponding RF buffer overflow interrupt. */ + if (rx_fifo_id == 0) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFOFLW, 1); + } else { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFOFLW_1, 1); + } + } else { + uint16_t rx_timestamp = CANFD_TIMER_MAX - + ptimer_get_count(s->canfd_timer); + + if (rx_timestamp == 0xFFFF) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TSCNT_OFLW, 1); + } else { + ARRAY_FIELD_DP32(s->regs, TIMESTAMP_REGISTER, TIMESTAMP_CNT, + rx_timestamp); + } + + if (rx_fifo_id == 0) { + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, FL, + fill_level + 1); + assert(store_location <= + R_RB_ID_REGISTER + (s->cfg.rx0_fifo * + NUM_REGS_PER_MSG_SPACE)); + } else { + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, FL_1, + fill_level + 1); + assert(store_location <= + R_RB_ID_REGISTER_1 + (s->cfg.rx1_fifo * + NUM_REGS_PER_MSG_SPACE)); + } + + s->regs[store_location] = frame->can_id; + + dlc = frame->can_dlc; + + if (frame->flags == QEMU_CAN_FRMF_TYPE_FD) { + is_canfd_frame = true; + + /* Store dlc value in Xilinx specific format. 
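 * This is the reverse of the mapping in regs2frame(): find the index whose
 * canfd_dlc_array[] entry matches can_dlc and store 8 + index as the DLC.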
*/ + for (i = 0; i < ARRAY_SIZE(canfd_dlc_array); i++) { + if (canfd_dlc_array[i] == frame->can_dlc) { + dlc_reg_val = FIELD_DP32(0, RB_DLC_REGISTER, DLC, 8 + i); + } + } + } else { + is_canfd_frame = false; + + if (frame->can_dlc > 8) { + dlc = 8; + } + + dlc_reg_val = FIELD_DP32(0, RB_DLC_REGISTER, DLC, dlc); + } + + dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, FDF, is_canfd_frame); + dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, TIMESTAMP, rx_timestamp); + dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, MATCHED_FILTER_INDEX, + filter_index); + s->regs[store_location + 1] = dlc_reg_val; + + for (i = 0; i < dlc; i++) { + /* Register size is 4 byte but frame->data each is 1 byte. */ + switch (i % 4) { + case 0: + rx_reg_num = i / 4; + + data_reg_val = FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES3, + frame->data[i]); + break; + case 1: + data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES2, + frame->data[i]); + break; + case 2: + data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES1, + frame->data[i]); + break; + case 3: + data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES0, + frame->data[i]); + /* + * Last Bytes data which means we have all 4 bytes ready to + * store in one rx regs. + */ + s->regs[store_location + rx_reg_num + 2] = data_reg_val; + break; + } + } + + if (i % 4) { + /* + * In case DLC is not multiplier of 4, data is not saved to RX FIFO + * in above switch case. Store the remaining bytes here. + */ + s->regs[store_location + rx_reg_num + 2] = data_reg_val; + } + + /* set the interrupt as RXOK. */ + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1); + } +} + +static void update_rx_sequential(XlnxVersalCANFDState *s, + const qemu_can_frame *frame) +{ + bool filter_pass = false; + uint8_t filter_index = 0; + int i; + int filter_partition = ARRAY_FIELD_EX32(s->regs, + RX_FIFO_WATERMARK_REGISTER, RXFP); + uint32_t store_location; + uint32_t fill_level; + uint32_t read_index; + uint8_t store_index = 0; + g_autofree char *path = NULL; + /* + * If all UAF bits are set to 0, then received messages are not stored + * in the RX buffers. + */ + if (s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER]) { + uint32_t acceptance_filter_status = + s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER]; + + for (i = 0; i < 32; i++) { + if (acceptance_filter_status & 0x1) { + uint32_t msg_id_masked = s->regs[R_AFMR_REGISTER + 2 * i] & + frame->can_id; + uint32_t afir_id_masked = s->regs[R_AFIR_REGISTER + 2 * i] & + s->regs[R_AFMR_REGISTER + 2 * i]; + uint16_t std_msg_id_masked = FIELD_EX32(msg_id_masked, + AFIR_REGISTER, AIID); + uint16_t std_afir_id_masked = FIELD_EX32(afir_id_masked, + AFIR_REGISTER, AIID); + uint32_t ext_msg_id_masked = FIELD_EX32(msg_id_masked, + AFIR_REGISTER, + AIID_EXT); + uint32_t ext_afir_id_masked = FIELD_EX32(afir_id_masked, + AFIR_REGISTER, + AIID_EXT); + bool ext_ide = FIELD_EX32(s->regs[R_AFMR_REGISTER + 2 * i], + AFMR_REGISTER, AMIDE); + + if (std_msg_id_masked == std_afir_id_masked) { + if (ext_ide) { + /* Extended message ID message. */ + if (ext_msg_id_masked == ext_afir_id_masked) { + filter_pass = true; + filter_index = i; + + break; + } + } else { + /* Standard message ID. 
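 * (AMIDE is clear): the masked 11-bit ID comparison above is sufficient on
 * its own.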
*/ + filter_pass = true; + filter_index = i; + + break; + } + } + } + acceptance_filter_status >>= 1; + } + } + + if (!filter_pass) { + path = object_get_canonical_path(OBJECT(s)); + + trace_xlnx_canfd_rx_fifo_filter_reject(path, frame->can_id, + frame->can_dlc); + } else { + if (filter_index <= filter_partition) { + fill_level = ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, FL); + read_index = ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, RI); + store_index = read_index + fill_level; + + if (read_index == s->cfg.rx0_fifo - 1) { + /* + * When ri is s->cfg.rx0_fifo - 1 i.e. max, it goes cyclic that + * means we reset the ri to 0x0. + */ + read_index = 0; + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, RI, + read_index); + } + + if (store_index > s->cfg.rx0_fifo - 1) { + store_index -= s->cfg.rx0_fifo - 1; + } + + store_location = R_RB_ID_REGISTER + + (store_index * NUM_REGS_PER_MSG_SPACE); + + store_rx_sequential(s, frame, fill_level, read_index, + store_location, s->cfg.rx0_fifo, 0, + filter_index); + } else { + /* RX 1 fill level message */ + fill_level = ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, + FL_1); + read_index = ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, + RI_1); + store_index = read_index + fill_level; + + if (read_index == s->cfg.rx1_fifo - 1) { + /* + * When ri is s->cfg.rx1_fifo - 1 i.e. max, it goes cyclic that + * means we reset the ri to 0x0. + */ + read_index = 0; + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, RI_1, + read_index); + } + + if (store_index > s->cfg.rx1_fifo - 1) { + store_index -= s->cfg.rx1_fifo - 1; + } + + store_location = R_RB_ID_REGISTER_1 + + (store_index * NUM_REGS_PER_MSG_SPACE); + + store_rx_sequential(s, frame, fill_level, read_index, + store_location, s->cfg.rx1_fifo, 1, + filter_index); + } + + path = object_get_canonical_path(OBJECT(s)); + + trace_xlnx_canfd_rx_data(path, frame->can_id, frame->can_dlc, + frame->flags); + canfd_update_irq(s); + } +} + +static bool tx_ready_check(XlnxVersalCANFDState *s) +{ + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, SRST)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer data while" + " XlnxVersalCANFDState is in reset mode\n", path); + + return false; + } + + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer data while" + " XlnxVersalCANFDState is in configuration mode." + " Reset the core so operations can start fresh\n", + path); + return false; + } + + if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SNOOP)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer data while" + " XlnxVersalCANFDState is in SNOOP MODE\n", + path); + return false; + } + + return true; +} + +static void tx_fifo_stamp(XlnxVersalCANFDState *s, uint32_t tb0_regid) +{ + /* + * If EFC bit in DLC message is set, this means we will store the + * event of this transmitted message with time stamp. 
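 * in the TX event FIFO (TXE_FIFO_TB_* registers); with EFC clear the
 * transmission leaves no event record.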
+ */ + uint32_t dlc_reg_val = 0; + + if (FIELD_EX32(s->regs[tb0_regid + 1], TB0_DLC_REGISTER, EFC)) { + uint8_t dlc_val = FIELD_EX32(s->regs[tb0_regid + 1], TB0_DLC_REGISTER, + DLC); + bool fdf_val = FIELD_EX32(s->regs[tb0_regid + 1], TB0_DLC_REGISTER, + FDF); + bool brs_val = FIELD_EX32(s->regs[tb0_regid + 1], TB0_DLC_REGISTER, + BRS); + uint8_t mm_val = FIELD_EX32(s->regs[tb0_regid + 1], TB0_DLC_REGISTER, + MM); + uint8_t fill_level = ARRAY_FIELD_EX32(s->regs, + TX_EVENT_FIFO_STATUS_REGISTER, + TXE_FL); + uint8_t read_index = ARRAY_FIELD_EX32(s->regs, + TX_EVENT_FIFO_STATUS_REGISTER, + TXE_RI); + uint8_t store_index = fill_level + read_index; + + if ((fill_level) > s->cfg.tx_fifo - 1) { + qemu_log_mask(LOG_GUEST_ERROR, "TX Event Buffer is full." + " Discarding the message\n"); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXEOFLW, 1); + } else { + if (read_index == s->cfg.tx_fifo - 1) { + /* + * When ri is s->cfg.tx_fifo - 1 i.e. max, it goes cyclic that + * means we reset the ri to 0x0. + */ + read_index = 0; + ARRAY_FIELD_DP32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, TXE_RI, + read_index); + } + + if (store_index > s->cfg.tx_fifo - 1) { + store_index -= s->cfg.tx_fifo - 1; + } + + assert(store_index < s->cfg.tx_fifo); + + uint32_t tx_event_reg0_id = R_TXE_FIFO_TB_ID_REGISTER + + (store_index * 2); + + /* Store message ID in TX event register. */ + s->regs[tx_event_reg0_id] = s->regs[tb0_regid]; + + uint16_t tx_timestamp = CANFD_TIMER_MAX - + ptimer_get_count(s->canfd_timer); + + /* Store DLC with time stamp in DLC regs. */ + dlc_reg_val = FIELD_DP32(0, TXE_FIFO_TB_DLC_REGISTER, DLC, dlc_val); + dlc_reg_val |= FIELD_DP32(0, TXE_FIFO_TB_DLC_REGISTER, FDF, + fdf_val); + dlc_reg_val |= FIELD_DP32(0, TXE_FIFO_TB_DLC_REGISTER, BRS, + brs_val); + dlc_reg_val |= FIELD_DP32(0, TXE_FIFO_TB_DLC_REGISTER, ET, 0x3); + dlc_reg_val |= FIELD_DP32(0, TXE_FIFO_TB_DLC_REGISTER, MM, mm_val); + dlc_reg_val |= FIELD_DP32(0, TXE_FIFO_TB_DLC_REGISTER, TIMESTAMP, + tx_timestamp); + s->regs[tx_event_reg0_id + 1] = dlc_reg_val; + + ARRAY_FIELD_DP32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, TXE_FL, + fill_level + 1); + } + } +} + +static gint g_cmp_ids(gconstpointer data1, gconstpointer data2) +{ + tx_ready_reg_info *tx_reg_1 = (tx_ready_reg_info *) data1; + tx_ready_reg_info *tx_reg_2 = (tx_ready_reg_info *) data2; + + return tx_reg_1->can_id - tx_reg_2->can_id; +} + +static void free_list(GSList *list) +{ + GSList *iterator = NULL; + + for (iterator = list; iterator != NULL; iterator = iterator->next) { + g_free((tx_ready_reg_info *)iterator->data); + } + + g_slist_free(list); + + return; +} + +static GSList *prepare_tx_data(XlnxVersalCANFDState *s) +{ + uint8_t i = 0; + GSList *list = NULL; + uint32_t reg_num = 0; + uint32_t reg_ready = s->regs[R_TX_BUFFER_READY_REQUEST_REGISTER]; + + /* First find the messages which are ready for transmission. 
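Note: the FIELD_DP32()/FIELD_EX32() calls used throughout this model are QEMU's generated register-field accessors; underneath they behave like generic bitfield deposit and extract helpers. A standalone sketch of that behaviour with made-up field positions (the DLC and timestamp placements below are examples, not the real RB_DLC_REGISTER layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of 32-bit bitfield helpers: 'start' is the LSB, 'length' the width. */
    static uint32_t deposit32(uint32_t value, unsigned start, unsigned length,
                              uint32_t fieldval)
    {
        uint32_t mask = (length == 32) ? UINT32_MAX : ((1u << length) - 1);

        return (value & ~(mask << start)) | ((fieldval & mask) << start);
    }

    static uint32_t extract32(uint32_t value, unsigned start, unsigned length)
    {
        uint32_t mask = (length == 32) ? UINT32_MAX : ((1u << length) - 1);

        return (value >> start) & mask;
    }

    int main(void)
    {
        uint32_t reg = 0;

        reg = deposit32(reg, 28, 4, 0xd);       /* e.g. a 4-bit DLC field     */
        reg = deposit32(reg, 0, 16, 0xbeef);    /* e.g. a 16-bit timestamp    */
        printf("reg=0x%08x dlc=%u\n", (unsigned)reg,
               (unsigned)extract32(reg, 28, 4));
        return 0;
    }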
*/ + for (i = 0; i < s->cfg.tx_fifo; i++) { + if (reg_ready & 1) { + reg_num = R_TB_ID_REGISTER + (NUM_REGS_PER_MSG_SPACE * i); + tx_ready_reg_info *temp = g_new(tx_ready_reg_info, 1); + + temp->can_id = s->regs[reg_num]; + temp->reg_num = reg_num; + list = g_slist_prepend(list, temp); + list = g_slist_sort(list, g_cmp_ids); + } + + reg_ready >>= 1; + } + + s->regs[R_TX_BUFFER_READY_REQUEST_REGISTER] = 0; + s->regs[R_TX_BUFFER_CANCEL_REQUEST_REGISTER] = 0; + + return list; +} + +static void transfer_data(XlnxVersalCANFDState *s) +{ + bool canfd_tx = tx_ready_check(s); + GSList *list, *iterator = NULL; + qemu_can_frame frame; + + if (!canfd_tx) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller not enabled for data" + " transfer\n", path); + return; + } + + list = prepare_tx_data(s); + if (list == NULL) { + return; + } + + for (iterator = list; iterator != NULL; iterator = iterator->next) { + regs2frame(s, &frame, + ((tx_ready_reg_info *)iterator->data)->reg_num); + + if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, LBACK)) { + update_rx_sequential(s, &frame); + tx_fifo_stamp(s, ((tx_ready_reg_info *)iterator->data)->reg_num); + + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1); + } else { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + trace_xlnx_canfd_tx_data(path, frame.can_id, frame.can_dlc, + frame.flags); + can_bus_client_send(&s->bus_client, &frame, 1); + tx_fifo_stamp(s, + ((tx_ready_reg_info *)iterator->data)->reg_num); + + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXRRS, 1); + + if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP)) { + canfd_exit_sleep_mode(s); + } + } + } + + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXOK, 1); + free_list(list); + + canfd_update_irq(s); +} + +static uint64_t canfd_srr_pre_write(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + uint32_t val = val64; + + ARRAY_FIELD_DP32(s->regs, SOFTWARE_RESET_REGISTER, CEN, + FIELD_EX32(val, SOFTWARE_RESET_REGISTER, CEN)); + + if (FIELD_EX32(val, SOFTWARE_RESET_REGISTER, SRST)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + trace_xlnx_canfd_reset(path, val64); + + /* First, core will do software reset then will enter in config mode. */ + canfd_config_reset(s); + } else if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) { + canfd_config_mode(s); + } else { + /* + * Leave config mode. Now XlnxVersalCANFD core will enter Normal, Sleep, + * snoop or Loopback mode depending upon LBACK, SLEEP, SNOOP register + * states. 
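Note: prepare_tx_data() above scans the ready-request bitmask, records each pending buffer, and keeps the resulting list sorted by CAN ID so the lowest identifier (the winner of bus arbitration) is transmitted first. A standalone sketch of the same idea using an array and qsort() instead of a GSList; all names, buffer counts and IDs here are illustrative.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        uint32_t can_id;
        unsigned buf_index;
    } TxEntry;

    static int cmp_by_id(const void *a, const void *b)
    {
        const TxEntry *x = a, *y = b;

        return (x->can_id > y->can_id) - (x->can_id < y->can_id);
    }

    /* Collect buffers whose ready bit is set, then sort by CAN ID (priority). */
    static size_t collect_ready(uint32_t ready_mask, const uint32_t *ids,
                                TxEntry *out, size_t max)
    {
        size_t n = 0;

        for (unsigned i = 0; i < 32 && n < max; i++) {
            if (ready_mask & (1u << i)) {
                out[n].can_id = ids[i];
                out[n].buf_index = i;
                n++;
            }
        }
        qsort(out, n, sizeof(out[0]), cmp_by_id);
        return n;
    }

    int main(void)
    {
        uint32_t ids[4] = { 0x300, 0x100, 0x200, 0x050 };
        TxEntry order[4];
        size_t n = collect_ready(0xb /* buffers 0, 1 and 3 ready */, ids,
                                 order, 4);

        for (size_t i = 0; i < n; i++) {
            printf("send buf %u (id 0x%03x)\n", order[i].buf_index,
                   (unsigned)order[i].can_id);
        }
        return 0;
    }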
+ */ + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, CONFIG, 0); + + ptimer_transaction_begin(s->canfd_timer); + ptimer_set_count(s->canfd_timer, 0); + ptimer_transaction_commit(s->canfd_timer); + update_status_register_mode_bits(s); + transfer_data(s); + } + + return s->regs[R_SOFTWARE_RESET_REGISTER]; +} + +static uint64_t filter_mask(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + uint32_t reg_idx = (reg->access->addr) / 4; + uint32_t val = val64; + uint32_t filter_offset = (reg_idx - R_AFMR_REGISTER) / 2; + + if (!(s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER] & + (1 << filter_offset))) { + s->regs[reg_idx] = val; + } else { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter %d not enabled\n", + path, filter_offset + 1); + } + + return s->regs[reg_idx]; +} + +static uint64_t filter_id(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + hwaddr reg_idx = (reg->access->addr) / 4; + uint32_t val = val64; + uint32_t filter_offset = (reg_idx - R_AFIR_REGISTER) / 2; + + if (!(s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER] & + (1 << filter_offset))) { + s->regs[reg_idx] = val; + } else { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter %d not enabled\n", + path, filter_offset + 1); + } + + return s->regs[reg_idx]; +} + +static uint64_t canfd_tx_fifo_status_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + uint32_t val = val64; + uint8_t read_ind = 0; + uint8_t fill_ind = ARRAY_FIELD_EX32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, + TXE_FL); + + if (FIELD_EX32(val, TX_EVENT_FIFO_STATUS_REGISTER, TXE_IRI) && fill_ind) { + read_ind = ARRAY_FIELD_EX32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, + TXE_RI) + 1; + + if (read_ind > s->cfg.tx_fifo - 1) { + read_ind = 0; + } + + /* + * Increase the read index by 1 and decrease the fill level by 1. + */ + ARRAY_FIELD_DP32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, TXE_RI, + read_ind); + ARRAY_FIELD_DP32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, TXE_FL, + fill_ind - 1); + } + + return s->regs[R_TX_EVENT_FIFO_STATUS_REGISTER]; +} + +static uint64_t canfd_rx_fifo_status_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + uint32_t val = val64; + uint8_t read_ind = 0; + uint8_t fill_ind = 0; + + if (FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, IRI)) { + /* FL index is zero, setting IRI bit has no effect. */ + if (FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, FL) != 0) { + read_ind = FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, RI) + 1; + + if (read_ind > s->cfg.rx0_fifo - 1) { + read_ind = 0; + } + + fill_ind = FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, FL) - 1; + + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, RI, read_ind); + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, FL, fill_ind); + } + } + + if (FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, IRI_1)) { + /* FL_1 index is zero, setting IRI_1 bit has no effect. 
*/ + if (FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, FL_1) != 0) { + read_ind = FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, RI_1) + 1; + + if (read_ind > s->cfg.rx1_fifo - 1) { + read_ind = 0; + } + + fill_ind = FIELD_EX32(val, RX_FIFO_STATUS_REGISTER, FL_1) - 1; + + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, RI_1, read_ind); + ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, FL_1, fill_ind); + } + } + + return s->regs[R_RX_FIFO_STATUS_REGISTER]; +} + +static uint64_t canfd_tsr_pre_write(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + uint32_t val = val64; + + if (FIELD_EX32(val, TIMESTAMP_REGISTER, CTS)) { + ARRAY_FIELD_DP32(s->regs, TIMESTAMP_REGISTER, TIMESTAMP_CNT, 0); + ptimer_transaction_begin(s->canfd_timer); + ptimer_set_count(s->canfd_timer, 0); + ptimer_transaction_commit(s->canfd_timer); + } + + return 0; +} + +static uint64_t canfd_trr_reg_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + + if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SNOOP)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller is in SNOOP mode." + " tx_ready_register will stay in reset mode\n", path); + return 0; + } else { + return val64; + } +} + +static void canfd_trr_reg_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + + transfer_data(s); +} + +static void canfd_cancel_reg_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + + process_cancellation_requests(s); +} + +static uint64_t canfd_write_check_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); + uint32_t val = val64; + + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) { + return val; + } + return 0; +} + +static const RegisterAccessInfo canfd_tx_regs[] = { + { .name = "TB_ID_REGISTER", .addr = A_TB_ID_REGISTER, + },{ .name = "TB0_DLC_REGISTER", .addr = A_TB0_DLC_REGISTER, + },{ .name = "TB_DW0_REGISTER", .addr = A_TB_DW0_REGISTER, + },{ .name = "TB_DW1_REGISTER", .addr = A_TB_DW1_REGISTER, + },{ .name = "TB_DW2_REGISTER", .addr = A_TB_DW2_REGISTER, + },{ .name = "TB_DW3_REGISTER", .addr = A_TB_DW3_REGISTER, + },{ .name = "TB_DW4_REGISTER", .addr = A_TB_DW4_REGISTER, + },{ .name = "TB_DW5_REGISTER", .addr = A_TB_DW5_REGISTER, + },{ .name = "TB_DW6_REGISTER", .addr = A_TB_DW6_REGISTER, + },{ .name = "TB_DW7_REGISTER", .addr = A_TB_DW7_REGISTER, + },{ .name = "TB_DW8_REGISTER", .addr = A_TB_DW8_REGISTER, + },{ .name = "TB_DW9_REGISTER", .addr = A_TB_DW9_REGISTER, + },{ .name = "TB_DW10_REGISTER", .addr = A_TB_DW10_REGISTER, + },{ .name = "TB_DW11_REGISTER", .addr = A_TB_DW11_REGISTER, + },{ .name = "TB_DW12_REGISTER", .addr = A_TB_DW12_REGISTER, + },{ .name = "TB_DW13_REGISTER", .addr = A_TB_DW13_REGISTER, + },{ .name = "TB_DW14_REGISTER", .addr = A_TB_DW14_REGISTER, + },{ .name = "TB_DW15_REGISTER", .addr = A_TB_DW15_REGISTER, + } +}; + +static const RegisterAccessInfo canfd_rx0_regs[] = { + { .name = "RB_ID_REGISTER", .addr = A_RB_ID_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DLC_REGISTER", .addr = A_RB_DLC_REGISTER, + .ro = 0xfe1fffff, + },{ .name = "RB_DW0_REGISTER", .addr = A_RB_DW0_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW1_REGISTER", .addr = A_RB_DW1_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW2_REGISTER", .addr = A_RB_DW2_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW3_REGISTER", .addr = 
A_RB_DW3_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW4_REGISTER", .addr = A_RB_DW4_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW5_REGISTER", .addr = A_RB_DW5_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW6_REGISTER", .addr = A_RB_DW6_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW7_REGISTER", .addr = A_RB_DW7_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW8_REGISTER", .addr = A_RB_DW8_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW9_REGISTER", .addr = A_RB_DW9_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW10_REGISTER", .addr = A_RB_DW10_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW11_REGISTER", .addr = A_RB_DW11_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW12_REGISTER", .addr = A_RB_DW12_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW13_REGISTER", .addr = A_RB_DW13_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW14_REGISTER", .addr = A_RB_DW14_REGISTER, + .ro = 0xffffffff, + },{ .name = "RB_DW15_REGISTER", .addr = A_RB_DW15_REGISTER, + .ro = 0xffffffff, + } +}; + +static const RegisterAccessInfo canfd_rx1_regs[] = { + { .name = "RB_ID_REGISTER_1", .addr = A_RB_ID_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DLC_REGISTER_1", .addr = A_RB_DLC_REGISTER_1, + .ro = 0xfe1fffff, + },{ .name = "RB0_DW0_REGISTER_1", .addr = A_RB0_DW0_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW1_REGISTER_1", .addr = A_RB_DW1_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW2_REGISTER_1", .addr = A_RB_DW2_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW3_REGISTER_1", .addr = A_RB_DW3_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW4_REGISTER_1", .addr = A_RB_DW4_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW5_REGISTER_1", .addr = A_RB_DW5_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW6_REGISTER_1", .addr = A_RB_DW6_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW7_REGISTER_1", .addr = A_RB_DW7_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW8_REGISTER_1", .addr = A_RB_DW8_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW9_REGISTER_1", .addr = A_RB_DW9_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW10_REGISTER_1", .addr = A_RB_DW10_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW11_REGISTER_1", .addr = A_RB_DW11_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW12_REGISTER_1", .addr = A_RB_DW12_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW13_REGISTER_1", .addr = A_RB_DW13_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW14_REGISTER_1", .addr = A_RB_DW14_REGISTER_1, + .ro = 0xffffffff, + },{ .name = "RB_DW15_REGISTER_1", .addr = A_RB_DW15_REGISTER_1, + .ro = 0xffffffff, + } +}; + +/* Acceptance filter registers. 
*/ +static const RegisterAccessInfo canfd_af_regs[] = { + { .name = "AFMR_REGISTER", .addr = A_AFMR_REGISTER, + .pre_write = filter_mask, + },{ .name = "AFIR_REGISTER", .addr = A_AFIR_REGISTER, + .pre_write = filter_id, + } +}; + +static const RegisterAccessInfo canfd_txe_regs[] = { + { .name = "TXE_FIFO_TB_ID_REGISTER", .addr = A_TXE_FIFO_TB_ID_REGISTER, + .ro = 0xffffffff, + },{ .name = "TXE_FIFO_TB_DLC_REGISTER", .addr = A_TXE_FIFO_TB_DLC_REGISTER, + .ro = 0xffffffff, + } +}; + +static const RegisterAccessInfo canfd_regs_info[] = { + { .name = "SOFTWARE_RESET_REGISTER", .addr = A_SOFTWARE_RESET_REGISTER, + .pre_write = canfd_srr_pre_write, + },{ .name = "MODE_SELECT_REGISTER", .addr = A_MODE_SELECT_REGISTER, + .pre_write = canfd_msr_pre_write, + },{ .name = "ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER", + .addr = A_ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, + .pre_write = canfd_write_check_prew, + },{ .name = "ARBITRATION_PHASE_BIT_TIMING_REGISTER", + .addr = A_ARBITRATION_PHASE_BIT_TIMING_REGISTER, + .pre_write = canfd_write_check_prew, + },{ .name = "ERROR_COUNTER_REGISTER", .addr = A_ERROR_COUNTER_REGISTER, + .ro = 0xffff, + },{ .name = "ERROR_STATUS_REGISTER", .addr = A_ERROR_STATUS_REGISTER, + .w1c = 0xf1f, + },{ .name = "STATUS_REGISTER", .addr = A_STATUS_REGISTER, + .reset = 0x1, + .ro = 0x7f17ff, + },{ .name = "INTERRUPT_STATUS_REGISTER", + .addr = A_INTERRUPT_STATUS_REGISTER, + .ro = 0xffffff7f, + },{ .name = "INTERRUPT_ENABLE_REGISTER", + .addr = A_INTERRUPT_ENABLE_REGISTER, + .post_write = canfd_ier_post_write, + },{ .name = "INTERRUPT_CLEAR_REGISTER", + .addr = A_INTERRUPT_CLEAR_REGISTER, .pre_write = canfd_icr_pre_write, + },{ .name = "TIMESTAMP_REGISTER", .addr = A_TIMESTAMP_REGISTER, + .ro = 0xffff0000, + .pre_write = canfd_tsr_pre_write, + },{ .name = "DATA_PHASE_BAUD_RATE_PRESCALER_REGISTER", + .addr = A_DATA_PHASE_BAUD_RATE_PRESCALER_REGISTER, + .pre_write = canfd_write_check_prew, + },{ .name = "DATA_PHASE_BIT_TIMING_REGISTER", + .addr = A_DATA_PHASE_BIT_TIMING_REGISTER, + .pre_write = canfd_write_check_prew, + },{ .name = "TX_BUFFER_READY_REQUEST_REGISTER", + .addr = A_TX_BUFFER_READY_REQUEST_REGISTER, + .pre_write = canfd_trr_reg_prew, + .post_write = canfd_trr_reg_postw, + },{ .name = "INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER", + .addr = A_INTERRUPT_ENABLE_TX_BUFFER_READY_REQUEST_REGISTER, + },{ .name = "TX_BUFFER_CANCEL_REQUEST_REGISTER", + .addr = A_TX_BUFFER_CANCEL_REQUEST_REGISTER, + .post_write = canfd_cancel_reg_postw, + },{ .name = "INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER", + .addr = A_INTERRUPT_ENABLE_TX_BUFFER_CANCELLATION_REQUEST_REGISTER, + },{ .name = "TX_EVENT_FIFO_STATUS_REGISTER", + .addr = A_TX_EVENT_FIFO_STATUS_REGISTER, + .ro = 0x3f1f, .pre_write = canfd_tx_fifo_status_prew, + },{ .name = "TX_EVENT_FIFO_WATERMARK_REGISTER", + .addr = A_TX_EVENT_FIFO_WATERMARK_REGISTER, + .reset = 0xf, + .pre_write = canfd_write_check_prew, + },{ .name = "ACCEPTANCE_FILTER_CONTROL_REGISTER", + .addr = A_ACCEPTANCE_FILTER_CONTROL_REGISTER, + },{ .name = "RX_FIFO_STATUS_REGISTER", .addr = A_RX_FIFO_STATUS_REGISTER, + .ro = 0x7f3f7f3f, .pre_write = canfd_rx_fifo_status_prew, + },{ .name = "RX_FIFO_WATERMARK_REGISTER", + .addr = A_RX_FIFO_WATERMARK_REGISTER, + .reset = 0x1f0f0f, + .pre_write = canfd_write_check_prew, + } +}; + +static void xlnx_versal_canfd_ptimer_cb(void *opaque) +{ + /* No action required on the timer rollover. 
*/ +} + +static const MemoryRegionOps canfd_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void canfd_reset(DeviceState *dev) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->reg_info); ++i) { + register_reset(&s->reg_info[i]); + } + + ptimer_transaction_begin(s->canfd_timer); + ptimer_set_count(s->canfd_timer, 0); + ptimer_transaction_commit(s->canfd_timer); +} + +static bool can_xilinx_canfd_receive(CanBusClientState *client) +{ + XlnxVersalCANFDState *s = container_of(client, XlnxVersalCANFDState, + bus_client); + + bool reset_state = ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, SRST); + bool can_enabled = ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN); + + return !reset_state && can_enabled; +} + +static ssize_t canfd_xilinx_receive(CanBusClientState *client, + const qemu_can_frame *buf, + size_t buf_size) +{ + XlnxVersalCANFDState *s = container_of(client, XlnxVersalCANFDState, + bus_client); + const qemu_can_frame *frame = buf; + + assert(buf_size > 0); + + if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, LBACK)) { + /* + * XlnxVersalCANFDState will not participate in normal bus communication + * and does not receive any messages transmitted by other CAN nodes. + */ + return 1; + } + + /* Update the status register that we are receiving message. */ + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, BBSY, 1); + + if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SNOOP)) { + /* Snoop Mode: Just keep the data. no response back. */ + update_rx_sequential(s, frame); + } else { + if ((ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP))) { + /* + * XlnxVersalCANFDState is in sleep mode. Any data on bus will bring + * it to the wake up state. + */ + canfd_exit_sleep_mode(s); + } + + update_rx_sequential(s, frame); + } + + /* Message processing done. 
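Note: the receive callbacks above only accept frames while the core is enabled and out of reset, and then branch on loopback, snoop and sleep state. A condensed sketch of that gating, with the mode flags reduced to plain booleans; this illustrates the control flow only, not the device's register handling.

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        bool in_reset;
        bool enabled;
        bool loopback;
        bool sleeping;
    } CanCoreMode;

    /*
     * Sketch: frames are only taken when the core is enabled and out of
     * reset; a loopback core ignores external bus traffic; a sleeping core
     * is woken by any bus activity before the frame is stored.
     */
    static bool sketch_can_receive(const CanCoreMode *m)
    {
        return !m->in_reset && m->enabled;
    }

    static const char *sketch_receive(CanCoreMode *m)
    {
        if (m->loopback) {
            return "dropped (loopback mode ignores external traffic)";
        }
        if (m->sleeping) {
            m->sleeping = false;            /* bus activity wakes the core */
        }
        return "stored in RX FIFO";
    }

    int main(void)
    {
        CanCoreMode m = { .enabled = true, .sleeping = true };

        if (sketch_can_receive(&m)) {
            printf("%s\n", sketch_receive(&m));
        }
        return 0;
    }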
Update the status back to !busy */ + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, BBSY, 0); + return 1; +} + +static CanBusClientInfo canfd_xilinx_bus_client_info = { + .can_receive = can_xilinx_canfd_receive, + .receive = canfd_xilinx_receive, +}; + +static int xlnx_canfd_connect_to_bus(XlnxVersalCANFDState *s, + CanBusState *bus) +{ + s->bus_client.info = &canfd_xilinx_bus_client_info; + + return can_bus_insert_client(bus, &s->bus_client); +} + +#define NUM_REG_PER_AF ARRAY_SIZE(canfd_af_regs) +#define NUM_AF 32 +#define NUM_REG_PER_TXE ARRAY_SIZE(canfd_txe_regs) +#define NUM_TXE 32 + +static int canfd_populate_regarray(XlnxVersalCANFDState *s, + RegisterInfoArray *r_array, int pos, + const RegisterAccessInfo *rae, + int num_rae) +{ + int i; + + for (i = 0; i < num_rae; i++) { + int index = rae[i].addr / 4; + RegisterInfo *r = &s->reg_info[index]; + + object_initialize(r, sizeof(*r), TYPE_REGISTER); + + *r = (RegisterInfo) { + .data = &s->regs[index], + .data_size = sizeof(uint32_t), + .access = &rae[i], + .opaque = OBJECT(s), + }; + + r_array->r[i + pos] = r; + } + return i + pos; +} + +static void canfd_create_rai(RegisterAccessInfo *rai_array, + const RegisterAccessInfo *canfd_regs, + int template_rai_array_sz, + int num_template_to_copy) +{ + int i; + int reg_num; + + for (reg_num = 0; reg_num < num_template_to_copy; reg_num++) { + int pos = reg_num * template_rai_array_sz; + + memcpy(rai_array + pos, canfd_regs, + template_rai_array_sz * sizeof(RegisterAccessInfo)); + + for (i = 0; i < template_rai_array_sz; i++) { + const char *name = canfd_regs[i].name; + uint64_t addr = canfd_regs[i].addr; + rai_array[i + pos].name = g_strdup_printf("%s%d", name, reg_num); + rai_array[i + pos].addr = addr + pos * 4; + } + } +} + +static RegisterInfoArray *canfd_create_regarray(XlnxVersalCANFDState *s) +{ + const char *device_prefix = object_get_typename(OBJECT(s)); + uint64_t memory_size = XLNX_VERSAL_CANFD_R_MAX * 4; + int num_regs; + int pos = 0; + RegisterInfoArray *r_array; + + num_regs = ARRAY_SIZE(canfd_regs_info) + + s->cfg.tx_fifo * NUM_REGS_PER_MSG_SPACE + + s->cfg.rx0_fifo * NUM_REGS_PER_MSG_SPACE + + NUM_AF * NUM_REG_PER_AF + + NUM_TXE * NUM_REG_PER_TXE; + + s->tx_regs = g_new0(RegisterAccessInfo, + s->cfg.tx_fifo * ARRAY_SIZE(canfd_tx_regs)); + + canfd_create_rai(s->tx_regs, canfd_tx_regs, + ARRAY_SIZE(canfd_tx_regs), s->cfg.tx_fifo); + + s->rx0_regs = g_new0(RegisterAccessInfo, + s->cfg.rx0_fifo * ARRAY_SIZE(canfd_rx0_regs)); + + canfd_create_rai(s->rx0_regs, canfd_rx0_regs, + ARRAY_SIZE(canfd_rx0_regs), s->cfg.rx0_fifo); + + s->af_regs = g_new0(RegisterAccessInfo, + NUM_AF * ARRAY_SIZE(canfd_af_regs)); + + canfd_create_rai(s->af_regs, canfd_af_regs, + ARRAY_SIZE(canfd_af_regs), NUM_AF); + + s->txe_regs = g_new0(RegisterAccessInfo, + NUM_TXE * ARRAY_SIZE(canfd_txe_regs)); + + canfd_create_rai(s->txe_regs, canfd_txe_regs, + ARRAY_SIZE(canfd_txe_regs), NUM_TXE); + + if (s->cfg.enable_rx_fifo1) { + num_regs += s->cfg.rx1_fifo * NUM_REGS_PER_MSG_SPACE; + + s->rx1_regs = g_new0(RegisterAccessInfo, + s->cfg.rx1_fifo * ARRAY_SIZE(canfd_rx1_regs)); + + canfd_create_rai(s->rx1_regs, canfd_rx1_regs, + ARRAY_SIZE(canfd_rx1_regs), s->cfg.rx1_fifo); + } + + r_array = g_new0(RegisterInfoArray, 1); + r_array->r = g_new0(RegisterInfo * , num_regs); + r_array->num_elements = num_regs; + r_array->prefix = device_prefix; + + pos = canfd_populate_regarray(s, r_array, pos, + canfd_regs_info, + ARRAY_SIZE(canfd_regs_info)); + pos = canfd_populate_regarray(s, r_array, pos, + s->tx_regs, s->cfg.tx_fifo * + 
NUM_REGS_PER_MSG_SPACE); + pos = canfd_populate_regarray(s, r_array, pos, + s->rx0_regs, s->cfg.rx0_fifo * + NUM_REGS_PER_MSG_SPACE); + if (s->cfg.enable_rx_fifo1) { + pos = canfd_populate_regarray(s, r_array, pos, + s->rx1_regs, s->cfg.rx1_fifo * + NUM_REGS_PER_MSG_SPACE); + } + pos = canfd_populate_regarray(s, r_array, pos, + s->af_regs, NUM_AF * NUM_REG_PER_AF); + pos = canfd_populate_regarray(s, r_array, pos, + s->txe_regs, NUM_TXE * NUM_REG_PER_TXE); + + memory_region_init_io(&r_array->mem, OBJECT(s), &canfd_ops, r_array, + device_prefix, memory_size); + return r_array; +} + +static void canfd_realize(DeviceState *dev, Error **errp) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(dev); + RegisterInfoArray *reg_array; + + reg_array = canfd_create_regarray(s); + memory_region_add_subregion(&s->iomem, 0x00, &reg_array->mem); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq_canfd_int); + + if (s->canfdbus) { + if (xlnx_canfd_connect_to_bus(s, s->canfdbus) < 0) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, "%s: xlnx_canfd_connect_to_bus failed", path); + return; + } + + } + + /* Allocate a new timer. */ + s->canfd_timer = ptimer_init(xlnx_versal_canfd_ptimer_cb, s, + PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD | + PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT | + PTIMER_POLICY_NO_IMMEDIATE_RELOAD); + + ptimer_transaction_begin(s->canfd_timer); + + ptimer_set_freq(s->canfd_timer, s->cfg.ext_clk_freq); + ptimer_set_limit(s->canfd_timer, CANFD_TIMER_MAX, 1); + ptimer_run(s->canfd_timer, 0); + ptimer_transaction_commit(s->canfd_timer); +} + +static void canfd_init(Object *obj) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(obj); + + memory_region_init(&s->iomem, obj, TYPE_XILINX_CANFD, + XLNX_VERSAL_CANFD_R_MAX * 4); +} + +static const VMStateDescription vmstate_canfd = { + .name = TYPE_XILINX_CANFD, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxVersalCANFDState, + XLNX_VERSAL_CANFD_R_MAX), + VMSTATE_PTIMER(canfd_timer, XlnxVersalCANFDState), + VMSTATE_END_OF_LIST(), + } +}; + +static Property canfd_core_properties[] = { + DEFINE_PROP_UINT8("rx-fifo0", XlnxVersalCANFDState, cfg.rx0_fifo, 0x40), + DEFINE_PROP_UINT8("rx-fifo1", XlnxVersalCANFDState, cfg.rx1_fifo, 0x40), + DEFINE_PROP_UINT8("tx-fifo", XlnxVersalCANFDState, cfg.tx_fifo, 0x20), + DEFINE_PROP_BOOL("enable-rx-fifo1", XlnxVersalCANFDState, + cfg.enable_rx_fifo1, true), + DEFINE_PROP_UINT32("ext_clk_freq", XlnxVersalCANFDState, cfg.ext_clk_freq, + CANFD_DEFAULT_CLOCK), + DEFINE_PROP_LINK("canfdbus", XlnxVersalCANFDState, canfdbus, TYPE_CAN_BUS, + CanBusState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void canfd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = canfd_reset; + dc->realize = canfd_realize; + device_class_set_props(dc, canfd_core_properties); + dc->vmsd = &vmstate_canfd; +} + +static const TypeInfo canfd_info = { + .name = TYPE_XILINX_CANFD, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxVersalCANFDState), + .class_init = canfd_class_init, + .instance_init = canfd_init, +}; + +static void canfd_register_types(void) +{ + type_register_static(&canfd_info); +} + +type_init(canfd_register_types) diff --git a/hw/net/e1000.c b/hw/net/e1000.c index 23d660619f..aae5f0bdc0 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -637,9 +637,8 @@ xmit_seg(E1000State *s) e1000x_inc_reg_if_not_full(s->mac_reg, TPT); 
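Note: this hunk and the following ones switch the statistics updates to the shared e1000x helpers, which saturate a 32-bit counter instead of letting it wrap and grow a 64-bit octet counter exposed as a low/high register pair. A standalone sketch of both behaviours; the register names and the saturation point are generic assumptions, not the e1000 MAC register array itself.

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: increment a 32-bit statistics register, saturating at UINT32_MAX. */
    static void inc_reg_if_not_full(uint32_t *reg)
    {
        if (*reg != UINT32_MAX) {
            (*reg)++;
        }
    }

    /* Sketch: grow a 64-bit octet counter stored as a low/high register pair,
     * saturating instead of wrapping. */
    static void grow_8reg_if_not_full(uint32_t *lo, uint32_t *hi, uint32_t size)
    {
        uint64_t sum = ((uint64_t)*hi << 32) | *lo;

        if (sum + size < sum) {          /* would wrap past 2^64 - 1 */
            sum = UINT64_MAX;
        } else {
            sum += size;
        }
        *lo = (uint32_t)sum;
        *hi = (uint32_t)(sum >> 32);
    }

    int main(void)
    {
        uint32_t gptc = 0, gotcl = 0xfffffff0, gotch = 0;

        inc_reg_if_not_full(&gptc);
        grow_8reg_if_not_full(&gotcl, &gotch, 0x20); /* carries into high half */
        printf("GPTC=%u GOTC=0x%x%08x\n", (unsigned)gptc, (unsigned)gotch,
               (unsigned)gotcl);
        return 0;
    }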
e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size + 4); - s->mac_reg[GPTC] = s->mac_reg[TPT]; - s->mac_reg[GOTCL] = s->mac_reg[TOTL]; - s->mac_reg[GOTCH] = s->mac_reg[TOTH]; + e1000x_inc_reg_if_not_full(s->mac_reg, GPTC); + e1000x_grow_8reg_if_not_full(s->mac_reg, GOTCL, s->tx.size + 4); } static void @@ -805,38 +804,11 @@ start_xmit(E1000State *s) } static int -receive_filter(E1000State *s, const uint8_t *buf, int size) +receive_filter(E1000State *s, const void *buf) { - uint32_t rctl = s->mac_reg[RCTL]; - int isbcast = is_broadcast_ether_addr(buf); - int ismcast = is_multicast_ether_addr(buf); - - if (e1000x_is_vlan_packet(buf, le16_to_cpu(s->mac_reg[VET])) && - e1000x_vlan_rx_filter_enabled(s->mac_reg)) { - uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(buf)->h_tci); - uint32_t vfta = - ldl_le_p((uint32_t *)(s->mac_reg + VFTA) + - ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK)); - if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) { - return 0; - } - } - - if (!isbcast && !ismcast && (rctl & E1000_RCTL_UPE)) { /* promiscuous ucast */ - return 1; - } - - if (ismcast && (rctl & E1000_RCTL_MPE)) { /* promiscuous mcast */ - e1000x_inc_reg_if_not_full(s->mac_reg, MPRC); - return 1; - } - - if (isbcast && (rctl & E1000_RCTL_BAM)) { /* broadcast enabled */ - e1000x_inc_reg_if_not_full(s->mac_reg, BPRC); - return 1; - } - - return e1000x_rx_group_filter(s->mac_reg, buf); + return (!e1000x_is_vlan_packet(buf, s->mac_reg[VET]) || + e1000x_rx_vlan_filter(s->mac_reg, PKT_GET_VLAN_HDR(buf))) && + e1000x_rx_group_filter(s->mac_reg, buf); } static void @@ -923,6 +895,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) size_t desc_offset; size_t desc_size; size_t total_size; + eth_pkt_types_e pkt_type; if (!e1000x_hw_rx_enabled(s->mac_reg)) { return -1; @@ -951,7 +924,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) return size; } - if (!receive_filter(s, filter_buf, size)) { + if (!receive_filter(s, filter_buf)) { return size; } @@ -972,6 +945,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) size -= 4; } + pkt_type = get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf)); rdh_start = s->mac_reg[RDH]; desc_offset = 0; total_size = size + e1000x_fcs_len(s->mac_reg); @@ -1037,7 +1011,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) } } while (desc_offset < total_size); - e1000x_update_rx_total_stats(s->mac_reg, size, total_size); + e1000x_update_rx_total_stats(s->mac_reg, pkt_type, size, total_size); n = E1000_ICS_RXT0; if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH]) diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c index c0c09b6965..9f185d099c 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -165,14 +165,14 @@ e1000e_intrmgr_on_throttling_timer(void *opaque) timer->running = false; - if (msi_enabled(timer->core->owner)) { - trace_e1000e_irq_msi_notify_postponed(); - /* Clear msi_causes_pending to fire MSI eventually */ - timer->core->msi_causes_pending = 0; - e1000e_set_interrupt_cause(timer->core, 0); - } else { - trace_e1000e_irq_legacy_notify_postponed(); - e1000e_set_interrupt_cause(timer->core, 0); + if (timer->core->mac[IMS] & timer->core->mac[ICR]) { + if (msi_enabled(timer->core->owner)) { + trace_e1000e_irq_msi_notify_postponed(); + msi_notify(timer->core->owner, 0); + } else { + trace_e1000e_irq_legacy_notify_postponed(); + e1000e_raise_legacy_irq(timer->core); + } } } @@ -366,10 +366,6 @@ static void 
e1000e_intrmgr_fire_all_timers(E1000ECore *core) { int i; - uint32_t val = e1000e_intmgr_collect_delayed_causes(core); - - trace_e1000e_irq_adding_delayed_causes(val, core->mac[ICR]); - core->mac[ICR] |= val; if (core->itr.running) { timer_del(core->itr.timer); @@ -537,7 +533,7 @@ e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt) ip6info->rss_ex_dst_valid, ip6info->rss_ex_src_valid, core->mac[MRQC], - E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]), + E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]), E1000_MRQC_EN_IPV6EX(core->mac[MRQC]), E1000_MRQC_EN_IPV6(core->mac[MRQC])); @@ -546,8 +542,8 @@ e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt) ip6info->rss_ex_src_valid))) { if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && - E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) { - return E1000_MRQ_RSS_TYPE_IPV6TCP; + E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) { + return E1000_MRQ_RSS_TYPE_IPV6TCPEX; } if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) { @@ -581,7 +577,7 @@ e1000e_rss_calc_hash(E1000ECore *core, case E1000_MRQ_RSS_TYPE_IPV4TCP: type = NetPktRssIpV4Tcp; break; - case E1000_MRQ_RSS_TYPE_IPV6TCP: + case E1000_MRQ_RSS_TYPE_IPV6TCPEX: type = NetPktRssIpV6TcpEx; break; case E1000_MRQ_RSS_TYPE_IPV6: @@ -711,9 +707,8 @@ e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt) g_assert_not_reached(); } - core->mac[GPTC] = core->mac[TPT]; - core->mac[GOTCL] = core->mac[TOTL]; - core->mac[GOTCH] = core->mac[TOTH]; + e1000x_inc_reg_if_not_full(core->mac, GPTC); + e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len); } static void @@ -747,7 +742,8 @@ e1000e_process_tx_desc(E1000ECore *core, addr = le64_to_cpu(dp->buffer_addr); if (!tx->skip_cp) { - if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, addr, split_size)) { + if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, core->owner, + addr, split_size)) { tx->skip_cp = true; } } @@ -765,7 +761,7 @@ e1000e_process_tx_desc(E1000ECore *core, } tx->skip_cp = false; - net_tx_pkt_reset(tx->tx_pkt, core->owner); + net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner); tx->sum_needed = 0; tx->cptse = 0; @@ -959,6 +955,8 @@ e1000e_start_xmit(E1000ECore *core, const E1000E_TxRing *txr) if (!ide || !e1000e_intrmgr_delay_tx_causes(core, &cause)) { e1000e_set_interrupt_cause(core, cause); } + + net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner); } static bool @@ -1034,48 +1032,11 @@ e1000e_rx_l4_cso_enabled(E1000ECore *core) } static bool -e1000e_receive_filter(E1000ECore *core, const uint8_t *buf, int size) +e1000e_receive_filter(E1000ECore *core, const void *buf) { - uint32_t rctl = core->mac[RCTL]; - - if (e1000x_is_vlan_packet(buf, core->mac[VET]) && - e1000x_vlan_rx_filter_enabled(core->mac)) { - uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(buf)->h_tci); - uint32_t vfta = - ldl_le_p((uint32_t *)(core->mac + VFTA) + - ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK)); - if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) { - trace_e1000e_rx_flt_vlan_mismatch(vid); - return false; - } else { - trace_e1000e_rx_flt_vlan_match(vid); - } - } - - switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { - case ETH_PKT_UCAST: - if (rctl & E1000_RCTL_UPE) { - return true; /* promiscuous ucast */ - } - break; - - case ETH_PKT_BCAST: - if (rctl & E1000_RCTL_BAM) { - return true; /* broadcast enabled */ - } - break; - - case ETH_PKT_MCAST: - if (rctl & E1000_RCTL_MPE) { - return true; /* promiscuous mcast */ - } - break; - - default: - g_assert_not_reached(); - } - - return 
e1000x_rx_group_filter(core->mac, buf); + return (!e1000x_is_vlan_packet(buf, core->mac[VET]) || + e1000x_rx_vlan_filter(core->mac, PKT_GET_VLAN_HDR(buf))) && + e1000x_rx_group_filter(core->mac, buf); } static inline void @@ -1149,6 +1110,11 @@ e1000e_verify_csum_in_sw(E1000ECore *core, return; } + if (l4hdr_proto != ETH_L4_HDR_PROTO_TCP && + l4hdr_proto != ETH_L4_HDR_PROTO_UDP) { + return; + } + if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) { trace_e1000e_rx_metadata_l4_csum_validation_failed(); return; @@ -1281,9 +1247,8 @@ e1000e_build_rx_metadata(E1000ECore *core, trace_e1000e_rx_metadata_l4_cso_disabled(); } - trace_e1000e_rx_metadata_status_flags(*status_flags); - func_exit: + trace_e1000e_rx_metadata_status_flags(*status_flags); *status_flags = cpu_to_le32(*status_flags); } @@ -1488,24 +1453,10 @@ e1000e_write_to_rx_buffers(E1000ECore *core, } static void -e1000e_update_rx_stats(E1000ECore *core, - size_t data_size, - size_t data_fcs_size) +e1000e_update_rx_stats(E1000ECore *core, size_t pkt_size, size_t pkt_fcs_size) { - e1000x_update_rx_total_stats(core->mac, data_size, data_fcs_size); - - switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { - case ETH_PKT_BCAST: - e1000x_inc_reg_if_not_full(core->mac, BPRC); - break; - - case ETH_PKT_MCAST: - e1000x_inc_reg_if_not_full(core->mac, MPRC); - break; - - default: - break; - } + eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt); + e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size); } static inline bool @@ -1700,12 +1651,9 @@ static ssize_t e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt, bool has_vnet) { - static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4); - - uint32_t n = 0; - uint8_t min_buf[ETH_ZLEN]; + uint32_t causes = 0; + uint8_t buf[ETH_ZLEN]; struct iovec min_iov; - uint8_t *filter_buf; size_t size, orig_size; size_t iov_ofs = 0; E1000E_RxRing rxr; @@ -1728,24 +1676,21 @@ e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt, net_rx_pkt_unset_vhdr(core->rx_pkt); } - filter_buf = iov->iov_base + iov_ofs; orig_size = iov_size(iov, iovcnt); size = orig_size - iov_ofs; /* Pad to minimum Ethernet frame length */ - if (size < sizeof(min_buf)) { - iov_to_buf(iov, iovcnt, iov_ofs, min_buf, size); - memset(&min_buf[size], 0, sizeof(min_buf) - size); + if (size < sizeof(buf)) { + iov_to_buf(iov, iovcnt, iov_ofs, buf, size); + memset(&buf[size], 0, sizeof(buf) - size); e1000x_inc_reg_if_not_full(core->mac, RUC); - min_iov.iov_base = filter_buf = min_buf; - min_iov.iov_len = size = sizeof(min_buf); + min_iov.iov_base = buf; + min_iov.iov_len = size = sizeof(buf); iovcnt = 1; iov = &min_iov; iov_ofs = 0; - } else if (iov->iov_len < maximum_ethernet_hdr_len) { - /* This is very unlikely, but may happen. */ - iov_to_buf(iov, iovcnt, iov_ofs, min_buf, maximum_ethernet_hdr_len); - filter_buf = min_buf; + } else { + iov_to_buf(iov, iovcnt, iov_ofs, buf, ETH_HLEN + 4); } /* Discard oversized packets if !LPE and !SBP. 
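Note: the rewritten receive path above copies runt frames into a stack buffer and zero-pads them to the 60-byte Ethernet minimum before running the filters. A standalone sketch of that padding step, with the buffer handling reduced to a flat copy; the SKETCH_ETH_ZLEN constant is an assumption of the usual ETH_ZLEN value.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SKETCH_ETH_ZLEN 60   /* assumed minimum Ethernet frame length, no FCS */

    /*
     * Sketch: copy an incoming frame into 'out' and zero-pad it to the
     * minimum length when it is a runt. Returns the length to use for
     * further processing.
     */
    static size_t pad_runt_frame(uint8_t *out, size_t out_size,
                                 const uint8_t *in, size_t in_len)
    {
        size_t copy = in_len < out_size ? in_len : out_size;

        assert(out_size >= SKETCH_ETH_ZLEN);
        memcpy(out, in, copy);
        if (copy < SKETCH_ETH_ZLEN) {
            memset(out + copy, 0, SKETCH_ETH_ZLEN - copy);
            return SKETCH_ETH_ZLEN;
        }
        return copy;
    }

    int main(void)
    {
        uint8_t frame[SKETCH_ETH_ZLEN];
        const uint8_t runt[14] = { 0xff, 0xff };   /* header-only frame */

        printf("padded length: %zu\n",
               pad_runt_frame(frame, sizeof(frame), runt, sizeof(runt)));
        return 0;
    }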
*/ @@ -1754,15 +1699,16 @@ e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt, } net_rx_pkt_set_packet_type(core->rx_pkt, - get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf))); + get_eth_packet_type(PKT_GET_ETH_HDR(buf))); - if (!e1000e_receive_filter(core, filter_buf, size)) { + if (!e1000e_receive_filter(core, buf)) { trace_e1000e_rx_flt_dropped(); return orig_size; } net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs, - e1000x_vlan_enabled(core->mac), core->mac[VET]); + e1000x_vlan_enabled(core->mac) ? 0 : -1, + core->mac[VET], 0); e1000e_rss_parse_packet(core, core->rx_pkt, &rss_info); e1000e_rx_ring_init(core, &rxr, rss_info.queue); @@ -1779,32 +1725,32 @@ e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt, /* Perform small receive detection (RSRPD) */ if (total_size < core->mac[RSRPD]) { - n |= E1000_ICS_SRPD; + causes |= E1000_ICS_SRPD; } /* Perform ACK receive detection */ if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS) && (e1000e_is_tcp_ack(core, core->rx_pkt))) { - n |= E1000_ICS_ACK; + causes |= E1000_ICS_ACK; } /* Check if receive descriptor minimum threshold hit */ rdmts_hit = e1000e_rx_descr_threshold_hit(core, rxr.i); - n |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit); + causes |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit); trace_e1000e_rx_written_to_guest(rxr.i->idx); } else { - n |= E1000_ICS_RXO; + causes |= E1000_ICS_RXO; retval = 0; trace_e1000e_rx_not_written_to_guest(rxr.i->idx); } - if (!e1000e_intrmgr_delay_rx_causes(core, &n)) { - trace_e1000e_rx_interrupt_set(n); - e1000e_set_interrupt_cause(core, n); + if (!e1000e_intrmgr_delay_rx_causes(core, &causes)) { + trace_e1000e_rx_interrupt_set(causes); + e1000e_set_interrupt_cause(core, causes); } else { - trace_e1000e_rx_interrupt_delayed(n); + trace_e1000e_rx_interrupt_delayed(causes); } return retval; @@ -2024,13 +1970,6 @@ void(*e1000e_phyreg_writeops[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE]) } }; -static inline void -e1000e_clear_ims_bits(E1000ECore *core, uint32_t bits) -{ - trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits); - core->mac[IMS] &= ~bits; -} - static inline bool e1000e_postpone_interrupt(E1000IntrDelayTimer *timer) { @@ -2088,7 +2027,6 @@ e1000e_msix_notify_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg) effective_eiac = core->mac[EIAC] & cause; core->mac[ICR] &= ~effective_eiac; - core->msi_causes_pending &= ~effective_eiac; if (!(core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { core->mac[IMS] &= ~effective_eiac; @@ -2180,33 +2118,17 @@ e1000e_fix_icr_asserted(E1000ECore *core) trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]); } -static void -e1000e_send_msi(E1000ECore *core, bool msix) +static void e1000e_raise_interrupts(E1000ECore *core, + size_t index, uint32_t causes) { - uint32_t causes = core->mac[ICR] & core->mac[IMS] & ~E1000_ICR_ASSERTED; - - core->msi_causes_pending &= causes; - causes ^= core->msi_causes_pending; - if (causes == 0) { - return; - } - core->msi_causes_pending |= causes; + bool is_msix = msix_enabled(core->owner); + uint32_t old_causes = core->mac[IMS] & core->mac[ICR]; + uint32_t raised_causes; - if (msix) { - e1000e_msix_notify(core, causes); - } else { - if (!e1000e_itr_should_postpone(core)) { - trace_e1000e_irq_msi_notify(causes); - msi_notify(core->owner, 0); - } - } -} + trace_e1000e_irq_set(index << 2, + core->mac[index], core->mac[index] | causes); -static void -e1000e_update_interrupt_state(E1000ECore *core) -{ - bool interrupts_pending; - bool 
is_msix = msix_enabled(core->owner); + core->mac[index] |= causes; /* Set ICR[OTHER] for MSI-X */ if (is_msix) { @@ -2228,40 +2150,58 @@ e1000e_update_interrupt_state(E1000ECore *core) */ core->mac[ICS] = core->mac[ICR]; - interrupts_pending = (core->mac[IMS] & core->mac[ICR]) ? true : false; - if (!interrupts_pending) { - core->msi_causes_pending = 0; - } - trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], core->mac[ICR], core->mac[IMS]); - if (is_msix || msi_enabled(core->owner)) { - if (interrupts_pending) { - e1000e_send_msi(core, is_msix); - } - } else { - if (interrupts_pending) { - if (!e1000e_itr_should_postpone(core)) { - e1000e_raise_legacy_irq(core); - } + raised_causes = core->mac[IMS] & core->mac[ICR] & ~old_causes; + if (!raised_causes) { + return; + } + + if (is_msix) { + e1000e_msix_notify(core, raised_causes & ~E1000_ICR_ASSERTED); + } else if (!e1000e_itr_should_postpone(core)) { + if (msi_enabled(core->owner)) { + trace_e1000e_irq_msi_notify(raised_causes); + msi_notify(core->owner, 0); } else { - e1000e_lower_legacy_irq(core); + e1000e_raise_legacy_irq(core); } } } -static void -e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val) +static void e1000e_lower_interrupts(E1000ECore *core, + size_t index, uint32_t causes) { - trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]); + trace_e1000e_irq_clear(index << 2, + core->mac[index], core->mac[index] & ~causes); - val |= e1000e_intmgr_collect_delayed_causes(core); - core->mac[ICR] |= val; + core->mac[index] &= ~causes; + + /* + * Make sure ICR and ICS registers have the same value. + * The spec says that the ICS register is write-only. However in practice, + * on real hardware ICS is readable, and for reads it has the same value as + * ICR (except that ICS does not have the clear on read behaviour of ICR). + * + * The VxWorks PRO/1000 driver uses this behaviour. + */ + core->mac[ICS] = core->mac[ICR]; + + trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], + core->mac[ICR], core->mac[IMS]); - trace_e1000e_irq_set_cause_exit(val, core->mac[ICR]); + if (!(core->mac[IMS] & core->mac[ICR]) && + !msix_enabled(core->owner) && !msi_enabled(core->owner)) { + e1000e_lower_legacy_irq(core); + } +} - e1000e_update_interrupt_state(core); +static void +e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val) +{ + val |= e1000e_intmgr_collect_delayed_causes(core); + e1000e_raise_interrupts(core, ICR, val); } static inline void @@ -2527,30 +2467,27 @@ e1000e_set_ics(E1000ECore *core, int index, uint32_t val) static void e1000e_set_icr(E1000ECore *core, int index, uint32_t val) { - uint32_t icr = 0; if ((core->mac[ICR] & E1000_ICR_ASSERTED) && (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { trace_e1000e_irq_icr_process_iame(); - e1000e_clear_ims_bits(core, core->mac[IAM]); + e1000e_lower_interrupts(core, IMS, core->mac[IAM]); } - icr = core->mac[ICR] & ~val; /* * Windows driver expects that the "receive overrun" bit and other * ones to be cleared when the "Other" bit (#24) is cleared. */ - icr = (val & E1000_ICR_OTHER) ? 
(icr & ~E1000_ICR_OTHER_CAUSES) : icr; - trace_e1000e_irq_icr_write(val, core->mac[ICR], icr); - core->mac[ICR] = icr; - e1000e_update_interrupt_state(core); + if (val & E1000_ICR_OTHER) { + val |= E1000_ICR_OTHER_CAUSES; + } + e1000e_lower_interrupts(core, ICR, val); } static void e1000e_set_imc(E1000ECore *core, int index, uint32_t val) { trace_e1000e_irq_ims_clear_set_imc(val); - e1000e_clear_ims_bits(core, val); - e1000e_update_interrupt_state(core); + e1000e_lower_interrupts(core, IMS, val); } static void @@ -2571,9 +2508,6 @@ e1000e_set_ims(E1000ECore *core, int index, uint32_t val) uint32_t valid_val = val & ims_valid_mask; - trace_e1000e_irq_set_ims(val, core->mac[IMS], core->mac[IMS] | valid_val); - core->mac[IMS] |= valid_val; - if ((valid_val & ims_ext_mask) && (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PBA_CLR) && msix_enabled(core->owner)) { @@ -2586,7 +2520,7 @@ e1000e_set_ims(E1000ECore *core, int index, uint32_t val) e1000e_intrmgr_fire_all_timers(core); } - e1000e_update_interrupt_state(core); + e1000e_raise_interrupts(core, IMS, valid_val); } static void @@ -2659,28 +2593,25 @@ static uint32_t e1000e_mac_icr_read(E1000ECore *core, int index) { uint32_t ret = core->mac[ICR]; - trace_e1000e_irq_icr_read_entry(ret); if (core->mac[IMS] == 0) { trace_e1000e_irq_icr_clear_zero_ims(); - core->mac[ICR] = 0; + e1000e_lower_interrupts(core, ICR, 0xffffffff); } if (!msix_enabled(core->owner)) { trace_e1000e_irq_icr_clear_nonmsix_icr_read(); - core->mac[ICR] = 0; + e1000e_lower_interrupts(core, ICR, 0xffffffff); } if ((core->mac[ICR] & E1000_ICR_ASSERTED) && (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { trace_e1000e_irq_icr_clear_iame(); - core->mac[ICR] = 0; + e1000e_lower_interrupts(core, ICR, 0xffffffff); trace_e1000e_irq_icr_process_iame(); - e1000e_clear_ims_bits(core, core->mac[IAM]); + e1000e_lower_interrupts(core, IMS, core->mac[IAM]); } - trace_e1000e_irq_icr_read_exit(core->mac[ICR]); - e1000e_update_interrupt_state(core); return ret; } @@ -3422,7 +3353,7 @@ e1000e_core_pci_realize(E1000ECore *core, qemu_add_vm_change_state_handler(e1000e_vm_state_change, core); for (i = 0; i < E1000E_NUM_QUEUES; i++) { - net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS); + net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS); } net_rx_pkt_init(&core->rx_pkt); @@ -3447,7 +3378,6 @@ e1000e_core_pci_uninit(E1000ECore *core) qemu_del_vm_change_state_handler(core->vmstate); for (i = 0; i < E1000E_NUM_QUEUES; i++) { - net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner); net_tx_pkt_uninit(core->tx[i].tx_pkt); } @@ -3572,7 +3502,6 @@ static void e1000e_reset(E1000ECore *core, bool sw) e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac); for (i = 0; i < ARRAY_SIZE(core->tx); i++) { - net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner); memset(&core->tx[i].props, 0, sizeof(core->tx[i].props)); core->tx[i].skip_cp = false; } diff --git a/hw/net/e1000e_core.h b/hw/net/e1000e_core.h index 213a70530d..66b025cc43 100644 --- a/hw/net/e1000e_core.h +++ b/hw/net/e1000e_core.h @@ -111,8 +111,6 @@ struct E1000Core { PCIDevice *owner; void (*owner_start_recv)(PCIDevice *d); - uint32_t msi_causes_pending; - int64_t timadj; }; diff --git a/hw/net/e1000x_common.c b/hw/net/e1000x_common.c index b844af590a..212873fd77 100644 --- a/hw/net/e1000x_common.c +++ b/hw/net/e1000x_common.c @@ -58,33 +58,64 @@ bool e1000x_is_vlan_packet(const void *buf, uint16_t vet) return res; } -bool e1000x_rx_group_filter(uint32_t *mac, const uint8_t *buf) +bool e1000x_rx_vlan_filter(uint32_t *mac, 
const struct vlan_header *vhdr) +{ + if (e1000x_vlan_rx_filter_enabled(mac)) { + uint16_t vid = lduw_be_p(&vhdr->h_tci); + uint32_t vfta = + ldl_le_p((uint32_t *)(mac + VFTA) + + ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK)); + if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) { + trace_e1000x_rx_flt_vlan_mismatch(vid); + return false; + } + + trace_e1000x_rx_flt_vlan_match(vid); + } + + return true; +} + +bool e1000x_rx_group_filter(uint32_t *mac, const struct eth_header *ehdr) { static const int mta_shift[] = { 4, 3, 2, 0 }; uint32_t f, ra[2], *rp, rctl = mac[RCTL]; + if (is_broadcast_ether_addr(ehdr->h_dest)) { + if (rctl & E1000_RCTL_BAM) { + return true; + } + } else if (is_multicast_ether_addr(ehdr->h_dest)) { + if (rctl & E1000_RCTL_MPE) { + return true; + } + } else { + if (rctl & E1000_RCTL_UPE) { + return true; + } + } + for (rp = mac + RA; rp < mac + RA + 32; rp += 2) { if (!(rp[1] & E1000_RAH_AV)) { continue; } ra[0] = cpu_to_le32(rp[0]); ra[1] = cpu_to_le32(rp[1]); - if (!memcmp(buf, (uint8_t *)ra, ETH_ALEN)) { + if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) { trace_e1000x_rx_flt_ucast_match((int)(rp - mac - RA) / 2, - MAC_ARG(buf)); + MAC_ARG(ehdr->h_dest)); return true; } } - trace_e1000x_rx_flt_ucast_mismatch(MAC_ARG(buf)); + trace_e1000x_rx_flt_ucast_mismatch(MAC_ARG(ehdr->h_dest)); f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3]; - f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff; + f = (((ehdr->h_dest[5] << 8) | ehdr->h_dest[4]) >> f) & 0xfff; if (mac[MTA + (f >> 5)] & (1 << (f & 0x1f))) { - e1000x_inc_reg_if_not_full(mac, MPRC); return true; } - trace_e1000x_rx_flt_inexact_mismatch(MAC_ARG(buf), + trace_e1000x_rx_flt_inexact_mismatch(MAC_ARG(ehdr->h_dest), (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5, mac[MTA + (f >> 5)]); @@ -109,16 +140,16 @@ bool e1000x_hw_rx_enabled(uint32_t *mac) bool e1000x_is_oversized(uint32_t *mac, size_t size) { + size_t header_size = sizeof(struct eth_header) + sizeof(struct vlan_header); /* this is the size past which hardware will drop packets when setting LPE=0 */ - static const int maximum_ethernet_vlan_size = 1522; + size_t maximum_short_size = header_size + ETH_MTU; /* this is the size past which hardware will drop packets when setting LPE=1 */ - static const int maximum_ethernet_lpe_size = 16 * KiB; + size_t maximum_large_size = 16 * KiB - ETH_FCS_LEN; - if ((size > maximum_ethernet_lpe_size || - (size > maximum_ethernet_vlan_size - && !(mac[RCTL] & E1000_RCTL_LPE))) + if ((size > maximum_large_size || + (size > maximum_short_size && !(mac[RCTL] & E1000_RCTL_LPE))) && !(mac[RCTL] & E1000_RCTL_SBP)) { e1000x_inc_reg_if_not_full(mac, ROC); trace_e1000x_rx_oversized(size); @@ -212,23 +243,36 @@ e1000x_rxbufsize(uint32_t rctl) void e1000x_update_rx_total_stats(uint32_t *mac, - size_t data_size, - size_t data_fcs_size) + eth_pkt_types_e pkt_type, + size_t pkt_size, + size_t pkt_fcs_size) { static const int PRCregs[6] = { PRC64, PRC127, PRC255, PRC511, PRC1023, PRC1522 }; - e1000x_increase_size_stats(mac, PRCregs, data_fcs_size); + e1000x_increase_size_stats(mac, PRCregs, pkt_fcs_size); e1000x_inc_reg_if_not_full(mac, TPR); - mac[GPRC] = mac[TPR]; + e1000x_inc_reg_if_not_full(mac, GPRC); /* TOR - Total Octets Received: * This register includes bytes received in a packet from the <Destination * Address> field through the <CRC> field, inclusively. * Always include FCS length (4) in size. 
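Note: the rewritten e1000x_is_oversized() above compares the frame against two limits: a short limit of one Ethernet header plus a VLAN tag plus the MTU, and a 16 KiB long-packet limit minus the FCS, gated by the LPE (long packet enable) and SBP (store bad packets) bits. A standalone sketch of the same predicate with the flags reduced to booleans; the constants are spelled out here as assumptions of the usual Ethernet sizes.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define SKETCH_ETH_HLEN   14          /* Ethernet header          */
    #define SKETCH_VLAN_HLEN  4           /* single 802.1Q tag        */
    #define SKETCH_ETH_MTU    1500        /* payload limit            */
    #define SKETCH_ETH_FCS    4           /* frame check sequence     */

    /*
     * Sketch: a frame is dropped as oversized unless "store bad packets"
     * (SBP) is set; "long packet enable" (LPE) raises the limit from the
     * normal header + MTU size to a 16 KiB buffer limit minus the FCS.
     */
    static bool is_oversized(size_t size, bool lpe, bool sbp)
    {
        size_t short_limit = SKETCH_ETH_HLEN + SKETCH_VLAN_HLEN + SKETCH_ETH_MTU;
        size_t large_limit = 16 * 1024 - SKETCH_ETH_FCS;

        if (sbp) {
            return false;                 /* bad/oversized packets kept as-is */
        }
        return size > (lpe ? large_limit : short_limit);
    }

    int main(void)
    {
        printf("%d %d %d\n",
               is_oversized(1518, false, false),   /* 0: fits the short limit */
               is_oversized(9000, false, false),   /* 1: jumbo without LPE    */
               is_oversized(9000, true,  false));  /* 0: jumbo with LPE       */
        return 0;
    }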
*/ - e1000x_grow_8reg_if_not_full(mac, TORL, data_size + 4); - mac[GORCL] = mac[TORL]; - mac[GORCH] = mac[TORH]; + e1000x_grow_8reg_if_not_full(mac, TORL, pkt_size + 4); + e1000x_grow_8reg_if_not_full(mac, GORCL, pkt_size + 4); + + switch (pkt_type) { + case ETH_PKT_BCAST: + e1000x_inc_reg_if_not_full(mac, BPRC); + break; + + case ETH_PKT_MCAST: + e1000x_inc_reg_if_not_full(mac, MPRC); + break; + + default: + break; + } } void diff --git a/hw/net/e1000x_common.h b/hw/net/e1000x_common.h index 911abd8a90..be291684de 100644 --- a/hw/net/e1000x_common.h +++ b/hw/net/e1000x_common.h @@ -91,8 +91,9 @@ e1000x_update_regs_on_link_up(uint32_t *mac, uint16_t *phy) } void e1000x_update_rx_total_stats(uint32_t *mac, - size_t data_size, - size_t data_fcs_size); + eth_pkt_types_e pkt_type, + size_t pkt_size, + size_t pkt_fcs_size); void e1000x_core_prepare_eeprom(uint16_t *eeprom, const uint16_t *templ, @@ -106,7 +107,9 @@ bool e1000x_rx_ready(PCIDevice *d, uint32_t *mac); bool e1000x_is_vlan_packet(const void *buf, uint16_t vet); -bool e1000x_rx_group_filter(uint32_t *mac, const uint8_t *buf); +bool e1000x_rx_vlan_filter(uint32_t *mac, const struct vlan_header *vhdr); + +bool e1000x_rx_group_filter(uint32_t *mac, const struct eth_header *ehdr); bool e1000x_hw_rx_enabled(uint32_t *mac); diff --git a/hw/net/e1000x_regs.h b/hw/net/e1000x_regs.h index 6d3c4c6d3a..13760c66d3 100644 --- a/hw/net/e1000x_regs.h +++ b/hw/net/e1000x_regs.h @@ -290,18 +290,18 @@ #define E1000_RETA_IDX(hash) ((hash) & (BIT(7) - 1)) #define E1000_RETA_VAL(reta, hash) (((uint8_t *)(reta))[E1000_RETA_IDX(hash)]) -#define E1000_MRQC_EN_TCPIPV4(mrqc) ((mrqc) & BIT(16)) -#define E1000_MRQC_EN_IPV4(mrqc) ((mrqc) & BIT(17)) -#define E1000_MRQC_EN_TCPIPV6(mrqc) ((mrqc) & BIT(18)) -#define E1000_MRQC_EN_IPV6EX(mrqc) ((mrqc) & BIT(19)) -#define E1000_MRQC_EN_IPV6(mrqc) ((mrqc) & BIT(20)) - -#define E1000_MRQ_RSS_TYPE_NONE (0) -#define E1000_MRQ_RSS_TYPE_IPV4TCP (1) -#define E1000_MRQ_RSS_TYPE_IPV4 (2) -#define E1000_MRQ_RSS_TYPE_IPV6TCP (3) -#define E1000_MRQ_RSS_TYPE_IPV6EX (4) -#define E1000_MRQ_RSS_TYPE_IPV6 (5) +#define E1000_MRQC_EN_TCPIPV4(mrqc) ((mrqc) & BIT(16)) +#define E1000_MRQC_EN_IPV4(mrqc) ((mrqc) & BIT(17)) +#define E1000_MRQC_EN_TCPIPV6EX(mrqc) ((mrqc) & BIT(18)) +#define E1000_MRQC_EN_IPV6EX(mrqc) ((mrqc) & BIT(19)) +#define E1000_MRQC_EN_IPV6(mrqc) ((mrqc) & BIT(20)) + +#define E1000_MRQ_RSS_TYPE_NONE (0) +#define E1000_MRQ_RSS_TYPE_IPV4TCP (1) +#define E1000_MRQ_RSS_TYPE_IPV4 (2) +#define E1000_MRQ_RSS_TYPE_IPV6TCPEX (3) +#define E1000_MRQ_RSS_TYPE_IPV6EX (4) +#define E1000_MRQ_RSS_TYPE_IPV6 (5) #define E1000_ICR_ASSERTED BIT(31) #define E1000_EIAC_MASK 0x01F00000 diff --git a/hw/net/igb.c b/hw/net/igb.c index 51a7e9133e..1c989d7677 100644 --- a/hw/net/igb.c +++ b/hw/net/igb.c @@ -433,16 +433,16 @@ static void igb_pci_realize(PCIDevice *pci_dev, Error **errp) pcie_ari_init(pci_dev, 0x150, 1); - pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, "igbvf", + pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, TYPE_IGBVF, IGB_82576_VF_DEV_ID, IGB_MAX_VF_FUNCTIONS, IGB_MAX_VF_FUNCTIONS, IGB_VF_OFFSET, IGB_VF_STRIDE); - pcie_sriov_pf_init_vf_bar(pci_dev, 0, + pcie_sriov_pf_init_vf_bar(pci_dev, IGBVF_MMIO_BAR_IDX, PCI_BASE_ADDRESS_MEM_TYPE_64 | PCI_BASE_ADDRESS_MEM_PREFETCH, - 16 * KiB); - pcie_sriov_pf_init_vf_bar(pci_dev, 3, + IGBVF_MMIO_SIZE); + pcie_sriov_pf_init_vf_bar(pci_dev, IGBVF_MSIX_BAR_IDX, PCI_BASE_ADDRESS_MEM_TYPE_64 | PCI_BASE_ADDRESS_MEM_PREFETCH, - 16 * KiB); + IGBVF_MSIX_SIZE); igb_init_net_peer(s, pci_dev, 
macaddr); diff --git a/hw/net/igb_common.h b/hw/net/igb_common.h index 69ac490f75..5c261ba9d3 100644 --- a/hw/net/igb_common.h +++ b/hw/net/igb_common.h @@ -28,6 +28,14 @@ #include "igb_regs.h" +#define TYPE_IGBVF "igbvf" + +#define IGBVF_MMIO_BAR_IDX (0) +#define IGBVF_MSIX_BAR_IDX (3) + +#define IGBVF_MMIO_SIZE (16 * 1024) +#define IGBVF_MSIX_SIZE (16 * 1024) + #define defreg(x) x = (E1000_##x >> 2) #define defreg_indexed(x, i) x##i = (E1000_##x(i) >> 2) #define defreg_indexeda(x, i) x##i##_A = (E1000_##x##_A(i) >> 2) @@ -43,7 +51,7 @@ defreg_indexeda(x, 0), defreg_indexeda(x, 1), \ defreg_indexeda(x, 2), defreg_indexeda(x, 3) -#define defregv(x) defreg_indexed(x, 0), defreg_indexed(x, 1), \ +#define defreg8(x) defreg_indexed(x, 0), defreg_indexed(x, 1), \ defreg_indexed(x, 2), defreg_indexed(x, 3), \ defreg_indexed(x, 4), defreg_indexed(x, 5), \ defreg_indexed(x, 6), defreg_indexed(x, 7) @@ -114,6 +122,8 @@ enum { defreg(EICS), defreg(EIMS), defreg(EIMC), defreg(EIAM), defreg(EICR), defreg(IVAR_MISC), defreg(GPIE), + defreg(TSYNCRXCFG), defreg8(ETQF), + defreg(RXPBS), defregd(RDBAL), defregd(RDBAH), defregd(RDLEN), defregd(SRRCTL), defregd(RDH), defregd(RDT), defregd(RXDCTL), defregd(RXCTL), defregd(RQDPC), defreg(RA2), @@ -125,15 +135,15 @@ enum { defreg(VT_CTL), - defregv(P2VMAILBOX), defregv(V2PMAILBOX), defreg(MBVFICR), defreg(MBVFIMR), + defreg8(P2VMAILBOX), defreg8(V2PMAILBOX), defreg(MBVFICR), defreg(MBVFIMR), defreg(VFLRE), defreg(VFRE), defreg(VFTE), defreg(WVBR), defreg(QDE), defreg(DTXSWC), defreg_indexed(VLVF, 0), - defregv(VMOLR), defreg(RPLOLR), defregv(VMBMEM), defregv(VMVIR), + defreg8(VMOLR), defreg(RPLOLR), defreg8(VMBMEM), defreg8(VMVIR), - defregv(PVTCTRL), defregv(PVTEICS), defregv(PVTEIMS), defregv(PVTEIMC), - defregv(PVTEIAC), defregv(PVTEIAM), defregv(PVTEICR), defregv(PVFGPRC), - defregv(PVFGPTC), defregv(PVFGORC), defregv(PVFGOTC), defregv(PVFMPRC), - defregv(PVFGPRLBC), defregv(PVFGPTLBC), defregv(PVFGORLBC), defregv(PVFGOTLBC), + defreg8(PVTCTRL), defreg8(PVTEICS), defreg8(PVTEIMS), defreg8(PVTEIMC), + defreg8(PVTEIAC), defreg8(PVTEIAM), defreg8(PVTEICR), defreg8(PVFGPRC), + defreg8(PVFGPTC), defreg8(PVFGORC), defreg8(PVFGOTC), defreg8(PVFMPRC), + defreg8(PVFGPRLBC), defreg8(PVFGPTLBC), defreg8(PVFGORLBC), defreg8(PVFGOTLBC), defreg(MTA_A), diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c index d733fed6cf..d00b1caa6a 100644 --- a/hw/net/igb_core.c +++ b/hw/net/igb_core.c @@ -67,14 +67,34 @@ typedef struct IGBTxPktVmdqCallbackContext { NetClientState *nc; } IGBTxPktVmdqCallbackContext; +typedef struct L2Header { + struct eth_header eth; + struct vlan_header vlan[2]; +} L2Header; + +typedef struct PTP2 { + uint8_t message_id_transport_specific; + uint8_t version_ptp; + uint16_t message_length; + uint8_t subdomain_number; + uint8_t reserved0; + uint16_t flags; + uint64_t correction; + uint8_t reserved1[5]; + uint8_t source_communication_technology; + uint32_t source_uuid_lo; + uint16_t source_uuid_hi; + uint16_t source_port_id; + uint16_t sequence_id; + uint8_t control; + uint8_t log_message_period; +} PTP2; + static ssize_t igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt, bool has_vnet, bool *external_tx); -static inline void -igb_set_interrupt_cause(IGBCore *core, uint32_t val); - -static void igb_update_interrupt_state(IGBCore *core); +static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes); static void igb_reset(IGBCore *core, bool sw); static inline void @@ -92,23 +112,31 @@ igb_lower_legacy_irq(IGBCore 
*core) pci_set_irq(core->owner, 0); } -static void igb_msix_notify(IGBCore *core, unsigned int vector) +static void igb_msix_notify(IGBCore *core, unsigned int cause) { PCIDevice *dev = core->owner; uint16_t vfn; + uint32_t effective_eiac; + unsigned int vector; - vfn = 8 - (vector + 2) / IGBVF_MSIX_VEC_NUM; + vfn = 8 - (cause + 2) / IGBVF_MSIX_VEC_NUM; if (vfn < pcie_sriov_num_vfs(core->owner)) { dev = pcie_sriov_get_vf_at_index(core->owner, vfn); assert(dev); - vector = (vector + 2) % IGBVF_MSIX_VEC_NUM; - } else if (vector >= IGB_MSIX_VEC_NUM) { + vector = (cause + 2) % IGBVF_MSIX_VEC_NUM; + } else if (cause >= IGB_MSIX_VEC_NUM) { qemu_log_mask(LOG_GUEST_ERROR, "igb: Tried to use vector unavailable for PF"); return; + } else { + vector = cause; } msix_notify(dev, vector); + + trace_e1000e_irq_icr_clear_eiac(core->mac[EICR], core->mac[EIAC]); + effective_eiac = core->mac[EIAC] & BIT(cause); + core->mac[EICR] &= ~effective_eiac; } static inline void @@ -274,6 +302,11 @@ igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt) return E1000_MRQ_RSS_TYPE_IPV4TCP; } + if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP && + (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV4_UDP)) { + return E1000_MRQ_RSS_TYPE_IPV4UDP; + } + if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) { return E1000_MRQ_RSS_TYPE_IPV4; } @@ -296,7 +329,7 @@ igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt) ip6info->rss_ex_dst_valid, ip6info->rss_ex_src_valid, core->mac[MRQC], - E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]), + E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]), E1000_MRQC_EN_IPV6EX(core->mac[MRQC]), E1000_MRQC_EN_IPV6(core->mac[MRQC])); @@ -305,8 +338,13 @@ igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt) ip6info->rss_ex_src_valid))) { if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && - E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) { - return E1000_MRQ_RSS_TYPE_IPV6TCP; + E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) { + return E1000_MRQ_RSS_TYPE_IPV6TCPEX; + } + + if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP && + (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV6_UDP)) { + return E1000_MRQ_RSS_TYPE_IPV6UDP; } if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) { @@ -338,7 +376,7 @@ igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info) case E1000_MRQ_RSS_TYPE_IPV4TCP: type = NetPktRssIpV4Tcp; break; - case E1000_MRQ_RSS_TYPE_IPV6TCP: + case E1000_MRQ_RSS_TYPE_IPV6TCPEX: type = NetPktRssIpV6TcpEx; break; case E1000_MRQ_RSS_TYPE_IPV6: @@ -347,6 +385,12 @@ igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info) case E1000_MRQ_RSS_TYPE_IPV6EX: type = NetPktRssIpV6Ex; break; + case E1000_MRQ_RSS_TYPE_IPV4UDP: + type = NetPktRssIpV4Udp; + break; + case E1000_MRQ_RSS_TYPE_IPV6UDP: + type = NetPktRssIpV6Udp; + break; default: assert(false); return 0; @@ -402,7 +446,7 @@ igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx, } } - if (insert_vlan && e1000x_vlan_enabled(core->mac)) { + if (insert_vlan) { net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan, core->mac[VET] & 0xffff); } @@ -411,9 +455,10 @@ igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx, static bool igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx) { + uint32_t idx = (tx->first_olinfo_status >> 4) & 1; + if (tx->first_cmd_type_len & E1000_ADVTXD_DCMD_TSE) { - uint32_t idx = (tx->first_olinfo_status >> 4) & 1; - uint32_t mss = tx->ctx[idx].mss_l4len_idx >> 16; + uint32_t mss = tx->ctx[idx].mss_l4len_idx >> E1000_ADVTXD_MSS_SHIFT; if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, mss)) { return false; } @@ -423,10 +468,11 @@ 
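The igb_msix_notify() change above renames its parameter from vector to cause, derives the target function and vector from it, and clears the EIAC-selected EICR bit right after delivery. A standalone sketch of the cause-to-(function, vector) mapping; the per-VF and PF vector counts (3 and 10) are assumptions taken from the igb model's headers, not from this hunk:

#include <stdio.h>

#define VF_MSIX_VEC_NUM 3     /* assumed: MSI-X vectors per VF    */
#define PF_MSIX_VEC_NUM 10    /* assumed: MSI-X vectors on the PF */

int main(void)
{
    unsigned num_vfs = 2;     /* pretend two VFs are enabled */

    for (unsigned cause = 0; cause < 25; cause++) {
        unsigned vfn = 8 - (cause + 2) / VF_MSIX_VEC_NUM;

        if (vfn < num_vfs) {
            printf("cause %2u -> VF%u vector %u\n",
                   cause, vfn, (cause + 2) % VF_MSIX_VEC_NUM);
        } else if (cause < PF_MSIX_VEC_NUM) {
            printf("cause %2u -> PF vector %u\n", cause, cause);
        } else {
            printf("cause %2u -> unavailable for the PF\n", cause);
        }
    }
    return 0;
}

With all eight VFs enabled, only cause 0 remains for the PF and the topmost cause bits map to VF0; with fewer VFs enabled, more causes fall back to the PF or are reported as unavailable.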
igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx) return true; } - if (tx->first_olinfo_status & E1000_ADVTXD_POTS_TXSM) { - if (!net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0)) { - return false; - } + if ((tx->first_olinfo_status & E1000_ADVTXD_POTS_TXSM) && + !((tx->ctx[idx].type_tucmd_mlhl & E1000_ADVTXD_TUCMD_L4T_SCTP) ? + net_tx_pkt_update_sctp_checksum(tx->tx_pkt) : + net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0))) { + return false; } if (tx->first_olinfo_status & E1000_ADVTXD_POTS_IXSM) { @@ -538,9 +584,8 @@ igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt, int qn) g_assert_not_reached(); } - core->mac[GPTC] = core->mac[TPT]; - core->mac[GOTCL] = core->mac[TOTL]; - core->mac[GOTCH] = core->mac[TOTH]; + e1000x_inc_reg_if_not_full(core->mac, GPTC); + e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len); if (core->mac[MRQC] & 1) { uint16_t pool = qn % IGB_NUM_VM_POOLS; @@ -598,7 +643,8 @@ igb_process_tx_desc(IGBCore *core, length = cmd_type_len & 0xFFFF; if (!tx->skip_cp) { - if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, buffer_addr, length)) { + if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, dev, + buffer_addr, length)) { tx->skip_cp = true; } } @@ -607,8 +653,15 @@ igb_process_tx_desc(IGBCore *core, if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) { idx = (tx->first_olinfo_status >> 4) & 1; igb_tx_insert_vlan(core, queue_index, tx, - tx->ctx[idx].vlan_macip_lens >> 16, - !!(cmd_type_len & E1000_TXD_CMD_VLE)); + tx->ctx[idx].vlan_macip_lens >> IGB_TX_FLAGS_VLAN_SHIFT, + !!(tx->first_cmd_type_len & E1000_TXD_CMD_VLE)); + + if ((tx->first_cmd_type_len & E1000_ADVTXD_MAC_TSTAMP) && + (core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_ENABLED) && + !(core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_VALID)) { + core->mac[TSYNCTXCTL] |= E1000_TSYNCTXCTL_VALID; + e1000x_timestamp(core->mac, core->timadj, TXSTMPL, TXSTMPH); + } if (igb_tx_pkt_send(core, tx, queue_index)) { igb_on_tx_done_update_stats(core, tx->tx_pkt, queue_index); @@ -617,7 +670,7 @@ igb_process_tx_desc(IGBCore *core, tx->first = true; tx->skip_cp = false; - net_tx_pkt_reset(tx->tx_pkt, dev); + net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, dev); } } @@ -843,8 +896,6 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr) d = core->owner; } - net_tx_pkt_reset(txr->tx->tx_pkt, d); - while (!igb_ring_empty(core, txi)) { base = igb_ring_head_descr(core, txi); @@ -859,9 +910,11 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr) } if (eic) { - core->mac[EICR] |= eic; - igb_set_interrupt_cause(core, E1000_ICR_TXDW); + igb_raise_interrupts(core, EICR, eic); + igb_raise_interrupts(core, ICR, E1000_ICR_TXDW); } + + net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, d); } static uint32_t @@ -949,49 +1002,83 @@ igb_rx_l4_cso_enabled(IGBCore *core) return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD); } -static bool -igb_rx_is_oversized(IGBCore *core, uint16_t qn, size_t size) +static bool igb_rx_is_oversized(IGBCore *core, const struct eth_header *ehdr, + size_t size, size_t vlan_num, + bool lpe, uint16_t rlpml) { - uint16_t pool = qn % IGB_NUM_VM_POOLS; - bool lpe = !!(core->mac[VMOLR0 + pool] & E1000_VMOLR_LPE); - int max_ethernet_lpe_size = - core->mac[VMOLR0 + pool] & E1000_VMOLR_RLPML_MASK; - int max_ethernet_vlan_size = 1522; - - return size > (lpe ? max_ethernet_lpe_size : max_ethernet_vlan_size); + size_t vlan_header_size = sizeof(struct vlan_header) * vlan_num; + size_t header_size = sizeof(struct eth_header) + vlan_header_size; + return lpe ? 
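The descriptor-path addition above latches a transmit timestamp at most one at a time: a packet requests it via the MAC_TSTAMP bit, and the value is captured only while TSYNCTXCTL is enabled and no earlier timestamp is still pending, with VALID acting as the busy flag. A simplified standalone model of that one-shot latch (the struct and field names are placeholders for the register file):

#include <stdbool.h>
#include <stdint.h>

#define TSYNCTXCTL_VALID   0x00000001u   /* a timestamp is waiting */
#define TSYNCTXCTL_ENABLED 0x00000010u   /* TX timestamping is on  */

struct tsync {
    uint32_t ctl;
    uint64_t stamp;
};

/* Returns true when a new timestamp was latched for this packet. */
static bool maybe_latch_tx_timestamp(struct tsync *t, bool pkt_wants_stamp,
                                     uint64_t now)
{
    if (!pkt_wants_stamp ||
        !(t->ctl & TSYNCTXCTL_ENABLED) ||
        (t->ctl & TSYNCTXCTL_VALID)) {
        return false;            /* disabled, or previous stamp unread */
    }
    t->ctl |= TSYNCTXCTL_VALID;
    t->stamp = now;              /* stands in for TXSTMPL/TXSTMPH */
    return true;
}

Software is expected to clear VALID by reading the timestamp registers before the next timestamped packet can latch a value; that read side is outside this hunk and this sketch.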
size + ETH_FCS_LEN > rlpml : size > header_size + ETH_MTU; } -static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, - size_t size, E1000E_RSSInfo *rss_info, - bool *external_tx) +static uint16_t igb_receive_assign(IGBCore *core, const struct iovec *iov, + size_t iovcnt, size_t iov_ofs, + const L2Header *l2_header, size_t size, + E1000E_RSSInfo *rss_info, + uint16_t *etqf, bool *ts, bool *external_tx) { static const int ta_shift[] = { 4, 3, 2, 0 }; + const struct eth_header *ehdr = &l2_header->eth; uint32_t f, ra[2], *macp, rctl = core->mac[RCTL]; uint16_t queues = 0; uint16_t oversized = 0; - uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(ehdr)->h_tci) & VLAN_VID_MASK; - bool accepted = false; + size_t vlan_num = 0; + PTP2 ptp2; + bool lpe; + uint16_t rlpml; int i; memset(rss_info, 0, sizeof(E1000E_RSSInfo)); + *ts = false; if (external_tx) { *external_tx = true; } - if (e1000x_is_vlan_packet(ehdr, core->mac[VET] & 0xffff) && - e1000x_vlan_rx_filter_enabled(core->mac)) { - uint32_t vfta = - ldl_le_p((uint32_t *)(core->mac + VFTA) + - ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK)); - if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) { - trace_e1000e_rx_flt_vlan_mismatch(vid); - return queues; - } else { - trace_e1000e_rx_flt_vlan_match(vid); + if (core->mac[CTRL_EXT] & BIT(26)) { + if (be16_to_cpu(ehdr->h_proto) == core->mac[VET] >> 16 && + be16_to_cpu(l2_header->vlan[0].h_proto) == (core->mac[VET] & 0xffff)) { + vlan_num = 2; + } + } else { + if (be16_to_cpu(ehdr->h_proto) == (core->mac[VET] & 0xffff)) { + vlan_num = 1; } } + lpe = !!(core->mac[RCTL] & E1000_RCTL_LPE); + rlpml = core->mac[RLPML]; + if (!(core->mac[RCTL] & E1000_RCTL_SBP) && + igb_rx_is_oversized(core, ehdr, size, vlan_num, lpe, rlpml)) { + trace_e1000x_rx_oversized(size); + return queues; + } + + for (*etqf = 0; *etqf < 8; (*etqf)++) { + if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_FILTER_ENABLE) && + be16_to_cpu(ehdr->h_proto) == (core->mac[ETQF0 + *etqf] & E1000_ETQF_ETYPE_MASK)) { + if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_1588) && + (core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_ENABLED) && + !(core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_VALID) && + iov_to_buf(iov, iovcnt, iov_ofs + ETH_HLEN, &ptp2, sizeof(ptp2)) >= sizeof(ptp2) && + (ptp2.version_ptp & 15) == 2 && + ptp2.message_id_transport_specific == ((core->mac[TSYNCRXCFG] >> 8) & 255)) { + e1000x_timestamp(core->mac, core->timadj, RXSTMPL, RXSTMPH); + *ts = true; + core->mac[TSYNCRXCTL] |= E1000_TSYNCRXCTL_VALID; + core->mac[RXSATRL] = le32_to_cpu(ptp2.source_uuid_lo); + core->mac[RXSATRH] = le16_to_cpu(ptp2.source_uuid_hi) | + (le16_to_cpu(ptp2.sequence_id) << 16); + } + break; + } + } + + if (vlan_num && + !e1000x_rx_vlan_filter(core->mac, l2_header->vlan + vlan_num - 1)) { + return queues; + } + if (core->mac[MRQC] & 1) { if (is_broadcast_ether_addr(ehdr->h_dest)) { for (i = 0; i < IGB_NUM_VM_POOLS; i++) { @@ -1042,7 +1129,9 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, if (e1000x_vlan_rx_filter_enabled(core->mac)) { uint16_t mask = 0; - if (e1000x_is_vlan_packet(ehdr, core->mac[VET] & 0xffff)) { + if (vlan_num) { + uint16_t vid = be16_to_cpu(l2_header->vlan[vlan_num - 1].h_tci) & VLAN_VID_MASK; + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { if ((core->mac[VLVF0 + i] & E1000_VLVF_VLANID_MASK) == vid && (core->mac[VLVF0 + i] & E1000_VLVF_VLANID_ENABLE)) { @@ -1070,7 +1159,11 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, queues &= 
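The ETQF/1588 branch above peeks at the bytes right behind the Ethernet header before latching an RX timestamp: the first byte (messageId in the low nibble, transportSpecific in the high nibble) is compared as a whole against TSYNCRXCFG bits 15:8, and the low nibble of the second byte must read PTP version 2. A small standalone check of that condition; want_msg stands in for the TSYNCRXCFG field, and the full PTP2 struct read in the hunk is only needed afterwards to fill RXSATRL/RXSATRH:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* l3 points ETH_HLEN bytes into the frame, as in the hunk above. */
static bool is_wanted_ptp2(const uint8_t *l3, size_t len, uint8_t want_msg)
{
    if (len < 2) {
        return false;
    }
    return (l3[1] & 0x0f) == 2 &&   /* versionPTP == 2                     */
           l3[0] == want_msg;       /* messageId + transportSpecific match */
}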
core->mac[VFRE]; if (queues) { for (i = 0; i < IGB_NUM_VM_POOLS; i++) { - if ((queues & BIT(i)) && igb_rx_is_oversized(core, i, size)) { + lpe = !!(core->mac[VMOLR0 + i] & E1000_VMOLR_LPE); + rlpml = core->mac[VMOLR0 + i] & E1000_VMOLR_RLPML_MASK; + if ((queues & BIT(i)) && + igb_rx_is_oversized(core, ehdr, size, vlan_num, + lpe, rlpml)) { oversized |= BIT(i); } } @@ -1097,33 +1190,7 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, } } } else { - switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { - case ETH_PKT_UCAST: - if (rctl & E1000_RCTL_UPE) { - accepted = true; /* promiscuous ucast */ - } - break; - - case ETH_PKT_BCAST: - if (rctl & E1000_RCTL_BAM) { - accepted = true; /* broadcast enabled */ - } - break; - - case ETH_PKT_MCAST: - if (rctl & E1000_RCTL_MPE) { - accepted = true; /* promiscuous mcast */ - } - break; - - default: - g_assert_not_reached(); - } - - if (!accepted) { - accepted = e1000x_rx_group_filter(core->mac, ehdr->h_dest); - } - + bool accepted = e1000x_rx_group_filter(core->mac, ehdr); if (!accepted) { for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) { if (!(macp[1] & E1000_RAH_AV)) { @@ -1217,7 +1284,7 @@ static void igb_build_rx_metadata(IGBCore *core, struct NetRxPkt *pkt, bool is_eop, - const E1000E_RSSInfo *rss_info, + const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts, uint16_t *pkt_info, uint16_t *hdr_info, uint32_t *rss, uint32_t *status_flags, @@ -1225,9 +1292,8 @@ igb_build_rx_metadata(IGBCore *core, uint16_t *vlan_tag) { struct virtio_net_hdr *vhdr; - bool hasip4, hasip6; + bool hasip4, hasip6, csum_valid; EthL4HdrProto l4hdr_proto; - uint32_t pkt_type; *status_flags = E1000_RXD_STAT_DD; @@ -1266,34 +1332,47 @@ igb_build_rx_metadata(IGBCore *core, trace_e1000e_rx_metadata_ack(); } - if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) { - trace_e1000e_rx_metadata_ipv6_filtering_disabled(); - pkt_type = E1000_RXD_PKT_MAC; - } else if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP || - l4hdr_proto == ETH_L4_HDR_PROTO_UDP) { - pkt_type = hasip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP; - } else if (hasip4 || hasip6) { - pkt_type = hasip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6; - } else { - pkt_type = E1000_RXD_PKT_MAC; - } + if (pkt_info) { + *pkt_info = rss_info->enabled ? 
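The oversize check is now shared between the default path (RCTL.LPE with the global RLPML register) and the per-pool path above (VMOLR.LPE with the pool's RLPML field), and it accounts for however many VLAN tags were actually detected instead of hard-coding 1522. A standalone sketch of the size test; the Ethernet constants are the usual values and are assumptions of this sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ETH_HDR_LEN  14
#define VLAN_HDR_LEN 4
#define ETH_FCS_LEN  4
#define ETH_MTU      1500

static bool rx_is_oversized(size_t size, size_t vlan_num,
                            bool lpe, uint16_t rlpml)
{
    size_t header_size = ETH_HDR_LEN + vlan_num * VLAN_HDR_LEN;

    /* LPE: compare frame + FCS against the programmed long-packet limit.
     * Otherwise: allow the MTU plus whatever L2 headers are present.    */
    return lpe ? size + ETH_FCS_LEN > rlpml
               : size > header_size + ETH_MTU;
}

With a single tag the non-LPE limit works out to 14 + 4 + 1500 = 1518 bytes before FCS; a double-tagged (QinQ) frame gets four bytes more.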
rss_info->type : 0; - trace_e1000e_rx_metadata_pkt_type(pkt_type); + if (etqf < 8) { + *pkt_info |= (BIT(11) | etqf) << 4; + } else { + if (hasip4) { + *pkt_info |= E1000_ADVRXD_PKT_IP4; + } - if (pkt_info) { - if (rss_info->enabled) { - *pkt_info = rss_info->type; - } + if (hasip6) { + *pkt_info |= E1000_ADVRXD_PKT_IP6; + } - *pkt_info |= (pkt_type << 4); - } else { - *status_flags |= E1000_RXD_PKT_TYPE(pkt_type); + switch (l4hdr_proto) { + case ETH_L4_HDR_PROTO_TCP: + *pkt_info |= E1000_ADVRXD_PKT_TCP; + break; + + case ETH_L4_HDR_PROTO_UDP: + *pkt_info |= E1000_ADVRXD_PKT_UDP; + break; + + case ETH_L4_HDR_PROTO_SCTP: + *pkt_info |= E1000_ADVRXD_PKT_SCTP; + break; + + default: + break; + } + } } if (hdr_info) { *hdr_info = 0; } + if (ts) { + *status_flags |= BIT(16); + } + /* RX CSO information */ if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) { trace_e1000e_rx_metadata_ipv6_sum_disabled(); @@ -1317,6 +1396,15 @@ igb_build_rx_metadata(IGBCore *core, if (igb_rx_l4_cso_enabled(core)) { switch (l4hdr_proto) { + case ETH_L4_HDR_PROTO_SCTP: + if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) { + trace_e1000e_rx_metadata_l4_csum_validation_failed(); + goto func_exit; + } + if (!csum_valid) { + *status_flags |= E1000_RXDEXT_STATERR_TCPE; + } + /* fall through */ case ETH_L4_HDR_PROTO_TCP: *status_flags |= E1000_RXD_STAT_TCPCS; break; @@ -1326,22 +1414,21 @@ igb_build_rx_metadata(IGBCore *core, break; default: - goto func_exit; + break; } } else { trace_e1000e_rx_metadata_l4_cso_disabled(); } - trace_e1000e_rx_metadata_status_flags(*status_flags); - func_exit: + trace_e1000e_rx_metadata_status_flags(*status_flags); *status_flags = cpu_to_le32(*status_flags); } static inline void igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc, struct NetRxPkt *pkt, - const E1000E_RSSInfo *rss_info, + const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts, uint16_t length) { uint32_t status_flags, rss; @@ -1352,7 +1439,7 @@ igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc, desc->csum = 0; igb_build_rx_metadata(core, pkt, pkt != NULL, - rss_info, + rss_info, etqf, ts, NULL, NULL, &rss, &status_flags, &ip_id, &desc->special); @@ -1363,7 +1450,7 @@ igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc, static inline void igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc, struct NetRxPkt *pkt, - const E1000E_RSSInfo *rss_info, + const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts, uint16_t length) { memset(&desc->wb, 0, sizeof(desc->wb)); @@ -1371,7 +1458,7 @@ igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc, desc->wb.upper.length = cpu_to_le16(length); igb_build_rx_metadata(core, pkt, pkt != NULL, - rss_info, + rss_info, etqf, ts, &desc->wb.lower.lo_dword.pkt_info, &desc->wb.lower.lo_dword.hdr_info, &desc->wb.lower.hi_dword.rss, @@ -1382,12 +1469,15 @@ igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc, static inline void igb_write_rx_descr(IGBCore *core, union e1000_rx_desc_union *desc, -struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, uint16_t length) + struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, + uint16_t etqf, bool ts, uint16_t length) { if (igb_rx_use_legacy_descriptor(core)) { - igb_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info, length); + igb_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info, + etqf, ts, length); } else { - igb_write_adv_rx_descr(core, &desc->adv, pkt, rss_info, length); + igb_write_adv_rx_descr(core, &desc->adv, pkt, rss_info, + etqf, ts, 
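In the advanced descriptor write-back above, the low nibble of pkt_info carries the RSS type and the bits above it encode either a matched EtherType filter (its index plus a match flag) or the detected protocol layers. A standalone sketch of that packing; the bit positions follow the E1000_ADVRXD_PKT_* values introduced later in this diff:

#include <stdbool.h>
#include <stdint.h>

#define PKT_IP4  (1u << 4)
#define PKT_IP6  (1u << 6)
#define PKT_TCP  (1u << 8)
#define PKT_UDP  (1u << 9)
#define PKT_SCTP (1u << 10)

static uint16_t build_pkt_info(uint8_t rss_type, uint16_t etqf,
                               bool ip4, bool ip6,
                               bool tcp, bool udp, bool sctp)
{
    uint16_t pkt_info = rss_type;          /* low nibble: RSS type */

    if (etqf < 8) {
        /* An EtherType filter matched: report its index instead. */
        pkt_info |= ((1u << 11) | etqf) << 4;
    } else {
        if (ip4)  pkt_info |= PKT_IP4;
        if (ip6)  pkt_info |= PKT_IP6;
        if (tcp)  pkt_info |= PKT_TCP;
        if (udp)  pkt_info |= PKT_UDP;
        if (sctp) pkt_info |= PKT_SCTP;
    }
    return pkt_info;
}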
length); } } @@ -1438,29 +1528,17 @@ igb_write_to_rx_buffers(IGBCore *core, static void igb_update_rx_stats(IGBCore *core, const E1000E_RingInfo *rxi, - size_t data_size, size_t data_fcs_size) + size_t pkt_size, size_t pkt_fcs_size) { - e1000x_update_rx_total_stats(core->mac, data_size, data_fcs_size); - - switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { - case ETH_PKT_BCAST: - e1000x_inc_reg_if_not_full(core->mac, BPRC); - break; - - case ETH_PKT_MCAST: - e1000x_inc_reg_if_not_full(core->mac, MPRC); - break; - - default: - break; - } + eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt); + e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size); if (core->mac[MRQC] & 1) { uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS; - core->mac[PVFGORC0 + (pool * 64)] += data_size + 4; + core->mac[PVFGORC0 + (pool * 64)] += pkt_size + 4; core->mac[PVFGPRC0 + (pool * 64)]++; - if (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) { + if (pkt_type == ETH_PKT_MCAST) { core->mac[PVFMPRC0 + (pool * 64)]++; } } @@ -1476,7 +1554,8 @@ igb_rx_descr_threshold_hit(IGBCore *core, const E1000E_RingInfo *rxi) static void igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt, const E1000E_RxRing *rxr, - const E1000E_RSSInfo *rss_info) + const E1000E_RSSInfo *rss_info, + uint16_t etqf, bool ts) { PCIDevice *d; dma_addr_t base; @@ -1558,7 +1637,7 @@ igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt, } igb_write_rx_descr(core, &desc, is_last ? core->rx_pkt : NULL, - rss_info, written); + rss_info, etqf, ts, written); igb_pci_dma_write_rx_desc(core, d, base, &desc, core->rx_desc_len); igb_ring_advance(core, rxi, core->rx_desc_len / E1000_MIN_RX_DESC_LEN); @@ -1602,19 +1681,22 @@ static ssize_t igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt, bool has_vnet, bool *external_tx) { - static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4); - uint16_t queues = 0; - uint32_t n = 0; - uint8_t min_buf[ETH_ZLEN]; + uint32_t causes = 0; + uint32_t ecauses = 0; + union { + L2Header l2_header; + uint8_t octets[ETH_ZLEN]; + } buf; struct iovec min_iov; - struct eth_header *ehdr; - uint8_t *filter_buf; size_t size, orig_size; size_t iov_ofs = 0; E1000E_RxRing rxr; E1000E_RSSInfo rss_info; + uint16_t etqf; + bool ts; size_t total_size; + int strip_vlan_index; int i; trace_e1000e_rx_receive_iov(iovcnt); @@ -1635,36 +1717,30 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt, net_rx_pkt_unset_vhdr(core->rx_pkt); } - filter_buf = iov->iov_base + iov_ofs; orig_size = iov_size(iov, iovcnt); size = orig_size - iov_ofs; /* Pad to minimum Ethernet frame length */ - if (size < sizeof(min_buf)) { - iov_to_buf(iov, iovcnt, iov_ofs, min_buf, size); - memset(&min_buf[size], 0, sizeof(min_buf) - size); + if (size < sizeof(buf)) { + iov_to_buf(iov, iovcnt, iov_ofs, &buf, size); + memset(&buf.octets[size], 0, sizeof(buf) - size); e1000x_inc_reg_if_not_full(core->mac, RUC); - min_iov.iov_base = filter_buf = min_buf; - min_iov.iov_len = size = sizeof(min_buf); + min_iov.iov_base = &buf; + min_iov.iov_len = size = sizeof(buf); iovcnt = 1; iov = &min_iov; iov_ofs = 0; - } else if (iov->iov_len < maximum_ethernet_hdr_len) { - /* This is very unlikely, but may happen. */ - iov_to_buf(iov, iovcnt, iov_ofs, min_buf, maximum_ethernet_hdr_len); - filter_buf = min_buf; - } - - /* Discard oversized packets if !LPE and !SBP. 
*/ - if (e1000x_is_oversized(core->mac, size)) { - return orig_size; + } else { + iov_to_buf(iov, iovcnt, iov_ofs, &buf, sizeof(buf.l2_header)); } - ehdr = PKT_GET_ETH_HDR(filter_buf); - net_rx_pkt_set_packet_type(core->rx_pkt, get_eth_packet_type(ehdr)); - net_rx_pkt_set_protocols(core->rx_pkt, filter_buf, size); + net_rx_pkt_set_packet_type(core->rx_pkt, + get_eth_packet_type(&buf.l2_header.eth)); + net_rx_pkt_set_protocols(core->rx_pkt, iov, iovcnt, iov_ofs); - queues = igb_receive_assign(core, ehdr, size, &rss_info, external_tx); + queues = igb_receive_assign(core, iov, iovcnt, iov_ofs, + &buf.l2_header, size, + &rss_info, &etqf, &ts, external_tx); if (!queues) { trace_e1000e_rx_flt_dropped(); return orig_size; @@ -1678,36 +1754,46 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt, igb_rx_ring_init(core, &rxr, i); + if (!igb_rx_strip_vlan(core, rxr.i)) { + strip_vlan_index = -1; + } else if (core->mac[CTRL_EXT] & BIT(26)) { + strip_vlan_index = 1; + } else { + strip_vlan_index = 0; + } + net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs, - igb_rx_strip_vlan(core, rxr.i), - core->mac[VET] & 0xffff); + strip_vlan_index, + core->mac[VET] & 0xffff, + core->mac[VET] >> 16); total_size = net_rx_pkt_get_total_len(core->rx_pkt) + e1000x_fcs_len(core->mac); if (!igb_has_rxbufs(core, rxr.i, total_size)) { - n |= E1000_ICS_RXO; + causes |= E1000_ICS_RXO; trace_e1000e_rx_not_written_to_guest(rxr.i->idx); continue; } - n |= E1000_ICR_RXDW; + causes |= E1000_ICR_RXDW; igb_rx_fix_l4_csum(core, core->rx_pkt); - igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info); + igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info, etqf, ts); /* Check if receive descriptor minimum threshold hit */ if (igb_rx_descr_threshold_hit(core, rxr.i)) { - n |= E1000_ICS_RXDMT0; + causes |= E1000_ICS_RXDMT0; } - core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx); + ecauses |= igb_rx_wb_eic(core, rxr.i->idx); trace_e1000e_rx_written_to_guest(rxr.i->idx); } - trace_e1000e_rx_interrupt_set(n); - igb_set_interrupt_cause(core, n); + trace_e1000e_rx_interrupt_set(causes); + igb_raise_interrupts(core, EICR, ecauses); + igb_raise_interrupts(core, ICR, causes); return orig_size; } @@ -1767,7 +1853,7 @@ void igb_core_set_link_status(IGBCore *core) } if (core->mac[STATUS] != old_status) { - igb_set_interrupt_cause(core, E1000_ICR_LSC); + igb_raise_interrupts(core, ICR, E1000_ICR_LSC); } } @@ -1847,13 +1933,6 @@ igb_set_rx_control(IGBCore *core, int index, uint32_t val) } } -static inline void -igb_clear_ims_bits(IGBCore *core, uint32_t bits) -{ - trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits); - core->mac[IMS] &= ~bits; -} - static inline bool igb_postpone_interrupt(IGBIntrDelayTimer *timer) { @@ -1876,10 +1955,8 @@ igb_eitr_should_postpone(IGBCore *core, int idx) return igb_postpone_interrupt(&core->eitr[idx]); } -static void igb_send_msix(IGBCore *core) +static void igb_send_msix(IGBCore *core, uint32_t causes) { - uint32_t causes = core->mac[EICR] & core->mac[EIMS]; - uint32_t effective_eiac; int vector; for (vector = 0; vector < IGB_INTR_NUM; ++vector) { @@ -1887,10 +1964,6 @@ static void igb_send_msix(IGBCore *core) trace_e1000e_irq_msix_notify_vec(vector); igb_msix_notify(core, vector); - - trace_e1000e_irq_icr_clear_eiac(core->mac[EICR], core->mac[EIAC]); - effective_eiac = core->mac[EIAC] & BIT(vector); - core->mac[EICR] &= ~effective_eiac; } } } @@ -1906,119 +1979,116 @@ igb_fix_icr_asserted(IGBCore *core) 
trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]); } -static void -igb_update_interrupt_state(IGBCore *core) +static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes) { - uint32_t icr; - uint32_t causes; + uint32_t old_causes = core->mac[ICR] & core->mac[IMS]; + uint32_t old_ecauses = core->mac[EICR] & core->mac[EIMS]; + uint32_t raised_causes; + uint32_t raised_ecauses; uint32_t int_alloc; - icr = core->mac[ICR] & core->mac[IMS]; + trace_e1000e_irq_set(index << 2, + core->mac[index], core->mac[index] | causes); - if (msix_enabled(core->owner)) { - if (icr) { - causes = 0; - if (icr & E1000_ICR_DRSTA) { - int_alloc = core->mac[IVAR_MISC] & 0xff; - if (int_alloc & E1000_IVAR_VALID) { - causes |= BIT(int_alloc & 0x1f); - } + core->mac[index] |= causes; + + if (core->mac[GPIE] & E1000_GPIE_MSIX_MODE) { + raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes; + + if (raised_causes & E1000_ICR_DRSTA) { + int_alloc = core->mac[IVAR_MISC] & 0xff; + if (int_alloc & E1000_IVAR_VALID) { + core->mac[EICR] |= BIT(int_alloc & 0x1f); } - /* Check if other bits (excluding the TCP Timer) are enabled. */ - if (icr & ~E1000_ICR_DRSTA) { - int_alloc = (core->mac[IVAR_MISC] >> 8) & 0xff; - if (int_alloc & E1000_IVAR_VALID) { - causes |= BIT(int_alloc & 0x1f); - } - trace_e1000e_irq_add_msi_other(core->mac[EICR]); + } + /* Check if other bits (excluding the TCP Timer) are enabled. */ + if (raised_causes & ~E1000_ICR_DRSTA) { + int_alloc = (core->mac[IVAR_MISC] >> 8) & 0xff; + if (int_alloc & E1000_IVAR_VALID) { + core->mac[EICR] |= BIT(int_alloc & 0x1f); } - core->mac[EICR] |= causes; } - if ((core->mac[EICR] & core->mac[EIMS])) { - igb_send_msix(core); + raised_ecauses = core->mac[EICR] & core->mac[EIMS] & ~old_ecauses; + if (!raised_ecauses) { + return; } + + igb_send_msix(core, raised_ecauses); } else { igb_fix_icr_asserted(core); - if (icr) { - core->mac[EICR] |= (icr & E1000_ICR_DRSTA) | E1000_EICR_OTHER; - } else { - core->mac[EICR] &= ~E1000_EICR_OTHER; + raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes; + if (!raised_causes) { + return; } - trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], - core->mac[ICR], core->mac[IMS]); + core->mac[EICR] |= (raised_causes & E1000_ICR_DRSTA) | E1000_EICR_OTHER; - if (msi_enabled(core->owner)) { - if (icr) { - msi_notify(core->owner, 0); - } + if (msix_enabled(core->owner)) { + trace_e1000e_irq_msix_notify_vec(0); + msix_notify(core->owner, 0); + } else if (msi_enabled(core->owner)) { + trace_e1000e_irq_msi_notify(raised_causes); + msi_notify(core->owner, 0); } else { - if (icr) { - igb_raise_legacy_irq(core); - } else { - igb_lower_legacy_irq(core); - } + igb_raise_legacy_irq(core); } } } -static void -igb_set_interrupt_cause(IGBCore *core, uint32_t val) +static void igb_lower_interrupts(IGBCore *core, size_t index, uint32_t causes) { - trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]); + trace_e1000e_irq_clear(index << 2, + core->mac[index], core->mac[index] & ~causes); - core->mac[ICR] |= val; + core->mac[index] &= ~causes; - trace_e1000e_irq_set_cause_exit(val, core->mac[ICR]); + trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], + core->mac[ICR], core->mac[IMS]); - igb_update_interrupt_state(core); + if (!(core->mac[ICR] & core->mac[IMS]) && + !(core->mac[GPIE] & E1000_GPIE_MSIX_MODE)) { + core->mac[EICR] &= ~E1000_EICR_OTHER; + + if (!msix_enabled(core->owner) && !msi_enabled(core->owner)) { + igb_lower_legacy_irq(core); + } + } } static void igb_set_eics(IGBCore *core, int 
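The igb_raise_interrupts()/igb_lower_interrupts() pair above replaces the old set-cause-then-recompute flow: the cause register is updated first, and a message is generated only for causes that are newly pending and unmasked, so re-raising an already pending cause, or raising a masked one, sends nothing. A minimal standalone model of that edge detection (one register pair stands in for either ICR/IMS or EICR/EIMS):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct irqs { uint32_t icr, ims; };

/* Returns the causes that should actually trigger a message. */
static uint32_t raise_interrupts(struct irqs *s, uint32_t causes)
{
    uint32_t old = s->icr & s->ims;

    s->icr |= causes;
    return (s->icr & s->ims) & ~old;    /* newly pending and unmasked */
}

int main(void)
{
    struct irqs s = { .icr = 0, .ims = 0x5 };

    printf("%#" PRIx32 "\n", raise_interrupts(&s, 0x1)); /* 0x1: fires       */
    printf("%#" PRIx32 "\n", raise_interrupts(&s, 0x1)); /* 0: still pending */
    printf("%#" PRIx32 "\n", raise_interrupts(&s, 0x2)); /* 0: masked        */
    return 0;
}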
index, uint32_t val) { bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); + uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK; trace_igb_irq_write_eics(val, msix); - - core->mac[EICS] |= - val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK); - - /* - * TODO: Move to igb_update_interrupt_state if EICS is modified in other - * places. - */ - core->mac[EICR] = core->mac[EICS]; - - igb_update_interrupt_state(core); + igb_raise_interrupts(core, EICR, val & mask); } static void igb_set_eims(IGBCore *core, int index, uint32_t val) { bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); + uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK; trace_igb_irq_write_eims(val, msix); - - core->mac[EIMS] |= - val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK); - - igb_update_interrupt_state(core); + igb_raise_interrupts(core, EIMS, val & mask); } static void mailbox_interrupt_to_vf(IGBCore *core, uint16_t vfn) { uint32_t ent = core->mac[VTIVAR_MISC + vfn]; + uint32_t causes; if ((ent & E1000_IVAR_VALID)) { - core->mac[EICR] |= (ent & 0x3) << (22 - vfn * IGBVF_MSIX_VEC_NUM); - igb_update_interrupt_state(core); + causes = (ent & 0x3) << (22 - vfn * IGBVF_MSIX_VEC_NUM); + igb_raise_interrupts(core, EICR, causes); } } static void mailbox_interrupt_to_pf(IGBCore *core) { - igb_set_interrupt_cause(core, E1000_ICR_VMMB); + igb_raise_interrupts(core, ICR, E1000_ICR_VMMB); } static void igb_set_pfmailbox(IGBCore *core, int index, uint32_t val) @@ -2109,13 +2179,12 @@ static void igb_w1c(IGBCore *core, int index, uint32_t val) static void igb_set_eimc(IGBCore *core, int index, uint32_t val) { bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); + uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK; - /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */ - core->mac[EIMS] &= - ~(val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK)); + trace_igb_irq_write_eimc(val, msix); - trace_igb_irq_write_eimc(val, core->mac[EIMS], msix); - igb_update_interrupt_state(core); + /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */ + igb_lower_interrupts(core, EIMS, val & mask); } static void igb_set_eiac(IGBCore *core, int index, uint32_t val) @@ -2155,11 +2224,10 @@ static void igb_set_eicr(IGBCore *core, int index, uint32_t val) * TODO: In IOV mode, only bit zero of this vector is available for the PF * function. */ - core->mac[EICR] &= - ~(val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK)); + uint32_t mask = msix ? 
E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK; trace_igb_irq_write_eicr(val, msix); - igb_update_interrupt_state(core); + igb_lower_interrupts(core, EICR, val & mask); } static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val) @@ -2259,7 +2327,7 @@ igb_autoneg_timer(void *opaque) igb_update_flowctl_status(core); /* signal link status change to the guest */ - igb_set_interrupt_cause(core, E1000_ICR_LSC); + igb_raise_interrupts(core, ICR, E1000_ICR_LSC); } } @@ -2332,7 +2400,7 @@ igb_set_mdic(IGBCore *core, int index, uint32_t val) core->mac[MDIC] = val | E1000_MDIC_READY; if (val & E1000_MDIC_INT_EN) { - igb_set_interrupt_cause(core, E1000_ICR_MDAC); + igb_raise_interrupts(core, ICR, E1000_ICR_MDAC); } } @@ -2440,49 +2508,39 @@ static void igb_set_ics(IGBCore *core, int index, uint32_t val) { trace_e1000e_irq_write_ics(val); - igb_set_interrupt_cause(core, val); + igb_raise_interrupts(core, ICR, val); } static void igb_set_imc(IGBCore *core, int index, uint32_t val) { trace_e1000e_irq_ims_clear_set_imc(val); - igb_clear_ims_bits(core, val); - igb_update_interrupt_state(core); + igb_lower_interrupts(core, IMS, val); } static void igb_set_ims(IGBCore *core, int index, uint32_t val) { - uint32_t valid_val = val & 0x77D4FBFD; - - trace_e1000e_irq_set_ims(val, core->mac[IMS], core->mac[IMS] | valid_val); - core->mac[IMS] |= valid_val; - igb_update_interrupt_state(core); + igb_raise_interrupts(core, IMS, val & 0x77D4FBFD); } -static void igb_commit_icr(IGBCore *core) +static void igb_nsicr(IGBCore *core) { /* - * If GPIE.NSICR = 0, then the copy of IAM to IMS will occur only if at + * If GPIE.NSICR = 0, then the clear of IMS will occur only if at * least one bit is set in the IMS and there is a true interrupt as * reflected in ICR.INTA. */ if ((core->mac[GPIE] & E1000_GPIE_NSICR) || (core->mac[IMS] && (core->mac[ICR] & E1000_ICR_INT_ASSERTED))) { - igb_set_ims(core, IMS, core->mac[IAM]); - } else { - igb_update_interrupt_state(core); + igb_lower_interrupts(core, IMS, core->mac[IAM]); } } static void igb_set_icr(IGBCore *core, int index, uint32_t val) { - uint32_t icr = core->mac[ICR] & ~val; - - trace_igb_irq_icr_write(val, core->mac[ICR], icr); - core->mac[ICR] = icr; - igb_commit_icr(core); + igb_nsicr(core); + igb_lower_interrupts(core, ICR, val); } static uint32_t @@ -2533,21 +2591,21 @@ static uint32_t igb_mac_icr_read(IGBCore *core, int index) { uint32_t ret = core->mac[ICR]; - trace_e1000e_irq_icr_read_entry(ret); if (core->mac[GPIE] & E1000_GPIE_NSICR) { trace_igb_irq_icr_clear_gpie_nsicr(); - core->mac[ICR] = 0; + igb_lower_interrupts(core, ICR, 0xffffffff); } else if (core->mac[IMS] == 0) { trace_e1000e_irq_icr_clear_zero_ims(); - core->mac[ICR] = 0; + igb_lower_interrupts(core, ICR, 0xffffffff); + } else if (core->mac[ICR] & E1000_ICR_INT_ASSERTED) { + igb_lower_interrupts(core, ICR, 0xffffffff); } else if (!msix_enabled(core->owner)) { trace_e1000e_irq_icr_clear_nonmsix_icr_read(); - core->mac[ICR] = 0; + igb_lower_interrupts(core, ICR, 0xffffffff); } - trace_e1000e_irq_icr_read_exit(core->mac[ICR]); - igb_commit_icr(core); + igb_nsicr(core); return ret; } @@ -3282,6 +3340,8 @@ static const readops igb_macreg_readops[] = { [EIAM] = igb_mac_readreg, [IVAR0 ... IVAR0 + 7] = igb_mac_readreg, igb_getreg(IVAR_MISC), + igb_getreg(TSYNCRXCFG), + [ETQF0 ... ETQF0 + 7] = igb_mac_readreg, igb_getreg(VT_CTL), [P2VMAILBOX0 ... P2VMAILBOX7] = igb_mac_readreg, [V2PMAILBOX0 ... 
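The reworked igb_mac_icr_read() above routes its read-to-clear side effect through igb_lower_interrupts() and clears the whole register under any of four conditions. A compact standalone model of just that decision; the flags stand in for GPIE.NSICR, the IMS mask, ICR.INT_ASSERTED and whether MSI-X is enabled on the function:

#include <stdbool.h>
#include <stdint.h>

static uint32_t icr_read(uint32_t *icr, uint32_t ims,
                         bool nsicr, bool msix)
{
    uint32_t ret = *icr;
    bool int_asserted = *icr & (1u << 31);   /* ICR.INT_ASSERTED */

    if (nsicr || ims == 0 || int_asserted || !msix) {
        *icr = 0;                            /* read clears all causes */
    }
    return ret;
}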
V2PMAILBOX7] = igb_mac_vfmailbox_read, @@ -3689,6 +3749,8 @@ static const writeops igb_macreg_writeops[] = { [EIMS] = igb_set_eims, [IVAR0 ... IVAR0 + 7] = igb_mac_writereg, igb_putreg(IVAR_MISC), + igb_putreg(TSYNCRXCFG), + [ETQF0 ... ETQF0 + 7] = igb_mac_writereg, igb_putreg(VT_CTL), [P2VMAILBOX0 ... P2VMAILBOX7] = igb_set_pfmailbox, [V2PMAILBOX0 ... V2PMAILBOX7] = igb_set_vfmailbox, @@ -3955,7 +4017,7 @@ igb_core_pci_realize(IGBCore *core, core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core); for (i = 0; i < IGB_NUM_QUEUES; i++) { - net_tx_pkt_init(&core->tx[i].tx_pkt, NULL, E1000E_MAX_TX_FRAGS); + net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS); } net_rx_pkt_init(&core->rx_pkt); @@ -3980,7 +4042,6 @@ igb_core_pci_uninit(IGBCore *core) qemu_del_vm_change_state_handler(core->vmstate); for (i = 0; i < IGB_NUM_QUEUES; i++) { - net_tx_pkt_reset(core->tx[i].tx_pkt, NULL); net_tx_pkt_uninit(core->tx[i].tx_pkt); } @@ -4073,54 +4134,54 @@ static const uint32_t igb_mac_reg_init[] = { [VMOLR0 ... VMOLR0 + 7] = 0x2600 | E1000_VMOLR_STRCRC, [RPLOLR] = E1000_RPLOLR_STRCRC, [RLPML] = 0x2600, - [TXCTL0] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL1] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL2] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL3] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL4] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL5] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL6] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL7] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL8] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL9] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL10] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL11] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL12] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL13] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL14] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL15] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL0] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL1] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL2] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL3] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL4] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL5] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL6] = 
E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL7] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL8] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL9] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL10] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL11] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL12] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL13] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL14] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL15] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, }; static void igb_reset(IGBCore *core, bool sw) @@ -4159,7 +4220,6 @@ static void igb_reset(IGBCore *core, bool sw) for (i = 0; i < ARRAY_SIZE(core->tx); i++) { tx = &core->tx[i]; - net_tx_pkt_reset(tx->tx_pkt, NULL); memset(tx->ctx, 0, sizeof(tx->ctx)); tx->first = true; tx->skip_cp = false; diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h index c5c5b3c3b8..82ff195dfc 100644 --- a/hw/net/igb_regs.h +++ b/hw/net/igb_regs.h @@ -42,11 +42,6 @@ union e1000_adv_tx_desc { } wb; }; -#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ -#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor Extension (1=Adv) */ -#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP/UDP Segmentation Enable */ - #define E1000_ADVTXD_POTS_IXSM 0x00000100 /* Insert TCP/UDP Checksum */ #define E1000_ADVTXD_POTS_TXSM 0x00000200 /* Insert TCP/UDP Checksum */ @@ -151,6 +146,10 @@ union e1000_adv_rx_desc { #define IGB_82576_VF_DEV_ID 0x10CA #define IGB_I350_VF_DEV_ID 0x1520 +/* VLAN info */ +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 + /* from igb/e1000_82575.h */ #define E1000_MRQC_ENABLE_RSS_MQ 0x00000002 @@ -160,6 +159,29 @@ union e1000_adv_rx_desc { #define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE 
of SCTP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +/* Adv ctxt IPSec ESP len mask */ + /* Additional Transmit Descriptor Control definitions */ #define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ @@ -188,6 +210,15 @@ union e1000_adv_rx_desc { #define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE BIT(26) +#define E1000_ETQF_1588 BIT(30) +#define E1000_ETQF_IMM_INT BIT(29) +#define E1000_ETQF_QUEUE_ENABLE BIT(31) +#define E1000_ETQF_QUEUE_SHIFT 16 +#define E1000_ETQF_QUEUE_MASK 0x00070000 +#define E1000_ETQF_ETYPE_MASK 0x0000FFFF + #define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ #define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ #define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ @@ -291,6 +322,9 @@ union e1000_adv_rx_desc { /* E1000_EITR_CNT_IGNR is only for 82576 and newer */ #define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ + /* PCI Express Control */ #define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 #define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 @@ -362,6 +396,20 @@ union e1000_adv_rx_desc { #define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ #define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) +#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) +#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) +#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) +#define E1000_SAQF0 E1000_SAQF(0) +#define E1000_DAQF0 E1000_DAQF(0) +#define E1000_SPQF0 E1000_SPQF(0) +#define E1000_FTQF0 E1000_FTQF(0) +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) #define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ @@ -637,10 +685,19 @@ union e1000_adv_rx_desc { #define E1000_RSS_QUEUE(reta, hash) (E1000_RETA_VAL(reta, hash) & 0x0F) +#define E1000_MRQ_RSS_TYPE_IPV4UDP 7 +#define E1000_MRQ_RSS_TYPE_IPV6UDP 8 + #define E1000_STATUS_IOV_MODE 0x00040000 #define E1000_STATUS_NUM_VFS_SHIFT 14 +#define E1000_ADVRXD_PKT_IP4 BIT(4) +#define E1000_ADVRXD_PKT_IP6 BIT(6) +#define E1000_ADVRXD_PKT_TCP BIT(8) +#define E1000_ADVRXD_PKT_UDP BIT(9) +#define E1000_ADVRXD_PKT_SCTP BIT(10) + static inline uint8_t igb_ivar_entry_rx(uint8_t i) { return i < 8 ? 
i * 4 : (i - 8) * 4 + 2; diff --git a/hw/net/igbvf.c b/hw/net/igbvf.c index 70beb7af50..284ea61184 100644 --- a/hw/net/igbvf.c +++ b/hw/net/igbvf.c @@ -50,15 +50,8 @@ #include "trace.h" #include "qapi/error.h" -#define TYPE_IGBVF "igbvf" OBJECT_DECLARE_SIMPLE_TYPE(IgbVfState, IGBVF) -#define IGBVF_MMIO_BAR_IDX (0) -#define IGBVF_MSIX_BAR_IDX (3) - -#define IGBVF_MMIO_SIZE (16 * 1024) -#define IGBVF_MSIX_SIZE (16 * 1024) - struct IgbVfState { PCIDevice parent_obj; diff --git a/hw/net/meson.build b/hw/net/meson.build index e2be0654a1..2632634df3 100644 --- a/hw/net/meson.build +++ b/hw/net/meson.build @@ -1,75 +1,75 @@ -softmmu_ss.add(when: 'CONFIG_DP8393X', if_true: files('dp8393x.c')) -softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen_nic.c')) -softmmu_ss.add(when: 'CONFIG_NE2000_COMMON', if_true: files('ne2000.c')) +system_ss.add(when: 'CONFIG_DP8393X', if_true: files('dp8393x.c')) +system_ss.add(when: 'CONFIG_XEN', if_true: files('xen_nic.c')) +system_ss.add(when: 'CONFIG_NE2000_COMMON', if_true: files('ne2000.c')) # PCI network cards -softmmu_ss.add(when: 'CONFIG_NE2000_PCI', if_true: files('ne2000-pci.c')) -softmmu_ss.add(when: 'CONFIG_EEPRO100_PCI', if_true: files('eepro100.c')) -softmmu_ss.add(when: 'CONFIG_PCNET_PCI', if_true: files('pcnet-pci.c')) -softmmu_ss.add(when: 'CONFIG_PCNET_COMMON', if_true: files('pcnet.c')) -softmmu_ss.add(when: 'CONFIG_E1000_PCI', if_true: files('e1000.c', 'e1000x_common.c')) -softmmu_ss.add(when: 'CONFIG_E1000E_PCI_EXPRESS', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c')) -softmmu_ss.add(when: 'CONFIG_E1000E_PCI_EXPRESS', if_true: files('e1000e.c', 'e1000e_core.c', 'e1000x_common.c')) -softmmu_ss.add(when: 'CONFIG_IGB_PCI_EXPRESS', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c')) -softmmu_ss.add(when: 'CONFIG_IGB_PCI_EXPRESS', if_true: files('igb.c', 'igbvf.c', 'igb_core.c')) -softmmu_ss.add(when: 'CONFIG_RTL8139_PCI', if_true: files('rtl8139.c')) -softmmu_ss.add(when: 'CONFIG_TULIP', if_true: files('tulip.c')) -softmmu_ss.add(when: 'CONFIG_VMXNET3_PCI', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c')) -softmmu_ss.add(when: 'CONFIG_VMXNET3_PCI', if_true: files('vmxnet3.c')) +system_ss.add(when: 'CONFIG_NE2000_PCI', if_true: files('ne2000-pci.c')) +system_ss.add(when: 'CONFIG_EEPRO100_PCI', if_true: files('eepro100.c')) +system_ss.add(when: 'CONFIG_PCNET_PCI', if_true: files('pcnet-pci.c')) +system_ss.add(when: 'CONFIG_PCNET_COMMON', if_true: files('pcnet.c')) +system_ss.add(when: 'CONFIG_E1000_PCI', if_true: files('e1000.c', 'e1000x_common.c')) +system_ss.add(when: 'CONFIG_E1000E_PCI_EXPRESS', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c')) +system_ss.add(when: 'CONFIG_E1000E_PCI_EXPRESS', if_true: files('e1000e.c', 'e1000e_core.c', 'e1000x_common.c')) +system_ss.add(when: 'CONFIG_IGB_PCI_EXPRESS', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c')) +system_ss.add(when: 'CONFIG_IGB_PCI_EXPRESS', if_true: files('igb.c', 'igbvf.c', 'igb_core.c')) +system_ss.add(when: 'CONFIG_RTL8139_PCI', if_true: files('rtl8139.c')) +system_ss.add(when: 'CONFIG_TULIP', if_true: files('tulip.c')) +system_ss.add(when: 'CONFIG_VMXNET3_PCI', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c')) +system_ss.add(when: 'CONFIG_VMXNET3_PCI', if_true: files('vmxnet3.c')) -softmmu_ss.add(when: 'CONFIG_SMC91C111', if_true: files('smc91c111.c')) -softmmu_ss.add(when: 'CONFIG_LAN9118', if_true: files('lan9118.c')) -softmmu_ss.add(when: 'CONFIG_NE2000_ISA', if_true: files('ne2000-isa.c')) -softmmu_ss.add(when: 'CONFIG_OPENCORES_ETH', if_true: files('opencores_eth.c')) -softmmu_ss.add(when: 
'CONFIG_XGMAC', if_true: files('xgmac.c')) -softmmu_ss.add(when: 'CONFIG_MIPSNET', if_true: files('mipsnet.c')) -softmmu_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('xilinx_axienet.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_EMAC', if_true: files('allwinner_emac.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_SUN8I_EMAC', if_true: files('allwinner-sun8i-emac.c')) -softmmu_ss.add(when: 'CONFIG_IMX_FEC', if_true: files('imx_fec.c')) -softmmu_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-emac.c')) -softmmu_ss.add(when: 'CONFIG_MARVELL_88W8618', if_true: files('mv88w8618_eth.c')) +system_ss.add(when: 'CONFIG_SMC91C111', if_true: files('smc91c111.c')) +system_ss.add(when: 'CONFIG_LAN9118', if_true: files('lan9118.c')) +system_ss.add(when: 'CONFIG_NE2000_ISA', if_true: files('ne2000-isa.c')) +system_ss.add(when: 'CONFIG_OPENCORES_ETH', if_true: files('opencores_eth.c')) +system_ss.add(when: 'CONFIG_XGMAC', if_true: files('xgmac.c')) +system_ss.add(when: 'CONFIG_MIPSNET', if_true: files('mipsnet.c')) +system_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('xilinx_axienet.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_EMAC', if_true: files('allwinner_emac.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_SUN8I_EMAC', if_true: files('allwinner-sun8i-emac.c')) +system_ss.add(when: 'CONFIG_IMX_FEC', if_true: files('imx_fec.c')) +system_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-emac.c')) +system_ss.add(when: 'CONFIG_MARVELL_88W8618', if_true: files('mv88w8618_eth.c')) -softmmu_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_gem.c')) -softmmu_ss.add(when: 'CONFIG_STELLARIS_ENET', if_true: files('stellaris_enet.c')) -softmmu_ss.add(when: 'CONFIG_LANCE', if_true: files('lance.c')) -softmmu_ss.add(when: 'CONFIG_LASI_I82596', if_true: files('lasi_i82596.c')) -softmmu_ss.add(when: 'CONFIG_I82596_COMMON', if_true: files('i82596.c')) -softmmu_ss.add(when: 'CONFIG_SUNHME', if_true: files('sunhme.c')) -softmmu_ss.add(when: 'CONFIG_FTGMAC100', if_true: files('ftgmac100.c')) -softmmu_ss.add(when: 'CONFIG_SUNGEM', if_true: files('sungem.c')) -softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_emc.c')) +system_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_gem.c')) +system_ss.add(when: 'CONFIG_STELLARIS_ENET', if_true: files('stellaris_enet.c')) +system_ss.add(when: 'CONFIG_LANCE', if_true: files('lance.c')) +system_ss.add(when: 'CONFIG_LASI_I82596', if_true: files('lasi_i82596.c')) +system_ss.add(when: 'CONFIG_I82596_COMMON', if_true: files('i82596.c')) +system_ss.add(when: 'CONFIG_SUNHME', if_true: files('sunhme.c')) +system_ss.add(when: 'CONFIG_FTGMAC100', if_true: files('ftgmac100.c')) +system_ss.add(when: 'CONFIG_SUNGEM', if_true: files('sungem.c')) +system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_emc.c')) -softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_eth.c')) -softmmu_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_fec.c')) +system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_eth.c')) +system_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_fec.c')) specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_llan.c')) -specific_ss.add(when: 'CONFIG_XILINX_ETHLITE', if_true: files('xilinx_ethlite.c')) +system_ss.add(when: 'CONFIG_XILINX_ETHLITE', if_true: files('xilinx_ethlite.c')) -softmmu_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('net_rx_pkt.c')) +system_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('net_rx_pkt.c')) specific_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('virtio-net.c')) if have_vhost_net - 
softmmu_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost_net.c'), if_false: files('vhost_net-stub.c')) - softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost_net-stub.c')) + system_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost_net.c'), if_false: files('vhost_net-stub.c')) + system_ss.add(when: 'CONFIG_ALL', if_true: files('vhost_net-stub.c')) else - softmmu_ss.add(files('vhost_net-stub.c')) + system_ss.add(files('vhost_net-stub.c')) endif -softmmu_ss.add(when: 'CONFIG_ETSEC', if_true: files( +system_ss.add(when: 'CONFIG_ETSEC', if_true: files( 'fsl_etsec/etsec.c', 'fsl_etsec/miim.c', 'fsl_etsec/registers.c', 'fsl_etsec/rings.c', )) -softmmu_ss.add(when: 'CONFIG_ROCKER', if_true: files( +system_ss.add(when: 'CONFIG_ROCKER', if_true: files( 'rocker/rocker.c', 'rocker/rocker_desc.c', 'rocker/rocker_fp.c', 'rocker/rocker_of_dpa.c', 'rocker/rocker_world.c', ), if_false: files('rocker/qmp-norocker.c')) -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('rocker/qmp-norocker.c')) -softmmu_ss.add(files('rocker/rocker-hmp-cmds.c')) +system_ss.add(when: 'CONFIG_ALL', if_true: files('rocker/qmp-norocker.c')) +system_ss.add(files('rocker/rocker-hmp-cmds.c')) subdir('can') diff --git a/hw/net/msf2-emac.c b/hw/net/msf2-emac.c index 7ccd3e5142..db3a04deb1 100644 --- a/hw/net/msf2-emac.c +++ b/hw/net/msf2-emac.c @@ -118,14 +118,18 @@ static void emac_load_desc(MSF2EmacState *s, EmacDesc *d, hwaddr desc) d->next = le32_to_cpu(d->next); } -static void emac_store_desc(MSF2EmacState *s, EmacDesc *d, hwaddr desc) +static void emac_store_desc(MSF2EmacState *s, const EmacDesc *d, hwaddr desc) { - /* Convert from host endianness into LE. */ - d->pktaddr = cpu_to_le32(d->pktaddr); - d->pktsize = cpu_to_le32(d->pktsize); - d->next = cpu_to_le32(d->next); - - address_space_write(&s->dma_as, desc, MEMTXATTRS_UNSPECIFIED, d, sizeof *d); + EmacDesc outd; + /* + * Convert from host endianness into LE. We use a local struct because + * calling code may still want to look at the fields afterwards. 
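The emac_store_desc() change above is a small aliasing fix: the descriptor is byte-swapped into a local copy before the DMA write, so the caller's in-memory descriptor keeps its host-order fields. A standalone sketch of the same pattern; the byte-swap helper and plain memcpy stand in for QEMU's cpu_to_le32() and address_space_write():

#include <stdint.h>
#include <string.h>

typedef struct {
    uint32_t pktaddr;
    uint32_t pktsize;
    uint32_t next;
} Desc;

static uint32_t to_le32(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    return __builtin_bswap32(v);
#else
    return v;
#endif
}

static void store_desc(uint8_t *dma_mem, const Desc *d)
{
    Desc out = {
        .pktaddr = to_le32(d->pktaddr),
        .pktsize = to_le32(d->pktsize),
        .next    = to_le32(d->next),
    };

    memcpy(dma_mem, &out, sizeof(out));   /* guest memory gets LE layout  */
    /* *d is left untouched, unlike the old in-place conversion.          */
}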
+ */ + outd.pktaddr = cpu_to_le32(d->pktaddr); + outd.pktsize = cpu_to_le32(d->pktsize); + outd.next = cpu_to_le32(d->next); + + address_space_write(&s->dma_as, desc, MEMTXATTRS_UNSPECIFIED, &outd, sizeof outd); } static void msf2_dma_tx(MSF2EmacState *s) diff --git a/hw/net/net_rx_pkt.c b/hw/net/net_rx_pkt.c index 39cdea06de..32e5f3f9cf 100644 --- a/hw/net/net_rx_pkt.c +++ b/hw/net/net_rx_pkt.c @@ -16,6 +16,7 @@ */ #include "qemu/osdep.h" +#include "qemu/crc32c.h" #include "trace.h" #include "net_rx_pkt.h" #include "net/checksum.h" @@ -23,7 +24,10 @@ struct NetRxPkt { struct virtio_net_hdr virt_hdr; - uint8_t ehdr_buf[sizeof(struct eth_header) + sizeof(struct vlan_header)]; + struct { + struct eth_header eth; + struct vlan_header vlan; + } ehdr_buf; struct iovec *vec; uint16_t vec_len_total; uint16_t vec_len; @@ -89,7 +93,7 @@ net_rx_pkt_pull_data(struct NetRxPkt *pkt, if (pkt->ehdr_buf_len) { net_rx_pkt_iovec_realloc(pkt, iovcnt + 1); - pkt->vec[0].iov_base = pkt->ehdr_buf; + pkt->vec[0].iov_base = &pkt->ehdr_buf; pkt->vec[0].iov_len = pkt->ehdr_buf_len; pkt->tot_len = pllen + pkt->ehdr_buf_len; @@ -103,7 +107,7 @@ net_rx_pkt_pull_data(struct NetRxPkt *pkt, iov, iovcnt, ploff, pkt->tot_len); } - eth_get_protocols(pkt->vec, pkt->vec_len, &pkt->hasip4, &pkt->hasip6, + eth_get_protocols(pkt->vec, pkt->vec_len, 0, &pkt->hasip4, &pkt->hasip6, &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off, &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info); @@ -120,7 +124,7 @@ void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt, assert(pkt); if (strip_vlan) { - pkt->ehdr_buf_len = eth_strip_vlan(iov, iovcnt, iovoff, pkt->ehdr_buf, + pkt->ehdr_buf_len = eth_strip_vlan(iov, iovcnt, iovoff, &pkt->ehdr_buf, &ploff, &tci); } else { pkt->ehdr_buf_len = 0; @@ -133,20 +137,17 @@ void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt, void net_rx_pkt_attach_iovec_ex(struct NetRxPkt *pkt, const struct iovec *iov, int iovcnt, - size_t iovoff, bool strip_vlan, - uint16_t vet) + size_t iovoff, int strip_vlan_index, + uint16_t vet, uint16_t vet_ext) { uint16_t tci = 0; uint16_t ploff = iovoff; assert(pkt); - if (strip_vlan) { - pkt->ehdr_buf_len = eth_strip_vlan_ex(iov, iovcnt, iovoff, vet, - pkt->ehdr_buf, - &ploff, &tci); - } else { - pkt->ehdr_buf_len = 0; - } + pkt->ehdr_buf_len = eth_strip_vlan_ex(iov, iovcnt, iovoff, + strip_vlan_index, vet, vet_ext, + &pkt->ehdr_buf, + &ploff, &tci); pkt->tci = tci; @@ -186,17 +187,13 @@ size_t net_rx_pkt_get_total_len(struct NetRxPkt *pkt) return pkt->tot_len; } -void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, const void *data, - size_t len) +void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, + const struct iovec *iov, size_t iovcnt, + size_t iovoff) { - const struct iovec iov = { - .iov_base = (void *)data, - .iov_len = len - }; - assert(pkt); - eth_get_protocols(&iov, 1, &pkt->hasip4, &pkt->hasip6, + eth_get_protocols(iov, iovcnt, iovoff, &pkt->hasip4, &pkt->hasip6, &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off, &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info); } @@ -240,11 +237,6 @@ eth_ip4_hdr_info *net_rx_pkt_get_ip4_info(struct NetRxPkt *pkt) return &pkt->ip4hdr_info; } -eth_l4_hdr_info *net_rx_pkt_get_l4_info(struct NetRxPkt *pkt) -{ - return &pkt->l4hdr_info; -} - static inline void _net_rx_rss_add_chunk(uint8_t *rss_input, size_t *bytes_written, void *ptr, size_t size) @@ -560,32 +552,73 @@ _net_rx_pkt_calc_l4_csum(struct NetRxPkt *pkt) return csum; } -bool net_rx_pkt_validate_l4_csum(struct NetRxPkt *pkt, bool *csum_valid) +static bool 
+_net_rx_pkt_validate_sctp_sum(struct NetRxPkt *pkt) { - uint16_t csum; + size_t csum_off; + size_t off = pkt->l4hdr_off; + size_t vec_len = pkt->vec_len; + struct iovec *vec; + uint32_t calculated = 0; + uint32_t original; + bool valid; - trace_net_rx_pkt_l4_csum_validate_entry(); + for (vec = pkt->vec; vec->iov_len < off; vec++) { + off -= vec->iov_len; + vec_len--; + } - if (pkt->l4hdr_info.proto != ETH_L4_HDR_PROTO_TCP && - pkt->l4hdr_info.proto != ETH_L4_HDR_PROTO_UDP) { - trace_net_rx_pkt_l4_csum_validate_not_xxp(); + csum_off = off + 8; + + if (!iov_to_buf(vec, vec_len, csum_off, &original, sizeof(original))) { return false; } - if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP && - pkt->l4hdr_info.hdr.udp.uh_sum == 0) { - trace_net_rx_pkt_l4_csum_validate_udp_with_no_checksum(); + if (!iov_from_buf(vec, vec_len, csum_off, + &calculated, sizeof(calculated))) { return false; } + calculated = crc32c(0xffffffff, + (uint8_t *)vec->iov_base + off, vec->iov_len - off); + calculated = iov_crc32c(calculated ^ 0xffffffff, vec + 1, vec_len - 1); + valid = calculated == le32_to_cpu(original); + iov_from_buf(vec, vec_len, csum_off, &original, sizeof(original)); + + return valid; +} + +bool net_rx_pkt_validate_l4_csum(struct NetRxPkt *pkt, bool *csum_valid) +{ + uint32_t csum; + + trace_net_rx_pkt_l4_csum_validate_entry(); + if (pkt->hasip4 && pkt->ip4hdr_info.fragment) { trace_net_rx_pkt_l4_csum_validate_ip4_fragment(); return false; } - csum = _net_rx_pkt_calc_l4_csum(pkt); + switch (pkt->l4hdr_info.proto) { + case ETH_L4_HDR_PROTO_UDP: + if (pkt->l4hdr_info.hdr.udp.uh_sum == 0) { + trace_net_rx_pkt_l4_csum_validate_udp_with_no_checksum(); + return false; + } + /* fall through */ + case ETH_L4_HDR_PROTO_TCP: + csum = _net_rx_pkt_calc_l4_csum(pkt); + *csum_valid = ((csum == 0) || (csum == 0xFFFF)); + break; + + case ETH_L4_HDR_PROTO_SCTP: + *csum_valid = _net_rx_pkt_validate_sctp_sum(pkt); + break; - *csum_valid = ((csum == 0) || (csum == 0xFFFF)); + default: + trace_net_rx_pkt_l4_csum_validate_not_xxp(); + return false; + } trace_net_rx_pkt_l4_csum_validate_csum(*csum_valid); diff --git a/hw/net/net_rx_pkt.h b/hw/net/net_rx_pkt.h index d00b484900..55ec67a1a7 100644 --- a/hw/net/net_rx_pkt.h +++ b/hw/net/net_rx_pkt.h @@ -55,12 +55,14 @@ size_t net_rx_pkt_get_total_len(struct NetRxPkt *pkt); * parse and set packet analysis results * * @pkt: packet - * @data: pointer to the data buffer to be parsed - * @len: data length + * @iov: received data scatter-gather list + * @iovcnt: number of elements in iov + * @iovoff: data start offset in the iov * */ -void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, const void *data, - size_t len); +void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, + const struct iovec *iov, size_t iovcnt, + size_t iovoff); /** * fetches packet analysis results @@ -117,15 +119,6 @@ eth_ip6_hdr_info *net_rx_pkt_get_ip6_info(struct NetRxPkt *pkt); */ eth_ip4_hdr_info *net_rx_pkt_get_ip4_info(struct NetRxPkt *pkt); -/** - * fetches L4 header analysis results - * - * Return: pointer to analysis results structure which is stored in internal - * packet area. 
- * - */ -eth_l4_hdr_info *net_rx_pkt_get_l4_info(struct NetRxPkt *pkt); - typedef enum { NetPktRssIpV4, NetPktRssIpV4Tcp, @@ -230,18 +223,19 @@ void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt, /** * attach scatter-gather data to rx packet * -* @pkt: packet -* @iov: received data scatter-gather list -* @iovcnt number of elements in iov -* @iovoff data start offset in the iov -* @strip_vlan: should the module strip vlan from data -* @vet: VLAN tag Ethernet type +* @pkt: packet +* @iov: received data scatter-gather list +* @iovcnt: number of elements in iov +* @iovoff: data start offset in the iov +* @strip_vlan_index: index of Q tag if it is to be stripped. negative otherwise. +* @vet: VLAN tag Ethernet type +* @vet_ext: outer VLAN tag Ethernet type * */ void net_rx_pkt_attach_iovec_ex(struct NetRxPkt *pkt, - const struct iovec *iov, int iovcnt, - size_t iovoff, bool strip_vlan, - uint16_t vet); + const struct iovec *iov, int iovcnt, + size_t iovoff, int strip_vlan_index, + uint16_t vet, uint16_t vet_ext); /** * attach data to rx packet diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c index 8dc8568ba2..2e5f58b3c9 100644 --- a/hw/net/net_tx_pkt.c +++ b/hw/net/net_tx_pkt.c @@ -16,12 +16,13 @@ */ #include "qemu/osdep.h" -#include "net_tx_pkt.h" +#include "qemu/crc32c.h" #include "net/eth.h" #include "net/checksum.h" #include "net/tap.h" #include "net/net.h" #include "hw/pci/pci_device.h" +#include "net_tx_pkt.h" enum { NET_TX_PKT_VHDR_FRAG = 0, @@ -32,8 +33,6 @@ enum { /* TX packet private context */ struct NetTxPkt { - PCIDevice *pci_dev; - struct virtio_net_hdr virt_hdr; struct iovec *raw; @@ -42,7 +41,10 @@ struct NetTxPkt { struct iovec *vec; - uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN]; + struct { + struct eth_header eth; + struct vlan_header vlan[3]; + } l2_hdr; union { struct ip_header ip; struct ip6_header ip6; @@ -59,13 +61,10 @@ struct NetTxPkt { uint8_t l4proto; }; -void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev, - uint32_t max_frags) +void net_tx_pkt_init(struct NetTxPkt **pkt, uint32_t max_frags) { struct NetTxPkt *p = g_malloc0(sizeof *p); - p->pci_dev = pci_dev; - p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG); p->raw = g_new(struct iovec, max_frags); @@ -137,6 +136,23 @@ void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt) pkt->virt_hdr.csum_offset, &csum, sizeof(csum)); } +bool net_tx_pkt_update_sctp_checksum(struct NetTxPkt *pkt) +{ + uint32_t csum = 0; + struct iovec *pl_start_frag = pkt->vec + NET_TX_PKT_PL_START_FRAG; + + if (iov_from_buf(pl_start_frag, pkt->payload_frags, 8, &csum, sizeof(csum)) < sizeof(csum)) { + return false; + } + + csum = cpu_to_le32(iov_crc32c(0xffffffff, pl_start_frag, pkt->payload_frags)); + if (iov_from_buf(pl_start_frag, pkt->payload_frags, 8, &csum, sizeof(csum)) < sizeof(csum)) { + return false; + } + + return true; +} + static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt) { pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len + @@ -370,24 +386,17 @@ bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable, void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt, uint16_t vlan, uint16_t vlan_ethtype) { - bool is_new; assert(pkt); - eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base, - vlan, vlan_ethtype, &is_new); + eth_setup_vlan_headers(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base, + &pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len, + vlan, vlan_ethtype); - /* update l2hdrlen */ - if (is_new) { - pkt->hdr_len += sizeof(struct vlan_header); - 
pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len += - sizeof(struct vlan_header); - } + pkt->hdr_len += sizeof(struct vlan_header); } -bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa, - size_t len) +bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, void *base, size_t len) { - hwaddr mapped_len = 0; struct iovec *ventry; assert(pkt); @@ -395,23 +404,12 @@ bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa, return false; } - if (!len) { - return true; - } - ventry = &pkt->raw[pkt->raw_frags]; - mapped_len = len; - - ventry->iov_base = pci_dma_map(pkt->pci_dev, pa, - &mapped_len, DMA_DIRECTION_TO_DEVICE); + ventry->iov_base = base; + ventry->iov_len = len; + pkt->raw_frags++; - if ((ventry->iov_base != NULL) && (len == mapped_len)) { - ventry->iov_len = mapped_len; - pkt->raw_frags++; - return true; - } else { - return false; - } + return true; } bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt) @@ -445,7 +443,8 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt) #endif } -void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev) +void net_tx_pkt_reset(struct NetTxPkt *pkt, + NetTxPktFreeFrag callback, void *context) { int i; @@ -465,17 +464,37 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev) assert(pkt->raw); for (i = 0; i < pkt->raw_frags; i++) { assert(pkt->raw[i].iov_base); - pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base, - pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0); + callback(context, pkt->raw[i].iov_base, pkt->raw[i].iov_len); } } - pkt->pci_dev = pci_dev; pkt->raw_frags = 0; pkt->hdr_len = 0; pkt->l4proto = 0; } +void net_tx_pkt_unmap_frag_pci(void *context, void *base, size_t len) +{ + pci_dma_unmap(context, base, len, DMA_DIRECTION_TO_DEVICE, 0); +} + +bool net_tx_pkt_add_raw_fragment_pci(struct NetTxPkt *pkt, PCIDevice *pci_dev, + dma_addr_t pa, size_t len) +{ + dma_addr_t mapped_len = len; + void *base = pci_dma_map(pci_dev, pa, &mapped_len, DMA_DIRECTION_TO_DEVICE); + if (!base) { + return false; + } + + if (mapped_len != len || !net_tx_pkt_add_raw_fragment(pkt, base, len)) { + net_tx_pkt_unmap_frag_pci(pci_dev, base, mapped_len); + return false; + } + + return true; +} + static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt, struct iovec *iov, uint32_t iov_len, uint16_t csl) @@ -697,7 +716,7 @@ static void net_tx_pkt_udp_fragment_fix(struct NetTxPkt *pkt, } static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt, - NetTxPktCallback callback, + NetTxPktSend callback, void *context) { uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN; @@ -794,7 +813,7 @@ bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc) } bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload, - NetTxPktCallback callback, void *context) + NetTxPktSend callback, void *context) { assert(pkt); diff --git a/hw/net/net_tx_pkt.h b/hw/net/net_tx_pkt.h index e5ce6f20bc..0a716e74a5 100644 --- a/hw/net/net_tx_pkt.h +++ b/hw/net/net_tx_pkt.h @@ -26,17 +26,16 @@ struct NetTxPkt; -typedef void (* NetTxPktCallback)(void *, const struct iovec *, int, const struct iovec *, int); +typedef void (*NetTxPktFreeFrag)(void *, void *, size_t); +typedef void (*NetTxPktSend)(void *, const struct iovec *, int, const struct iovec *, int); /** * Init function for tx packet functionality * * @pkt: packet pointer - * @pci_dev: PCI device processing this packet * @max_frags: max tx ip fragments */ -void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev, - uint32_t max_frags); +void net_tx_pkt_init(struct NetTxPkt **pkt, uint32_t 
max_frags); /** * Clean all tx packet resources. @@ -95,12 +94,11 @@ net_tx_pkt_setup_vlan_header(struct NetTxPkt *pkt, uint16_t vlan) * populate data fragment into pkt context. * * @pkt: packet - * @pa: physical address of fragment + * @base: pointer to fragment * @len: length of fragment * */ -bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa, - size_t len); +bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, void *base, size_t len); /** * Fix ip header fields and calculate IP header and pseudo header checksums. @@ -119,6 +117,14 @@ void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt); void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt); /** + * Calculate the SCTP checksum. + * + * @pkt: packet + * + */ +bool net_tx_pkt_update_sctp_checksum(struct NetTxPkt *pkt); + +/** * get length of all populated data. * * @pkt: packet @@ -148,10 +154,30 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt); * reset tx packet private context (needed to be called between packets) * * @pkt: packet - * @dev: PCI device processing the next packet + * @callback: function to free the fragments + * @context: pointer to be passed to the callback + */ +void net_tx_pkt_reset(struct NetTxPkt *pkt, + NetTxPktFreeFrag callback, void *context); + +/** + * Unmap a fragment mapped from a PCI device. + * + * @context: PCI device owning fragment + * @base: pointer to fragment + * @len: length of fragment + */ +void net_tx_pkt_unmap_frag_pci(void *context, void *base, size_t len); + +/** + * map data fragment from PCI device and populate it into pkt context. * + * @pci_dev: PCI device owning fragment + * @pa: physical address of fragment + * @len: length of fragment */ -void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *dev); +bool net_tx_pkt_add_raw_fragment_pci(struct NetTxPkt *pkt, PCIDevice *pci_dev, + dma_addr_t pa, size_t len); /** * Send packet to qemu. handles sw offloads if vhdr is not supported. @@ -173,7 +199,7 @@ bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc); * @ret: operation result */ bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload, - NetTxPktCallback callback, void *context); + NetTxPktSend callback, void *context); /** * parse raw packet data and analyze offload requirements. diff --git a/hw/net/npcm7xx_emc.c b/hw/net/npcm7xx_emc.c index 7c86bb52e5..8156f701b0 100644 --- a/hw/net/npcm7xx_emc.c +++ b/hw/net/npcm7xx_emc.c @@ -98,6 +98,8 @@ static const char *emc_reg_name(int regno) static void emc_reset(NPCM7xxEMCState *emc) { + uint32_t value; + trace_npcm7xx_emc_reset(emc->emc_num); memset(&emc->regs[0], 0, sizeof(emc->regs)); @@ -112,6 +114,16 @@ static void emc_reset(NPCM7xxEMCState *emc) emc->tx_active = false; emc->rx_active = false; + + /* Set the MAC address in the register space. 
*/ + value = (emc->conf.macaddr.a[0] << 24) | + (emc->conf.macaddr.a[1] << 16) | + (emc->conf.macaddr.a[2] << 8) | + emc->conf.macaddr.a[3]; + emc->regs[REG_CAMM_BASE] = value; + + value = (emc->conf.macaddr.a[4] << 24) | (emc->conf.macaddr.a[5] << 16); + emc->regs[REG_CAML_BASE] = value; } static void npcm7xx_emc_reset(DeviceState *dev) @@ -432,13 +444,25 @@ static bool emc_receive_filter1(NPCM7xxEMCState *emc, const uint8_t *buf, } case ETH_PKT_UCAST: { bool matches; + uint32_t value; + struct MACAddr mac; if (emc->regs[REG_CAMCMR] & REG_CAMCMR_AUP) { return true; } + + value = emc->regs[REG_CAMM_BASE]; + mac.a[0] = value >> 24; + mac.a[1] = value >> 16; + mac.a[2] = value >> 8; + mac.a[3] = value >> 0; + value = emc->regs[REG_CAML_BASE]; + mac.a[4] = value >> 24; + mac.a[5] = value >> 16; + matches = ((emc->regs[REG_CAMCMR] & REG_CAMCMR_ECMP) && /* We only support one CAM register, CAM0. */ (emc->regs[REG_CAMEN] & (1 << 0)) && - memcmp(buf, emc->conf.macaddr.a, ETH_ALEN) == 0); + memcmp(buf, mac.a, ETH_ALEN) == 0); if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) { *fail_reason = "MACADDR matched, comparison complemented"; return !matches; @@ -661,15 +685,9 @@ static void npcm7xx_emc_write(void *opaque, hwaddr offset, break; case REG_CAMM_BASE + 0: emc->regs[reg] = value; - emc->conf.macaddr.a[0] = value >> 24; - emc->conf.macaddr.a[1] = value >> 16; - emc->conf.macaddr.a[2] = value >> 8; - emc->conf.macaddr.a[3] = value >> 0; break; case REG_CAML_BASE + 0: emc->regs[reg] = value; - emc->conf.macaddr.a[4] = value >> 24; - emc->conf.macaddr.a[5] = value >> 16; break; case REG_MCMDR: { uint32_t prev; diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c index 5a5aaf868d..5f1a4d359b 100644 --- a/hw/net/rtl8139.c +++ b/hw/net/rtl8139.c @@ -2154,6 +2154,9 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) int large_send_mss = (txdw0 >> CP_TC_LGSEN_MSS_SHIFT) & CP_TC_LGSEN_MSS_MASK; + if (large_send_mss == 0) { + goto skip_offload; + } DPRINTF("+++ C+ mode offloaded task TSO IP data %d " "frame data %d specified MSS=%d\n", diff --git a/hw/net/trace-events b/hw/net/trace-events index d35554fce8..e4a98b2c7d 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -106,6 +106,8 @@ e1000_receiver_overrun(size_t s, uint32_t rdh, uint32_t rdt) "Receiver overrun: # e1000x_common.c e1000x_rx_can_recv_disabled(bool link_up, bool rx_enabled, bool pci_master) "link_up: %d, rx_enabled %d, pci_master %d" e1000x_vlan_is_vlan_pkt(bool is_vlan_pkt, uint16_t eth_proto, uint16_t vet) "Is VLAN packet: %d, ETH proto: 0x%X, VET: 0x%X" +e1000x_rx_flt_vlan_mismatch(uint16_t vid) "VID mismatch: 0x%X" +e1000x_rx_flt_vlan_match(uint16_t vid) "VID match: 0x%X" e1000x_rx_flt_ucast_match(uint32_t idx, uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x" e1000x_rx_flt_ucast_mismatch(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x" e1000x_rx_flt_inexact_mismatch(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5, uint32_t mo, uint32_t mta, uint32_t mta_val) "inexact mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] 0x%x" @@ -154,8 +156,6 @@ e1000e_rx_can_recv_rings_full(void) "Cannot receive: all rings are full" e1000e_rx_can_recv(void) "Can receive" e1000e_rx_has_buffers(int ridx, uint32_t free_desc, size_t total_size, uint32_t desc_buf_size) "ring #%d: free descr: %u, packet size %zu, descr buffer size %u" e1000e_rx_null_descriptor(void) "Null RX 
descriptor!!" -e1000e_rx_flt_vlan_mismatch(uint16_t vid) "VID mismatch: 0x%X" -e1000e_rx_flt_vlan_match(uint16_t vid) "VID match: 0x%X" e1000e_rx_desc_ps_read(uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3) "buffers: [0x%"PRIx64", 0x%"PRIx64", 0x%"PRIx64", 0x%"PRIx64"]" e1000e_rx_desc_ps_write(uint16_t a0, uint16_t a1, uint16_t a2, uint16_t a3) "bytes written: [%u, %u, %u, %u]" e1000e_rx_desc_buff_sizes(uint32_t b0, uint32_t b1, uint32_t b2, uint32_t b3) "buffer sizes: [%u, %u, %u, %u]" @@ -179,7 +179,7 @@ e1000e_rx_rss_disabled(void) "RSS is disabled" e1000e_rx_rss_type(uint32_t type) "RSS type is %u" e1000e_rx_rss_ip4(int l4hdr_proto, uint32_t mrqc, bool tcpipv4_enabled, bool ipv4_enabled) "RSS IPv4: L4 header protocol %d, mrqc 0x%X, tcpipv4 enabled %d, ipv4 enabled %d" e1000e_rx_rss_ip6_rfctl(uint32_t rfctl) "RSS IPv6: rfctl 0x%X" -e1000e_rx_rss_ip6(bool ex_dis, bool new_ex_dis, int l4hdr_proto, bool has_ext_headers, bool ex_dst_valid, bool ex_src_valid, uint32_t mrqc, bool tcpipv6_enabled, bool ipv6ex_enabled, bool ipv6_enabled) "RSS IPv6: ex_dis: %d, new_ex_dis: %d, L4 header protocol %d, has_ext_headers %d, ex_dst_valid %d, ex_src_valid %d, mrqc 0x%X, tcpipv6 enabled %d, ipv6ex enabled %d, ipv6 enabled %d" +e1000e_rx_rss_ip6(bool ex_dis, bool new_ex_dis, int l4hdr_proto, bool has_ext_headers, bool ex_dst_valid, bool ex_src_valid, uint32_t mrqc, bool tcpipv6ex_enabled, bool ipv6ex_enabled, bool ipv6_enabled) "RSS IPv6: ex_dis: %d, new_ex_dis: %d, L4 header protocol %d, has_ext_headers %d, ex_dst_valid %d, ex_src_valid %d, mrqc 0x%X, tcpipv6ex enabled %d, ipv6ex enabled %d, ipv6 enabled %d" e1000e_rx_metadata_protocols(bool hasip4, bool hasip6, int l4hdr_protocol) "protocols: ip4: %d, ip6: %d, l4hdr: %d" e1000e_rx_metadata_vlan(uint16_t vlan_tag) "VLAN tag is 0x%X" @@ -205,21 +205,16 @@ e1000e_irq_msix_notify_postponed_vec(int idx) "Sending MSI-X postponed by EITR[% e1000e_irq_legacy_notify(bool level) "IRQ line state: %d" e1000e_irq_msix_notify_vec(uint32_t vector) "MSI-X notify vector 0x%x" e1000e_irq_postponed_by_xitr(uint32_t reg) "Interrupt postponed by [E]ITR register 0x%x" -e1000e_irq_clear_ims(uint32_t bits, uint32_t old_ims, uint32_t new_ims) "Clearing IMS bits 0x%x: 0x%x --> 0x%x" -e1000e_irq_set_ims(uint32_t bits, uint32_t old_ims, uint32_t new_ims) "Setting IMS bits 0x%x: 0x%x --> 0x%x" +e1000e_irq_clear(uint32_t offset, uint32_t old, uint32_t new) "Clearing interrupt register 0x%x: 0x%x --> 0x%x" +e1000e_irq_set(uint32_t offset, uint32_t old, uint32_t new) "Setting interrupt register 0x%x: 0x%x --> 0x%x" e1000e_irq_fix_icr_asserted(uint32_t new_val) "ICR_ASSERTED bit fixed: 0x%x" e1000e_irq_add_msi_other(uint32_t new_val) "ICR_OTHER bit added: 0x%x" e1000e_irq_pending_interrupts(uint32_t pending, uint32_t icr, uint32_t ims) "ICR PENDING: 0x%x (ICR: 0x%x, IMS: 0x%x)" -e1000e_irq_set_cause_entry(uint32_t val, uint32_t icr) "Going to set IRQ cause 0x%x, ICR: 0x%x" -e1000e_irq_set_cause_exit(uint32_t val, uint32_t icr) "Set IRQ cause 0x%x, ICR: 0x%x" -e1000e_irq_icr_write(uint32_t bits, uint32_t old_icr, uint32_t new_icr) "Clearing ICR bits 0x%x: 0x%x --> 0x%x" e1000e_irq_write_ics(uint32_t val) "Adding ICR bits 0x%x" e1000e_irq_icr_process_iame(void) "Clearing IMS bits due to IAME" e1000e_irq_read_ics(uint32_t ics) "Current ICS: 0x%x" e1000e_irq_read_ims(uint32_t ims) "Current IMS: 0x%x" e1000e_irq_icr_clear_nonmsix_icr_read(void) "Clearing ICR on read due to non MSI-X int" -e1000e_irq_icr_read_entry(uint32_t icr) "Starting ICR read. 
Current ICR: 0x%x" -e1000e_irq_icr_read_exit(uint32_t icr) "Ending ICR read. Current ICR: 0x%x" e1000e_irq_icr_clear_zero_ims(void) "Clearing ICR on read due to zero IMS" e1000e_irq_icr_clear_iame(void) "Clearing ICR on read due to IAME" e1000e_irq_iam_clear_eiame(uint32_t iam, uint32_t cause) "Clearing IMS due to EIAME, IAM: 0x%X, cause: 0x%X" @@ -235,7 +230,6 @@ e1000e_irq_tidv_fpd_not_running(void) "FPD written while TIDV was not running" e1000e_irq_eitr_set(uint32_t eitr_num, uint32_t val) "EITR[%u] = %u" e1000e_irq_itr_set(uint32_t val) "ITR = %u" e1000e_irq_fire_all_timers(uint32_t val) "Firing all delay/throttling timers on all interrupts enable (0x%X written to IMS)" -e1000e_irq_adding_delayed_causes(uint32_t val, uint32_t icr) "Merging delayed causes 0x%X to ICR 0x%X" e1000e_irq_msix_pending_clearing(uint32_t cause, uint32_t int_cfg, uint32_t vec) "Clearing MSI-X pending bit for cause 0x%x, IVAR config 0x%x, vector %u" e1000e_wrn_msix_vec_wrong(uint32_t cause, uint32_t cfg) "Invalid configuration for cause 0x%x: 0x%x" @@ -288,12 +282,11 @@ igb_rx_desc_buff_write(uint64_t addr, uint16_t offset, const void* source, uint3 igb_rx_metadata_rss(uint32_t rss) "RSS data: 0x%X" igb_irq_icr_clear_gpie_nsicr(void) "Clearing ICR on read due to GPIE.NSICR enabled" -igb_irq_icr_write(uint32_t bits, uint32_t old_icr, uint32_t new_icr) "Clearing ICR bits 0x%x: 0x%x --> 0x%x" igb_irq_set_iam(uint32_t icr) "Update IAM: 0x%x" igb_irq_read_iam(uint32_t icr) "Current IAM: 0x%x" igb_irq_write_eics(uint32_t val, bool msix) "Update EICS: 0x%x MSI-X: %d" igb_irq_write_eims(uint32_t val, bool msix) "Update EIMS: 0x%x MSI-X: %d" -igb_irq_write_eimc(uint32_t val, uint32_t eims, bool msix) "Update EIMC: 0x%x EIMS: 0x%x MSI-X: %d" +igb_irq_write_eimc(uint32_t val, bool msix) "Update EIMC: 0x%x MSI-X: %d" igb_irq_write_eiac(uint32_t val) "Update EIAC: 0x%x" igb_irq_write_eiam(uint32_t val, bool msix) "Update EIAM: 0x%x MSI-X: %d" igb_irq_write_eicr(uint32_t val, bool msix) "Update EICR: 0x%x MSI-X: %d" diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 53e1c32643..6df6b7329d 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -805,7 +805,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, } if (!get_vhost_net(nc->peer)) { - virtio_add_feature(&features, VIRTIO_F_RING_RESET); return features; } @@ -1835,9 +1834,12 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf, VIRTIO_NET_HASH_REPORT_UDPv6, VIRTIO_NET_HASH_REPORT_UDPv6_EX }; + struct iovec iov = { + .iov_base = (void *)buf, + .iov_len = size + }; - net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len, - size - n->host_hdr_len); + net_rx_pkt_set_protocols(pkt, &iov, 1, n->host_hdr_len); net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); net_hash_type = virtio_net_get_hash_type(hasip4, hasip6, l4hdr_proto, n->rss_data.hash_types); @@ -2917,7 +2919,8 @@ static void virtio_net_add_queue(VirtIONet *n, int index) n->vqs[index].tx_vq = virtio_add_queue(vdev, n->net_conf.tx_queue_size, virtio_net_handle_tx_bh); - n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]); + n->vqs[index].tx_bh = qemu_bh_new_guarded(virtio_net_tx_bh, &n->vqs[index], + &DEVICE(vdev)->mem_reentrancy_guard); } n->vqs[index].tx_waiting = 0; diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index f7b874c139..18b9edfdb2 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -651,9 +651,8 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx) data_len = (txd.len > 0) ? 
txd.len : VMXNET3_MAX_TX_BUF_SIZE; data_pa = txd.addr; - if (!net_tx_pkt_add_raw_fragment(s->tx_pkt, - data_pa, - data_len)) { + if (!net_tx_pkt_add_raw_fragment_pci(s->tx_pkt, PCI_DEVICE(s), + data_pa, data_len)) { s->skip_current_tx_pkt = true; } } @@ -678,9 +677,12 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx) vmxnet3_complete_packet(s, qidx, txd_idx); s->tx_sop = true; s->skip_current_tx_pkt = false; - net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s)); + net_tx_pkt_reset(s->tx_pkt, + net_tx_pkt_unmap_frag_pci, PCI_DEVICE(s)); } } + + net_tx_pkt_reset(s->tx_pkt, net_tx_pkt_unmap_frag_pci, PCI_DEVICE(s)); } static inline void @@ -1159,7 +1161,6 @@ static void vmxnet3_deactivate_device(VMXNET3State *s) { if (s->device_active) { VMW_CBPRN("Deactivating vmxnet3..."); - net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s)); net_tx_pkt_uninit(s->tx_pkt); net_rx_pkt_uninit(s->rx_pkt); s->device_active = false; @@ -1519,7 +1520,7 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* Preallocate TX packet wrapper */ VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags); - net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags); + net_tx_pkt_init(&s->tx_pkt, s->max_tx_frags); net_rx_pkt_init(&s->rx_pkt); /* Read rings memory locations for RX queues */ @@ -2001,7 +2002,12 @@ vmxnet3_receive(NetClientState *nc, const uint8_t *buf, size_t size) get_eth_packet_type(PKT_GET_ETH_HDR(buf))); if (vmxnet3_rx_filter_may_indicate(s, buf, size)) { - net_rx_pkt_set_protocols(s->rx_pkt, buf, size); + struct iovec iov = { + .iov_base = (void *)buf, + .iov_len = size + }; + + net_rx_pkt_set_protocols(s->rx_pkt, &iov, 1, 0); vmxnet3_rx_need_csum_calculate(s->rx_pkt, buf, size); net_rx_pkt_attach_data(s->rx_pkt, buf, size, s->rx_vlan_stripping); bytes_indicated = vmxnet3_indicate_packet(s) ? 
size : -1; @@ -2399,7 +2405,7 @@ static int vmxnet3_post_load(void *opaque, int version_id) { VMXNET3State *s = opaque; - net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags); + net_tx_pkt_init(&s->tx_pkt, s->max_tx_frags); net_rx_pkt_init(&s->rx_pkt); if (s->msix_used) { diff --git a/hw/net/xilinx_ethlite.c b/hw/net/xilinx_ethlite.c index 99c22819ea..89f4f3b254 100644 --- a/hw/net/xilinx_ethlite.c +++ b/hw/net/xilinx_ethlite.c @@ -25,7 +25,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" #include "qom/object.h" -#include "cpu.h" /* FIXME should not use tswap* */ +#include "exec/tswap.h" #include "hw/sysbus.h" #include "hw/irq.h" #include "hw/qdev-properties.h" diff --git a/hw/nubus/meson.build b/hw/nubus/meson.build index 9287c633aa..e7ebda8993 100644 --- a/hw/nubus/meson.build +++ b/hw/nubus/meson.build @@ -4,4 +4,4 @@ nubus_ss.add(files('nubus-bus.c')) nubus_ss.add(files('nubus-bridge.c')) nubus_ss.add(when: 'CONFIG_Q800', if_true: files('mac-nubus-bridge.c')) -softmmu_ss.add_all(when: 'CONFIG_NUBUS', if_true: nubus_ss) +system_ss.add_all(when: 'CONFIG_NUBUS', if_true: nubus_ss) diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index f59dfe1cbe..fd917fcda1 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -4607,7 +4607,8 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr, QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry); } - sq->bh = qemu_bh_new(nvme_process_sq, sq); + sq->bh = qemu_bh_new_guarded(nvme_process_sq, sq, + &DEVICE(sq->ctrl)->mem_reentrancy_guard); if (n->dbbuf_enabled) { sq->db_addr = n->dbbuf_dbs + (sqid << 3); @@ -5253,7 +5254,8 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr, } } n->cq[cqid] = cq; - cq->bh = qemu_bh_new(nvme_post_cqes, cq); + cq->bh = qemu_bh_new_guarded(nvme_post_cqes, cq, + &DEVICE(cq->ctrl)->mem_reentrancy_guard); } static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req) diff --git a/hw/nvme/meson.build b/hw/nvme/meson.build index 3cf40046ee..1a6a2ca2f3 100644 --- a/hw/nvme/meson.build +++ b/hw/nvme/meson.build @@ -1 +1 @@ -softmmu_ss.add(when: 'CONFIG_NVME_PCI', if_true: files('ctrl.c', 'dif.c', 'ns.c', 'subsys.c')) +system_ss.add(when: 'CONFIG_NVME_PCI', if_true: files('ctrl.c', 'dif.c', 'ns.c', 'subsys.c')) diff --git a/hw/nvram/meson.build b/hw/nvram/meson.build index f5ee9f6b88..988dff6f8e 100644 --- a/hw/nvram/meson.build +++ b/hw/nvram/meson.build @@ -3,21 +3,21 @@ if have_system or have_tools qom_ss.add(files('fw_cfg-interface.c')) endif -softmmu_ss.add(files('fw_cfg.c')) -softmmu_ss.add(when: 'CONFIG_CHRP_NVRAM', if_true: files('chrp_nvram.c')) -softmmu_ss.add(when: 'CONFIG_DS1225Y', if_true: files('ds1225y.c')) -softmmu_ss.add(when: 'CONFIG_NMC93XX_EEPROM', if_true: files('eeprom93xx.c')) -softmmu_ss.add(when: 'CONFIG_AT24C', if_true: files('eeprom_at24c.c')) -softmmu_ss.add(when: 'CONFIG_MAC_NVRAM', if_true: files('mac_nvram.c')) -softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_otp.c')) -softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_nvm.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_CRC', if_true: files('xlnx-efuse-crc.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE', if_true: files('xlnx-efuse.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_VERSAL', if_true: files( +system_ss.add(files('fw_cfg.c')) +system_ss.add(when: 'CONFIG_CHRP_NVRAM', if_true: files('chrp_nvram.c')) +system_ss.add(when: 'CONFIG_DS1225Y', if_true: files('ds1225y.c')) +system_ss.add(when: 'CONFIG_NMC93XX_EEPROM', if_true: files('eeprom93xx.c')) +system_ss.add(when: 
'CONFIG_AT24C', if_true: files('eeprom_at24c.c')) +system_ss.add(when: 'CONFIG_MAC_NVRAM', if_true: files('mac_nvram.c')) +system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_otp.c')) +system_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_nvm.c')) +system_ss.add(when: 'CONFIG_XLNX_EFUSE_CRC', if_true: files('xlnx-efuse-crc.c')) +system_ss.add(when: 'CONFIG_XLNX_EFUSE', if_true: files('xlnx-efuse.c')) +system_ss.add(when: 'CONFIG_XLNX_EFUSE_VERSAL', if_true: files( 'xlnx-versal-efuse-cache.c', 'xlnx-versal-efuse-ctrl.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_ZYNQMP', if_true: files( +system_ss.add(when: 'CONFIG_XLNX_EFUSE_ZYNQMP', if_true: files( 'xlnx-zynqmp-efuse.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_BBRAM', if_true: files('xlnx-bbram.c')) +system_ss.add(when: 'CONFIG_XLNX_BBRAM', if_true: files('xlnx-bbram.c')) specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_nvram.c')) diff --git a/hw/pci-bridge/Kconfig b/hw/pci-bridge/Kconfig index 02614f49aa..67077366cc 100644 --- a/hw/pci-bridge/Kconfig +++ b/hw/pci-bridge/Kconfig @@ -3,6 +3,11 @@ config PCIE_PORT default y if PCI_DEVICES depends on PCI_EXPRESS && MSI_NONBROKEN +config PCIE_PCI_BRIDGE + bool + default y if PCIE_PORT + depends on PCIE_PORT + config PXB bool default y if Q35 || ARM_VIRT diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c index 9df436cb73..ef47e5d625 100644 --- a/hw/pci-bridge/cxl_upstream.c +++ b/hw/pci-bridge/cxl_upstream.c @@ -346,6 +346,9 @@ static void cxl_usp_realize(PCIDevice *d, Error **errp) cxl_cstate->cdat.free_cdat_table = free_default_cdat_table; cxl_cstate->cdat.private = d; cxl_doe_cdat_init(cxl_cstate, errp); + if (*errp) { + goto err_cap; + } return; diff --git a/hw/pci-bridge/meson.build b/hw/pci-bridge/meson.build index fe92d43de6..6d5ad9f37b 100644 --- a/hw/pci-bridge/meson.build +++ b/hw/pci-bridge/meson.build @@ -2,7 +2,8 @@ pci_ss = ss.source_set() pci_ss.add(files('pci_bridge_dev.c')) pci_ss.add(when: 'CONFIG_I82801B11', if_true: files('i82801b11.c')) pci_ss.add(when: 'CONFIG_IOH3420', if_true: files('ioh3420.c')) -pci_ss.add(when: 'CONFIG_PCIE_PORT', if_true: files('pcie_root_port.c', 'gen_pcie_root_port.c', 'pcie_pci_bridge.c')) +pci_ss.add(when: 'CONFIG_PCIE_PORT', if_true: files('pcie_root_port.c', 'gen_pcie_root_port.c')) +pci_ss.add(when: 'CONFIG_PCIE_PCI_BRIDGE', if_true: files('pcie_pci_bridge.c')) pci_ss.add(when: 'CONFIG_PXB', if_true: files('pci_expander_bridge.c'), if_false: files('pci_expander_bridge_stubs.c')) pci_ss.add(when: 'CONFIG_XIO3130', if_true: files('xio3130_upstream.c', 'xio3130_downstream.c')) @@ -11,6 +12,6 @@ pci_ss.add(when: 'CONFIG_CXL', if_true: files('cxl_root_port.c', 'cxl_upstream.c # Sun4u pci_ss.add(when: 'CONFIG_SIMBA', if_true: files('simba.c')) -softmmu_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) +system_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('pci_expander_bridge_stubs.c')) +system_ss.add(when: 'CONFIG_ALL', if_true: files('pci_expander_bridge_stubs.c')) diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c index ead33f0c05..613857b601 100644 --- a/hw/pci-bridge/pci_expander_bridge.c +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -50,24 +50,8 @@ struct PXBBus { char bus_path[8]; }; -#define TYPE_PXB_DEVICE "pxb" -DECLARE_INSTANCE_CHECKER(PXBDev, PXB_DEV, - TYPE_PXB_DEVICE) - -#define TYPE_PXB_PCIE_DEVICE "pxb-pcie" -DECLARE_INSTANCE_CHECKER(PXBDev, PXB_PCIE_DEV, - TYPE_PXB_PCIE_DEVICE) - -static PXBDev 
*convert_to_pxb(PCIDevice *dev) -{ - /* A CXL PXB's parent bus is PCIe, so the normal check won't work */ - if (object_dynamic_cast(OBJECT(dev), TYPE_PXB_CXL_DEVICE)) { - return PXB_CXL_DEV(dev); - } - - return pci_bus_is_express(pci_get_bus(dev)) - ? PXB_PCIE_DEV(dev) : PXB_DEV(dev); -} +#define TYPE_PXB_PCIE_DEV "pxb-pcie" +OBJECT_DECLARE_SIMPLE_TYPE(PXBPCIEDev, PXB_PCIE_DEV) static GList *pxb_dev_list; @@ -89,14 +73,14 @@ bool cxl_get_hb_passthrough(PCIHostState *hb) static int pxb_bus_num(PCIBus *bus) { - PXBDev *pxb = convert_to_pxb(bus->parent_dev); + PXBDev *pxb = PXB_DEV(bus->parent_dev); return pxb->bus_nr; } static uint16_t pxb_bus_numa_node(PCIBus *bus) { - PXBDev *pxb = convert_to_pxb(bus->parent_dev); + PXBDev *pxb = PXB_DEV(bus->parent_dev); return pxb->numa_node; } @@ -154,7 +138,7 @@ static char *pxb_host_ofw_unit_address(const SysBusDevice *dev) pxb_host = PCI_HOST_BRIDGE(dev); pxb_bus = pxb_host->bus; - pxb_dev = convert_to_pxb(pxb_bus->parent_dev); + pxb_dev = PXB_DEV(pxb_bus->parent_dev); position = g_list_index(pxb_dev_list, pxb_dev); assert(position >= 0); @@ -212,8 +196,8 @@ static void pxb_cxl_realize(DeviceState *dev, Error **errp) */ void pxb_cxl_hook_up_registers(CXLState *cxl_state, PCIBus *bus, Error **errp) { - PXBDev *pxb = PXB_CXL_DEV(pci_bridge_get_device(bus)); - CXLHost *cxl = pxb->cxl.cxl_host_bridge; + PXBCXLDev *pxb = PXB_CXL_DEV(pci_bridge_get_device(bus)); + CXLHost *cxl = pxb->cxl_host_bridge; CXLComponentState *cxl_cstate = &cxl->cxl_cstate; struct MemoryRegion *mr = &cxl_cstate->crb.component_registers; hwaddr offset; @@ -299,7 +283,7 @@ static int pxb_map_irq_fn(PCIDevice *pci_dev, int pin) static void pxb_cxl_dev_reset(DeviceState *dev) { - CXLHost *cxl = PXB_CXL_DEV(dev)->cxl.cxl_host_bridge; + CXLHost *cxl = PXB_CXL_DEV(dev)->cxl_host_bridge; CXLComponentState *cxl_cstate = &cxl->cxl_cstate; PCIHostState *hb = PCI_HOST_BRIDGE(cxl); uint32_t *reg_state = cxl_cstate->crb.cache_mem_registers; @@ -311,7 +295,7 @@ static void pxb_cxl_dev_reset(DeviceState *dev) * The CXL specification allows for host bridges with no HDM decoders * if they only have a single root port. 
*/ - if (!PXB_DEV(dev)->hdm_for_passthrough) { + if (!PXB_CXL_DEV(dev)->hdm_for_passthrough) { dsp_count = pcie_count_ds_ports(hb->bus); } /* Initial reset will have 0 dsp so wait until > 0 */ @@ -337,7 +321,7 @@ static gint pxb_compare(gconstpointer a, gconstpointer b) static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type, Error **errp) { - PXBDev *pxb = convert_to_pxb(dev); + PXBDev *pxb = PXB_DEV(dev); DeviceState *ds, *bds = NULL; PCIBus *bus; const char *dev_name = NULL; @@ -365,7 +349,7 @@ static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type, } else if (type == CXL) { bus = pci_root_bus_new(ds, dev_name, NULL, NULL, 0, TYPE_PXB_CXL_BUS); bus->flags |= PCI_BUS_CXL; - PXB_CXL_DEV(dev)->cxl.cxl_host_bridge = PXB_CXL_HOST(ds); + PXB_CXL_DEV(dev)->cxl_host_bridge = PXB_CXL_HOST(ds); } else { bus = pci_root_bus_new(ds, "pxb-internal", NULL, NULL, 0, TYPE_PXB_BUS); bds = qdev_new("pci-bridge"); @@ -418,7 +402,7 @@ static void pxb_dev_realize(PCIDevice *dev, Error **errp) static void pxb_dev_exitfn(PCIDevice *pci_dev) { - PXBDev *pxb = convert_to_pxb(pci_dev); + PXBDev *pxb = PXB_DEV(pci_dev); pxb_dev_list = g_list_remove(pxb_dev_list, pxb); } @@ -449,7 +433,7 @@ static void pxb_dev_class_init(ObjectClass *klass, void *data) } static const TypeInfo pxb_dev_info = { - .name = TYPE_PXB_DEVICE, + .name = TYPE_PXB_DEV, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PXBDev), .class_init = pxb_dev_class_init, @@ -481,15 +465,14 @@ static void pxb_pcie_dev_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_BRIDGE_HOST; dc->desc = "PCI Express Expander Bridge"; - device_class_set_props(dc, pxb_dev_properties); dc->hotpluggable = false; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); } static const TypeInfo pxb_pcie_dev_info = { - .name = TYPE_PXB_PCIE_DEVICE, - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(PXBDev), + .name = TYPE_PXB_PCIE_DEV, + .parent = TYPE_PXB_DEV, + .instance_size = sizeof(PXBPCIEDev), .class_init = pxb_pcie_dev_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, @@ -510,11 +493,7 @@ static void pxb_cxl_dev_realize(PCIDevice *dev, Error **errp) } static Property pxb_cxl_dev_properties[] = { - /* Note: 0 is not a legal PXB bus number. 
*/ - DEFINE_PROP_UINT8("bus_nr", PXBDev, bus_nr, 0), - DEFINE_PROP_UINT16("numa_node", PXBDev, numa_node, NUMA_NODE_UNASSIGNED), - DEFINE_PROP_BOOL("bypass_iommu", PXBDev, bypass_iommu, false), - DEFINE_PROP_BOOL("hdm_for_passthrough", PXBDev, hdm_for_passthrough, false), + DEFINE_PROP_BOOL("hdm_for_passthrough", PXBCXLDev, hdm_for_passthrough, false), DEFINE_PROP_END_OF_LIST(), }; @@ -540,9 +519,9 @@ static void pxb_cxl_dev_class_init(ObjectClass *klass, void *data) } static const TypeInfo pxb_cxl_dev_info = { - .name = TYPE_PXB_CXL_DEVICE, - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(PXBDev), + .name = TYPE_PXB_CXL_DEV, + .parent = TYPE_PXB_PCIE_DEV, + .instance_size = sizeof(PXBCXLDev), .class_init = pxb_cxl_dev_class_init, .interfaces = (InterfaceInfo[]){ diff --git a/hw/pci-host/bonito.c b/hw/pci-host/bonito.c index 1cf25bab8d..4701481b9b 100644 --- a/hw/pci-host/bonito.c +++ b/hw/pci-host/bonito.c @@ -656,7 +656,7 @@ static void bonito_pci_realize(PCIDevice *dev, Error **errp) PCIBonitoState *s = PCI_BONITO(dev); SysBusDevice *sysbus = SYS_BUS_DEVICE(s->pcihost); PCIHostState *phb = PCI_HOST_BRIDGE(s->pcihost); - BonitoState *bs = BONITO_PCI_HOST_BRIDGE(s->pcihost); + BonitoState *bs = s->pcihost; MemoryRegion *pcimem_alias = g_new(MemoryRegion, 1); /* diff --git a/hw/pci-host/i440fx.c b/hw/pci-host/i440fx.c index 262f82c303..61e7b97ff4 100644 --- a/hw/pci-host/i440fx.c +++ b/hw/pci-host/i440fx.c @@ -27,6 +27,7 @@ #include "qemu/range.h" #include "hw/i386/pc.h" #include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" #include "hw/pci/pci_host.h" #include "hw/pci-host/i440fx.h" #include "hw/qdev-properties.h" @@ -217,10 +218,10 @@ static void i440fx_pcihost_realize(DeviceState *dev, Error **errp) PCIHostState *s = PCI_HOST_BRIDGE(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - sysbus_add_io(sbd, 0xcf8, &s->conf_mem); + memory_region_add_subregion(s->bus->address_space_io, 0xcf8, &s->conf_mem); sysbus_init_ioports(sbd, 0xcf8, 4); - sysbus_add_io(sbd, 0xcfc, &s->data_mem); + memory_region_add_subregion(s->bus->address_space_io, 0xcfc, &s->data_mem); sysbus_init_ioports(sbd, 0xcfc, 4); /* register i440fx 0xcf8 port as coalesced pio */ @@ -291,12 +292,12 @@ PCIBus *i440fx_init(const char *pci_type, object_property_add_const_link(qdev_get_machine(), "smram", OBJECT(&f->smram)); - init_pam(dev, f->ram_memory, f->system_memory, f->pci_address_space, - &f->pam_regions[0], PAM_BIOS_BASE, PAM_BIOS_SIZE); + init_pam(&f->pam_regions[0], OBJECT(d), f->ram_memory, f->system_memory, + f->pci_address_space, PAM_BIOS_BASE, PAM_BIOS_SIZE); for (i = 0; i < ARRAY_SIZE(f->pam_regions) - 1; ++i) { - init_pam(dev, f->ram_memory, f->system_memory, f->pci_address_space, - &f->pam_regions[i+1], PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, - PAM_EXPAN_SIZE); + init_pam(&f->pam_regions[i + 1], OBJECT(d), f->ram_memory, + f->system_memory, f->pci_address_space, + PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE); } ram_size = ram_size / 8 / 1024 / 1024; diff --git a/hw/pci-host/meson.build b/hw/pci-host/meson.build index 9a813d552e..64eada76fe 100644 --- a/hw/pci-host/meson.build +++ b/hw/pci-host/meson.build @@ -29,7 +29,7 @@ pci_ss.add(when: 'CONFIG_VERSATILE_PCI', if_true: files('versatile.c')) # HPPA devices pci_ss.add(when: 'CONFIG_DINO', if_true: files('dino.c')) -softmmu_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) +system_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) specific_ss.add(when: 'CONFIG_PCI_POWERNV', if_true: files( 'pnv_phb3.c', diff --git a/hw/pci-host/pam.c b/hw/pci-host/pam.c index 
454dd120db..68e9884d27 100644 --- a/hw/pci-host/pam.c +++ b/hw/pci-host/pam.c @@ -30,24 +30,24 @@ #include "qemu/osdep.h" #include "hw/pci-host/pam.h" -void init_pam(DeviceState *dev, MemoryRegion *ram_memory, +void init_pam(PAMMemoryRegion *mem, Object *owner, MemoryRegion *ram_memory, MemoryRegion *system_memory, MemoryRegion *pci_address_space, - PAMMemoryRegion *mem, uint32_t start, uint32_t size) + uint32_t start, uint32_t size) { int i; /* RAM */ - memory_region_init_alias(&mem->alias[3], OBJECT(dev), "pam-ram", ram_memory, + memory_region_init_alias(&mem->alias[3], owner, "pam-ram", ram_memory, start, size); /* ROM (XXX: not quite correct) */ - memory_region_init_alias(&mem->alias[1], OBJECT(dev), "pam-rom", ram_memory, + memory_region_init_alias(&mem->alias[1], owner, "pam-rom", ram_memory, start, size); memory_region_set_readonly(&mem->alias[1], true); /* XXX: should distinguish read/write cases */ - memory_region_init_alias(&mem->alias[0], OBJECT(dev), "pam-pci", pci_address_space, + memory_region_init_alias(&mem->alias[0], owner, "pam-pci", pci_address_space, start, size); - memory_region_init_alias(&mem->alias[2], OBJECT(dev), "pam-pci", ram_memory, + memory_region_init_alias(&mem->alias[2], owner, "pam-pci", ram_memory, start, size); memory_region_transaction_begin(); diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c index 26390863d6..fd18920e7f 100644 --- a/hw/pci-host/q35.c +++ b/hw/pci-host/q35.c @@ -50,10 +50,12 @@ static void q35_host_realize(DeviceState *dev, Error **errp) Q35PCIHost *s = Q35_HOST_DEVICE(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - sysbus_add_io(sbd, MCH_HOST_BRIDGE_CONFIG_ADDR, &pci->conf_mem); + memory_region_add_subregion(s->mch.address_space_io, + MCH_HOST_BRIDGE_CONFIG_ADDR, &pci->conf_mem); sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_ADDR, 4); - sysbus_add_io(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, &pci->data_mem); + memory_region_add_subregion(s->mch.address_space_io, + MCH_HOST_BRIDGE_CONFIG_DATA, &pci->data_mem); sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, 4); /* register q35 0xcf8 port as coalesced pio */ @@ -643,12 +645,12 @@ static void mch_realize(PCIDevice *d, Error **errp) object_property_add_const_link(qdev_get_machine(), "smram", OBJECT(&mch->smram)); - init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory, - mch->pci_address_space, &mch->pam_regions[0], + init_pam(&mch->pam_regions[0], OBJECT(mch), mch->ram_memory, + mch->system_memory, mch->pci_address_space, PAM_BIOS_BASE, PAM_BIOS_SIZE); for (i = 0; i < ARRAY_SIZE(mch->pam_regions) - 1; ++i) { - init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory, - mch->pci_address_space, &mch->pam_regions[i+1], + init_pam(&mch->pam_regions[i + 1], OBJECT(mch), mch->ram_memory, + mch->system_memory, mch->pci_address_space, PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE); } } diff --git a/hw/pci-host/raven.c b/hw/pci-host/raven.c index 072ffe3c5e..9a11ac4b2b 100644 --- a/hw/pci-host/raven.c +++ b/hw/pci-host/raven.c @@ -294,6 +294,13 @@ static void raven_pcihost_initfn(Object *obj) memory_region_init(&s->pci_memory, obj, "pci-memory", 0x3f000000); address_space_init(&s->pci_io_as, &s->pci_io, "raven-io"); + /* + * Raven's raven_io_ops use the address-space API to access pci-conf-idx + * (which is also owned by the raven device). As such, mark the + * pci_io_non_contiguous as re-entrancy safe. 
+ */ + s->pci_io_non_contiguous.disable_reentrancy_guard = true; + /* CPU address space */ memory_region_add_subregion(address_space_mem, PCI_IO_BASE_ADDR, &s->pci_io); diff --git a/hw/pci/meson.build b/hw/pci/meson.build index 4fcd888b27..b1855452f5 100644 --- a/hw/pci/meson.build +++ b/hw/pci/meson.build @@ -16,8 +16,8 @@ pci_ss.add(files( # CONFIG_PCI_EXPRESS=n. pci_ss.add(files('pcie.c', 'pcie_aer.c')) pci_ss.add(files('pcie_doe.c')) -softmmu_ss.add(when: 'CONFIG_PCI_EXPRESS', if_true: files('pcie_port.c', 'pcie_host.c')) -softmmu_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) +system_ss.add(when: 'CONFIG_PCI_EXPRESS', if_true: files('pcie_port.c', 'pcie_host.c')) +system_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) -softmmu_ss.add(when: 'CONFIG_PCI', if_false: files('pci-stub.c')) -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('pci-stub.c')) +system_ss.add(when: 'CONFIG_PCI', if_false: files('pci-stub.c')) +system_ss.add(when: 'CONFIG_ALL', if_true: files('pci-stub.c')) diff --git a/hw/pci/pci.c b/hw/pci/pci.c index def5000e7b..bf38905b7d 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -79,6 +79,8 @@ static Property pci_props[] = { DEFINE_PROP_STRING("failover_pair_id", PCIDevice, failover_pair_id), DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0), + DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present, + QEMU_PCIE_ERR_UNC_MASK_BITNR, true), DEFINE_PROP_END_OF_LIST() }; @@ -558,6 +560,7 @@ void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, bus->set_irq = set_irq; bus->irq_opaque = irq_opaque; bus->nirq = nirq; + g_free(bus->irq_count); bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0])); } @@ -573,6 +576,7 @@ void pci_bus_irqs_cleanup(PCIBus *bus) bus->irq_opaque = NULL; bus->nirq = 0; g_free(bus->irq_count); + bus->irq_count = NULL; } PCIBus *pci_register_root_bus(DeviceState *parent, const char *name, @@ -1116,6 +1120,21 @@ static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn) return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn)); } +uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus) +{ + return bus->slot_reserved_mask; +} + +void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask) +{ + bus->slot_reserved_mask |= mask; +} + +void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask) +{ + bus->slot_reserved_mask &= ~mask; +} + /* -1 for devfn means auto assign */ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, const char *name, int devfn, @@ -1427,9 +1446,7 @@ pcibus_t pci_bar_address(PCIDevice *d, { pcibus_t new_addr, last_addr; uint16_t cmd = pci_get_word(d->config + PCI_COMMAND); - Object *machine = qdev_get_machine(); - ObjectClass *oc = object_get_class(machine); - MachineClass *mc = MACHINE_CLASS(oc); + MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); bool allow_0_address = mc->pci_allow_0_address; if (type & PCI_BASE_ADDRESS_SPACE_IO) { @@ -2292,15 +2309,14 @@ static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **errp) { int64_t size; - char *path; + g_autofree char *path = NULL; void *ptr; char name[32]; const VMStateDescription *vmsd; - if (!pdev->romfile) - return; - if (strlen(pdev->romfile) == 0) + if (!pdev->romfile || !strlen(pdev->romfile)) { return; + } if (!pdev->rom_bar) { /* @@ -2335,23 +2351,20 @@ static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, size = get_image_size(path); if (size < 0) { error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile); - g_free(path); return; } else if (size == 0) { error_setg(errp, "romfile \"%s\" is 
empty", pdev->romfile); - g_free(path); return; } else if (size > 2 * GiB) { error_setg(errp, "romfile \"%s\" too large (size cannot exceed 2 GiB)", pdev->romfile); - g_free(path); return; } if (pdev->romsize != -1) { if (size > pdev->romsize) { - error_setg(errp, "romfile \"%s\" (%u bytes) is too large for ROM size %u", + error_setg(errp, "romfile \"%s\" (%u bytes) " + "is too large for ROM size %u", pdev->romfile, (uint32_t)size, pdev->romsize); - g_free(path); return; } } else { @@ -2359,21 +2372,18 @@ static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, } vmsd = qdev_get_vmsd(DEVICE(pdev)); + snprintf(name, sizeof(name), "%s.rom", + vmsd ? vmsd->name : object_get_typename(OBJECT(pdev))); - if (vmsd) { - snprintf(name, sizeof(name), "%s.rom", vmsd->name); - } else { - snprintf(name, sizeof(name), "%s.rom", object_get_typename(OBJECT(pdev))); - } pdev->has_rom = true; - memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, &error_fatal); + memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, + &error_fatal); + ptr = memory_region_get_ram_ptr(&pdev->rom); if (load_image_size(path, ptr, size) < 0) { error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile); - g_free(path); return; } - g_free(path); if (is_default_rom) { /* Only the default rom images will be patched (if needed). */ diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c index dd5af508f9..e7b9345615 100644 --- a/hw/pci/pci_bridge.c +++ b/hw/pci/pci_bridge.c @@ -184,11 +184,11 @@ static void pci_bridge_init_vga_aliases(PCIBridge *br, PCIBus *parent, } } -static PCIBridgeWindows *pci_bridge_region_init(PCIBridge *br) +static void pci_bridge_region_init(PCIBridge *br) { PCIDevice *pd = PCI_DEVICE(br); PCIBus *parent = pci_get_bus(pd); - PCIBridgeWindows *w = g_new(PCIBridgeWindows, 1); + PCIBridgeWindows *w = &br->windows; uint16_t cmd = pci_get_word(pd->config + PCI_COMMAND); pci_bridge_init_alias(br, &w->alias_pref_mem, @@ -211,8 +211,6 @@ static PCIBridgeWindows *pci_bridge_region_init(PCIBridge *br) cmd & PCI_COMMAND_IO); pci_bridge_init_vga_aliases(br, parent, w->alias_vga); - - return w; } static void pci_bridge_region_del(PCIBridge *br, PCIBridgeWindows *w) @@ -234,19 +232,18 @@ static void pci_bridge_region_cleanup(PCIBridge *br, PCIBridgeWindows *w) object_unparent(OBJECT(&w->alias_vga[QEMU_PCI_VGA_IO_LO])); object_unparent(OBJECT(&w->alias_vga[QEMU_PCI_VGA_IO_HI])); object_unparent(OBJECT(&w->alias_vga[QEMU_PCI_VGA_MEM])); - g_free(w); } void pci_bridge_update_mappings(PCIBridge *br) { - PCIBridgeWindows *w = br->windows; + PCIBridgeWindows *w = &br->windows; /* Make updates atomic to: handle the case of one VCPU updating the bridge * while another accesses an unaffected region. 
*/ memory_region_transaction_begin(); - pci_bridge_region_del(br, br->windows); + pci_bridge_region_del(br, w); pci_bridge_region_cleanup(br, w); - br->windows = pci_bridge_region_init(br); + pci_bridge_region_init(br); memory_region_transaction_commit(); } @@ -385,7 +382,7 @@ void pci_bridge_initfn(PCIDevice *dev, const char *typename) sec_bus->address_space_io = &br->address_space_io; memory_region_init(&br->address_space_io, OBJECT(br), "pci_bridge_io", 4 * GiB); - br->windows = pci_bridge_region_init(br); + pci_bridge_region_init(br); QLIST_INIT(&sec_bus->child); QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling); } @@ -396,8 +393,8 @@ void pci_bridge_exitfn(PCIDevice *pci_dev) PCIBridge *s = PCI_BRIDGE(pci_dev); assert(QLIST_EMPTY(&s->sec_bus.child)); QLIST_REMOVE(&s->sec_bus, sibling); - pci_bridge_region_del(s, s->windows); - pci_bridge_region_cleanup(s, s->windows); + pci_bridge_region_del(s, &s->windows); + pci_bridge_region_cleanup(s, &s->windows); /* object_unparent() is called automatically during device deletion */ } diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c index 103667c368..374d593ead 100644 --- a/hw/pci/pcie_aer.c +++ b/hw/pci/pcie_aer.c @@ -112,10 +112,13 @@ int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, uint16_t offset, pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS, PCI_ERR_UNC_SUPPORTED); - pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK, - PCI_ERR_UNC_MASK_DEFAULT); - pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK, - PCI_ERR_UNC_SUPPORTED); + + if (dev->cap_present & QEMU_PCIE_ERR_UNC_MASK) { + pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK, + PCI_ERR_UNC_MASK_DEFAULT); + pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK, + PCI_ERR_UNC_SUPPORTED); + } pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER, PCI_ERR_UNC_SEVERITY_DEFAULT); diff --git a/hw/pcmcia/meson.build b/hw/pcmcia/meson.build index 51f2512b8e..04e29c109c 100644 --- a/hw/pcmcia/meson.build +++ b/hw/pcmcia/meson.build @@ -1,2 +1,2 @@ -softmmu_ss.add(when: 'CONFIG_PCMCIA', if_true: files('pcmcia.c')) -softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx.c')) +system_ss.add(when: 'CONFIG_PCMCIA', if_true: files('pcmcia.c')) +system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx.c')) diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig index c898021b5f..5dfbf47ef5 100644 --- a/hw/ppc/Kconfig +++ b/hw/ppc/Kconfig @@ -3,7 +3,7 @@ config PSERIES imply PCI_DEVICES imply TEST_DEVICES imply VIRTIO_VGA - imply NVDIMM + select NVDIMM select DIMM select PCI select SPAPR_VSCSI @@ -115,6 +115,7 @@ config MAC_NEWWORLD select MAC_PMU select UNIN_PCI select FW_CFG_PPC + select USB_OHCI_PCI config E500 bool diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index 117c9c08ed..b6eb599751 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -898,6 +898,7 @@ void ppce500_init(MachineState *machine) MemoryRegion *address_space_mem = get_system_memory(); PPCE500MachineState *pms = PPCE500_MACHINE(machine); const PPCE500MachineClass *pmc = PPCE500_MACHINE_GET_CLASS(machine); + MachineClass *mc = MACHINE_CLASS(pmc); PCIBus *pci_bus; CPUPPCState *env = NULL; uint64_t loadaddr; @@ -1073,7 +1074,7 @@ void ppce500_init(MachineState *machine) if (pci_bus) { /* Register network interfaces. 
*/ for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, "virtio-net-pci", NULL); + pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); } } diff --git a/hw/ppc/e500plat.c b/hw/ppc/e500plat.c index 3032bd3f6d..7aa2f2107a 100644 --- a/hw/ppc/e500plat.c +++ b/hw/ppc/e500plat.c @@ -99,6 +99,7 @@ static void e500plat_machine_class_init(ObjectClass *oc, void *data) mc->max_cpus = 32; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("e500v2_v30"); mc->default_ram_id = "mpc8544ds.ram"; + mc->default_nic = "virtio-net-pci"; machine_class_allow_dynamic_sysbus_dev(mc, TYPE_ETSEC_COMMON); } diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index 460c14b5e3..535710314a 100644 --- a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -132,6 +132,7 @@ static void ppc_core99_reset(void *opaque) static void ppc_core99_init(MachineState *machine) { Core99MachineState *core99_machine = CORE99_MACHINE(machine); + MachineClass *mc = MACHINE_GET_CLASS(machine); PowerPCCPU *cpu = NULL; CPUPPCState *env = NULL; char *filename; @@ -444,7 +445,7 @@ static void ppc_core99_init(MachineState *machine) } for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, "sungem", NULL); + pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); } /* The NewWorld NVRAM is not located in the MacIO device */ @@ -577,6 +578,7 @@ static void core99_machine_class_init(ObjectClass *oc, void *data) mc->max_cpus = 1; mc->default_boot_order = "cd"; mc->default_display = "std"; + mc->default_nic = "sungem"; mc->kvm_type = core99_kvm_type; #ifdef TARGET_PPC64 mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("970fx_v3.1"); diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c index 2e4cc3fe0b..510ff0eaaf 100644 --- a/hw/ppc/mac_oldworld.c +++ b/hw/ppc/mac_oldworld.c @@ -87,6 +87,7 @@ static void ppc_heathrow_reset(void *opaque) static void ppc_heathrow_init(MachineState *machine) { const char *bios_name = machine->firmware ?: PROM_FILENAME; + MachineClass *mc = MACHINE_GET_CLASS(machine); PowerPCCPU *cpu = NULL; CPUPPCState *env = NULL; char *filename; @@ -276,7 +277,7 @@ static void ppc_heathrow_init(MachineState *machine) pci_vga_init(pci_bus); for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, "ne2k_pci", NULL); + pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); } /* MacIO IDE */ @@ -424,6 +425,7 @@ static void heathrow_class_init(ObjectClass *oc, void *data) mc->kvm_type = heathrow_kvm_type; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("750_v3.1"); mc->default_display = "std"; + mc->default_nic = "ne2k_pci"; mc->ignore_boot_device_suffixes = true; mc->default_ram_id = "ppc_heathrow.ram"; fwc->get_dev_path = heathrow_fw_dev_path; diff --git a/hw/ppc/mpc8544ds.c b/hw/ppc/mpc8544ds.c index 7dd5219736..b7130903d6 100644 --- a/hw/ppc/mpc8544ds.c +++ b/hw/ppc/mpc8544ds.c @@ -61,6 +61,7 @@ static void mpc8544ds_machine_class_init(ObjectClass *oc, void *data) mc->max_cpus = 15; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("e500v2_v30"); mc->default_ram_id = "mpc8544ds.ram"; + mc->default_nic = "virtio-net-pci"; } #define TYPE_MPC8544DS_MACHINE MACHINE_TYPE_NAME("mpc8544ds") diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c index f1650be5ee..af5489de26 100644 --- a/hw/ppc/pegasos2.c +++ b/hw/ppc/pegasos2.c @@ -524,7 +524,7 @@ static void pegasos2_machine_class_init(ObjectClass *oc, void *data) mc->block_default_type = IF_IDE; mc->default_boot_order = "cd"; mc->default_display = "std"; - mc->default_cpu_type = 
POWERPC_CPU_TYPE_NAME("7400_v2.9"); + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("7457_v1.2"); mc->default_ram_id = "pegasos2.ram"; mc->default_ram_size = 512 * MiB; diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 11cb48af2f..590fc64b32 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -2171,7 +2171,7 @@ static void pnv_machine_power9_class_init(ObjectClass *oc, void *data) }; mc->desc = "IBM PowerNV (Non-Virtualized) POWER9"; - mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0"); + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.2"); compat_props_add(mc->compat_props, phb_compat, G_N_ELEMENTS(phb_compat)); xfc->match_nvt = pnv_match_nvt; diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c index 410f31bdf8..0bc3ad41c8 100644 --- a/hw/ppc/pnv_core.c +++ b/hw/ppc/pnv_core.c @@ -348,7 +348,7 @@ static const TypeInfo pnv_core_infos[] = { DEFINE_PNV_CORE_TYPE(power8, "power8e_v2.1"), DEFINE_PNV_CORE_TYPE(power8, "power8_v2.0"), DEFINE_PNV_CORE_TYPE(power8, "power8nvl_v1.0"), - DEFINE_PNV_CORE_TYPE(power9, "power9_v2.0"), + DEFINE_PNV_CORE_TYPE(power9, "power9_v2.2"), DEFINE_PNV_CORE_TYPE(power10, "power10_v2.0"), }; diff --git a/hw/ppc/pnv_lpc.c b/hw/ppc/pnv_lpc.c index 01f44c19eb..d692858bee 100644 --- a/hw/ppc/pnv_lpc.c +++ b/hw/ppc/pnv_lpc.c @@ -734,14 +734,17 @@ static void pnv_lpc_realize(DeviceState *dev, Error **errp) /* Create MMIO regions for LPC HC and OPB registers */ memory_region_init_io(&lpc->opb_master_regs, OBJECT(dev), &opb_master_ops, lpc, "lpc-opb-master", LPC_OPB_REGS_OPB_SIZE); + lpc->opb_master_regs.disable_reentrancy_guard = true; memory_region_add_subregion(&lpc->opb_mr, LPC_OPB_REGS_OPB_ADDR, &lpc->opb_master_regs); memory_region_init_io(&lpc->lpc_hc_regs, OBJECT(dev), &lpc_hc_ops, lpc, "lpc-hc", LPC_HC_REGS_OPB_SIZE); + /* xscom writes to lpc-hc. 
As such mark lpc-hc re-entrancy safe */ + lpc->lpc_hc_regs.disable_reentrancy_guard = true; memory_region_add_subregion(&lpc->opb_mr, LPC_HC_REGS_OPB_ADDR, &lpc->lpc_hc_regs); - qdev_init_gpio_out(DEVICE(dev), &lpc->psi_irq, 1); + qdev_init_gpio_out(dev, &lpc->psi_irq, 1); } static void pnv_lpc_class_init(ObjectClass *klass, void *data) diff --git a/hw/ppc/pnv_occ.c b/hw/ppc/pnv_occ.c index 9fa6d91d31..48123ceae1 100644 --- a/hw/ppc/pnv_occ.c +++ b/hw/ppc/pnv_occ.c @@ -278,7 +278,7 @@ static void pnv_occ_realize(DeviceState *dev, Error **errp) occ, "occ-common-area", PNV_OCC_SENSOR_DATA_BLOCK_SIZE); - qdev_init_gpio_out(DEVICE(dev), &occ->psi_irq, 1); + qdev_init_gpio_out(dev, &occ->psi_irq, 1); } static void pnv_occ_class_init(ObjectClass *klass, void *data) diff --git a/hw/ppc/pnv_sbe.c b/hw/ppc/pnv_sbe.c index 1c7812a135..74cee4eea7 100644 --- a/hw/ppc/pnv_sbe.c +++ b/hw/ppc/pnv_sbe.c @@ -381,7 +381,7 @@ static void pnv_sbe_realize(DeviceState *dev, Error **errp) psc->xscom_mbox_ops, sbe, "xscom-sbe-mbox", psc->xscom_mbox_size); - qdev_init_gpio_out(DEVICE(dev), &sbe->psi_irq, 1); + qdev_init_gpio_out(dev, &sbe->psi_irq, 1); sbe->timer = timer_new_us(QEMU_CLOCK_VIRTUAL, sbe_timer, sbe); } diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index 4e816c68c7..1b1220c423 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -798,6 +798,8 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, int64_t signed_decr; /* Truncate value to decr_width and sign extend for simplicity */ + value = extract64(value, 0, nr_bits); + decr = extract64(decr, 0, nr_bits); signed_value = sextract64(value, 0, nr_bits); signed_decr = sextract64(decr, 0, nr_bits); @@ -809,11 +811,7 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, } /* - * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC - * interrupt. - * - * If we get a really small DEC value, we can assume that by the time we - * handled it we should inject an interrupt already. + * Going from 1 -> 0 or 0 -> -1 is the event to generate a DEC interrupt. * * On MSB level based DEC implementations the MSB always means the interrupt * is pending, so raise it on those. @@ -821,8 +819,7 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers * an edge interrupt, so raise it here too. */ - if ((value < 3) || - ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) || + if (((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) || ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0 && signed_decr >= 0)) { (*raise_excp)(cpu); diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c index 2880c81cb1..f969fa3c29 100644 --- a/hw/ppc/ppc440_bamboo.c +++ b/hw/ppc/ppc440_bamboo.c @@ -161,6 +161,7 @@ static void bamboo_init(MachineState *machine) { const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; + MachineClass *mc = MACHINE_GET_CLASS(machine); unsigned int pci_irq_nrs[4] = { 28, 27, 26, 25 }; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *isa = g_new(MemoryRegion, 1); @@ -246,7 +247,7 @@ static void bamboo_init(MachineState *machine) * There are no PCI NICs on the Bamboo board, but there are * PCI slots, so we can pick whatever default model we want. 
*/ - pci_nic_init_nofail(&nd_table[i], pcibus, "e1000", NULL); + pci_nic_init_nofail(&nd_table[i], pcibus, mc->default_nic, NULL); } } @@ -296,6 +297,7 @@ static void bamboo_machine_init(MachineClass *mc) mc->init = bamboo_init; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("440epb"); mc->default_ram_id = "ppc4xx.sdram"; + mc->default_nic = "e1000"; } DEFINE_MACHINE("bamboo", bamboo_machine_init) diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index d00280c0f8..33bf232f8b 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -229,6 +229,7 @@ static int prep_set_cmos_checksum(DeviceState *dev, void *opaque) static void ibm_40p_init(MachineState *machine) { const char *bios_name = machine->firmware ?: "openbios-ppc"; + MachineClass *mc = MACHINE_GET_CLASS(machine); CPUPPCState *env = NULL; uint16_t cmos_checksum; PowerPCCPU *cpu; @@ -270,9 +271,11 @@ static void ibm_40p_init(MachineState *machine) } /* PCI -> ISA bridge */ - i82378_dev = DEVICE(pci_create_simple(pci_bus, PCI_DEVFN(11, 0), "i82378")); + i82378_dev = DEVICE(pci_new(PCI_DEVFN(11, 0), "i82378")); qdev_connect_gpio_out(i82378_dev, 0, qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT)); + qdev_realize_and_unref(i82378_dev, BUS(pci_bus), &error_fatal); + sysbus_connect_irq(pcihost, 0, qdev_get_gpio_in(i82378_dev, 15)); isa_bus = ISA_BUS(qdev_get_child_bus(i82378_dev, "isa.0")); @@ -323,7 +326,7 @@ static void ibm_40p_init(MachineState *machine) pci_vga_init(pci_bus); for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, "pcnet", + pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, i == 0 ? "3" : NULL); } } @@ -427,6 +430,7 @@ static void ibm_40p_machine_init(MachineClass *mc) mc->default_boot_order = "c"; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("604"); mc->default_display = "std"; + mc->default_nic = "pcnet"; } DEFINE_MACHINE("40p", ibm_40p_machine_init) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index ddc9c7b1a1..dcb7f1c70a 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -2166,7 +2166,7 @@ static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr, break; } } - } while ((index < htabslots) && !qemu_file_rate_limit(f)); + } while ((index < htabslots) && !migration_rate_exceeded(f)); if (index >= htabslots) { assert(index == htabslots); @@ -2237,7 +2237,7 @@ static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr, assert(index == htabslots); index = 0; } - } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final)); + } while ((examined < htabslots) && (!migration_rate_exceeded(f) || final)); if (index >= htabslots) { assert(index == htabslots); @@ -4631,7 +4631,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) smc->dr_lmb_enabled = true; smc->update_dt_enabled = true; - mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0"); + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.2"); mc->has_hotpluggable_cpus = true; mc->nvdimm_supported = true; smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED; @@ -4673,6 +4673,13 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON; smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON; smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF; + + /* + * This cap specifies whether the AIL 3 mode for + * H_SET_RESOURCE is supported. The default is modified + * by default_caps_with_cpu(). 
+ */ + smc->default_caps.caps[SPAPR_CAP_AIL_MODE_3] = SPAPR_CAP_ON; spapr_caps_add_properties(smc); smc->irq = &spapr_irq_dual; smc->dr_phb_enabled = true; diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c index b4283055c1..3fd45a6dec 100644 --- a/hw/ppc/spapr_caps.c +++ b/hw/ppc/spapr_caps.c @@ -614,6 +614,33 @@ static void cap_rpt_invalidate_apply(SpaprMachineState *spapr, } } +static void cap_ail_mode_3_apply(SpaprMachineState *spapr, + uint8_t val, Error **errp) +{ + ERRP_GUARD(); + PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + + if (!val) { + return; + } + + if (tcg_enabled()) { + /* AIL-3 is only supported on POWER8 and above CPUs. */ + if (!(pcc->insns_flags2 & PPC2_ISA207S)) { + error_setg(errp, "TCG only supports cap-ail-mode-3 on POWER8 and later CPUs"); + error_append_hint(errp, "Try appending -machine cap-ail-mode-3=off\n"); + return; + } + } else if (kvm_enabled()) { + if (!kvmppc_supports_ail_3()) { + error_setg(errp, "KVM implementation does not support cap-ail-mode-3"); + error_append_hint(errp, "Try appending -machine cap-ail-mode-3=off\n"); + return; + } + } +} + SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = { [SPAPR_CAP_HTM] = { .name = "htm", @@ -731,6 +758,15 @@ SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = { .type = "bool", .apply = cap_rpt_invalidate_apply, }, + [SPAPR_CAP_AIL_MODE_3] = { + .name = "ail-mode-3", + .description = "Alternate Interrupt Location (AIL) mode 3 support", + .index = SPAPR_CAP_AIL_MODE_3, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_ail_mode_3_apply, + }, }; static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr, @@ -750,6 +786,7 @@ static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr, 0, spapr->max_compat_pvr)) { caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF; caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN; + caps.caps[SPAPR_CAP_AIL_MODE_3] = SPAPR_CAP_OFF; } if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_2_06_PLUS, diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 8a4861f45a..9b88dd549a 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -390,6 +390,7 @@ static const TypeInfo spapr_cpu_core_type_infos[] = { DEFINE_SPAPR_CPU_CORE_TYPE("power8nvl_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.2"), DEFINE_SPAPR_CPU_CORE_TYPE("power10_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power10_v2.0"), #ifdef CONFIG_KVM diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index ec4def62f8..b904755575 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -817,30 +817,32 @@ static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu, } static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu, + SpaprMachineState *spapr, target_ulong mflags, target_ulong value1, target_ulong value2) { - PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); - - if (!(pcc->insns_flags2 & PPC2_ISA207S)) { - return H_P2; - } if (value1) { return H_P3; } + if (value2) { return H_P4; } - if (mflags == 1) { - /* AIL=1 is reserved in POWER8/POWER9/POWER10 */ + /* + * AIL-1 is not architected, and AIL-2 is not supported by QEMU spapr. + * It is supported for faithful emulation of bare metal systems, but for + * compatibility concerns we leave it out of the pseries machine. 
+ */ + if (mflags != 0 && mflags != 3) { return H_UNSUPPORTED_FLAG; } - if (mflags == 2 && (pcc->insns_flags2 & PPC2_ISA310)) { - /* AIL=2 is reserved in POWER10 (ISA v3.1) */ - return H_UNSUPPORTED_FLAG; + if (mflags == 3) { + if (!spapr_get_cap(spapr, SPAPR_CAP_AIL_MODE_3)) { + return H_UNSUPPORTED_FLAG; + } } spapr_set_all_lpcrs(mflags << LPCR_AIL_SHIFT, LPCR_AIL); @@ -859,7 +861,7 @@ static target_ulong h_set_mode(PowerPCCPU *cpu, SpaprMachineState *spapr, ret = h_set_mode_resource_le(cpu, spapr, args[0], args[2], args[3]); break; case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: - ret = h_set_mode_resource_addr_trans_mode(cpu, args[0], + ret = h_set_mode_resource_addr_trans_mode(cpu, spapr, args[0], args[2], args[3]); break; } @@ -1566,8 +1568,6 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu, struct kvmppc_hv_guest_state hv_state; struct kvmppc_pt_regs *regs; hwaddr len; - uint64_t cr; - int i; if (spapr->nested_ptcr == 0) { return H_NOT_AVAILABLE; @@ -1616,12 +1616,7 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu, env->lr = regs->link; env->ctr = regs->ctr; cpu_write_xer(env, regs->xer); - - cr = regs->ccr; - for (i = 7; i >= 0; i--) { - env->crf[i] = cr & 15; - cr >>= 4; - } + ppc_set_cr(env, regs->ccr); env->msr = regs->msr; env->nip = regs->nip; @@ -1698,8 +1693,6 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp) struct kvmppc_hv_guest_state *hvstate; struct kvmppc_pt_regs *regs; hwaddr len; - uint64_t cr; - int i; assert(spapr_cpu->in_nested); @@ -1757,12 +1750,7 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp) regs->link = env->lr; regs->ctr = env->ctr; regs->xer = cpu_read_xer(env); - - cr = 0; - for (i = 0; i < 8; i++) { - cr |= (env->crf[i] & 15) << (4 * (7 - i)); - } - regs->ccr = cr; + regs->ccr = ppc_get_cr(env); if (excp == POWERPC_EXCP_MCHECK || excp == POWERPC_EXCP_RESET || diff --git a/hw/ppc/spapr_nvdimm.c b/hw/ppc/spapr_nvdimm.c index 04a64cada3..a8688243a6 100644 --- a/hw/ppc/spapr_nvdimm.c +++ b/hw/ppc/spapr_nvdimm.c @@ -496,7 +496,6 @@ static int spapr_nvdimm_flush_post_load(void *opaque, int version_id) { SpaprNVDIMMDevice *s_nvdimm = (SpaprNVDIMMDevice *)opaque; SpaprNVDIMMDeviceFlushState *state; - ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context()); HostMemoryBackend *backend = MEMORY_BACKEND(PC_DIMM(s_nvdimm)->hostmem); bool is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL); bool pmem_override = object_property_get_bool(OBJECT(s_nvdimm), @@ -517,7 +516,7 @@ static int spapr_nvdimm_flush_post_load(void *opaque, int version_id) } QLIST_FOREACH(state, &s_nvdimm->pending_nvdimm_flush_states, node) { - thread_pool_submit_aio(pool, flush_worker_cb, state, + thread_pool_submit_aio(flush_worker_cb, state, spapr_nvdimm_flush_completion_cb, state); } @@ -664,7 +663,6 @@ static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr, PCDIMMDevice *dimm; HostMemoryBackend *backend = NULL; SpaprNVDIMMDeviceFlushState *state; - ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context()); int fd; if (!drc || !drc->dev || @@ -699,7 +697,7 @@ static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr, state->drcidx = drc_index; - thread_pool_submit_aio(pool, flush_worker_cb, state, + thread_pool_submit_aio(flush_worker_cb, state, spapr_nvdimm_flush_completion_cb, state); continue_token = state->continue_token; diff --git a/hw/rdma/Kconfig b/hw/rdma/Kconfig index 8e2211288f..840320bdc0 100644 --- a/hw/rdma/Kconfig +++ b/hw/rdma/Kconfig @@ -1,3 +1,3 @@ config VMW_PVRDMA default y if PCI_DEVICES - depends on 
PVRDMA && PCI && MSI_NONBROKEN + depends on PVRDMA && MSI_NONBROKEN && VMXNET3_PCI diff --git a/hw/rdma/meson.build b/hw/rdma/meson.build index 7325f40c32..363c9b8c83 100644 --- a/hw/rdma/meson.build +++ b/hw/rdma/meson.build @@ -1,10 +1,12 @@ -specific_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files( +system_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files( 'rdma.c', 'rdma_backend.c', - 'rdma_rm.c', 'rdma_utils.c', + 'vmw/pvrdma_qp_ops.c', +)) +specific_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files( + 'rdma_rm.c', 'vmw/pvrdma_cmd.c', 'vmw/pvrdma_dev_ring.c', 'vmw/pvrdma_main.c', - 'vmw/pvrdma_qp_ops.c', )) diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c index cfd85de3e6..038d564433 100644 --- a/hw/rdma/rdma_rm.c +++ b/hw/rdma/rdma_rm.c @@ -23,10 +23,6 @@ #include "rdma_backend.h" #include "rdma_rm.h" -/* Page directory and page tables */ -#define PG_DIR_SZ { TARGET_PAGE_SIZE / sizeof(__u64) } -#define PG_TBL_SZ { TARGET_PAGE_SIZE / sizeof(__u64) } - void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf) { g_string_append_printf(buf, "\ttx : %" PRId64 "\n", diff --git a/hw/remote/meson.build b/hw/remote/meson.build index ab25c04906..a1e8708c73 100644 --- a/hw/remote/meson.build +++ b/hw/remote/meson.build @@ -14,4 +14,4 @@ remote_ss.add(when: 'CONFIG_VFIO_USER_SERVER', if_true: libvfio_user_dep) specific_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('memory.c')) specific_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('proxy-memory-listener.c')) -softmmu_ss.add_all(when: 'CONFIG_MULTIPROCESS', if_true: remote_ss) +system_ss.add_all(when: 'CONFIG_MULTIPROCESS', if_true: remote_ss) diff --git a/hw/remote/trace-events b/hw/remote/trace-events index c167b3c7a5..0d1b7d56a5 100644 --- a/hw/remote/trace-events +++ b/hw/remote/trace-events @@ -5,8 +5,8 @@ mpqemu_recv_io_error(int cmd, int size, int nfds) "failed to receive %d size %d, # vfio-user-obj.c vfu_prop(const char *prop, const char *val) "vfu: setting %s as %s" -vfu_cfg_read(uint32_t offset, uint32_t val) "vfu: cfg: 0x%u -> 0x%x" -vfu_cfg_write(uint32_t offset, uint32_t val) "vfu: cfg: 0x%u <- 0x%x" +vfu_cfg_read(uint32_t offset, uint32_t val) "vfu: cfg: 0x%x -> 0x%x" +vfu_cfg_write(uint32_t offset, uint32_t val) "vfu: cfg: 0x%x <- 0x%x" vfu_dma_register(uint64_t gpa, size_t len) "vfu: registering GPA 0x%"PRIx64", %zu bytes" vfu_dma_unregister(uint64_t gpa) "vfu: unregistering GPA 0x%"PRIx64"" vfu_bar_register(int i, uint64_t addr, uint64_t size) "vfu: BAR %d: addr 0x%"PRIx64" size 0x%"PRIx64"" diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c index 88ffafc73e..8b10c32a3c 100644 --- a/hw/remote/vfio-user-obj.c +++ b/hw/remote/vfio-user-obj.c @@ -30,6 +30,11 @@ * * notes - x-vfio-user-server could block IO and monitor during the * initialization phase. + * + * When x-remote machine has the auto-shutdown property + * enabled (default), x-vfio-user-server terminates after the last + * client disconnects. Otherwise, it will continue running until + * explicitly killed. */ #include "qemu/osdep.h" @@ -61,9 +66,12 @@ OBJECT_DECLARE_TYPE(VfuObject, VfuObjectClass, VFU_OBJECT) /** - * VFU_OBJECT_ERROR - reports an error message. If auto_shutdown - * is set, it aborts the machine on error. Otherwise, it logs an - * error message without aborting. + * VFU_OBJECT_ERROR - reports an error message. + * + * If auto_shutdown is set, it aborts the machine on error. Otherwise, + * it logs an error message without aborting. 
auto_shutdown is disabled + * when the server serves clients from multiple VMs; as such, an error + * from one VM shouldn't be able to disrupt other VM's services. */ #define VFU_OBJECT_ERROR(o, fmt, ...) \ { \ diff --git a/hw/riscv/numa.c b/hw/riscv/numa.c index 4720102561..e0414d5b1b 100644 --- a/hw/riscv/numa.c +++ b/hw/riscv/numa.c @@ -207,6 +207,12 @@ int64_t riscv_numa_get_default_cpu_node_id(const MachineState *ms, int idx) { int64_t nidx = 0; + if (ms->numa_state->num_nodes > ms->smp.cpus) { + error_report("Number of NUMA nodes (%d)" + " cannot exceed the number of available CPUs (%d).", + ms->numa_state->num_nodes, ms->smp.max_cpus); + exit(EXIT_FAILURE); + } if (ms->numa_state->num_nodes) { nidx = idx / (ms->smp.cpus / ms->numa_state->num_nodes); if (ms->numa_state->num_nodes <= nidx) { diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c index bc678766e7..6a2fcc4ade 100644 --- a/hw/riscv/opentitan.c +++ b/hw/riscv/opentitan.c @@ -75,11 +75,11 @@ static const MemMapEntry ibex_memmap[] = { [IBEX_DEV_FLASH_VIRTUAL] = { 0x80000000, 0x80000 }, }; -static void opentitan_board_init(MachineState *machine) +static void opentitan_machine_init(MachineState *machine) { MachineClass *mc = MACHINE_GET_CLASS(machine); + OpenTitanState *s = OPENTITAN_MACHINE(machine); const MemMapEntry *memmap = ibex_memmap; - OpenTitanState *s = g_new0(OpenTitanState, 1); MemoryRegion *sys_mem = get_system_memory(); if (machine->ram_size != mc->default_ram_size) { @@ -108,18 +108,18 @@ static void opentitan_board_init(MachineState *machine) } } -static void opentitan_machine_init(MachineClass *mc) +static void opentitan_machine_class_init(ObjectClass *oc, void *data) { + MachineClass *mc = MACHINE_CLASS(oc); + mc->desc = "RISC-V Board compatible with OpenTitan"; - mc->init = opentitan_board_init; + mc->init = opentitan_machine_init; mc->max_cpus = 1; mc->default_cpu_type = TYPE_RISCV_CPU_IBEX; mc->default_ram_id = "riscv.lowrisc.ibex.ram"; mc->default_ram_size = ibex_memmap[IBEX_DEV_RAM].size; } -DEFINE_MACHINE("opentitan", opentitan_machine_init) - static void lowrisc_ibex_soc_init(Object *obj) { LowRISCIbexSoCState *s = RISCV_IBEX_SOC(obj); @@ -320,17 +320,19 @@ static void lowrisc_ibex_soc_class_init(ObjectClass *oc, void *data) dc->user_creatable = false; } -static const TypeInfo lowrisc_ibex_soc_type_info = { - .name = TYPE_RISCV_IBEX_SOC, - .parent = TYPE_DEVICE, - .instance_size = sizeof(LowRISCIbexSoCState), - .instance_init = lowrisc_ibex_soc_init, - .class_init = lowrisc_ibex_soc_class_init, +static const TypeInfo open_titan_types[] = { + { + .name = TYPE_RISCV_IBEX_SOC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(LowRISCIbexSoCState), + .instance_init = lowrisc_ibex_soc_init, + .class_init = lowrisc_ibex_soc_class_init, + }, { + .name = TYPE_OPENTITAN_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(OpenTitanState), + .class_init = opentitan_machine_class_init, + } }; -static void lowrisc_ibex_soc_register_types(void) -{ - type_register_static(&lowrisc_ibex_soc_type_info); -} - -type_init(lowrisc_ibex_soc_register_types) +DEFINE_TYPES(open_titan_types) diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index a584d5b3a2..2c5546560a 100644 --- a/hw/riscv/spike.c +++ b/hw/riscv/spike.c @@ -332,6 +332,11 @@ static void spike_board_init(MachineState *machine) htif_custom_base); } +static void spike_set_signature(Object *obj, const char *val, Error **errp) +{ + sig_file = g_strdup(val); +} + static void spike_machine_instance_init(Object *obj) { } @@ -350,6 +355,14 @@ static void 
spike_machine_class_init(ObjectClass *oc, void *data) mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id; mc->numa_mem_supported = true; mc->default_ram_id = "riscv.spike.ram"; + object_class_property_add_str(oc, "signature", NULL, spike_set_signature); + object_class_property_set_description(oc, "signature", + "File to write ACT test signature"); + object_class_property_add_uint8_ptr(oc, "signature-granularity", + &line_size, OBJ_PROP_FLAG_WRITE); + object_class_property_set_description(oc, "signature-granularity", + "Size of each line in ACT signature " + "file"); } static const TypeInfo spike_machine_typeinfo = { diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index 4e3efbee16..95708d890e 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -1245,7 +1245,8 @@ static void virt_machine_done(Notifier *notifier, void *data) target_ulong firmware_end_addr, kernel_start_addr; const char *firmware_name = riscv_default_firmware_name(&s->soc[0]); uint32_t fdt_load_addr; - uint64_t kernel_entry; + uint64_t kernel_entry = 0; + BlockBackend *pflash_blk0; /* * Only direct boot kernel is currently supported for KVM VM, @@ -1266,42 +1267,32 @@ static void virt_machine_done(Notifier *notifier, void *data) firmware_end_addr = riscv_find_and_load_firmware(machine, firmware_name, start_addr, NULL); - if (drive_get(IF_PFLASH, 0, 1)) { - /* - * S-mode FW like EDK2 will be kept in second plash (unit 1). - * When both kernel, initrd and pflash options are provided in the - * command line, the kernel and initrd will be copied to the fw_cfg - * table and opensbi will jump to the flash address which is the - * entry point of S-mode FW. It is the job of the S-mode FW to load - * the kernel and initrd using fw_cfg table. - * - * If only pflash is given but not -kernel, then it is the job of - * of the S-mode firmware to locate and load the kernel. - * In either case, the next_addr for opensbi will be the flash address. - */ - riscv_setup_firmware_boot(machine); - kernel_entry = virt_memmap[VIRT_FLASH].base + - virt_memmap[VIRT_FLASH].size / 2; - } else if (machine->kernel_filename) { + pflash_blk0 = pflash_cfi01_get_blk(s->flash[0]); + if (pflash_blk0) { + if (machine->firmware && !strcmp(machine->firmware, "none") && + !kvm_enabled()) { + /* + * Pflash was supplied but bios is none and not KVM guest, + * let's overwrite the address we jump to after reset to + * the base of the flash. + */ + start_addr = virt_memmap[VIRT_FLASH].base; + } else { + /* + * Pflash was supplied but either KVM guest or bios is not none. + * In this case, base of the flash would contain S-mode payload. + */ + riscv_setup_firmware_boot(machine); + kernel_entry = virt_memmap[VIRT_FLASH].base; + } + } + + if (machine->kernel_filename && !kernel_entry) { kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0], firmware_end_addr); kernel_entry = riscv_load_kernel(machine, &s->soc[0], kernel_start_addr, true, NULL); - } else { - /* - * If dynamic firmware is used, it doesn't know where is the next mode - * if kernel argument is not set. - */ - kernel_entry = 0; - } - - if (drive_get(IF_PFLASH, 0, 0)) { - /* - * Pflash was supplied, let's overwrite the address we jump to after - * reset to the base of the flash. 
- */ - start_addr = virt_memmap[VIRT_FLASH].base; } fdt_load_addr = riscv_compute_fdt_addr(memmap[VIRT_DRAM].base, @@ -1488,7 +1479,7 @@ static void virt_machine_init(MachineState *machine) for (i = 0; i < VIRTIO_COUNT; i++) { sysbus_create_simple("virtio-mmio", memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size, - qdev_get_gpio_in(DEVICE(virtio_irqchip), VIRTIO_IRQ + i)); + qdev_get_gpio_in(virtio_irqchip, VIRTIO_IRQ + i)); } gpex_pcie_init(system_memory, @@ -1499,18 +1490,16 @@ static void virt_machine_init(MachineState *machine) virt_high_pcie_memmap.base, virt_high_pcie_memmap.size, memmap[VIRT_PCIE_PIO].base, - DEVICE(pcie_irqchip)); + pcie_irqchip); - create_platform_bus(s, DEVICE(mmio_irqchip)); + create_platform_bus(s, mmio_irqchip); serial_mm_init(system_memory, memmap[VIRT_UART0].base, - 0, qdev_get_gpio_in(DEVICE(mmio_irqchip), UART0_IRQ), 399193, + 0, qdev_get_gpio_in(mmio_irqchip, UART0_IRQ), 399193, serial_hd(0), DEVICE_LITTLE_ENDIAN); sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base, - qdev_get_gpio_in(DEVICE(mmio_irqchip), RTC_IRQ)); - - virt_flash_create(s); + qdev_get_gpio_in(mmio_irqchip, RTC_IRQ)); for (i = 0; i < ARRAY_SIZE(s->flash); i++) { /* Map legacy -drive if=pflash to machine properties */ @@ -1538,6 +1527,8 @@ static void virt_machine_instance_init(Object *obj) { RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + virt_flash_create(s); + s->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); s->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); s->acpi = ON_OFF_AUTO_AUTO; diff --git a/hw/rtc/meson.build b/hw/rtc/meson.build index 34a4d316fa..3ea2affe0b 100644 --- a/hw/rtc/meson.build +++ b/hw/rtc/meson.build @@ -1,16 +1,16 @@ -softmmu_ss.add(when: 'CONFIG_DS1338', if_true: files('ds1338.c')) -softmmu_ss.add(when: 'CONFIG_M41T80', if_true: files('m41t80.c')) -softmmu_ss.add(when: 'CONFIG_M48T59', if_true: files('m48t59.c')) -softmmu_ss.add(when: 'CONFIG_PL031', if_true: files('pl031.c')) -softmmu_ss.add(when: 'CONFIG_TWL92230', if_true: files('twl92230.c')) -softmmu_ss.add(when: ['CONFIG_ISA_BUS', 'CONFIG_M48T59'], if_true: files('m48t59-isa.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-rtc.c')) +system_ss.add(when: 'CONFIG_DS1338', if_true: files('ds1338.c')) +system_ss.add(when: 'CONFIG_M41T80', if_true: files('m41t80.c')) +system_ss.add(when: 'CONFIG_M48T59', if_true: files('m48t59.c')) +system_ss.add(when: 'CONFIG_PL031', if_true: files('pl031.c')) +system_ss.add(when: 'CONFIG_TWL92230', if_true: files('twl92230.c')) +system_ss.add(when: ['CONFIG_ISA_BUS', 'CONFIG_M48T59'], if_true: files('m48t59-isa.c')) +system_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-rtc.c')) -softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_rtc.c')) -softmmu_ss.add(when: 'CONFIG_SUN4V_RTC', if_true: files('sun4v-rtc.c')) -softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_rtc.c')) -softmmu_ss.add(when: 'CONFIG_GOLDFISH_RTC', if_true: files('goldfish_rtc.c')) -softmmu_ss.add(when: 'CONFIG_LS7A_RTC', if_true: files('ls7a_rtc.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-rtc.c')) -softmmu_ss.add(when: 'CONFIG_MC146818RTC', if_true: files('mc146818rtc.c')) +system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_rtc.c')) +system_ss.add(when: 'CONFIG_SUN4V_RTC', if_true: files('sun4v-rtc.c')) +system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_rtc.c')) +system_ss.add(when: 'CONFIG_GOLDFISH_RTC', if_true: files('goldfish_rtc.c')) +system_ss.add(when: 
'CONFIG_LS7A_RTC', if_true: files('ls7a_rtc.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-rtc.c')) +system_ss.add(when: 'CONFIG_MC146818RTC', if_true: files('mc146818rtc.c')) diff --git a/hw/rx/rx62n.c b/hw/rx/rx62n.c index fa5add9f9d..3e887a0fc7 100644 --- a/hw/rx/rx62n.c +++ b/hw/rx/rx62n.c @@ -154,7 +154,7 @@ static void register_icu(RX62NState *s) sysbus_connect_irq(icu, 0, qdev_get_gpio_in(DEVICE(&s->cpu), RX_CPU_IRQ)); sysbus_connect_irq(icu, 1, qdev_get_gpio_in(DEVICE(&s->cpu), RX_CPU_FIR)); sysbus_connect_irq(icu, 2, s->irq[SWI]); - sysbus_mmio_map(SYS_BUS_DEVICE(icu), 0, RX62N_ICU_BASE); + sysbus_mmio_map(icu, 0, RX62N_ICU_BASE); } static void register_tmr(RX62NState *s, int unit) diff --git a/hw/s390x/pv.c b/hw/s390x/pv.c index 49ea38236c..b63f3784c6 100644 --- a/hw/s390x/pv.c +++ b/hw/s390x/pv.c @@ -13,6 +13,7 @@ #include <linux/kvm.h> +#include "qemu/units.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "sysemu/kvm.h" @@ -115,7 +116,7 @@ static void *s390_pv_do_unprot_async_fn(void *p) return NULL; } -bool s390_pv_vm_try_disable_async(void) +bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) { /* * t is only needed to create the thread; once qemu_thread_create @@ -123,7 +124,12 @@ bool s390_pv_vm_try_disable_async(void) */ QemuThread t; - if (!kvm_check_extension(kvm_state, KVM_CAP_S390_PROTECTED_ASYNC_DISABLE)) { + /* + * If the feature is not present or if the VM is not larger than 2 GiB, + * KVM_PV_ASYNC_CLEANUP_PREPARE fill fail; no point in attempting it. + */ + if ((MACHINE(ms)->maxram_size <= 2 * GiB) || + !kvm_check_extension(kvm_state, KVM_CAP_S390_PROTECTED_ASYNC_DISABLE)) { return false; } if (s390_pv_cmd(KVM_PV_ASYNC_CLEANUP_PREPARE, NULL) != 0) { diff --git a/hw/s390x/s390-stattrib.c b/hw/s390x/s390-stattrib.c index aed919ad7d..220e845d12 100644 --- a/hw/s390x/s390-stattrib.c +++ b/hw/s390x/s390-stattrib.c @@ -209,7 +209,7 @@ static int cmma_save(QEMUFile *f, void *opaque, int final) return -ENOMEM; } - while (final ? 1 : qemu_file_rate_limit(f) == 0) { + while (final ? 
1 : migration_rate_exceeded(f) == 0) { reallen = sac->get_stattr(sas, &start_gfn, buflen, buf); if (reallen < 0) { g_free(buf); diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index e6f2c62625..2dece8eab8 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -245,6 +245,7 @@ static void s390_create_sclpconsole(const char *type, Chardev *chardev) static void ccw_init(MachineState *machine) { + MachineClass *mc = MACHINE_GET_CLASS(machine); int ret; VirtualCssBus *css_bus; DeviceState *dev; @@ -292,7 +293,7 @@ static void ccw_init(MachineState *machine) } /* Create VirtIO network adapters */ - s390_create_virtio_net(BUS(css_bus), "virtio-net-ccw"); + s390_create_virtio_net(BUS(css_bus), mc->default_nic); /* init consoles */ if (serial_hd(0)) { @@ -330,7 +331,7 @@ static inline void s390_do_cpu_ipl(CPUState *cs, run_on_cpu_data arg) static void s390_machine_unprotect(S390CcwMachineState *ms) { - if (!s390_pv_vm_try_disable_async()) { + if (!s390_pv_vm_try_disable_async(ms)) { s390_pv_vm_disable(); } ms->pv = false; @@ -746,6 +747,7 @@ static void ccw_machine_class_init(ObjectClass *oc, void *data) hc->unplug_request = s390_machine_device_unplug_request; nc->nmi_monitor_handler = s390_nmi; mc->default_ram_id = "s390.ram"; + mc->default_nic = "virtio-net-ccw"; object_class_property_add_bool(oc, "aes-key-wrap", machine_get_aes_key_wrap, diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index e33e5207ab..f44de1a8c1 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -237,6 +237,7 @@ static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info, return -EINVAL; } virtio_queue_set_num(vdev, index, num); + virtio_init_region_cache(vdev, index); } else if (virtio_queue_get_num(vdev, index) > num) { /* Fail if we don't have a big enough queue. 
*/ return -EINVAL; diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c index 2f7f11e70b..4e890db0e2 100644 --- a/hw/scsi/esp-pci.c +++ b/hw/scsi/esp-pci.c @@ -79,7 +79,7 @@ struct PCIESPState { static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val) { - ESPState *s = ESP(&pci->esp); + ESPState *s = &pci->esp; trace_esp_pci_dma_idle(val); esp_dma_enable(s, 0, 0); @@ -93,7 +93,7 @@ static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val) static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val) { - ESPState *s = ESP(&pci->esp); + ESPState *s = &pci->esp; trace_esp_pci_dma_abort(val); if (s->current_req) { @@ -103,7 +103,7 @@ static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val) static void esp_pci_handle_start(PCIESPState *pci, uint32_t val) { - ESPState *s = ESP(&pci->esp); + ESPState *s = &pci->esp; trace_esp_pci_dma_start(val); @@ -161,7 +161,7 @@ static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val) static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr) { - ESPState *s = ESP(&pci->esp); + ESPState *s = &pci->esp; uint32_t val; val = pci->dma_regs[saddr]; @@ -183,7 +183,7 @@ static void esp_pci_io_write(void *opaque, hwaddr addr, uint64_t val, unsigned int size) { PCIESPState *pci = opaque; - ESPState *s = ESP(&pci->esp); + ESPState *s = &pci->esp; if (size < 4 || addr & 3) { /* need to upgrade request: we only support 4-bytes accesses */ @@ -228,7 +228,7 @@ static uint64_t esp_pci_io_read(void *opaque, hwaddr addr, unsigned int size) { PCIESPState *pci = opaque; - ESPState *s = ESP(&pci->esp); + ESPState *s = &pci->esp; uint32_t ret; if (addr < 0x40) { @@ -315,7 +315,7 @@ static const MemoryRegionOps esp_pci_io_ops = { static void esp_pci_hard_reset(DeviceState *dev) { PCIESPState *pci = PCI_ESP(dev); - ESPState *s = ESP(&pci->esp); + ESPState *s = &pci->esp; esp_hard_reset(s); pci->dma_regs[DMA_CMD] &= ~(DMA_CMD_DIR | DMA_CMD_INTE_D | DMA_CMD_INTE_P @@ -366,7 +366,7 @@ static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp) { PCIESPState *pci = PCI_ESP(dev); DeviceState *d = DEVICE(dev); - ESPState *s = ESP(&pci->esp); + ESPState *s = &pci->esp; uint8_t *pci_conf; if (!qdev_realize(DEVICE(s), NULL, errp)) { @@ -394,7 +394,7 @@ static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp) static void esp_pci_scsi_exit(PCIDevice *d) { PCIESPState *pci = PCI_ESP(d); - ESPState *s = ESP(&pci->esp); + ESPState *s = &pci->esp; qemu_free_irq(s->irq); } diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c index af93557a9a..f7d45b0b20 100644 --- a/hw/scsi/lsi53c895a.c +++ b/hw/scsi/lsi53c895a.c @@ -1134,15 +1134,24 @@ static void lsi_execute_script(LSIState *s) uint32_t addr, addr_high; int opcode; int insn_processed = 0; + static int reentrancy_level; + + reentrancy_level++; s->istat1 |= LSI_ISTAT1_SRUN; again: - if (++insn_processed > LSI_MAX_INSN) { - /* Some windows drivers make the device spin waiting for a memory - location to change. If we have been executed a lot of code then - assume this is the case and force an unexpected device disconnect. - This is apparently sufficient to beat the drivers into submission. - */ + /* + * Some windows drivers make the device spin waiting for a memory location + * to change. If we have executed more than LSI_MAX_INSN instructions then + * assume this is the case and force an unexpected device disconnect. This + * is apparently sufficient to beat the drivers into submission. 
+ * + * Another issue (CVE-2023-0330) can occur if the script is programmed to + * trigger itself again and again. Avoid this problem by stopping after + * being called multiple times in a reentrant way (8 is an arbitrary value + * which should be enough for all valid use cases). + */ + if (++insn_processed > LSI_MAX_INSN || reentrancy_level > 8) { if (!(s->sien0 & LSI_SIST0_UDC)) { qemu_log_mask(LOG_GUEST_ERROR, "lsi_scsi: inf. loop with UDC masked"); @@ -1596,6 +1605,8 @@ again: } } trace_lsi_execute_script_stop(); + + reentrancy_level--; } static uint8_t lsi_reg_readb(LSIState *s, int offset) @@ -2302,6 +2313,13 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp) memory_region_init_io(&s->io_io, OBJECT(s), &lsi_io_ops, s, "lsi-io", 256); + /* + * Since we use the address-space API to interact with ram_io, disable the + * re-entrancy guard. + */ + s->ram_io.disable_reentrancy_guard = true; + s->mmio_io.disable_reentrancy_guard = true; + address_space_init(&s->pci_io_as, pci_address_space_io(dev), "lsi-pci-io"); qdev_init_gpio_out(d, &s->ext_irq, 1); diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c index 9cbbb16121..32c70c9e99 100644 --- a/hw/scsi/megasas.c +++ b/hw/scsi/megasas.c @@ -42,6 +42,7 @@ #define MEGASAS_MAX_FRAMES 2048 /* Firmware limit at 65535 */ #define MEGASAS_DEFAULT_FRAMES 1000 /* Windows requires this */ #define MEGASAS_GEN2_DEFAULT_FRAMES 1008 /* Windows requires this */ +#define MEGASAS_MIN_SGE 64 #define MEGASAS_MAX_SGE 128 /* Firmware limit */ #define MEGASAS_DEFAULT_SGE 80 #define MEGASAS_MAX_SECTORS 0xFFFF /* No real limit */ @@ -2356,6 +2357,7 @@ static void megasas_scsi_realize(PCIDevice *dev, Error **errp) MegasasState *s = MEGASAS(dev); MegasasBaseClass *b = MEGASAS_GET_CLASS(s); uint8_t *pci_conf; + uint32_t sge; int i, bar_type; Error *err = NULL; int ret; @@ -2424,13 +2426,15 @@ static void megasas_scsi_realize(PCIDevice *dev, Error **errp) if (!s->hba_serial) { s->hba_serial = g_strdup(MEGASAS_HBA_SERIAL); } - if (s->fw_sge >= MEGASAS_MAX_SGE - MFI_PASS_FRAME_SIZE) { - s->fw_sge = MEGASAS_MAX_SGE - MFI_PASS_FRAME_SIZE; - } else if (s->fw_sge >= 128 - MFI_PASS_FRAME_SIZE) { - s->fw_sge = 128 - MFI_PASS_FRAME_SIZE; - } else { - s->fw_sge = 64 - MFI_PASS_FRAME_SIZE; + + sge = s->fw_sge + MFI_PASS_FRAME_SIZE; + if (sge < MEGASAS_MIN_SGE) { + sge = MEGASAS_MIN_SGE; + } else if (sge >= MEGASAS_MAX_SGE) { + sge = MEGASAS_MAX_SGE; } + s->fw_sge = sge - MFI_PASS_FRAME_SIZE; + if (s->fw_cmds > MEGASAS_MAX_FRAMES) { s->fw_cmds = MEGASAS_MAX_FRAMES; } diff --git a/hw/scsi/meson.build b/hw/scsi/meson.build index 923a34f344..7a1e7f13f0 100644 --- a/hw/scsi/meson.build +++ b/hw/scsi/meson.build @@ -11,7 +11,7 @@ scsi_ss.add(when: 'CONFIG_LSI_SCSI_PCI', if_true: files('lsi53c895a.c')) scsi_ss.add(when: 'CONFIG_MEGASAS_SCSI_PCI', if_true: files('megasas.c')) scsi_ss.add(when: 'CONFIG_MPTSAS_SCSI_PCI', if_true: files('mptsas.c', 'mptconfig.c', 'mptendian.c')) scsi_ss.add(when: 'CONFIG_VMW_PVSCSI_SCSI_PCI', if_true: files('vmw_pvscsi.c')) -softmmu_ss.add_all(when: 'CONFIG_SCSI', if_true: scsi_ss) +system_ss.add_all(when: 'CONFIG_SCSI', if_true: scsi_ss) specific_scsi_ss = ss.source_set() diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c index c485da792c..3de288b454 100644 --- a/hw/scsi/mptsas.c +++ b/hw/scsi/mptsas.c @@ -1322,7 +1322,8 @@ static void mptsas_scsi_realize(PCIDevice *dev, Error **errp) } s->max_devices = MPTSAS_NUM_PORTS; - s->request_bh = qemu_bh_new(mptsas_fetch_requests, s); + s->request_bh = qemu_bh_new_guarded(mptsas_fetch_requests, s, + 
&DEVICE(dev)->mem_reentrancy_guard); scsi_bus_init(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info); } diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c index c97176110c..f80f4cb4fc 100644 --- a/hw/scsi/scsi-bus.c +++ b/hw/scsi/scsi-bus.c @@ -60,8 +60,7 @@ static SCSIDevice *do_scsi_device_find(SCSIBus *bus, * the user access the device. */ - if (retval && !include_unrealized && - !qatomic_load_acquire(&retval->qdev.realized)) { + if (retval && !include_unrealized && !qdev_is_realized(&retval->qdev)) { retval = NULL; } @@ -193,7 +192,8 @@ static void scsi_dma_restart_cb(void *opaque, bool running, RunState state) AioContext *ctx = blk_get_aio_context(s->conf.blk); /* The reference is dropped in scsi_dma_restart_bh.*/ object_ref(OBJECT(s)); - s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s); + s->bh = aio_bh_new_guarded(ctx, scsi_dma_restart_bh, s, + &DEVICE(s)->mem_reentrancy_guard); qemu_bh_schedule(s->bh); } } @@ -487,7 +487,8 @@ static bool scsi_target_emulate_report_luns(SCSITargetReq *r) DeviceState *qdev = kid->child; SCSIDevice *dev = SCSI_DEVICE(qdev); - if (dev->channel == channel && dev->id == id && dev->lun != 0) { + if (dev->channel == channel && dev->id == id && dev->lun != 0 && + qdev_is_realized(&dev->qdev)) { store_lun(tmp, dev->lun); g_byte_array_append(buf, tmp, 8); len += 8; @@ -1668,6 +1669,46 @@ void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense) scsi_device_set_ua(sdev, sense); } +void scsi_device_drained_begin(SCSIDevice *sdev) +{ + SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus); + if (!bus) { + return; + } + + assert(qemu_get_current_aio_context() == qemu_get_aio_context()); + assert(bus->drain_count < INT_MAX); + + /* + * Multiple BlockBackends can be on a SCSIBus and each may begin/end + * draining at any time. Keep a counter so HBAs only see begin/end once. 
+ */ + if (bus->drain_count++ == 0) { + trace_scsi_bus_drained_begin(bus, sdev); + if (bus->info->drained_begin) { + bus->info->drained_begin(bus); + } + } +} + +void scsi_device_drained_end(SCSIDevice *sdev) +{ + SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus); + if (!bus) { + return; + } + + assert(qemu_get_current_aio_context() == qemu_get_aio_context()); + assert(bus->drain_count > 0); + + if (bus->drain_count-- == 1) { + trace_scsi_bus_drained_end(bus, sdev); + if (bus->info->drained_end) { + bus->info->drained_end(bus); + } + } +} + static char *scsibus_get_dev_path(DeviceState *dev) { SCSIDevice *d = SCSI_DEVICE(dev); diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index 97c9b1c8cd..e0d79c7966 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -2360,6 +2360,20 @@ static void scsi_disk_reset(DeviceState *dev) s->qdev.scsi_version = s->qdev.default_scsi_version; } +static void scsi_disk_drained_begin(void *opaque) +{ + SCSIDiskState *s = opaque; + + scsi_device_drained_begin(&s->qdev); +} + +static void scsi_disk_drained_end(void *opaque) +{ + SCSIDiskState *s = opaque; + + scsi_device_drained_end(&s->qdev); +} + static void scsi_disk_resize_cb(void *opaque) { SCSIDiskState *s = opaque; @@ -2414,16 +2428,19 @@ static bool scsi_cd_is_medium_locked(void *opaque) } static const BlockDevOps scsi_disk_removable_block_ops = { - .change_media_cb = scsi_cd_change_media_cb, + .change_media_cb = scsi_cd_change_media_cb, + .drained_begin = scsi_disk_drained_begin, + .drained_end = scsi_disk_drained_end, .eject_request_cb = scsi_cd_eject_request_cb, - .is_tray_open = scsi_cd_is_tray_open, .is_medium_locked = scsi_cd_is_medium_locked, - - .resize_cb = scsi_disk_resize_cb, + .is_tray_open = scsi_cd_is_tray_open, + .resize_cb = scsi_disk_resize_cb, }; static const BlockDevOps scsi_disk_block_ops = { - .resize_cb = scsi_disk_resize_cb, + .drained_begin = scsi_disk_drained_begin, + .drained_end = scsi_disk_drained_end, + .resize_cb = scsi_disk_resize_cb, }; static void scsi_disk_unit_attention_reported(SCSIDevice *dev) diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c index ac9fa662b4..2417f0ad84 100644 --- a/hw/scsi/scsi-generic.c +++ b/hw/scsi/scsi-generic.c @@ -191,12 +191,16 @@ static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len) if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) && (r->req.cmd.buf[1] & 0x01)) { page = r->req.cmd.buf[2]; - if (page == 0xb0) { + if (page == 0xb0 && r->buflen >= 8) { + uint8_t buf[16] = {}; + uint8_t buf_used = MIN(r->buflen, 16); uint64_t max_transfer = calculate_max_transfer(s); - stl_be_p(&r->buf[8], max_transfer); - /* Also take care of the opt xfer len. 
*/ - stl_be_p(&r->buf[12], - MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12]))); + + memcpy(buf, r->buf, buf_used); + stl_be_p(&buf[8], max_transfer); + stl_be_p(&buf[12], MIN_NON_ZERO(max_transfer, ldl_be_p(&buf[12]))); + memcpy(r->buf + 8, buf + 8, buf_used - 8); + } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) { /* * Now we're capable of supplying the VPD Block Limits diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events index ab238293f0..bdd4e2c7c7 100644 --- a/hw/scsi/trace-events +++ b/hw/scsi/trace-events @@ -6,6 +6,8 @@ scsi_req_cancel(int target, int lun, int tag) "target %d lun %d tag %d" scsi_req_data(int target, int lun, int tag, int len) "target %d lun %d tag %d len %d" scsi_req_data_canceled(int target, int lun, int tag, int len) "target %d lun %d tag %d len %d" scsi_req_dequeue(int target, int lun, int tag) "target %d lun %d tag %d" +scsi_bus_drained_begin(void *bus, void *sdev) "bus %p sdev %p" +scsi_bus_drained_end(void *bus, void *sdev) "bus %p sdev %p" scsi_req_continue(int target, int lun, int tag) "target %d lun %d tag %d" scsi_req_continue_canceled(int target, int lun, int tag) "target %d lun %d tag %d" scsi_req_parsed(int target, int lun, int tag, int cmd, int mode, int xfer) "target %d lun %d tag %d command %d dir %d length %d" diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c index 20bb91766e..d55de4c8ca 100644 --- a/hw/scsi/virtio-scsi-dataplane.c +++ b/hw/scsi/virtio-scsi-dataplane.c @@ -71,12 +71,26 @@ static void virtio_scsi_dataplane_stop_bh(void *opaque) { VirtIOSCSI *s = opaque; VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); + EventNotifier *host_notifier; int i; virtio_queue_aio_detach_host_notifier(vs->ctrl_vq, s->ctx); + host_notifier = virtio_queue_get_host_notifier(vs->ctrl_vq); + + /* + * Test and clear notifier after disabling event, in case poll callback + * didn't have time to run. 
+ */ + virtio_queue_host_notifier_read(host_notifier); + virtio_queue_aio_detach_host_notifier(vs->event_vq, s->ctx); + host_notifier = virtio_queue_get_host_notifier(vs->event_vq); + virtio_queue_host_notifier_read(host_notifier); + for (i = 0; i < vs->conf.num_queues; i++) { virtio_queue_aio_detach_host_notifier(vs->cmd_vqs[i], s->ctx); + host_notifier = virtio_queue_get_host_notifier(vs->cmd_vqs[i]); + virtio_queue_host_notifier_read(host_notifier); } } @@ -144,14 +158,16 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev) s->dataplane_starting = false; s->dataplane_started = true; - aio_context_acquire(s->ctx); - virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx); - virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx); + if (s->bus.drain_count == 0) { + aio_context_acquire(s->ctx); + virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx); + virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx); - for (i = 0; i < vs->conf.num_queues; i++) { - virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx); + for (i = 0; i < vs->conf.num_queues; i++) { + virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx); + } + aio_context_release(s->ctx); } - aio_context_release(s->ctx); return 0; fail_host_notifiers: @@ -197,9 +213,9 @@ void virtio_scsi_dataplane_stop(VirtIODevice *vdev) } s->dataplane_stopping = true; - aio_context_acquire(s->ctx); - aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s); - aio_context_release(s->ctx); + if (s->bus.drain_count == 0) { + aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s); + } blk_drain_all(); /* ensure there are no in-flight requests */ diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 612c525d9d..9c8ef0aaa6 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -799,7 +799,7 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req) return -ENOBUFS; } scsi_req_ref(req->sreq); - blk_io_plug(d->conf.blk); + blk_io_plug(); object_unref(OBJECT(d)); return 0; } @@ -810,7 +810,7 @@ static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req) if (scsi_req_enqueue(sreq)) { scsi_req_continue(sreq); } - blk_io_unplug(sreq->dev->conf.blk); + blk_io_unplug(); scsi_req_unref(sreq); } @@ -836,7 +836,7 @@ static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq) while (!QTAILQ_EMPTY(&reqs)) { req = QTAILQ_FIRST(&reqs); QTAILQ_REMOVE(&reqs, req, next); - blk_io_unplug(req->sreq->dev->conf.blk); + blk_io_unplug(); scsi_req_unref(req->sreq); virtqueue_detach_element(req->vq, &req->elem, 0); virtio_scsi_free_req(req); @@ -933,13 +933,27 @@ static void virtio_scsi_reset(VirtIODevice *vdev) s->events_dropped = false; } -static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev, - uint32_t event, uint32_t reason) +typedef struct { + uint32_t event; + uint32_t reason; + union { + /* Used by messages specific to a device */ + struct { + uint32_t id; + uint32_t lun; + } address; + }; +} VirtIOSCSIEventInfo; + +static void virtio_scsi_push_event(VirtIOSCSI *s, + const VirtIOSCSIEventInfo *info) { VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); VirtIOSCSIReq *req; VirtIOSCSIEvent *evt; VirtIODevice *vdev = VIRTIO_DEVICE(s); + uint32_t event = info->event; + uint32_t reason = info->reason; if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { return; @@ -965,27 +979,28 @@ static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev, memset(evt, 0, sizeof(VirtIOSCSIEvent)); evt->event = virtio_tswap32(vdev, event); 
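A minimal sketch, assuming the stock virtio host-notifier API, of the detach-then-read pattern that virtio_scsi_dataplane_stop_bh adopts above: once a queue's notifier is detached from the AioContext it is read one last time, so an event that fired before the poll callback had a chance to run is not lost. The helper name below is illustrative and not part of the patch.

#include "qemu/osdep.h"
#include "hw/virtio/virtio.h"

static void detach_and_flush_host_notifier(VirtQueue *vq, AioContext *ctx)
{
    EventNotifier *host_notifier;

    /* Stop ioeventfd processing for this queue in the given AioContext */
    virtio_queue_aio_detach_host_notifier(vq, ctx);

    /* Test and clear the notifier in case the poll callback never ran */
    host_notifier = virtio_queue_get_host_notifier(vq);
    virtio_queue_host_notifier_read(host_notifier);
}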
evt->reason = virtio_tswap32(vdev, reason); - if (!dev) { - assert(event == VIRTIO_SCSI_T_EVENTS_MISSED); - } else { + if (event != VIRTIO_SCSI_T_EVENTS_MISSED) { evt->lun[0] = 1; - evt->lun[1] = dev->id; + evt->lun[1] = info->address.id; /* Linux wants us to keep the same encoding we use for REPORT LUNS. */ - if (dev->lun >= 256) { - evt->lun[2] = (dev->lun >> 8) | 0x40; + if (info->address.lun >= 256) { + evt->lun[2] = (info->address.lun >> 8) | 0x40; } - evt->lun[3] = dev->lun & 0xFF; + evt->lun[3] = info->address.lun & 0xFF; } trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason); - + virtio_scsi_complete_req(req); } static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq) { if (s->events_dropped) { - virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); + VirtIOSCSIEventInfo info = { + .event = VIRTIO_SCSI_T_NO_EVENT, + }; + virtio_scsi_push_event(s, &info); } } @@ -1009,9 +1024,17 @@ static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense) if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) && dev->type != TYPE_ROM) { + VirtIOSCSIEventInfo info = { + .event = VIRTIO_SCSI_T_PARAM_CHANGE, + .reason = sense.asc | (sense.ascq << 8), + .address = { + .id = dev->id, + .lun = dev->lun, + }, + }; + virtio_scsi_acquire(s); - virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE, - sense.asc | (sense.ascq << 8)); + virtio_scsi_push_event(s, &info); virtio_scsi_release(s); } } @@ -1046,10 +1069,17 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, } if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { + VirtIOSCSIEventInfo info = { + .event = VIRTIO_SCSI_T_TRANSPORT_RESET, + .reason = VIRTIO_SCSI_EVT_RESET_RESCAN, + .address = { + .id = sd->id, + .lun = sd->lun, + }, + }; + virtio_scsi_acquire(s); - virtio_scsi_push_event(s, sd, - VIRTIO_SCSI_T_TRANSPORT_RESET, - VIRTIO_SCSI_EVT_RESET_RESCAN); + virtio_scsi_push_event(s, &info); scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED)); virtio_scsi_release(s); } @@ -1061,20 +1091,16 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev, VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev); VirtIOSCSI *s = VIRTIO_SCSI(vdev); SCSIDevice *sd = SCSI_DEVICE(dev); - AioContext *ctx = s->ctx ?: qemu_get_aio_context(); + VirtIOSCSIEventInfo info = { + .event = VIRTIO_SCSI_T_TRANSPORT_RESET, + .reason = VIRTIO_SCSI_EVT_RESET_REMOVED, + .address = { + .id = sd->id, + .lun = sd->lun, + }, + }; - if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { - virtio_scsi_acquire(s); - virtio_scsi_push_event(s, sd, - VIRTIO_SCSI_T_TRANSPORT_RESET, - VIRTIO_SCSI_EVT_RESET_REMOVED); - scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED)); - virtio_scsi_release(s); - } - - aio_disable_external(ctx); qdev_simple_device_unplug_cb(hotplug_dev, dev, errp); - aio_enable_external(ctx); if (s->ctx) { virtio_scsi_acquire(s); @@ -1082,6 +1108,49 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev, blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL); virtio_scsi_release(s); } + + if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { + virtio_scsi_acquire(s); + virtio_scsi_push_event(s, &info); + scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED)); + virtio_scsi_release(s); + } +} + +/* Suspend virtqueue ioeventfd processing during drain */ +static void virtio_scsi_drained_begin(SCSIBus *bus) +{ + VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus); + VirtIODevice *vdev = 
VIRTIO_DEVICE(s); + uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED + + s->parent_obj.conf.num_queues; + + if (!s->dataplane_started) { + return; + } + + for (uint32_t i = 0; i < total_queues; i++) { + VirtQueue *vq = virtio_get_queue(vdev, i); + virtio_queue_aio_detach_host_notifier(vq, s->ctx); + } +} + +/* Resume virtqueue ioeventfd processing after drain */ +static void virtio_scsi_drained_end(SCSIBus *bus) +{ + VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus); + VirtIODevice *vdev = VIRTIO_DEVICE(s); + uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED + + s->parent_obj.conf.num_queues; + + if (!s->dataplane_started) { + return; + } + + for (uint32_t i = 0; i < total_queues; i++) { + VirtQueue *vq = virtio_get_queue(vdev, i); + virtio_queue_aio_attach_host_notifier(vq, s->ctx); + } } static struct SCSIBusInfo virtio_scsi_scsi_info = { @@ -1098,6 +1167,8 @@ static struct SCSIBusInfo virtio_scsi_scsi_info = { .get_sg_list = virtio_scsi_get_sg_list, .save_request = virtio_scsi_save_request, .load_request = virtio_scsi_load_request, + .drained_begin = virtio_scsi_drained_begin, + .drained_end = virtio_scsi_drained_end, }; void virtio_scsi_common_realize(DeviceState *dev, diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c index fa76696855..4de34536e9 100644 --- a/hw/scsi/vmw_pvscsi.c +++ b/hw/scsi/vmw_pvscsi.c @@ -1184,7 +1184,8 @@ pvscsi_realizefn(PCIDevice *pci_dev, Error **errp) pcie_endpoint_cap_init(pci_dev, PVSCSI_EXP_EP_OFFSET); } - s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s); + s->completion_worker = qemu_bh_new_guarded(pvscsi_process_completion_queue, s, + &DEVICE(pci_dev)->mem_reentrancy_guard); scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info); /* override default SCSI bus hotplug-handler, with pvscsi's one */ diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c index 51e5e90830..1a576d62ae 100644 --- a/hw/sd/allwinner-sdhost.c +++ b/hw/sd/allwinner-sdhost.c @@ -77,6 +77,7 @@ enum { REG_SD_DATA1_CRC = 0x12C, /* CRC Data 1 from card/eMMC */ REG_SD_DATA0_CRC = 0x130, /* CRC Data 0 from card/eMMC */ REG_SD_CRC_STA = 0x134, /* CRC status from card/eMMC during write */ + REG_SD_SAMP_DL = 0x144, /* Sample Delay Control (sun50i-a64) */ REG_SD_FIFO = 0x200, /* Read/Write FIFO */ }; @@ -158,6 +159,7 @@ enum { REG_SD_RES_CRC_RST = 0x0, REG_SD_DATA_CRC_RST = 0x0, REG_SD_CRC_STA_RST = 0x0, + REG_SD_SAMPLE_DL_RST = 0x00002000, REG_SD_FIFO_RST = 0x0, }; @@ -191,7 +193,7 @@ static void allwinner_sdhost_update_irq(AwSdHostState *s) } trace_allwinner_sdhost_update_irq(irq); - qemu_set_irq(s->irq, irq); + qemu_set_irq(s->irq, !!irq); } static void allwinner_sdhost_update_transfer_cnt(AwSdHostState *s, @@ -302,6 +304,30 @@ static void allwinner_sdhost_auto_stop(AwSdHostState *s) } } +static void read_descriptor(AwSdHostState *s, hwaddr desc_addr, + TransferDescriptor *desc) +{ + uint32_t desc_words[4]; + dma_memory_read(&s->dma_as, desc_addr, &desc_words, sizeof(desc_words), + MEMTXATTRS_UNSPECIFIED); + desc->status = le32_to_cpu(desc_words[0]); + desc->size = le32_to_cpu(desc_words[1]); + desc->addr = le32_to_cpu(desc_words[2]); + desc->next = le32_to_cpu(desc_words[3]); +} + +static void write_descriptor(AwSdHostState *s, hwaddr desc_addr, + const TransferDescriptor *desc) +{ + uint32_t desc_words[4]; + desc_words[0] = cpu_to_le32(desc->status); + desc_words[1] = cpu_to_le32(desc->size); + desc_words[2] = cpu_to_le32(desc->addr); + desc_words[3] = cpu_to_le32(desc->next); + dma_memory_write(&s->dma_as, desc_addr, &desc_words, 
sizeof(desc_words), + MEMTXATTRS_UNSPECIFIED); +} + static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, hwaddr desc_addr, TransferDescriptor *desc, @@ -312,9 +338,7 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, uint32_t num_bytes = max_bytes; uint8_t buf[1024]; - /* Read descriptor */ - dma_memory_read(&s->dma_as, desc_addr, desc, sizeof(*desc), - MEMTXATTRS_UNSPECIFIED); + read_descriptor(s, desc_addr, desc); if (desc->size == 0) { desc->size = klass->max_desc_size; } else if (desc->size > klass->max_desc_size) { @@ -356,8 +380,7 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, /* Clear hold flag and flush descriptor */ desc->status &= ~DESC_STATUS_HOLD; - dma_memory_write(&s->dma_as, desc_addr, desc, sizeof(*desc), - MEMTXATTRS_UNSPECIFIED); + write_descriptor(s, desc_addr, desc); return num_done; } @@ -438,6 +461,7 @@ static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset, { AwSdHostState *s = AW_SDHOST(opaque); AwSdHostClass *sc = AW_SDHOST_GET_CLASS(s); + bool out_of_bounds = false; uint32_t res = 0; switch (offset) { @@ -556,13 +580,24 @@ static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset, case REG_SD_FIFO: /* Read/Write FIFO */ res = allwinner_sdhost_fifo_read(s); break; + case REG_SD_SAMP_DL: /* Sample Delay */ + if (sc->can_calibrate) { + res = s->sample_delay; + } else { + out_of_bounds = true; + } + break; default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset %" - HWADDR_PRIx"\n", __func__, offset); + out_of_bounds = true; res = 0; break; } + if (out_of_bounds) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset %" + HWADDR_PRIx"\n", __func__, offset); + } + trace_allwinner_sdhost_read(offset, res, size); return res; } @@ -581,6 +616,7 @@ static void allwinner_sdhost_write(void *opaque, hwaddr offset, { AwSdHostState *s = AW_SDHOST(opaque); AwSdHostClass *sc = AW_SDHOST_GET_CLASS(s); + bool out_of_bounds = false; trace_allwinner_sdhost_write(offset, value, size); @@ -704,10 +740,21 @@ static void allwinner_sdhost_write(void *opaque, hwaddr offset, case REG_SD_DATA0_CRC: /* CRC Data 0 from card/eMMC */ case REG_SD_CRC_STA: /* CRC status from card/eMMC in write operation */ break; + case REG_SD_SAMP_DL: /* Sample delay control */ + if (sc->can_calibrate) { + s->sample_delay = value; + } else { + out_of_bounds = true; + } + break; default: + out_of_bounds = true; + break; + } + + if (out_of_bounds) { qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset %" HWADDR_PRIx"\n", __func__, offset); - break; } } @@ -756,6 +803,7 @@ static const VMStateDescription vmstate_allwinner_sdhost = { VMSTATE_UINT32(response_crc, AwSdHostState), VMSTATE_UINT32_ARRAY(data_crc, AwSdHostState, 8), VMSTATE_UINT32(status_crc, AwSdHostState), + VMSTATE_UINT32(sample_delay, AwSdHostState), VMSTATE_END_OF_LIST() } }; @@ -794,6 +842,7 @@ static void allwinner_sdhost_realize(DeviceState *dev, Error **errp) static void allwinner_sdhost_reset(DeviceState *dev) { AwSdHostState *s = AW_SDHOST(dev); + AwSdHostClass *sc = AW_SDHOST_GET_CLASS(s); s->global_ctl = REG_SD_GCTL_RST; s->clock_ctl = REG_SD_CKCR_RST; @@ -834,6 +883,10 @@ static void allwinner_sdhost_reset(DeviceState *dev) } s->status_crc = REG_SD_CRC_STA_RST; + + if (sc->can_calibrate) { + s->sample_delay = REG_SD_SAMPLE_DL_RST; + } } static void allwinner_sdhost_bus_class_init(ObjectClass *klass, void *data) @@ -858,6 +911,7 @@ static void allwinner_sdhost_sun4i_class_init(ObjectClass *klass, void *data) AwSdHostClass *sc = AW_SDHOST_CLASS(klass); 
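A short sketch, assuming the sample_delay field and can_calibrate class flag that this series adds to the Allwinner SD host model (the header side is not shown here), of how the new REG_SD_SAMP_DL register is gated per SoC variant: classes that do not set can_calibrate treat the offset as out of bounds. The helper below is illustrative; in the patch the logic sits inline in allwinner_sdhost_read() and allwinner_sdhost_write().

/* Illustrative mirror of the REG_SD_SAMP_DL branch in allwinner_sdhost_read() */
static bool allwinner_sdhost_read_sample_delay(AwSdHostState *s, uint64_t *res)
{
    AwSdHostClass *sc = AW_SDHOST_GET_CLASS(s);

    if (!sc->can_calibrate) {
        /* Register absent on sun4i/sun5i; caller logs an out-of-bounds access */
        return false;
    }
    *res = s->sample_delay;
    return true;
}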
sc->max_desc_size = 8 * KiB; sc->is_sun4i = true; + sc->can_calibrate = false; } static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass, void *data) @@ -865,6 +919,25 @@ static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass, void *data) AwSdHostClass *sc = AW_SDHOST_CLASS(klass); sc->max_desc_size = 64 * KiB; sc->is_sun4i = false; + sc->can_calibrate = false; +} + +static void allwinner_sdhost_sun50i_a64_class_init(ObjectClass *klass, + void *data) +{ + AwSdHostClass *sc = AW_SDHOST_CLASS(klass); + sc->max_desc_size = 64 * KiB; + sc->is_sun4i = false; + sc->can_calibrate = true; +} + +static void allwinner_sdhost_sun50i_a64_emmc_class_init(ObjectClass *klass, + void *data) +{ + AwSdHostClass *sc = AW_SDHOST_CLASS(klass); + sc->max_desc_size = 8 * KiB; + sc->is_sun4i = false; + sc->can_calibrate = true; } static const TypeInfo allwinner_sdhost_info = { @@ -889,6 +962,18 @@ static const TypeInfo allwinner_sdhost_sun5i_info = { .class_init = allwinner_sdhost_sun5i_class_init, }; +static const TypeInfo allwinner_sdhost_sun50i_a64_info = { + .name = TYPE_AW_SDHOST_SUN50I_A64, + .parent = TYPE_AW_SDHOST, + .class_init = allwinner_sdhost_sun50i_a64_class_init, +}; + +static const TypeInfo allwinner_sdhost_sun50i_a64_emmc_info = { + .name = TYPE_AW_SDHOST_SUN50I_A64_EMMC, + .parent = TYPE_AW_SDHOST, + .class_init = allwinner_sdhost_sun50i_a64_emmc_class_init, +}; + static const TypeInfo allwinner_sdhost_bus_info = { .name = TYPE_AW_SDHOST_BUS, .parent = TYPE_SD_BUS, @@ -901,6 +986,8 @@ static void allwinner_sdhost_register_types(void) type_register_static(&allwinner_sdhost_info); type_register_static(&allwinner_sdhost_sun4i_info); type_register_static(&allwinner_sdhost_sun5i_info); + type_register_static(&allwinner_sdhost_sun50i_a64_info); + type_register_static(&allwinner_sdhost_sun50i_a64_emmc_info); type_register_static(&allwinner_sdhost_bus_info); } diff --git a/hw/sd/meson.build b/hw/sd/meson.build index 807ca07b7c..abfac9e461 100644 --- a/hw/sd/meson.build +++ b/hw/sd/meson.build @@ -1,13 +1,13 @@ -softmmu_ss.add(when: 'CONFIG_PL181', if_true: files('pl181.c')) -softmmu_ss.add(when: 'CONFIG_SD', if_true: files('sd.c', 'core.c', 'sdmmc-internal.c')) -softmmu_ss.add(when: 'CONFIG_SDHCI', if_true: files('sdhci.c')) -softmmu_ss.add(when: 'CONFIG_SDHCI_PCI', if_true: files('sdhci-pci.c')) -softmmu_ss.add(when: 'CONFIG_SSI_SD', if_true: files('ssi-sd.c')) +system_ss.add(when: 'CONFIG_PL181', if_true: files('pl181.c')) +system_ss.add(when: 'CONFIG_SD', if_true: files('sd.c', 'core.c', 'sdmmc-internal.c')) +system_ss.add(when: 'CONFIG_SDHCI', if_true: files('sdhci.c')) +system_ss.add(when: 'CONFIG_SDHCI_PCI', if_true: files('sdhci-pci.c')) +system_ss.add(when: 'CONFIG_SSI_SD', if_true: files('ssi-sd.c')) -softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_mmc.c')) -softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_mmci.c')) -softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_sdhost.c')) -softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_sdhci.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sdhost.c')) -softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_sdhci.c')) -softmmu_ss.add(when: 'CONFIG_CADENCE_SDHCI', if_true: files('cadence_sdhci.c')) +system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_mmc.c')) +system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_mmci.c')) +system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_sdhost.c')) +system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: 
files('aspeed_sdhci.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sdhost.c')) +system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_sdhci.c')) +system_ss.add(when: 'CONFIG_CADENCE_SDHCI', if_true: files('cadence_sdhci.c')) diff --git a/hw/sensor/meson.build b/hw/sensor/meson.build index 9e9be602c3..30e20e27b8 100644 --- a/hw/sensor/meson.build +++ b/hw/sensor/meson.build @@ -1,9 +1,9 @@ -softmmu_ss.add(when: 'CONFIG_TMP105', if_true: files('tmp105.c')) -softmmu_ss.add(when: 'CONFIG_TMP421', if_true: files('tmp421.c')) -softmmu_ss.add(when: 'CONFIG_DPS310', if_true: files('dps310.c')) -softmmu_ss.add(when: 'CONFIG_EMC141X', if_true: files('emc141x.c')) -softmmu_ss.add(when: 'CONFIG_ADM1272', if_true: files('adm1272.c')) -softmmu_ss.add(when: 'CONFIG_MAX34451', if_true: files('max34451.c')) -softmmu_ss.add(when: 'CONFIG_LSM303DLHC_MAG', if_true: files('lsm303dlhc_mag.c')) -softmmu_ss.add(when: 'CONFIG_ISL_PMBUS_VR', if_true: files('isl_pmbus_vr.c')) -softmmu_ss.add(when: 'CONFIG_MAX31785', if_true: files('max31785.c')) +system_ss.add(when: 'CONFIG_TMP105', if_true: files('tmp105.c')) +system_ss.add(when: 'CONFIG_TMP421', if_true: files('tmp421.c')) +system_ss.add(when: 'CONFIG_DPS310', if_true: files('dps310.c')) +system_ss.add(when: 'CONFIG_EMC141X', if_true: files('emc141x.c')) +system_ss.add(when: 'CONFIG_ADM1272', if_true: files('adm1272.c')) +system_ss.add(when: 'CONFIG_MAX34451', if_true: files('max34451.c')) +system_ss.add(when: 'CONFIG_LSM303DLHC_MAG', if_true: files('lsm303dlhc_mag.c')) +system_ss.add(when: 'CONFIG_ISL_PMBUS_VR', if_true: files('isl_pmbus_vr.c')) +system_ss.add(when: 'CONFIG_MAX31785', if_true: files('max31785.c')) diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c index 826a0a31b5..4944994e9c 100644 --- a/hw/sh4/r2d.c +++ b/hw/sh4/r2d.c @@ -232,6 +232,7 @@ static void r2d_init(MachineState *machine) const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; + MachineClass *mc = MACHINE_GET_CLASS(machine); SuperHCPU *cpu; CPUSH4State *env; ResetData *reset_info; @@ -310,7 +311,7 @@ static void r2d_init(MachineState *machine) /* NIC: rtl8139 on-board, and 2 slots. */ for (i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], pci_bus, - "rtl8139", i == 0 ? "2" : NULL); + mc->default_nic, i == 0 ? 
"2" : NULL); /* USB keyboard */ usb_create_simple(usb_bus_find(-1), "usb-kbd"); @@ -375,6 +376,7 @@ static void r2d_machine_init(MachineClass *mc) mc->init = r2d_init; mc->block_default_type = IF_IDE; mc->default_cpu_type = TYPE_SH7751R_CPU; + mc->default_nic = "rtl8139"; } DEFINE_MACHINE("r2d", r2d_machine_init) diff --git a/hw/smbios/meson.build b/hw/smbios/meson.build index 9e762c7108..6eeae4b35c 100644 --- a/hw/smbios/meson.build +++ b/hw/smbios/meson.build @@ -4,10 +4,10 @@ smbios_ss.add(when: 'CONFIG_IPMI', if_true: files('smbios_type_38.c'), if_false: files('smbios_type_38-stub.c')) -softmmu_ss.add_all(when: 'CONFIG_SMBIOS', if_true: smbios_ss) -softmmu_ss.add(when: 'CONFIG_SMBIOS', if_false: files('smbios-stub.c')) +system_ss.add_all(when: 'CONFIG_SMBIOS', if_true: smbios_ss) +system_ss.add(when: 'CONFIG_SMBIOS', if_false: files('smbios-stub.c')) -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files( +system_ss.add(when: 'CONFIG_ALL', if_true: files( 'smbios-stub.c', 'smbios_type_38-stub.c', )) diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c index d9288326d6..17bf5f2879 100644 --- a/hw/sparc/sun4m.c +++ b/hw/sparc/sun4m.c @@ -982,7 +982,7 @@ static void sun4m_hw_init(MachineState *machine) qdev_realize_and_unref(ms_kb_orgate, NULL, &error_fatal); sysbus_connect_irq(s, 0, qdev_get_gpio_in(ms_kb_orgate, 0)); sysbus_connect_irq(s, 1, qdev_get_gpio_in(ms_kb_orgate, 1)); - qdev_connect_gpio_out(DEVICE(ms_kb_orgate), 0, slavio_irq[14]); + qdev_connect_gpio_out(ms_kb_orgate, 0, slavio_irq[14]); dev = qdev_new(TYPE_ESCC); qdev_prop_set_uint32(dev, "disabled", 0); @@ -1004,7 +1004,7 @@ static void sun4m_hw_init(MachineState *machine) qdev_realize_and_unref(serial_orgate, NULL, &error_fatal); sysbus_connect_irq(s, 0, qdev_get_gpio_in(serial_orgate, 0)); sysbus_connect_irq(s, 1, qdev_get_gpio_in(serial_orgate, 1)); - qdev_connect_gpio_out(DEVICE(serial_orgate), 0, slavio_irq[15]); + qdev_connect_gpio_out(serial_orgate, 0, slavio_irq[15]); if (hwdef->apc_base) { apc_init(hwdef->apc_base, qemu_allocate_irq(cpu_halt_signal, NULL, 0)); diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c index a25e951f9d..29e9b6cc26 100644 --- a/hw/sparc64/sun4u.c +++ b/hw/sparc64/sun4u.c @@ -31,12 +31,11 @@ #include "hw/irq.h" #include "hw/pci/pci.h" #include "hw/pci/pci_bridge.h" -#include "hw/pci/pci_bus.h" #include "hw/pci/pci_host.h" #include "hw/qdev-properties.h" #include "hw/pci-host/sabre.h" #include "hw/char/serial.h" -#include "hw/char/parallel.h" +#include "hw/char/parallel-isa.h" #include "hw/rtc/m48t59.h" #include "migration/vmstate.h" #include "hw/input/i8042.h" @@ -554,6 +553,7 @@ static void sun4uv_init(MemoryRegion *address_space_mem, MachineState *machine, const struct hwdef *hwdef) { + MachineClass *mc = MACHINE_GET_CLASS(machine); SPARCCPU *cpu; Nvram *nvram; unsigned int i; @@ -608,9 +608,9 @@ static void sun4uv_init(MemoryRegion *address_space_mem, /* Only in-built Simba APBs can exist on the root bus, slot 0 on busA is reserved (leaving no slots free after on-board devices) however slots 0-3 are free on busB */ - pci_bus->slot_reserved_mask = 0xfffffffc; - pci_busA->slot_reserved_mask = 0xfffffff1; - pci_busB->slot_reserved_mask = 0xfffffff0; + pci_bus_set_slot_reserved_mask(pci_bus, 0xfffffffc); + pci_bus_set_slot_reserved_mask(pci_busA, 0xfffffff1); + pci_bus_set_slot_reserved_mask(pci_busB, 0xfffffff0); ebus = pci_new_multifunction(PCI_DEVFN(1, 0), true, TYPE_EBUS); qdev_prop_set_uint64(DEVICE(ebus), "console-serial-base", @@ -646,15 +646,15 @@ static void sun4uv_init(MemoryRegion 
*address_space_mem, PCIBus *bus; nd = &nd_table[i]; - if (!nd->model || strcmp(nd->model, "sunhme") == 0) { + if (!nd->model || strcmp(nd->model, mc->default_nic) == 0) { if (!onboard_nic) { pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1), - true, "sunhme"); + true, mc->default_nic); bus = pci_busA; memcpy(&macaddr, &nd->macaddr.a, sizeof(MACAddr)); onboard_nic = true; } else { - pci_dev = pci_new(-1, "sunhme"); + pci_dev = pci_new(-1, mc->default_nic); bus = pci_busB; } } else { @@ -817,6 +817,8 @@ static void sun4u_class_init(ObjectClass *oc, void *data) mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-UltraSparc-IIi"); mc->ignore_boot_device_suffixes = true; mc->default_display = "std"; + mc->default_nic = "sunhme"; + mc->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); fwc->get_dev_path = sun4u_fw_dev_path; } @@ -841,6 +843,8 @@ static void sun4v_class_init(ObjectClass *oc, void *data) mc->default_boot_order = "c"; mc->default_cpu_type = SPARC_CPU_TYPE_NAME("Sun-UltraSparc-T1"); mc->default_display = "std"; + mc->default_nic = "sunhme"; + mc->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); } static const TypeInfo sun4v_type = { diff --git a/hw/ssi/meson.build b/hw/ssi/meson.build index 904a47161a..0aebcdd614 100644 --- a/hw/ssi/meson.build +++ b/hw/ssi/meson.build @@ -1,13 +1,13 @@ -softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_smc.c')) -softmmu_ss.add(when: 'CONFIG_MSF2', if_true: files('mss-spi.c')) -softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_fiu.c', 'npcm_pspi.c')) -softmmu_ss.add(when: 'CONFIG_PL022', if_true: files('pl022.c')) -softmmu_ss.add(when: 'CONFIG_SIFIVE_SPI', if_true: files('sifive_spi.c')) -softmmu_ss.add(when: 'CONFIG_SSI', if_true: files('ssi.c')) -softmmu_ss.add(when: 'CONFIG_STM32F2XX_SPI', if_true: files('stm32f2xx_spi.c')) -softmmu_ss.add(when: 'CONFIG_XILINX_SPI', if_true: files('xilinx_spi.c')) -softmmu_ss.add(when: 'CONFIG_XILINX_SPIPS', if_true: files('xilinx_spips.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-ospi.c')) -softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_spi.c')) -softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_spi.c')) -softmmu_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_spi_host.c')) +system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_smc.c')) +system_ss.add(when: 'CONFIG_MSF2', if_true: files('mss-spi.c')) +system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_fiu.c', 'npcm_pspi.c')) +system_ss.add(when: 'CONFIG_PL022', if_true: files('pl022.c')) +system_ss.add(when: 'CONFIG_SIFIVE_SPI', if_true: files('sifive_spi.c')) +system_ss.add(when: 'CONFIG_SSI', if_true: files('ssi.c')) +system_ss.add(when: 'CONFIG_STM32F2XX_SPI', if_true: files('stm32f2xx_spi.c')) +system_ss.add(when: 'CONFIG_XILINX_SPI', if_true: files('xilinx_spi.c')) +system_ss.add(when: 'CONFIG_XILINX_SPIPS', if_true: files('xilinx_spips.c')) +system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-ospi.c')) +system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_spi.c')) +system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_spi.c')) +system_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_spi_host.c')) diff --git a/hw/timer/i8254.c b/hw/timer/i8254.c index c8388ea432..c235496fc9 100644 --- a/hw/timer/i8254.c +++ b/hw/timer/i8254.c @@ -350,11 +350,6 @@ static void pit_realizefn(DeviceState *dev, Error **errp) pc->parent_realize(dev, errp); } -static Property pit_properties[] = { - DEFINE_PROP_UINT32("iobase", PITCommonState, 
iobase, -1), - DEFINE_PROP_END_OF_LIST(), -}; - static void pit_class_initfn(ObjectClass *klass, void *data) { PITClass *pc = PIT_CLASS(klass); @@ -366,7 +361,6 @@ static void pit_class_initfn(ObjectClass *klass, void *data) k->get_channel_info = pit_get_channel_info_common; k->post_load = pit_post_load; dc->reset = pit_reset; - device_class_set_props(dc, pit_properties); } static const TypeInfo pit_info = { diff --git a/hw/timer/i8254_common.c b/hw/timer/i8254_common.c index 050875b497..e4093e2904 100644 --- a/hw/timer/i8254_common.c +++ b/hw/timer/i8254_common.c @@ -240,6 +240,11 @@ static const VMStateDescription vmstate_pit_common = { } }; +static Property pit_common_properties[] = { + DEFINE_PROP_UINT32("iobase", PITCommonState, iobase, -1), + DEFINE_PROP_END_OF_LIST(), +}; + static void pit_common_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -252,6 +257,7 @@ static void pit_common_class_init(ObjectClass *klass, void *data) * done by board code. */ dc->user_creatable = false; + device_class_set_props(dc, pit_common_properties); } static const TypeInfo pit_common_type = { diff --git a/hw/timer/meson.build b/hw/timer/meson.build index 03092e2ceb..fc632ad445 100644 --- a/hw/timer/meson.build +++ b/hw/timer/meson.build @@ -1,40 +1,40 @@ -softmmu_ss.add(when: 'CONFIG_A9_GTIMER', if_true: files('a9gtimer.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_A10_PIT', if_true: files('allwinner-a10-pit.c')) -softmmu_ss.add(when: 'CONFIG_ALTERA_TIMER', if_true: files('altera_timer.c')) -softmmu_ss.add(when: 'CONFIG_ARM_MPTIMER', if_true: files('arm_mptimer.c')) -softmmu_ss.add(when: 'CONFIG_ARM_TIMER', if_true: files('arm_timer.c')) -softmmu_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_systick.c')) -softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_timer.c')) -softmmu_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_ttc.c')) -softmmu_ss.add(when: 'CONFIG_CMSDK_APB_DUALTIMER', if_true: files('cmsdk-apb-dualtimer.c')) -softmmu_ss.add(when: 'CONFIG_CMSDK_APB_TIMER', if_true: files('cmsdk-apb-timer.c')) -softmmu_ss.add(when: 'CONFIG_RENESAS_TMR', if_true: files('renesas_tmr.c')) -softmmu_ss.add(when: 'CONFIG_RENESAS_CMT', if_true: files('renesas_cmt.c')) -softmmu_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic-timer.c')) -softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_timer.c')) -softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_mct.c')) -softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_pwm.c')) -softmmu_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_gptimer.c')) -softmmu_ss.add(when: 'CONFIG_HPET', if_true: files('hpet.c')) -softmmu_ss.add(when: 'CONFIG_I8254', if_true: files('i8254_common.c', 'i8254.c')) -softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_epit.c')) -softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpt.c')) -softmmu_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_gictimer.c')) -softmmu_ss.add(when: 'CONFIG_MSF2', if_true: files('mss-timer.c')) -softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_timer.c')) -softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_timer.c')) -softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_gptimer.c')) -softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_synctimer.c')) -softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_timer.c')) -softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_systmr.c')) -softmmu_ss.add(when: 'CONFIG_SH_TIMER', if_true: files('sh_timer.c')) 
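Note (illustration, not part of the patch): the i8254 hunks above move the "iobase" qdev property from the concrete PIT class onto the abstract common base class, so every PIT flavour inherits it. A minimal sketch of what that buys board code, assuming QEMU's ISA/qdev helpers; board_create_pit() is a hypothetical wrapper, 0x40 is the usual PC port, and the in-kernel check is simplified for illustration:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/isa/isa.h"
#include "hw/qdev-properties.h"
#include "sysemu/kvm.h"

/*
 * "iobase" now lives on the common base type, so the same property call
 * works whether the board instantiates the TCG PIT or the in-kernel one.
 */
static ISADevice *board_create_pit(ISABus *isa_bus)
{
    ISADevice *pit = isa_new(kvm_irqchip_in_kernel() ? "kvm-pit" : "isa-pit");

    qdev_prop_set_uint32(DEVICE(pit), "iobase", 0x40);
    isa_realize_and_unref(pit, isa_bus, &error_fatal);
    return pit;
}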
-softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_timer.c')) -softmmu_ss.add(when: 'CONFIG_SSE_COUNTER', if_true: files('sse-counter.c')) -softmmu_ss.add(when: 'CONFIG_SSE_TIMER', if_true: files('sse-timer.c')) -softmmu_ss.add(when: 'CONFIG_STELLARIS_GPTM', if_true: files('stellaris-gptm.c')) -softmmu_ss.add(when: 'CONFIG_STM32F2XX_TIMER', if_true: files('stm32f2xx_timer.c')) -softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_timer.c')) +system_ss.add(when: 'CONFIG_A9_GTIMER', if_true: files('a9gtimer.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_A10_PIT', if_true: files('allwinner-a10-pit.c')) +system_ss.add(when: 'CONFIG_ALTERA_TIMER', if_true: files('altera_timer.c')) +system_ss.add(when: 'CONFIG_ARM_MPTIMER', if_true: files('arm_mptimer.c')) +system_ss.add(when: 'CONFIG_ARM_TIMER', if_true: files('arm_timer.c')) +system_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_systick.c')) +system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_timer.c')) +system_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_ttc.c')) +system_ss.add(when: 'CONFIG_CMSDK_APB_DUALTIMER', if_true: files('cmsdk-apb-dualtimer.c')) +system_ss.add(when: 'CONFIG_CMSDK_APB_TIMER', if_true: files('cmsdk-apb-timer.c')) +system_ss.add(when: 'CONFIG_RENESAS_TMR', if_true: files('renesas_tmr.c')) +system_ss.add(when: 'CONFIG_RENESAS_CMT', if_true: files('renesas_cmt.c')) +system_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic-timer.c')) +system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_timer.c')) +system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_mct.c')) +system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_pwm.c')) +system_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_gptimer.c')) +system_ss.add(when: 'CONFIG_HPET', if_true: files('hpet.c')) +system_ss.add(when: 'CONFIG_I8254', if_true: files('i8254_common.c', 'i8254.c')) +system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_epit.c')) +system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpt.c')) +system_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_gictimer.c')) +system_ss.add(when: 'CONFIG_MSF2', if_true: files('mss-timer.c')) +system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_timer.c')) +system_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_timer.c')) +system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_gptimer.c')) +system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_synctimer.c')) +system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_timer.c')) +system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_systmr.c')) +system_ss.add(when: 'CONFIG_SH_TIMER', if_true: files('sh_timer.c')) +system_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_timer.c')) +system_ss.add(when: 'CONFIG_SSE_COUNTER', if_true: files('sse-counter.c')) +system_ss.add(when: 'CONFIG_SSE_TIMER', if_true: files('sse-timer.c')) +system_ss.add(when: 'CONFIG_STELLARIS_GPTM', if_true: files('stellaris-gptm.c')) +system_ss.add(when: 'CONFIG_STM32F2XX_TIMER', if_true: files('stm32f2xx_timer.c')) +system_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_timer.c')) specific_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_timer.c')) -softmmu_ss.add(when: 'CONFIG_SIFIVE_PWM', if_true: files('sifive_pwm.c')) +system_ss.add(when: 'CONFIG_SIFIVE_PWM', if_true: files('sifive_pwm.c')) specific_ss.add(when: 'CONFIG_AVR_TIMER16', if_true: files('avr_timer16.c')) diff --git a/hw/timer/nrf51_timer.c b/hw/timer/nrf51_timer.c index 42be79c736..50c6772383 100644 --- 
a/hw/timer/nrf51_timer.c +++ b/hw/timer/nrf51_timer.c @@ -45,7 +45,12 @@ static uint32_t update_counter(NRF51TimerState *s, int64_t now) uint32_t ticks = ns_to_ticks(s, now - s->update_counter_ns); s->counter = (s->counter + ticks) % BIT(bitwidths[s->bitmode]); - s->update_counter_ns = now; + /* + * Only advance the sync time to the timestamp of the last tick, + * not all the way to 'now', so we don't lose time if we do + * multiple resyncs in a single tick. + */ + s->update_counter_ns += ticks_to_ns(s, ticks); return ticks; } diff --git a/hw/tpm/meson.build b/hw/tpm/meson.build index 76fe3cb098..6968e60b3f 100644 --- a/hw/tpm/meson.build +++ b/hw/tpm/meson.build @@ -1,9 +1,9 @@ -softmmu_ss.add(when: 'CONFIG_TPM_TIS', if_true: files('tpm_tis_common.c')) -softmmu_ss.add(when: 'CONFIG_TPM_TIS_ISA', if_true: files('tpm_tis_isa.c')) -softmmu_ss.add(when: 'CONFIG_TPM_TIS_SYSBUS', if_true: files('tpm_tis_sysbus.c')) -softmmu_ss.add(when: 'CONFIG_TPM_TIS_I2C', if_true: files('tpm_tis_i2c.c')) -softmmu_ss.add(when: 'CONFIG_TPM_CRB', if_true: files('tpm_crb.c')) -softmmu_ss.add(when: 'CONFIG_TPM_TIS', if_true: files('tpm_ppi.c')) -softmmu_ss.add(when: 'CONFIG_TPM_CRB', if_true: files('tpm_ppi.c')) +system_ss.add(when: 'CONFIG_TPM_TIS', if_true: files('tpm_tis_common.c')) +system_ss.add(when: 'CONFIG_TPM_TIS_ISA', if_true: files('tpm_tis_isa.c')) +system_ss.add(when: 'CONFIG_TPM_TIS_SYSBUS', if_true: files('tpm_tis_sysbus.c')) +system_ss.add(when: 'CONFIG_TPM_TIS_I2C', if_true: files('tpm_tis_i2c.c')) +system_ss.add(when: 'CONFIG_TPM_CRB', if_true: files('tpm_crb.c')) +system_ss.add(when: 'CONFIG_TPM_TIS', if_true: files('tpm_ppi.c')) +system_ss.add(when: 'CONFIG_TPM_CRB', if_true: files('tpm_ppi.c')) specific_ss.add(when: 'CONFIG_TPM_SPAPR', if_true: files('tpm_spapr.c')) diff --git a/hw/usb/Kconfig b/hw/usb/Kconfig index ce4f433976..0ec6def4b8 100644 --- a/hw/usb/Kconfig +++ b/hw/usb/Kconfig @@ -136,5 +136,4 @@ config USB_DWC3 config XLNX_USB_SUBSYS bool - default y if XLNX_VERSAL select USB_DWC3 diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c index 88f99c05d5..f013ded91e 100644 --- a/hw/usb/dev-uas.c +++ b/hw/usb/dev-uas.c @@ -937,7 +937,8 @@ static void usb_uas_realize(USBDevice *dev, Error **errp) QTAILQ_INIT(&uas->results); QTAILQ_INIT(&uas->requests); - uas->status_bh = qemu_bh_new(usb_uas_send_status_bh, uas); + uas->status_bh = qemu_bh_new_guarded(usb_uas_send_status_bh, uas, + &d->mem_reentrancy_guard); dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE); scsi_bus_init(&uas->bus, sizeof(uas->bus), DEVICE(dev), &usb_uas_scsi_info); diff --git a/hw/usb/hcd-dwc2.c b/hw/usb/hcd-dwc2.c index 8755e9cbb0..a0c4e782b2 100644 --- a/hw/usb/hcd-dwc2.c +++ b/hw/usb/hcd-dwc2.c @@ -1364,7 +1364,8 @@ static void dwc2_realize(DeviceState *dev, Error **errp) s->fi = USB_FRMINTVL - 1; s->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_frame_boundary, s); s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_work_timer, s); - s->async_bh = qemu_bh_new(dwc2_work_bh, s); + s->async_bh = qemu_bh_new_guarded(dwc2_work_bh, s, + &dev->mem_reentrancy_guard); sysbus_init_irq(sbd, &s->irq); } diff --git a/hw/usb/hcd-ehci-pci.c b/hw/usb/hcd-ehci-pci.c index 4c37c8e227..345444a573 100644 --- a/hw/usb/hcd-ehci-pci.c +++ b/hw/usb/hcd-ehci-pci.c @@ -74,7 +74,7 @@ static void usb_ehci_pci_realize(PCIDevice *dev, Error **errp) static void usb_ehci_pci_init(Object *obj) { - DeviceClass *dc = OBJECT_GET_CLASS(DeviceClass, obj, TYPE_DEVICE); + DeviceClass *dc = DEVICE_GET_CLASS(obj); EHCIPCIState *i = PCI_EHCI(obj); EHCIState 
*s = &i->ehci; diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c index d4da8dcb8d..c930c60921 100644 --- a/hw/usb/hcd-ehci.c +++ b/hw/usb/hcd-ehci.c @@ -2533,7 +2533,8 @@ void usb_ehci_realize(EHCIState *s, DeviceState *dev, Error **errp) } s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ehci_work_timer, s); - s->async_bh = qemu_bh_new(ehci_work_bh, s); + s->async_bh = qemu_bh_new_guarded(ehci_work_bh, s, + &dev->mem_reentrancy_guard); s->device = dev; s->vmstate = qemu_add_vm_change_state_handler(usb_ehci_vm_state_change, s); diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c index 88d2b4b13c..cc5cde6983 100644 --- a/hw/usb/hcd-ohci.c +++ b/hw/usb/hcd-ohci.c @@ -1239,6 +1239,8 @@ static void ohci_frame_boundary(void *opaque) /* Increment frame number and take care of endianness. */ ohci->frame_number = (ohci->frame_number + 1) & 0xffff; hcca.frame = cpu_to_le16(ohci->frame_number); + /* When the HC updates frame number, set pad to 0. Ref OHCI Spec 4.4.1*/ + hcca.pad = 0; if (ohci->done_count == 0 && !(ohci->intr_status & OHCI_INTR_WD)) { if (!ohci->done) { diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c index 8ac1175ad2..77baaa7a6b 100644 --- a/hw/usb/hcd-uhci.c +++ b/hw/usb/hcd-uhci.c @@ -1190,7 +1190,7 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp) USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL); } } - s->bh = qemu_bh_new(uhci_bh, s); + s->bh = qemu_bh_new_guarded(uhci_bh, s, &DEVICE(dev)->mem_reentrancy_guard); s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, uhci_frame_timer, s); s->num_ports_vmstate = NB_PORTS; QTAILQ_INIT(&s->queues); diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c index 176868d345..f500db85ab 100644 --- a/hw/usb/host-libusb.c +++ b/hw/usb/host-libusb.c @@ -1141,7 +1141,8 @@ static void usb_host_nodev_bh(void *opaque) static void usb_host_nodev(USBHostDevice *s) { if (!s->bh_nodev) { - s->bh_nodev = qemu_bh_new(usb_host_nodev_bh, s); + s->bh_nodev = qemu_bh_new_guarded(usb_host_nodev_bh, s, + &DEVICE(s)->mem_reentrancy_guard); } qemu_bh_schedule(s->bh_nodev); } @@ -1739,7 +1740,8 @@ static int usb_host_post_load(void *opaque, int version_id) USBHostDevice *dev = opaque; if (!dev->bh_postld) { - dev->bh_postld = qemu_bh_new(usb_host_post_load_bh, dev); + dev->bh_postld = qemu_bh_new_guarded(usb_host_post_load_bh, dev, + &DEVICE(dev)->mem_reentrancy_guard); } qemu_bh_schedule(dev->bh_postld); dev->bh_postld_pending = true; diff --git a/hw/usb/meson.build b/hw/usb/meson.build index 599dc24f0d..e94149ebde 100644 --- a/hw/usb/meson.build +++ b/hw/usb/meson.build @@ -1,7 +1,7 @@ hw_usb_modules = {} # usb subsystem core -softmmu_ss.add(when: 'CONFIG_USB', if_true: files( +system_ss.add(when: 'CONFIG_USB', if_true: files( 'bus.c', 'combined-packet.c', 'core.c', @@ -12,42 +12,42 @@ softmmu_ss.add(when: 'CONFIG_USB', if_true: files( )) # usb host adapters -softmmu_ss.add(when: 'CONFIG_USB_UHCI', if_true: files('hcd-uhci.c')) -softmmu_ss.add(when: 'CONFIG_USB_OHCI', if_true: files('hcd-ohci.c')) -softmmu_ss.add(when: 'CONFIG_USB_OHCI_PCI', if_true: files('hcd-ohci-pci.c')) -softmmu_ss.add(when: 'CONFIG_USB_EHCI', if_true: files('hcd-ehci.c')) -softmmu_ss.add(when: 'CONFIG_USB_EHCI_PCI', if_true: files('hcd-ehci-pci.c')) -softmmu_ss.add(when: 'CONFIG_USB_EHCI_SYSBUS', if_true: files('hcd-ehci.c', 'hcd-ehci-sysbus.c')) -softmmu_ss.add(when: 'CONFIG_USB_XHCI', if_true: files('hcd-xhci.c')) -softmmu_ss.add(when: 'CONFIG_USB_XHCI_PCI', if_true: files('hcd-xhci-pci.c')) -softmmu_ss.add(when: 'CONFIG_USB_XHCI_SYSBUS', if_true: files('hcd-xhci-sysbus.c')) 
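Note (illustration, not part of the patch): the qemu_bh_new_guarded() conversions in the USB host controllers above all apply one pattern: the bottom half is created with the owning device's MemReentrancyGuard, which the memory core uses to detect and block re-entrant accesses to that device (for example a DMA write landing back in its own MMIO region). A minimal sketch of the pattern, assuming QEMU's BH API; MyDevState and my_dev_bh() are hypothetical names:

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/qdev-core.h"

typedef struct MyDevState {
    DeviceState parent_obj;
    QEMUBH *bh;
} MyDevState;

static void my_dev_bh(void *opaque)
{
    /* deferred work for the device runs here, outside the MMIO handler */
}

static void my_dev_realize(DeviceState *dev, Error **errp)
{
    MyDevState *s = (MyDevState *)dev;

    /* was: s->bh = qemu_bh_new(my_dev_bh, s); */
    s->bh = qemu_bh_new_guarded(my_dev_bh, s, &dev->mem_reentrancy_guard);
}

Keeping the guard on DeviceState is what lets every call site above pass &dev->mem_reentrancy_guard (or &DEVICE(x)->mem_reentrancy_guard) without new per-device state.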
-softmmu_ss.add(when: 'CONFIG_USB_XHCI_NEC', if_true: files('hcd-xhci-nec.c')) -softmmu_ss.add(when: 'CONFIG_USB_MUSB', if_true: files('hcd-musb.c')) -softmmu_ss.add(when: 'CONFIG_USB_DWC2', if_true: files('hcd-dwc2.c')) -softmmu_ss.add(when: 'CONFIG_USB_DWC3', if_true: files('hcd-dwc3.c')) +system_ss.add(when: 'CONFIG_USB_UHCI', if_true: files('hcd-uhci.c')) +system_ss.add(when: 'CONFIG_USB_OHCI', if_true: files('hcd-ohci.c')) +system_ss.add(when: 'CONFIG_USB_OHCI_PCI', if_true: files('hcd-ohci-pci.c')) +system_ss.add(when: 'CONFIG_USB_EHCI', if_true: files('hcd-ehci.c')) +system_ss.add(when: 'CONFIG_USB_EHCI_PCI', if_true: files('hcd-ehci-pci.c')) +system_ss.add(when: 'CONFIG_USB_EHCI_SYSBUS', if_true: files('hcd-ehci.c', 'hcd-ehci-sysbus.c')) +system_ss.add(when: 'CONFIG_USB_XHCI', if_true: files('hcd-xhci.c')) +system_ss.add(when: 'CONFIG_USB_XHCI_PCI', if_true: files('hcd-xhci-pci.c')) +system_ss.add(when: 'CONFIG_USB_XHCI_SYSBUS', if_true: files('hcd-xhci-sysbus.c')) +system_ss.add(when: 'CONFIG_USB_XHCI_NEC', if_true: files('hcd-xhci-nec.c')) +system_ss.add(when: 'CONFIG_USB_MUSB', if_true: files('hcd-musb.c')) +system_ss.add(when: 'CONFIG_USB_DWC2', if_true: files('hcd-dwc2.c')) +system_ss.add(when: 'CONFIG_USB_DWC3', if_true: files('hcd-dwc3.c')) -softmmu_ss.add(when: 'CONFIG_TUSB6010', if_true: files('tusb6010.c')) -softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('chipidea.c')) -softmmu_ss.add(when: 'CONFIG_IMX_USBPHY', if_true: files('imx-usb-phy.c')) -softmmu_ss.add(when: 'CONFIG_VT82C686', if_true: files('vt82c686-uhci-pci.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-usb2-ctrl-regs.c')) -softmmu_ss.add(when: 'CONFIG_XLNX_USB_SUBSYS', if_true: files('xlnx-usb-subsystem.c')) +system_ss.add(when: 'CONFIG_TUSB6010', if_true: files('tusb6010.c')) +system_ss.add(when: 'CONFIG_IMX', if_true: files('chipidea.c')) +system_ss.add(when: 'CONFIG_IMX_USBPHY', if_true: files('imx-usb-phy.c')) +system_ss.add(when: 'CONFIG_VT82C686', if_true: files('vt82c686-uhci-pci.c')) +system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-usb2-ctrl-regs.c')) +system_ss.add(when: 'CONFIG_XLNX_USB_SUBSYS', if_true: files('xlnx-usb-subsystem.c')) # emulated usb devices -softmmu_ss.add(when: 'CONFIG_USB', if_true: files('dev-hub.c')) -softmmu_ss.add(when: 'CONFIG_USB', if_true: files('dev-hid.c')) -softmmu_ss.add(when: 'CONFIG_USB_TABLET_WACOM', if_true: files('dev-wacom.c')) -softmmu_ss.add(when: 'CONFIG_USB_STORAGE_CORE', if_true: files('dev-storage.c')) -softmmu_ss.add(when: 'CONFIG_USB_STORAGE_BOT', if_true: files('dev-storage-bot.c')) -softmmu_ss.add(when: 'CONFIG_USB_STORAGE_CLASSIC', if_true: files('dev-storage-classic.c')) -softmmu_ss.add(when: 'CONFIG_USB_STORAGE_UAS', if_true: files('dev-uas.c')) -softmmu_ss.add(when: 'CONFIG_USB_AUDIO', if_true: files('dev-audio.c')) -softmmu_ss.add(when: 'CONFIG_USB_SERIAL', if_true: files('dev-serial.c')) -softmmu_ss.add(when: 'CONFIG_USB_NETWORK', if_true: files('dev-network.c')) -softmmu_ss.add(when: ['CONFIG_POSIX', 'CONFIG_USB_STORAGE_MTP'], if_true: files('dev-mtp.c')) +system_ss.add(when: 'CONFIG_USB', if_true: files('dev-hub.c')) +system_ss.add(when: 'CONFIG_USB', if_true: files('dev-hid.c')) +system_ss.add(when: 'CONFIG_USB_TABLET_WACOM', if_true: files('dev-wacom.c')) +system_ss.add(when: 'CONFIG_USB_STORAGE_CORE', if_true: files('dev-storage.c')) +system_ss.add(when: 'CONFIG_USB_STORAGE_BOT', if_true: files('dev-storage-bot.c')) +system_ss.add(when: 'CONFIG_USB_STORAGE_CLASSIC', if_true: 
files('dev-storage-classic.c')) +system_ss.add(when: 'CONFIG_USB_STORAGE_UAS', if_true: files('dev-uas.c')) +system_ss.add(when: 'CONFIG_USB_AUDIO', if_true: files('dev-audio.c')) +system_ss.add(when: 'CONFIG_USB_SERIAL', if_true: files('dev-serial.c')) +system_ss.add(when: 'CONFIG_USB_NETWORK', if_true: files('dev-network.c')) +system_ss.add(when: ['CONFIG_POSIX', 'CONFIG_USB_STORAGE_MTP'], if_true: files('dev-mtp.c')) # smartcard -softmmu_ss.add(when: 'CONFIG_USB_SMARTCARD', if_true: files('dev-smartcard-reader.c')) +system_ss.add(when: 'CONFIG_USB_SMARTCARD', if_true: files('dev-smartcard-reader.c')) if cacard.found() usbsmartcard_ss = ss.source_set() @@ -57,15 +57,15 @@ if cacard.found() endif # U2F -softmmu_ss.add(when: 'CONFIG_USB_U2F', if_true: files('u2f.c')) -softmmu_ss.add(when: ['CONFIG_LINUX', 'CONFIG_USB_U2F'], if_true: [libudev, files('u2f-passthru.c')]) +system_ss.add(when: 'CONFIG_USB_U2F', if_true: files('u2f.c')) +system_ss.add(when: ['CONFIG_LINUX', 'CONFIG_USB_U2F'], if_true: [libudev, files('u2f-passthru.c')]) if u2f.found() - softmmu_ss.add(when: 'CONFIG_USB_U2F', if_true: [u2f, files('u2f-emulated.c')]) + system_ss.add(when: 'CONFIG_USB_U2F', if_true: [u2f, files('u2f-emulated.c')]) endif # CanoKey if canokey.found() - softmmu_ss.add(when: 'CONFIG_USB_CANOKEY', if_true: [canokey, files('canokey.c')]) + system_ss.add(when: 'CONFIG_USB_CANOKEY', if_true: [canokey, files('canokey.c')]) endif # usb redirect @@ -84,6 +84,6 @@ if libusb.found() hw_usb_modules += {'host': usbhost_ss} endif -softmmu_ss.add(when: ['CONFIG_USB', 'CONFIG_XEN_BUS', libusb], if_true: files('xen-usb.c')) +system_ss.add(when: ['CONFIG_USB', 'CONFIG_XEN_BUS', libusb], if_true: files('xen-usb.c')) modules += { 'hw-usb': hw_usb_modules } diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c index fd7df599bc..39fbaaab16 100644 --- a/hw/usb/redirect.c +++ b/hw/usb/redirect.c @@ -1441,8 +1441,10 @@ static void usbredir_realize(USBDevice *udev, Error **errp) } } - dev->chardev_close_bh = qemu_bh_new(usbredir_chardev_close_bh, dev); - dev->device_reject_bh = qemu_bh_new(usbredir_device_reject_bh, dev); + dev->chardev_close_bh = qemu_bh_new_guarded(usbredir_chardev_close_bh, dev, + &DEVICE(dev)->mem_reentrancy_guard); + dev->device_reject_bh = qemu_bh_new_guarded(usbredir_device_reject_bh, dev, + &DEVICE(dev)->mem_reentrancy_guard); dev->attach_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, usbredir_do_attach, dev); packet_id_queue_init(&dev->cancelled, dev, "cancelled"); diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c index 66cb3f7c24..38ee660a30 100644 --- a/hw/usb/xen-usb.c +++ b/hw/usb/xen-usb.c @@ -1032,7 +1032,8 @@ static void usbback_alloc(struct XenLegacyDevice *xendev) QTAILQ_INIT(&usbif->req_free_q); QSIMPLEQ_INIT(&usbif->hotplug_q); - usbif->bh = qemu_bh_new(usbback_bh, usbif); + usbif->bh = qemu_bh_new_guarded(usbback_bh, usbif, + &DEVICE(xendev)->mem_reentrancy_guard); } static int usbback_free(struct XenLegacyDevice *xendev) diff --git a/hw/vfio/common.c b/hw/vfio/common.c index 4d01ea3515..fa8fd949b1 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -478,7 +478,8 @@ static bool vfio_devices_all_dirty_tracking(VFIOContainer *container) VFIODevice *vbasedev; MigrationState *ms = migrate_get_current(); - if (!migration_is_setup_or_active(ms->state)) { + if (ms->state != MIGRATION_STATUS_ACTIVE && + ms->state != MIGRATION_STATUS_DEVICE) { return false; } @@ -1746,6 +1747,7 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, { bool all_device_dirty_tracking = 
vfio_devices_all_device_dirty_tracking(container); + uint64_t dirty_pages; VFIOBitmap vbmap; int ret; @@ -1771,11 +1773,11 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, goto out; } - cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr, - vbmap.pages); + dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr, + vbmap.pages); trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size, - ram_addr); + ram_addr, dirty_pages); out: g_free(vbmap.bitmap); diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index ec9a854361..73874a94de 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -2066,6 +2066,54 @@ static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp) return 0; } +static int vfio_setup_rebar_ecap(VFIOPCIDevice *vdev, uint16_t pos) +{ + uint32_t ctrl; + int i, nbar; + + ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL); + nbar = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; + + for (i = 0; i < nbar; i++) { + uint32_t cap; + int size; + + ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL + (i * 8)); + size = (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT; + + /* The cap register reports sizes 1MB to 128TB, with 4 reserved bits */ + cap = size <= 27 ? 1U << (size + 4) : 0; + + /* + * The PCIe spec (v6.0.1, 7.8.6) requires HW to support at least one + * size in the range 1MB to 512GB. We intend to mask all sizes except + * the one currently enabled in the size field, therefore if it's + * outside the range, hide the whole capability as this virtualization + * trick won't work. If >512GB resizable BARs start to appear, we + * might need an opt-in or reservation scheme in the kernel. + */ + if (!(cap & PCI_REBAR_CAP_SIZES)) { + return -EINVAL; + } + + /* Hide all sizes reported in the ctrl reg per above requirement. */ + ctrl &= (PCI_REBAR_CTRL_BAR_SIZE | + PCI_REBAR_CTRL_NBAR_MASK | + PCI_REBAR_CTRL_BAR_IDX); + + /* + * The BAR size field is RW, however we've mangled the capability + * register such that we only report a single size, ie. the current + * BAR size. A write of an unsupported value is undefined, therefore + * the register field is essentially RO. 
+ */ + vfio_add_emulated_long(vdev, pos + PCI_REBAR_CAP + (i * 8), cap, ~0); + vfio_add_emulated_long(vdev, pos + PCI_REBAR_CTRL + (i * 8), ctrl, ~0); + } + + return 0; +} + static void vfio_add_ext_cap(VFIOPCIDevice *vdev) { PCIDevice *pdev = &vdev->pdev; @@ -2139,9 +2187,13 @@ static void vfio_add_ext_cap(VFIOPCIDevice *vdev) case 0: /* kernel masked capability */ case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */ case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */ - case PCI_EXT_CAP_ID_REBAR: /* Can't expose read-only */ trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next); break; + case PCI_EXT_CAP_ID_REBAR: + if (!vfio_setup_rebar_ecap(vdev, next)) { + pcie_add_capability(pdev, cap_id, cap_ver, next, size); + } + break; default: pcie_add_capability(pdev, cap_id, cap_ver, next, size); } @@ -2856,6 +2908,8 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) int groupid; int i, ret; bool is_mdev; + char uuid[UUID_FMT_LEN]; + char *name; if (!vbasedev->sysfsdev) { if (!(~vdev->host.domain || ~vdev->host.bus || @@ -2936,7 +2990,15 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) goto error; } - ret = vfio_get_device(group, vbasedev->name, vbasedev, errp); + if (!qemu_uuid_is_null(&vdev->vf_token)) { + qemu_uuid_unparse(&vdev->vf_token, uuid); + name = g_strdup_printf("%s vf_token=%s", vbasedev->name, uuid); + } else { + name = g_strdup(vbasedev->name); + } + + ret = vfio_get_device(group, name, vbasedev, errp); + g_free(name); if (ret) { vfio_put_group(group); goto error; @@ -3268,6 +3330,7 @@ static void vfio_instance_init(Object *obj) static Property vfio_pci_dev_properties[] = { DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host), + DEFINE_PROP_UUID_NODEFAULT("vf-token", VFIOPCIDevice, vf_token), DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev), DEFINE_PROP_ON_OFF_AUTO("x-pre-copy-dirty-page-tracking", VFIOPCIDevice, vbasedev.pre_copy_dirty_page_tracking, diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h index 177abcc8fb..2674476d6c 100644 --- a/hw/vfio/pci.h +++ b/hw/vfio/pci.h @@ -137,6 +137,7 @@ struct VFIOPCIDevice { VFIOVGA *vga; /* 0xa0000, 0x3b0, 0x3c0 */ void *igd_opregion; PCIHostDeviceAddress host; + QemuUUID vf_token; EventNotifier err_notifier; EventNotifier req_notifier; int (*resetfn)(struct VFIOPCIDevice *); diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index 646e42fd27..cfb60c354d 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -120,7 +120,7 @@ vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Devic vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]" vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x" vfio_dma_unmap_overflow_workaround(void) "" -vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64 +vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64 vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64 # platform.c diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build index bdec78bfc6..e83c37fffd 100644 --- a/hw/virtio/meson.build +++ b/hw/virtio/meson.build 
@@ -62,11 +62,11 @@ virtio_pci_ss.add(when: 'CONFIG_VHOST_VDPA_DEV', if_true: files('vdpa-dev-pci.c' specific_virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss) -softmmu_ss.add_all(when: 'CONFIG_VIRTIO', if_true: softmmu_virtio_ss) -softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c')) -softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('virtio-stub.c')) -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c')) -softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('virtio-stub.c')) -softmmu_ss.add(files('virtio-hmp-cmds.c')) +system_ss.add_all(when: 'CONFIG_VIRTIO', if_true: softmmu_virtio_ss) +system_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c')) +system_ss.add(when: 'CONFIG_VIRTIO', if_false: files('virtio-stub.c')) +system_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c')) +system_ss.add(when: 'CONFIG_ALL', if_true: files('virtio-stub.c')) +system_ss.add(files('virtio-hmp-cmds.c')) specific_ss.add_all(when: 'CONFIG_VIRTIO', if_true: specific_virtio_ss) diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index 8361e70d1b..bd7c12b6d3 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -68,7 +68,7 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp) */ static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq) { - return svq->vring.num - (svq->shadow_avail_idx - svq->shadow_used_idx); + return svq->num_free; } /** @@ -263,6 +263,7 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, return -EINVAL; } + svq->num_free -= ndescs; svq->desc_state[qemu_head].elem = elem; svq->desc_state[qemu_head].ndescs = ndescs; vhost_svq_kick(svq); @@ -449,6 +450,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id); svq->desc_next[last_used_chain] = svq->free_head; svq->free_head = used_elem.id; + svq->num_free += num; *len = used_elem.len; return g_steal_pointer(&svq->desc_state[used_elem.id].elem); @@ -659,6 +661,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, svq->iova_tree = iova_tree; svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq)); + svq->num_free = svq->vring.num; driver_size = vhost_svq_driver_area_size(svq); device_size = vhost_svq_device_area_size(svq); svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size); diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h index 926a4897b1..6efe051a70 100644 --- a/hw/virtio/vhost-shadow-virtqueue.h +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -107,6 +107,9 @@ typedef struct VhostShadowVirtqueue { /* Next head to consume from the device */ uint16_t last_used_idx; + + /* Size of SVQ vring free descriptors */ + uint16_t num_free; } VhostShadowVirtqueue; bool vhost_svq_valid_features(uint64_t features, Error **errp); diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c index 60eaf0d95b..4eef3f0633 100644 --- a/hw/virtio/vhost-user-i2c.c +++ b/hw/virtio/vhost-user-i2c.c @@ -128,6 +128,14 @@ static void vu_i2c_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) { VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + /* + * We don't support interrupts, return early if index is set to + * VIRTIO_CONFIG_IRQ_IDX. 
+ */ + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return; + } + vhost_virtqueue_mask(&i2c->vhost_dev, vdev, idx, mask); } @@ -135,6 +143,14 @@ static bool vu_i2c_guest_notifier_pending(VirtIODevice *vdev, int idx) { VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + /* + * We don't support interrupts, return early if index is set to + * VIRTIO_CONFIG_IRQ_IDX. + */ + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return false; + } + return vhost_virtqueue_pending(&i2c->vhost_dev, idx); } diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index e5285df4ba..74a2a28663 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -42,17 +42,7 @@ #define VHOST_USER_F_PROTOCOL_FEATURES 30 #define VHOST_USER_BACKEND_MAX_FDS 8 -/* - * Set maximum number of RAM slots supported to - * the maximum number supported by the target - * hardware plaform. - */ -#if defined(TARGET_X86) || defined(TARGET_X86_64) || \ - defined(TARGET_ARM) || defined(TARGET_AARCH64) -#include "hw/acpi/acpi.h" -#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS - -#elif defined(TARGET_PPC) || defined(TARGET_PPC64) +#if defined(TARGET_PPC) || defined(TARGET_PPC64) #include "hw/ppc/spapr.h" #define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS @@ -483,6 +473,7 @@ static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset, assert((uintptr_t)addr == addr); mr = memory_region_from_host((void *)(uintptr_t)addr, offset); *fd = memory_region_get_fd(mr); + *offset += mr->ram_block->fd_offset; return mr; } @@ -2677,7 +2668,20 @@ static int vhost_user_dev_start(struct vhost_dev *dev, bool started) VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK); } else { - return vhost_user_set_status(dev, 0); + return 0; + } +} + +static void vhost_user_reset_status(struct vhost_dev *dev) +{ + /* Set device status only for last queue pair */ + if (dev->vq_index + dev->nvqs != dev->vq_index_end) { + return; + } + + if (virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_STATUS)) { + vhost_user_set_status(dev, 0); } } @@ -2716,4 +2720,5 @@ const VhostOps user_ops = { .vhost_get_inflight_fd = vhost_user_get_inflight_fd, .vhost_set_inflight_fd = vhost_user_set_inflight_fd, .vhost_dev_start = vhost_user_dev_start, + .vhost_reset_status = vhost_user_reset_status, }; diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index bc6bad23d5..b3094e8a8b 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -26,6 +26,7 @@ #include "cpu.h" #include "trace.h" #include "qapi/error.h" +#include "hw/virtio/virtio-access.h" /* * Return one past the end of the end of section. Be careful with uint64_t @@ -60,13 +61,21 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, iova_min, section->offset_within_address_space); return true; } + /* + * While using vIOMMU, sometimes the section will be larger than iova_max, + * but the memory that actually maps is smaller, so move the check to + * function vhost_vdpa_iommu_map_notify(). 
That function will use the actual + * size that maps to the kernel + */ - llend = vhost_vdpa_section_end(section); - if (int128_gt(llend, int128_make64(iova_max))) { - error_report("RAM section out of device range (max=0x%" PRIx64 - ", end addr=0x%" PRIx64 ")", - iova_max, int128_get64(llend)); - return true; + if (!memory_region_is_iommu(section->mr)) { + llend = vhost_vdpa_section_end(section); + if (int128_gt(llend, int128_make64(iova_max))) { + error_report("RAM section out of device range (max=0x%" PRIx64 + ", end addr=0x%" PRIx64 ")", + iova_max, int128_get64(llend)); + return true; + } } return false; @@ -185,6 +194,115 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener) v->iotlb_batch_begin_sent = false; } +static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) +{ + struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n); + + hwaddr iova = iotlb->iova + iommu->iommu_offset; + struct vhost_vdpa *v = iommu->dev; + void *vaddr; + int ret; + Int128 llend; + + if (iotlb->target_as != &address_space_memory) { + error_report("Wrong target AS \"%s\", only system memory is allowed", + iotlb->target_as->name ? iotlb->target_as->name : "none"); + return; + } + RCU_READ_LOCK_GUARD(); + /* check if RAM section out of device range */ + llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova)); + if (int128_gt(llend, int128_make64(v->iova_range.last))) { + error_report("RAM section out of device range (max=0x%" PRIx64 + ", end addr=0x%" PRIx64 ")", + v->iova_range.last, int128_get64(llend)); + return; + } + + if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { + bool read_only; + + if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) { + return; + } + ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, + iotlb->addr_mask + 1, vaddr, read_only); + if (ret) { + error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", " + "0x%" HWADDR_PRIx ", %p) = %d (%m)", + v, iova, iotlb->addr_mask + 1, vaddr, ret); + } + } else { + ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, + iotlb->addr_mask + 1); + if (ret) { + error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " + "0x%" HWADDR_PRIx ") = %d (%m)", + v, iova, iotlb->addr_mask + 1, ret); + } + } +} + +static void vhost_vdpa_iommu_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); + + struct vdpa_iommu *iommu; + Int128 end; + int iommu_idx; + IOMMUMemoryRegion *iommu_mr; + int ret; + + iommu_mr = IOMMU_MEMORY_REGION(section->mr); + + iommu = g_malloc0(sizeof(*iommu)); + end = int128_add(int128_make64(section->offset_within_region), + section->size); + end = int128_sub(end, int128_one()); + iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr, + MEMTXATTRS_UNSPECIFIED); + iommu->iommu_mr = iommu_mr; + iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify, + IOMMU_NOTIFIER_IOTLB_EVENTS, + section->offset_within_region, + int128_get64(end), + iommu_idx); + iommu->iommu_offset = section->offset_within_address_space - + section->offset_within_region; + iommu->dev = v; + + ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL); + if (ret) { + g_free(iommu); + return; + } + + QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next); + memory_region_iommu_replay(iommu->iommu_mr, &iommu->n); + + return; +} + +static void vhost_vdpa_iommu_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + struct vhost_vdpa *v = 
container_of(listener, struct vhost_vdpa, listener); + + struct vdpa_iommu *iommu; + + QLIST_FOREACH(iommu, &v->iommu_list, iommu_next) + { + if (MEMORY_REGION(iommu->iommu_mr) == section->mr && + iommu->n.start == section->offset_within_region) { + memory_region_unregister_iommu_notifier(section->mr, &iommu->n); + QLIST_REMOVE(iommu, iommu_next); + g_free(iommu); + break; + } + } +} + static void vhost_vdpa_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { @@ -199,6 +317,10 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, v->iova_range.last)) { return; } + if (memory_region_is_iommu(section->mr)) { + vhost_vdpa_iommu_region_add(listener, section); + return; + } if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != (section->offset_within_region & ~TARGET_PAGE_MASK))) { @@ -278,6 +400,9 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, v->iova_range.last)) { return; } + if (memory_region_is_iommu(section->mr)) { + vhost_vdpa_iommu_region_del(listener, section); + } if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != (section->offset_within_region & ~TARGET_PAGE_MASK))) { @@ -288,7 +413,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); llend = vhost_vdpa_section_end(section); - trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend)); + trace_vhost_vdpa_listener_region_del(v, iova, + int128_get64(int128_sub(llend, int128_one()))); if (int128_ge(int128_make64(iova), llend)) { return; @@ -315,10 +441,28 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, vhost_iova_tree_remove(v->iova_tree, *result); } vhost_vdpa_iotlb_batch_begin_once(v); + /* + * The unmap ioctl doesn't accept a full 64-bit. 
need to check it + */ + if (int128_eq(llsize, int128_2_64())) { + llsize = int128_rshift(llsize, 1); + ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, + int128_get64(llsize)); + + if (ret) { + error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " + "0x%" HWADDR_PRIx ") = %d (%m)", + v, iova, int128_get64(llsize), ret); + } + iova += int128_get64(llsize); + } ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, int128_get64(llsize)); + if (ret) { - error_report("vhost_vdpa dma unmap error!"); + error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " + "0x%" HWADDR_PRIx ") = %d (%m)", + v, iova, int128_get64(llsize), ret); } memory_region_unref(section->mr); @@ -1163,7 +1307,13 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) } if (started) { - memory_listener_register(&v->listener, &address_space_memory); + if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) { + error_report("SVQ can not work while IOMMU enable, please disable" + "IOMMU and try again"); + return -1; + } + memory_listener_register(&v->listener, dev->vdev->dma_as); + return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); } diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index a266396576..23da579ce2 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -107,7 +107,7 @@ static void vhost_dev_sync_region(struct vhost_dev *dev, } } -static bool vhost_dev_has_iommu(struct vhost_dev *dev) +bool vhost_dev_has_iommu(struct vhost_dev *dev) { VirtIODevice *vdev = dev->vdev; @@ -1291,18 +1291,6 @@ void vhost_virtqueue_stop(struct vhost_dev *dev, 0, virtio_queue_get_desc_size(vdev, idx)); } -static void vhost_eventfd_add(MemoryListener *listener, - MemoryRegionSection *section, - bool match_data, uint64_t data, EventNotifier *e) -{ -} - -static void vhost_eventfd_del(MemoryListener *listener, - MemoryRegionSection *section, - bool match_data, uint64_t data, EventNotifier *e) -{ -} - static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev, int n, uint32_t timeout) { @@ -1457,8 +1445,6 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, .log_sync = vhost_log_sync, .log_global_start = vhost_log_global_start, .log_global_stop = vhost_log_global_stop, - .eventfd_add = vhost_eventfd_add, - .eventfd_del = vhost_eventfd_del, .priority = 10 }; diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index 746f07c4d2..d004cf29d2 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -32,6 +32,7 @@ #include "qemu/error-report.h" #include "migration/misc.h" #include "migration/migration.h" +#include "migration/options.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" @@ -729,37 +730,14 @@ static void virtio_balloon_get_config(VirtIODevice *vdev, uint8_t *config_data) memcpy(config_data, &config, virtio_balloon_config_size(dev)); } -static int build_dimm_list(Object *obj, void *opaque) -{ - GSList **list = opaque; - - if (object_dynamic_cast(obj, TYPE_PC_DIMM)) { - DeviceState *dev = DEVICE(obj); - if (dev->realized) { /* only realized DIMMs matter */ - *list = g_slist_prepend(*list, dev); - } - } - - object_child_foreach(obj, build_dimm_list, opaque); - return 0; -} - static ram_addr_t get_current_ram_size(void) { - GSList *list = NULL, *item; - ram_addr_t size = current_machine->ram_size; - - build_dimm_list(qdev_get_machine(), &list); - for (item = list; item; item = g_slist_next(item)) { - Object *obj = OBJECT(item->data); - if (!strcmp(object_get_typename(obj), TYPE_PC_DIMM)) { - size += 
object_property_get_int(obj, PC_DIMM_SIZE_PROP, - &error_abort); - } + MachineState *machine = MACHINE(qdev_get_machine()); + if (machine->device_memory) { + return machine->ram_size + machine->device_memory->dimm_size; + } else { + return machine->ram_size; } - g_slist_free(list); - - return size; } static bool virtio_balloon_page_poison_support(void *opaque) @@ -908,8 +886,9 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp) precopy_add_notifier(&s->free_page_hint_notify); object_ref(OBJECT(s->iothread)); - s->free_page_bh = aio_bh_new(iothread_get_aio_context(s->iothread), - virtio_ballloon_get_free_page_hints, s); + s->free_page_bh = aio_bh_new_guarded(iothread_get_aio_context(s->iothread), + virtio_ballloon_get_free_page_hints, s, + &dev->mem_reentrancy_guard); } if (virtio_has_feature(s->host_features, VIRTIO_BALLOON_F_REPORTING)) { diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c index 802e1b9659..c729a1f79e 100644 --- a/hw/virtio/virtio-crypto.c +++ b/hw/virtio/virtio-crypto.c @@ -476,15 +476,17 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req) size_t max_len; CryptoDevBackendSymOpInfo *op_info = req->op_info.u.sym_op_info; - max_len = op_info->iv_len + - op_info->aad_len + - op_info->src_len + - op_info->dst_len + - op_info->digest_result_len; - - /* Zeroize and free request data structure */ - memset(op_info, 0, sizeof(*op_info) + max_len); - g_free(op_info); + if (op_info) { + max_len = op_info->iv_len + + op_info->aad_len + + op_info->src_len + + op_info->dst_len + + op_info->digest_result_len; + + /* Zeroize and free request data structure */ + memset(op_info, 0, sizeof(*op_info) + max_len); + g_free(op_info); + } } else if (req->flags == QCRYPTODEV_BACKEND_ALG_ASYM) { CryptoDevBackendAsymOpInfo *op_info = req->op_info.u.asym_op_info; if (op_info) { @@ -1074,7 +1076,8 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp) vcrypto->vqs[i].dataq = virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh); vcrypto->vqs[i].dataq_bh = - qemu_bh_new(virtio_crypto_dataq_bh, &vcrypto->vqs[i]); + qemu_bh_new_guarded(virtio_crypto_dataq_bh, &vcrypto->vqs[i], + &dev->mem_reentrancy_guard); vcrypto->vqs[i].vcrypto = vcrypto; } diff --git a/hw/virtio/virtio-input-pci.c b/hw/virtio/virtio-input-pci.c index a9d0992389..a53edf46c4 100644 --- a/hw/virtio/virtio-input-pci.c +++ b/hw/virtio/virtio-input-pci.c @@ -25,10 +25,11 @@ struct VirtIOInputPCI { VirtIOInput vdev; }; -#define TYPE_VIRTIO_INPUT_HID_PCI "virtio-input-hid-pci" -#define TYPE_VIRTIO_KEYBOARD_PCI "virtio-keyboard-pci" -#define TYPE_VIRTIO_MOUSE_PCI "virtio-mouse-pci" -#define TYPE_VIRTIO_TABLET_PCI "virtio-tablet-pci" +#define TYPE_VIRTIO_INPUT_HID_PCI "virtio-input-hid-pci" +#define TYPE_VIRTIO_KEYBOARD_PCI "virtio-keyboard-pci" +#define TYPE_VIRTIO_MOUSE_PCI "virtio-mouse-pci" +#define TYPE_VIRTIO_TABLET_PCI "virtio-tablet-pci" +#define TYPE_VIRTIO_MULTITOUCH_PCI "virtio-multitouch-pci" OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputHIDPCI, VIRTIO_INPUT_HID_PCI) struct VirtIOInputHIDPCI { @@ -102,6 +103,14 @@ static void virtio_tablet_initfn(Object *obj) TYPE_VIRTIO_TABLET); } +static void virtio_multitouch_initfn(Object *obj) +{ + VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_MULTITOUCH); +} + static const TypeInfo virtio_input_pci_info = { .name = TYPE_VIRTIO_INPUT_PCI, .parent = TYPE_VIRTIO_PCI, @@ -140,6 +149,13 @@ static const VirtioPCIDeviceTypeInfo 
virtio_tablet_pci_info = { .instance_init = virtio_tablet_initfn, }; +static const VirtioPCIDeviceTypeInfo virtio_multitouch_pci_info = { + .generic_name = TYPE_VIRTIO_MULTITOUCH_PCI, + .parent = TYPE_VIRTIO_INPUT_HID_PCI, + .instance_size = sizeof(VirtIOInputHIDPCI), + .instance_init = virtio_multitouch_initfn, +}; + static void virtio_pci_input_register(void) { /* Base types: */ @@ -150,6 +166,7 @@ static void virtio_pci_input_register(void) virtio_pci_types_register(&virtio_keyboard_pci_info); virtio_pci_types_register(&virtio_mouse_pci_info); virtio_pci_types_register(&virtio_tablet_pci_info); + virtio_pci_types_register(&virtio_multitouch_pci_info); } type_init(virtio_pci_input_register) diff --git a/hw/virtio/virtio-mem-pci.c b/hw/virtio/virtio-mem-pci.c index e8c338c5d9..b85c12668d 100644 --- a/hw/virtio/virtio-mem-pci.c +++ b/hw/virtio/virtio-mem-pci.c @@ -42,7 +42,7 @@ static MemoryRegion *virtio_mem_pci_get_memory_region(MemoryDeviceState *md, Error **errp) { VirtIOMEMPCI *pci_mem = VIRTIO_MEM_PCI(md); - VirtIOMEM *vmem = VIRTIO_MEM(&pci_mem->vdev); + VirtIOMEM *vmem = &pci_mem->vdev; VirtIOMEMClass *vmc = VIRTIO_MEM_GET_CLASS(vmem); return vmc->get_memory_region(vmem, errp); @@ -60,7 +60,7 @@ static void virtio_mem_pci_fill_device_info(const MemoryDeviceState *md, { VirtioMEMDeviceInfo *vi = g_new0(VirtioMEMDeviceInfo, 1); VirtIOMEMPCI *pci_mem = VIRTIO_MEM_PCI(md); - VirtIOMEM *vmem = VIRTIO_MEM(&pci_mem->vdev); + VirtIOMEM *vmem = &pci_mem->vdev; VirtIOMEMClass *vpc = VIRTIO_MEM_GET_CLASS(vmem); DeviceState *dev = DEVICE(md); @@ -123,7 +123,7 @@ static void virtio_mem_pci_instance_init(Object *obj) TYPE_VIRTIO_MEM); dev->size_change_notifier.notify = virtio_mem_pci_size_change_notify; - vmem = VIRTIO_MEM(&dev->vdev); + vmem = &dev->vdev; vmc = VIRTIO_MEM_GET_CLASS(vmem); /* * We never remove the notifier again, as we expect both devices to diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index 957fe77dc0..538b695c29 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -1341,7 +1341,7 @@ static Property virtio_mem_properties[] = { TYPE_MEMORY_BACKEND, HostMemoryBackend *), #if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS) DEFINE_PROP_ON_OFF_AUTO(VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP, VirtIOMEM, - unplugged_inaccessible, ON_OFF_AUTO_AUTO), + unplugged_inaccessible, ON_OFF_AUTO_ON), #endif DEFINE_PROP_BOOL(VIRTIO_MEM_EARLY_MIGRATION_PROP, VirtIOMEM, early_migration, true), diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index 23ba625eb6..c2c6d85475 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -354,6 +354,7 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, if (proxy->legacy) { virtio_queue_update_rings(vdev, vdev->queue_sel); } else { + virtio_init_region_cache(vdev, vdev->queue_sel); proxy->vqs[vdev->queue_sel].num = value; } break; diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 247325c193..edbc0daa18 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -716,6 +716,38 @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, } } +static void virtio_pci_ats_ctrl_trigger(PCIDevice *pci_dev, bool enable) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + + vdev->device_iotlb_enabled = enable; + + if (k->toggle_device_iotlb) { + k->toggle_device_iotlb(vdev); + } +} + +static void pcie_ats_config_write(PCIDevice *dev, uint32_t address, + 
uint32_t val, int len) +{ + uint32_t off; + uint16_t ats_cap = dev->exp.ats_cap; + + if (!ats_cap || address < ats_cap) { + return; + } + off = address - ats_cap; + if (off >= PCI_EXT_CAP_ATS_SIZEOF) { + return; + } + + if (range_covers_byte(off, len, PCI_ATS_CTRL + 1)) { + virtio_pci_ats_ctrl_trigger(dev, !!(val & PCI_ATS_CTRL_ENABLE)); + } +} + static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, uint32_t val, int len) { @@ -729,6 +761,10 @@ static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, pcie_cap_flr_write_config(pci_dev, address, val, len); } + if (proxy->flags & VIRTIO_PCI_FLAG_ATS) { + pcie_ats_config_write(pci_dev, address, val, len); + } + if (range_covers_byte(address, len, PCI_COMMAND)) { if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { virtio_set_disabled(vdev, true); @@ -1554,6 +1590,7 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr, proxy->vqs[vdev->queue_sel].num = val; virtio_queue_set_num(vdev, vdev->queue_sel, proxy->vqs[vdev->queue_sel].num); + virtio_init_region_cache(vdev, vdev->queue_sel); break; case VIRTIO_PCI_COMMON_Q_MSIX: vector = virtio_queue_vector(vdev, vdev->queue_sel); diff --git a/hw/virtio/virtio-pmem-pci.c b/hw/virtio/virtio-pmem-pci.c index 1b89ade9d1..197d219204 100644 --- a/hw/virtio/virtio-pmem-pci.c +++ b/hw/virtio/virtio-pmem-pci.c @@ -42,7 +42,7 @@ static MemoryRegion *virtio_pmem_pci_get_memory_region(MemoryDeviceState *md, Error **errp) { VirtIOPMEMPCI *pci_pmem = VIRTIO_PMEM_PCI(md); - VirtIOPMEM *pmem = VIRTIO_PMEM(&pci_pmem->vdev); + VirtIOPMEM *pmem = &pci_pmem->vdev; VirtIOPMEMClass *vpc = VIRTIO_PMEM_GET_CLASS(pmem); return vpc->get_memory_region(pmem, errp); @@ -52,7 +52,7 @@ static uint64_t virtio_pmem_pci_get_plugged_size(const MemoryDeviceState *md, Error **errp) { VirtIOPMEMPCI *pci_pmem = VIRTIO_PMEM_PCI(md); - VirtIOPMEM *pmem = VIRTIO_PMEM(&pci_pmem->vdev); + VirtIOPMEM *pmem = &pci_pmem->vdev; VirtIOPMEMClass *vpc = VIRTIO_PMEM_GET_CLASS(pmem); MemoryRegion *mr = vpc->get_memory_region(pmem, errp); @@ -65,7 +65,7 @@ static void virtio_pmem_pci_fill_device_info(const MemoryDeviceState *md, { VirtioPMEMDeviceInfo *vi = g_new0(VirtioPMEMDeviceInfo, 1); VirtIOPMEMPCI *pci_pmem = VIRTIO_PMEM_PCI(md); - VirtIOPMEM *pmem = VIRTIO_PMEM(&pci_pmem->vdev); + VirtIOPMEM *pmem = &pci_pmem->vdev; VirtIOPMEMClass *vpc = VIRTIO_PMEM_GET_CLASS(pmem); DeviceState *dev = DEVICE(md); diff --git a/hw/virtio/virtio-pmem.c b/hw/virtio/virtio-pmem.c index dff402f08f..c3512c2dae 100644 --- a/hw/virtio/virtio-pmem.c +++ b/hw/virtio/virtio-pmem.c @@ -70,7 +70,6 @@ static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq) VirtIODeviceRequest *req_data; VirtIOPMEM *pmem = VIRTIO_PMEM(vdev); HostMemoryBackend *backend = MEMORY_BACKEND(pmem->memdev); - ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context()); trace_virtio_pmem_flush_request(); req_data = virtqueue_pop(vq, sizeof(VirtIODeviceRequest)); @@ -88,7 +87,7 @@ static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq) req_data->fd = memory_region_get_fd(&backend->mr); req_data->pmem = pmem; req_data->vdev = vdev; - thread_pool_submit_aio(pool, worker_cb, req_data, done_cb, req_data); + thread_pool_submit_aio(worker_cb, req_data, done_cb, req_data); } static void virtio_pmem_get_config(VirtIODevice *vdev, uint8_t *config) diff --git a/hw/virtio/virtio-qmp.c b/hw/virtio/virtio-qmp.c index b70148aba9..3528fc628d 100644 --- a/hw/virtio/virtio-qmp.c +++ b/hw/virtio/virtio-qmp.c @@ -176,6 +176,8 @@ static const 
qmp_virtio_feature_map_t virtio_blk_feature_map[] = { "VIRTIO_BLK_F_DISCARD: Discard command supported"), FEATURE_ENTRY(VIRTIO_BLK_F_WRITE_ZEROES, \ "VIRTIO_BLK_F_WRITE_ZEROES: Write zeroes command supported"), + FEATURE_ENTRY(VIRTIO_BLK_F_ZONED, \ + "VIRTIO_BLK_F_ZONED: Zoned block devices"), #ifndef VIRTIO_BLK_NO_LEGACY FEATURE_ENTRY(VIRTIO_BLK_F_BARRIER, \ "VIRTIO_BLK_F_BARRIER: Request barriers supported"), @@ -329,7 +331,7 @@ static const qmp_virtio_feature_map_t virtio_net_feature_map[] = { static const qmp_virtio_feature_map_t virtio_scsi_feature_map[] = { FEATURE_ENTRY(VIRTIO_SCSI_F_INOUT, \ "VIRTIO_SCSI_F_INOUT: Requests including read and writable data " - "buffers suppoted"), + "buffers supported"), FEATURE_ENTRY(VIRTIO_SCSI_F_HOTPLUG, \ "VIRTIO_SCSI_F_HOTPLUG: Reporting and handling hot-plug events " "supported"), @@ -666,7 +668,7 @@ VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id, uint64_t bitmap) VirtioInfoList *qmp_x_query_virtio(Error **errp) { VirtioInfoList *list = NULL; - VirtioInfoList *node; + VirtioInfo *node; VirtIODevice *vdev; QTAILQ_FOREACH(vdev, &virtio_list, next) { @@ -680,11 +682,10 @@ VirtioInfoList *qmp_x_query_virtio(Error **errp) if (!strncmp(is_realized->str, "false", 4)) { QTAILQ_REMOVE(&virtio_list, vdev, next); } else { - node = g_new0(VirtioInfoList, 1); - node->value = g_new(VirtioInfo, 1); - node->value->path = g_strdup(dev->canonical_path); - node->value->name = g_strdup(vdev->name); - QAPI_LIST_PREPEND(list, node->value); + node = g_new(VirtioInfo, 1); + node->path = g_strdup(dev->canonical_path); + node->name = g_strdup(vdev->name); + QAPI_LIST_PREPEND(list, node); } g_string_free(is_realized, true); } diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 98c4819fcc..295a603e58 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -226,7 +226,7 @@ static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq) } } -static void virtio_init_region_cache(VirtIODevice *vdev, int n) +void virtio_init_region_cache(VirtIODevice *vdev, int n) { VirtQueue *vq = &vdev->vq[n]; VRingMemoryRegionCaches *old = vq->vring.caches; @@ -3491,7 +3491,7 @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n) void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx) { - aio_set_event_notifier(ctx, &vq->host_notifier, true, + aio_set_event_notifier(ctx, &vq->host_notifier, virtio_queue_host_notifier_read, virtio_queue_host_notifier_aio_poll, virtio_queue_host_notifier_aio_poll_ready); @@ -3508,17 +3508,14 @@ void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx) */ void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx) { - aio_set_event_notifier(ctx, &vq->host_notifier, true, + aio_set_event_notifier(ctx, &vq->host_notifier, virtio_queue_host_notifier_read, NULL, NULL); } void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx) { - aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL); - /* Test and clear notifier before after disabling event, - * in case poll callback didn't have time to run. 
*/ - virtio_queue_host_notifier_read(&vq->host_notifier); + aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL); } void virtio_queue_host_notifier_read(EventNotifier *n) diff --git a/hw/watchdog/meson.build b/hw/watchdog/meson.build index 5dcd4fbe2f..15370565bd 100644 --- a/hw/watchdog/meson.build +++ b/hw/watchdog/meson.build @@ -1,10 +1,10 @@ -softmmu_ss.add(files('watchdog.c')) -softmmu_ss.add(when: 'CONFIG_ALLWINNER_WDT', if_true: files('allwinner-wdt.c')) -softmmu_ss.add(when: 'CONFIG_CMSDK_APB_WATCHDOG', if_true: files('cmsdk-apb-watchdog.c')) -softmmu_ss.add(when: 'CONFIG_WDT_IB6300ESB', if_true: files('wdt_i6300esb.c')) -softmmu_ss.add(when: 'CONFIG_WDT_IB700', if_true: files('wdt_ib700.c')) -softmmu_ss.add(when: 'CONFIG_WDT_DIAG288', if_true: files('wdt_diag288.c')) -softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('wdt_aspeed.c')) -softmmu_ss.add(when: 'CONFIG_WDT_IMX2', if_true: files('wdt_imx2.c')) -softmmu_ss.add(when: 'CONFIG_WDT_SBSA', if_true: files('sbsa_gwdt.c')) +system_ss.add(files('watchdog.c')) +system_ss.add(when: 'CONFIG_ALLWINNER_WDT', if_true: files('allwinner-wdt.c')) +system_ss.add(when: 'CONFIG_CMSDK_APB_WATCHDOG', if_true: files('cmsdk-apb-watchdog.c')) +system_ss.add(when: 'CONFIG_WDT_IB6300ESB', if_true: files('wdt_i6300esb.c')) +system_ss.add(when: 'CONFIG_WDT_IB700', if_true: files('wdt_ib700.c')) +system_ss.add(when: 'CONFIG_WDT_DIAG288', if_true: files('wdt_diag288.c')) +system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('wdt_aspeed.c')) +system_ss.add(when: 'CONFIG_WDT_IMX2', if_true: files('wdt_imx2.c')) +system_ss.add(when: 'CONFIG_WDT_SBSA', if_true: files('sbsa_gwdt.c')) specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_watchdog.c')) diff --git a/hw/xen/meson.build b/hw/xen/meson.build index 19c6aabc7c..277f9f292b 100644 --- a/hw/xen/meson.build +++ b/hw/xen/meson.build @@ -1,4 +1,4 @@ -softmmu_ss.add(when: ['CONFIG_XEN_BUS'], if_true: files( +system_ss.add(when: ['CONFIG_XEN_BUS'], if_true: files( 'xen-backend.c', 'xen-bus-helper.c', 'xen-bus.c', @@ -7,7 +7,7 @@ softmmu_ss.add(when: ['CONFIG_XEN_BUS'], if_true: files( 'xen_pvdev.c', )) -softmmu_ss.add(when: ['CONFIG_XEN', xen], if_true: files( +system_ss.add(when: ['CONFIG_XEN', xen], if_true: files( 'xen-operations.c', )) @@ -26,3 +26,10 @@ else endif specific_ss.add_all(when: ['CONFIG_XEN', xen], if_true: xen_specific_ss) + +xen_ss = ss.source_set() + +xen_ss.add(when: 'CONFIG_XEN', if_true: files( + 'xen-mapcache.c', + 'xen-hvm-common.c', +)) diff --git a/hw/xen/trace-events b/hw/xen/trace-events index 55c9e1df68..67a6c41926 100644 --- a/hw/xen/trace-events +++ b/hw/xen/trace-events @@ -41,3 +41,22 @@ xs_node_vprintf(char *path, char *value) "%s %s" xs_node_vscanf(char *path, char *value) "%s %s" xs_node_watch(char *path) "%s" xs_node_unwatch(char *path) "%s" + +# xen-hvm.c +xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: 0x%lx, size 0x%lx" +xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "0x%"PRIx64" size 0x%lx, log_dirty %i" +handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" +handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p read type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" 
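Each entry added to hw/xen/trace-events above declares a trace point; QEMU's tracetool generates a matching trace_<name>() helper from it, and the new hw/xen/xen-hvm-common.c code further below calls those helpers with the listed arguments. A minimal sketch of that correspondence for the handle_ioreq entry (req here stands for the ioreq_t being serviced, as in the code below):

    /* Sketch: call generated from the "handle_ioreq" trace-events line,
     * mirroring the call at the top of handle_ioreq() in xen-hvm-common.c. */
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);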
+handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" +cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" +cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d" +cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d" +cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" +xen_map_resource_ioreq(uint32_t id, void *addr) "id: %u addr: %p" +cpu_ioreq_config_read(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x" +cpu_ioreq_config_write(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x" + +# xen-mapcache.c +xen_map_cache(uint64_t phys_addr) "want 0x%"PRIx64 +xen_remap_bucket(uint64_t index) "index 0x%"PRIx64 +xen_map_cache_return(void* ptr) "%p" diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c index c59850b1de..ece8ec40cd 100644 --- a/hw/xen/xen-bus.c +++ b/hw/xen/xen-bus.c @@ -842,12 +842,15 @@ void xen_device_set_event_channel_context(XenDevice *xendev, } if (channel->ctx) - aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), true, + aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), NULL, NULL, NULL, NULL, NULL); channel->ctx = ctx; - aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), true, - xen_device_event, NULL, xen_device_poll, NULL, channel); + if (ctx) { + aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), + xen_device_event, NULL, xen_device_poll, NULL, + channel); + } } XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev, @@ -920,8 +923,10 @@ void xen_device_unbind_event_channel(XenDevice *xendev, QLIST_REMOVE(channel, list); - aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), true, - NULL, NULL, NULL, NULL, NULL); + if (channel->ctx) { + aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), + NULL, NULL, NULL, NULL, NULL); + } if (qemu_xen_evtchn_unbind(channel->xeh, channel->local_port) < 0) { error_setg_errno(errp, errno, "xenevtchn_unbind failed"); diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c new file mode 100644 index 0000000000..42339c96bd --- /dev/null +++ b/hw/xen/xen-hvm-common.c @@ -0,0 +1,879 @@ +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "trace.h" + +#include "hw/pci/pci_host.h" +#include "hw/xen/xen-hvm-common.h" +#include "hw/xen/xen-bus.h" +#include "hw/boards.h" +#include "hw/xen/arch_hvm.h" + +MemoryRegion ram_memory; + +void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr, + Error **errp) +{ + unsigned long nr_pfn; + xen_pfn_t *pfn_list; + int i; + + if (runstate_check(RUN_STATE_INMIGRATE)) { + /* RAM already populated in Xen */ + fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT + " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n", + __func__, size, ram_addr); + return; + 
} + + if (mr == &ram_memory) { + return; + } + + trace_xen_ram_alloc(ram_addr, size); + + nr_pfn = size >> TARGET_PAGE_BITS; + pfn_list = g_new(xen_pfn_t, nr_pfn); + + for (i = 0; i < nr_pfn; i++) { + pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i; + } + + if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) { + error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT, + ram_addr); + } + + g_free(pfn_list); +} + +static void xen_set_memory(struct MemoryListener *listener, + MemoryRegionSection *section, + bool add) +{ + XenIOState *state = container_of(listener, XenIOState, memory_listener); + + if (section->mr == &ram_memory) { + return; + } else { + if (add) { + xen_map_memory_section(xen_domid, state->ioservid, + section); + } else { + xen_unmap_memory_section(xen_domid, state->ioservid, + section); + } + } + + arch_xen_set_memory(state, section, add); +} + +void xen_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + memory_region_ref(section->mr); + xen_set_memory(listener, section, true); +} + +void xen_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + xen_set_memory(listener, section, false); + memory_region_unref(section->mr); +} + +void xen_io_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + XenIOState *state = container_of(listener, XenIOState, io_listener); + MemoryRegion *mr = section->mr; + + if (mr->ops == &unassigned_io_ops) { + return; + } + + memory_region_ref(mr); + + xen_map_io_section(xen_domid, state->ioservid, section); +} + +void xen_io_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + XenIOState *state = container_of(listener, XenIOState, io_listener); + MemoryRegion *mr = section->mr; + + if (mr->ops == &unassigned_io_ops) { + return; + } + + xen_unmap_io_section(xen_domid, state->ioservid, section); + + memory_region_unref(mr); +} + +void xen_device_realize(DeviceListener *listener, + DeviceState *dev) +{ + XenIOState *state = container_of(listener, XenIOState, device_listener); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + PCIDevice *pci_dev = PCI_DEVICE(dev); + XenPciDevice *xendev = g_new(XenPciDevice, 1); + + xendev->pci_dev = pci_dev; + xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev), + pci_dev->devfn); + QLIST_INSERT_HEAD(&state->dev_list, xendev, entry); + + xen_map_pcidev(xen_domid, state->ioservid, pci_dev); + } +} + +void xen_device_unrealize(DeviceListener *listener, + DeviceState *dev) +{ + XenIOState *state = container_of(listener, XenIOState, device_listener); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + PCIDevice *pci_dev = PCI_DEVICE(dev); + XenPciDevice *xendev, *next; + + xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev); + + QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) { + if (xendev->pci_dev == pci_dev) { + QLIST_REMOVE(xendev, entry); + g_free(xendev); + break; + } + } + } +} + +MemoryListener xen_io_listener = { + .name = "xen-io", + .region_add = xen_io_add, + .region_del = xen_io_del, + .priority = 10, +}; + +DeviceListener xen_device_listener = { + .realize = xen_device_realize, + .unrealize = xen_device_unrealize, +}; + +/* get the ioreq packets from share mem */ +static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu) +{ + ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu); + + if (req->state != STATE_IOREQ_READY) { + DPRINTF("I/O request not ready: " + "%x, ptr: %x, port: %"PRIx64", " + "data: %"PRIx64", count: %u, size: %u\n", + 
req->state, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + return NULL; + } + + xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */ + + req->state = STATE_IOREQ_INPROCESS; + return req; +} + +/* use poll to get the port notification */ +/* ioreq_vec--out,the */ +/* retval--the number of ioreq packet */ +static ioreq_t *cpu_get_ioreq(XenIOState *state) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + unsigned int max_cpus = ms->smp.max_cpus; + int i; + evtchn_port_t port; + + port = qemu_xen_evtchn_pending(state->xce_handle); + if (port == state->bufioreq_local_port) { + timer_mod(state->buffered_io_timer, + BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); + return NULL; + } + + if (port != -1) { + for (i = 0; i < max_cpus; i++) { + if (state->ioreq_local_port[i] == port) { + break; + } + } + + if (i == max_cpus) { + hw_error("Fatal error while trying to get io event!\n"); + } + + /* unmask the wanted port again */ + qemu_xen_evtchn_unmask(state->xce_handle, port); + + /* get the io packet from shared memory */ + state->send_vcpu = i; + return cpu_get_ioreq_from_shared_memory(state, i); + } + + /* read error or read nothing */ + return NULL; +} + +static uint32_t do_inp(uint32_t addr, unsigned long size) +{ + switch (size) { + case 1: + return cpu_inb(addr); + case 2: + return cpu_inw(addr); + case 4: + return cpu_inl(addr); + default: + hw_error("inp: bad size: %04x %lx", addr, size); + } +} + +static void do_outp(uint32_t addr, + unsigned long size, uint32_t val) +{ + switch (size) { + case 1: + return cpu_outb(addr, val); + case 2: + return cpu_outw(addr, val); + case 4: + return cpu_outl(addr, val); + default: + hw_error("outp: bad size: %04x %lx", addr, size); + } +} + +/* + * Helper functions which read/write an object from/to physical guest + * memory, as part of the implementation of an ioreq. + * + * Equivalent to + * cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i, + * val, req->size, 0/1) + * except without the integer overflow problems. 
+ */ +static void rw_phys_req_item(hwaddr addr, + ioreq_t *req, uint32_t i, void *val, int rw) +{ + /* Do everything unsigned so overflow just results in a truncated result + * and accesses to undesired parts of guest memory, which is up + * to the guest */ + hwaddr offset = (hwaddr)req->size * i; + if (req->df) { + addr -= offset; + } else { + addr += offset; + } + cpu_physical_memory_rw(addr, val, req->size, rw); +} + +static inline void read_phys_req_item(hwaddr addr, + ioreq_t *req, uint32_t i, void *val) +{ + rw_phys_req_item(addr, req, i, val, 0); +} +static inline void write_phys_req_item(hwaddr addr, + ioreq_t *req, uint32_t i, void *val) +{ + rw_phys_req_item(addr, req, i, val, 1); +} + + +void cpu_ioreq_pio(ioreq_t *req) +{ + uint32_t i; + + trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + + if (req->size > sizeof(uint32_t)) { + hw_error("PIO: bad size (%u)", req->size); + } + + if (req->dir == IOREQ_READ) { + if (!req->data_is_ptr) { + req->data = do_inp(req->addr, req->size); + trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr, + req->size); + } else { + uint32_t tmp; + + for (i = 0; i < req->count; i++) { + tmp = do_inp(req->addr, req->size); + write_phys_req_item(req->data, req, i, &tmp); + } + } + } else if (req->dir == IOREQ_WRITE) { + if (!req->data_is_ptr) { + trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr, + req->size); + do_outp(req->addr, req->size, req->data); + } else { + for (i = 0; i < req->count; i++) { + uint32_t tmp = 0; + + read_phys_req_item(req->data, req, i, &tmp); + do_outp(req->addr, req->size, tmp); + } + } + } +} + +static void cpu_ioreq_move(ioreq_t *req) +{ + uint32_t i; + + trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + + if (req->size > sizeof(req->data)) { + hw_error("MMIO: bad size (%u)", req->size); + } + + if (!req->data_is_ptr) { + if (req->dir == IOREQ_READ) { + for (i = 0; i < req->count; i++) { + read_phys_req_item(req->addr, req, i, &req->data); + } + } else if (req->dir == IOREQ_WRITE) { + for (i = 0; i < req->count; i++) { + write_phys_req_item(req->addr, req, i, &req->data); + } + } + } else { + uint64_t tmp; + + if (req->dir == IOREQ_READ) { + for (i = 0; i < req->count; i++) { + read_phys_req_item(req->addr, req, i, &tmp); + write_phys_req_item(req->data, req, i, &tmp); + } + } else if (req->dir == IOREQ_WRITE) { + for (i = 0; i < req->count; i++) { + read_phys_req_item(req->data, req, i, &tmp); + write_phys_req_item(req->addr, req, i, &tmp); + } + } + } +} + +static void cpu_ioreq_config(XenIOState *state, ioreq_t *req) +{ + uint32_t sbdf = req->addr >> 32; + uint32_t reg = req->addr; + XenPciDevice *xendev; + + if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) && + req->size != sizeof(uint32_t)) { + hw_error("PCI config access: bad size (%u)", req->size); + } + + if (req->count != 1) { + hw_error("PCI config access: bad count (%u)", req->count); + } + + QLIST_FOREACH(xendev, &state->dev_list, entry) { + if (xendev->sbdf != sbdf) { + continue; + } + + if (!req->data_is_ptr) { + if (req->dir == IOREQ_READ) { + req->data = pci_host_config_read_common( + xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, + req->size); + trace_cpu_ioreq_config_read(req, xendev->sbdf, reg, + req->size, req->data); + } else if (req->dir == IOREQ_WRITE) { + trace_cpu_ioreq_config_write(req, xendev->sbdf, reg, + req->size, req->data); + pci_host_config_write_common( + xendev->pci_dev, reg, 
PCI_CONFIG_SPACE_SIZE, + req->data, req->size); + } + } else { + uint32_t tmp; + + if (req->dir == IOREQ_READ) { + tmp = pci_host_config_read_common( + xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, + req->size); + trace_cpu_ioreq_config_read(req, xendev->sbdf, reg, + req->size, tmp); + write_phys_req_item(req->data, req, 0, &tmp); + } else if (req->dir == IOREQ_WRITE) { + read_phys_req_item(req->data, req, 0, &tmp); + trace_cpu_ioreq_config_write(req, xendev->sbdf, reg, + req->size, tmp); + pci_host_config_write_common( + xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, + tmp, req->size); + } + } + } +} + +static void handle_ioreq(XenIOState *state, ioreq_t *req) +{ + trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr, + req->addr, req->data, req->count, req->size); + + if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) && + (req->size < sizeof (target_ulong))) { + req->data &= ((target_ulong) 1 << (8 * req->size)) - 1; + } + + if (req->dir == IOREQ_WRITE) + trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr, + req->addr, req->data, req->count, req->size); + + switch (req->type) { + case IOREQ_TYPE_PIO: + cpu_ioreq_pio(req); + break; + case IOREQ_TYPE_COPY: + cpu_ioreq_move(req); + break; + case IOREQ_TYPE_TIMEOFFSET: + break; + case IOREQ_TYPE_INVALIDATE: + xen_invalidate_map_cache(); + break; + case IOREQ_TYPE_PCI_CONFIG: + cpu_ioreq_config(state, req); + break; + default: + arch_handle_ioreq(state, req); + } + if (req->dir == IOREQ_READ) { + trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr, + req->addr, req->data, req->count, req->size); + } +} + +static bool handle_buffered_iopage(XenIOState *state) +{ + buffered_iopage_t *buf_page = state->buffered_io_page; + buf_ioreq_t *buf_req = NULL; + bool handled_ioreq = false; + ioreq_t req; + int qw; + + if (!buf_page) { + return 0; + } + + memset(&req, 0x00, sizeof(req)); + req.state = STATE_IOREQ_READY; + req.count = 1; + req.dir = IOREQ_WRITE; + + for (;;) { + uint32_t rdptr = buf_page->read_pointer, wrptr; + + xen_rmb(); + wrptr = buf_page->write_pointer; + xen_rmb(); + if (rdptr != buf_page->read_pointer) { + continue; + } + if (rdptr == wrptr) { + break; + } + buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM]; + req.size = 1U << buf_req->size; + req.addr = buf_req->addr; + req.data = buf_req->data; + req.type = buf_req->type; + xen_rmb(); + qw = (req.size == 8); + if (qw) { + if (rdptr + 1 == wrptr) { + hw_error("Incomplete quad word buffered ioreq"); + } + buf_req = &buf_page->buf_ioreq[(rdptr + 1) % + IOREQ_BUFFER_SLOT_NUM]; + req.data |= ((uint64_t)buf_req->data) << 32; + xen_rmb(); + } + + handle_ioreq(state, &req); + + /* Only req.data may get updated by handle_ioreq(), albeit even that + * should not happen as such data would never make it to the guest (we + * can only usefully see writes here after all). 
+ */ + assert(req.state == STATE_IOREQ_READY); + assert(req.count == 1); + assert(req.dir == IOREQ_WRITE); + assert(!req.data_is_ptr); + + qatomic_add(&buf_page->read_pointer, qw + 1); + handled_ioreq = true; + } + + return handled_ioreq; +} + +static void handle_buffered_io(void *opaque) +{ + XenIOState *state = opaque; + + if (handle_buffered_iopage(state)) { + timer_mod(state->buffered_io_timer, + BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); + } else { + timer_del(state->buffered_io_timer); + qemu_xen_evtchn_unmask(state->xce_handle, state->bufioreq_local_port); + } +} + +static void cpu_handle_ioreq(void *opaque) +{ + XenIOState *state = opaque; + ioreq_t *req = cpu_get_ioreq(state); + + handle_buffered_iopage(state); + if (req) { + ioreq_t copy = *req; + + xen_rmb(); + handle_ioreq(state, &copy); + req->data = copy.data; + + if (req->state != STATE_IOREQ_INPROCESS) { + fprintf(stderr, "Badness in I/O request ... not in service?!: " + "%x, ptr: %x, port: %"PRIx64", " + "data: %"PRIx64", count: %u, size: %u, type: %u\n", + req->state, req->data_is_ptr, req->addr, + req->data, req->count, req->size, req->type); + destroy_hvm_domain(false); + return; + } + + xen_wmb(); /* Update ioreq contents /then/ update state. */ + + /* + * We do this before we send the response so that the tools + * have the opportunity to pick up on the reset before the + * guest resumes and does a hlt with interrupts disabled which + * causes Xen to powerdown the domain. + */ + if (runstate_is_running()) { + ShutdownCause request; + + if (qemu_shutdown_requested_get()) { + destroy_hvm_domain(false); + } + request = qemu_reset_requested_get(); + if (request) { + qemu_system_reset(request); + destroy_hvm_domain(true); + } + } + + req->state = STATE_IORESP_READY; + qemu_xen_evtchn_notify(state->xce_handle, + state->ioreq_local_port[state->send_vcpu]); + } +} + +static void xen_main_loop_prepare(XenIOState *state) +{ + int evtchn_fd = -1; + + if (state->xce_handle != NULL) { + evtchn_fd = qemu_xen_evtchn_fd(state->xce_handle); + } + + state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io, + state); + + if (evtchn_fd != -1) { + CPUState *cpu_state; + + DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__); + CPU_FOREACH(cpu_state) { + DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n", + __func__, cpu_state->cpu_index, cpu_state); + state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state; + } + qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state); + } +} + + +void xen_hvm_change_state_handler(void *opaque, bool running, + RunState rstate) +{ + XenIOState *state = opaque; + + if (running) { + xen_main_loop_prepare(state); + } + + xen_set_ioreq_server_state(xen_domid, + state->ioservid, + (rstate == RUN_STATE_RUNNING)); +} + +void xen_exit_notifier(Notifier *n, void *data) +{ + XenIOState *state = container_of(n, XenIOState, exit); + + xen_destroy_ioreq_server(xen_domid, state->ioservid); + if (state->fres != NULL) { + xenforeignmemory_unmap_resource(xen_fmem, state->fres); + } + + qemu_xen_evtchn_close(state->xce_handle); + xs_daemon_close(state->xenstore); +} + +static int xen_map_ioreq_server(XenIOState *state) +{ + void *addr = NULL; + xen_pfn_t ioreq_pfn; + xen_pfn_t bufioreq_pfn; + evtchn_port_t bufioreq_evtchn; + int rc; + + /* + * Attempt to map using the resource API and fall back to normal + * foreign mapping if this is not supported. 
+ */ + QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0); + QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1); + state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid, + XENMEM_resource_ioreq_server, + state->ioservid, 0, 2, + &addr, + PROT_READ | PROT_WRITE, 0); + if (state->fres != NULL) { + trace_xen_map_resource_ioreq(state->ioservid, addr); + state->buffered_io_page = addr; + state->shared_page = addr + XC_PAGE_SIZE; + } else if (errno != EOPNOTSUPP) { + error_report("failed to map ioreq server resources: error %d handle=%p", + errno, xen_xc); + return -1; + } + + rc = xen_get_ioreq_server_info(xen_domid, state->ioservid, + (state->shared_page == NULL) ? + &ioreq_pfn : NULL, + (state->buffered_io_page == NULL) ? + &bufioreq_pfn : NULL, + &bufioreq_evtchn); + if (rc < 0) { + error_report("failed to get ioreq server info: error %d handle=%p", + errno, xen_xc); + return rc; + } + + if (state->shared_page == NULL) { + DPRINTF("shared page at pfn %lx\n", ioreq_pfn); + + state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid, + PROT_READ | PROT_WRITE, + 1, &ioreq_pfn, NULL); + if (state->shared_page == NULL) { + error_report("map shared IO page returned error %d handle=%p", + errno, xen_xc); + } + } + + if (state->buffered_io_page == NULL) { + DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn); + + state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid, + PROT_READ | PROT_WRITE, + 1, &bufioreq_pfn, + NULL); + if (state->buffered_io_page == NULL) { + error_report("map buffered IO page returned error %d", errno); + return -1; + } + } + + if (state->shared_page == NULL || state->buffered_io_page == NULL) { + return -1; + } + + DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn); + + state->bufioreq_remote_port = bufioreq_evtchn; + + return 0; +} + +void destroy_hvm_domain(bool reboot) +{ + xc_interface *xc_handle; + int sts; + int rc; + + unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff; + + if (xen_dmod) { + rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason); + if (!rc) { + return; + } + if (errno != ENOTTY /* old Xen */) { + error_report("xendevicemodel_shutdown failed with error %d", errno); + } + /* well, try the old thing then */ + } + + xc_handle = xc_interface_open(0, 0, 0); + if (xc_handle == NULL) { + fprintf(stderr, "Cannot acquire xenctrl handle\n"); + } else { + sts = xc_domain_shutdown(xc_handle, xen_domid, reason); + if (sts != 0) { + fprintf(stderr, "xc_domain_shutdown failed to issue %s, " + "sts %d, %s\n", reboot ? "reboot" : "poweroff", + sts, strerror(errno)); + } else { + fprintf(stderr, "Issued domain %d %s\n", xen_domid, + reboot ? "reboot" : "poweroff"); + } + xc_interface_close(xc_handle); + } +} + +void xen_shutdown_fatal_error(const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + fprintf(stderr, "Will destroy the domain.\n"); + /* destroy the domain */ + qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR); +} + +static void xen_do_ioreq_register(XenIOState *state, + unsigned int max_cpus, + MemoryListener xen_memory_listener) +{ + int i, rc; + + state->exit.notify = xen_exit_notifier; + qemu_add_exit_notifier(&state->exit); + + /* + * Register wake-up support in QMP query-current-machine API + */ + qemu_register_wakeup_support(); + + rc = xen_map_ioreq_server(state); + if (rc < 0) { + goto err; + } + + /* Note: cpus is empty at this point in init */ + state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus); + + rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true); + if (rc < 0) { + error_report("failed to enable ioreq server info: error %d handle=%p", + errno, xen_xc); + goto err; + } + + state->ioreq_local_port = g_new0(evtchn_port_t, max_cpus); + + /* FIXME: how about if we overflow the page here? */ + for (i = 0; i < max_cpus; i++) { + rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid, + xen_vcpu_eport(state->shared_page, + i)); + if (rc == -1) { + error_report("shared evtchn %d bind error %d", i, errno); + goto err; + } + state->ioreq_local_port[i] = rc; + } + + rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid, + state->bufioreq_remote_port); + if (rc == -1) { + error_report("buffered evtchn bind error %d", errno); + goto err; + } + state->bufioreq_local_port = rc; + + /* Init RAM management */ +#ifdef XEN_COMPAT_PHYSMAP + xen_map_cache_init(xen_phys_offset_to_gaddr, state); +#else + xen_map_cache_init(NULL, state); +#endif + + qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); + + state->memory_listener = xen_memory_listener; + memory_listener_register(&state->memory_listener, &address_space_memory); + + state->io_listener = xen_io_listener; + memory_listener_register(&state->io_listener, &address_space_io); + + state->device_listener = xen_device_listener; + QLIST_INIT(&state->dev_list); + device_listener_register(&state->device_listener); + + return; + +err: + error_report("xen hardware virtual machine initialisation failed"); + exit(1); +} + +void xen_register_ioreq(XenIOState *state, unsigned int max_cpus, + MemoryListener xen_memory_listener) +{ + int rc; + + setup_xen_backend_ops(); + + state->xce_handle = qemu_xen_evtchn_open(); + if (state->xce_handle == NULL) { + error_report("xen: event channel open failed with error %d", errno); + goto err; + } + + state->xenstore = xs_daemon_open(); + if (state->xenstore == NULL) { + error_report("xen: xenstore open failed with error %d", errno); + goto err; + } + + rc = xen_create_ioreq_server(xen_domid, &state->ioservid); + if (!rc) { + xen_do_ioreq_register(state, max_cpus, xen_memory_listener); + } else { + warn_report("xen: failed to create ioreq server"); + } + + xen_bus_init(); + + xen_be_init(); + + return; + +err: + error_report("xen hardware virtual machine backend registration failed"); + exit(1); +} diff --git a/hw/i386/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c index f7d974677d..f7d974677d 100644 --- a/hw/i386/xen/xen-mapcache.c +++ b/hw/xen/xen-mapcache.c diff --git a/hw/xen/xen-operations.c b/hw/xen/xen-operations.c index 4b78fbf4bd..e00983ec44 100644 --- a/hw/xen/xen-operations.c +++ b/hw/xen/xen-operations.c @@ -28,46 +28,13 @@ #include <xenctrl.h> /* - * We don't support Xen prior to 4.2.0. + * We don't support Xen prior to 4.7.1. 
*/ -/* Xen 4.2 through 4.6 */ -#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701 - -typedef xc_evtchn xenevtchn_handle; -typedef evtchn_port_or_error_t xenevtchn_port_or_error_t; - -#define xenevtchn_open(l, f) xc_evtchn_open(l, f); -#define xenevtchn_close(h) xc_evtchn_close(h) -#define xenevtchn_fd(h) xc_evtchn_fd(h) -#define xenevtchn_pending(h) xc_evtchn_pending(h) -#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p) -#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p) -#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p) -#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p) - -typedef xc_gnttab xengnttab_handle; - -#define xengnttab_open(l, f) xc_gnttab_open(l, f) -#define xengnttab_close(h) xc_gnttab_close(h) -#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n) -#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p) -#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n) -#define xengnttab_map_grant_refs(h, c, d, r, p) \ - xc_gnttab_map_grant_refs(h, c, d, r, p) -#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \ - xc_gnttab_map_domain_grant_refs(h, c, d, r, p) - -typedef xc_interface xenforeignmemory_handle; - -#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */ - #include <xenevtchn.h> #include <xengnttab.h> #include <xenforeignmemory.h> -#endif - /* Xen before 4.8 */ static int libxengnttab_fallback_grant_copy(xengnttab_handle *xgt, @@ -223,26 +190,6 @@ static struct gnttab_backend_ops libxengnttab_backend_ops = { .unmap = libxengnttab_backend_unmap, }; -#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701 - -static void *libxenforeignmem_backend_map(uint32_t dom, void *addr, int prot, - size_t pages, xfn_pfn_t *pfns, - int *errs) -{ - if (errs) { - return xc_map_foreign_bulk(xen_xc, dom, prot, pfns, errs, pages); - } else { - return xc_map_foreign_pages(xen_xc, dom, prot, pfns, pages); - } -} - -static int libxenforeignmem_backend_unmap(void *addr, size_t pages) -{ - return munmap(addr, pages * XC_PAGE_SIZE); -} - -#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */ - static void *libxenforeignmem_backend_map(uint32_t dom, void *addr, int prot, size_t pages, xen_pfn_t *pfns, int *errs) @@ -256,8 +203,6 @@ static int libxenforeignmem_backend_unmap(void *addr, size_t pages) return xenforeignmemory_unmap(xen_fmem, addr, pages); } -#endif - struct foreignmem_backend_ops libxenforeignmem_backend_ops = { .map = libxenforeignmem_backend_map, .unmap = libxenforeignmem_backend_unmap, @@ -287,7 +232,7 @@ static void watch_event(void *opaque) static struct qemu_xs_handle *libxenstore_open(void) { struct xs_handle *xsh = xs_open(0); - struct qemu_xs_handle *h = g_new0(struct qemu_xs_handle, 1); + struct qemu_xs_handle *h; if (!xsh) { return NULL; diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c index 2d33d178ad..a540149639 100644 --- a/hw/xen/xen_pt.c +++ b/hw/xen/xen_pt.c @@ -57,7 +57,6 @@ #include <sys/ioctl.h> #include "hw/pci/pci.h" -#include "hw/pci/pci_bus.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "xen_pt.h" @@ -951,7 +950,7 @@ void xen_igd_reserve_slot(PCIBus *pci_bus) } XEN_PT_LOG(0, "Reserving PCI slot 2 for IGD\n"); - pci_bus->slot_reserved_mask |= XEN_PCI_IGD_SLOT_MASK; + pci_bus_set_slot_reserved_mask(pci_bus, XEN_PCI_IGD_SLOT_MASK); } static void xen_igd_clear_slot(DeviceState *qdev, Error **errp) @@ -971,7 +970,7 @@ static void xen_igd_clear_slot(DeviceState *qdev, Error **errp) return; } - if (!(pci_bus->slot_reserved_mask & XEN_PCI_IGD_SLOT_MASK)) { + if 
(!(pci_bus_get_slot_reserved_mask(pci_bus) & XEN_PCI_IGD_SLOT_MASK)) { xpdc->pci_qdev_realize(qdev, errp); return; } @@ -982,7 +981,7 @@ static void xen_igd_clear_slot(DeviceState *qdev, Error **errp) s->real_device.dev == XEN_PCI_IGD_DEV && s->real_device.func == XEN_PCI_IGD_FN && s->real_device.vendor_id == PCI_VENDOR_ID_INTEL) { - pci_bus->slot_reserved_mask &= ~XEN_PCI_IGD_SLOT_MASK; + pci_bus_clear_slot_reserved_mask(pci_bus, XEN_PCI_IGD_SLOT_MASK); XEN_PT_LOG(pci_dev, "Intel IGD found, using slot 2\n"); } xpdc->pci_qdev_realize(qdev, errp); diff --git a/hw/xtensa/virt.c b/hw/xtensa/virt.c index a18e3fc910..b87f842e74 100644 --- a/hw/xtensa/virt.c +++ b/hw/xtensa/virt.c @@ -38,7 +38,8 @@ #include "xtensa_memory.h" #include "xtensa_sim.h" -static void create_pcie(CPUXtensaState *env, int irq_base, hwaddr addr_base) +static void create_pcie(MachineState *ms, CPUXtensaState *env, int irq_base, + hwaddr addr_base) { hwaddr base_ecam = addr_base + 0x00100000; hwaddr size_ecam = 0x03f00000; @@ -54,6 +55,7 @@ static void create_pcie(CPUXtensaState *env, int irq_base, hwaddr addr_base) MemoryRegion *mmio_alias; MemoryRegion *mmio_reg; + MachineClass *mc = MACHINE_GET_CLASS(ms); DeviceState *dev; PCIHostState *pci; qemu_irq *extints; @@ -104,7 +106,7 @@ static void create_pcie(CPUXtensaState *env, int irq_base, hwaddr addr_base) NICInfo *nd = &nd_table[i]; if (!nd->model) { - nd->model = g_strdup("virtio"); + nd->model = g_strdup(mc->default_nic); } pci_nic_init_nofail(nd, pci->bus, nd->model, NULL); @@ -117,7 +119,7 @@ static void xtensa_virt_init(MachineState *machine) XtensaCPU *cpu = xtensa_sim_common_init(machine); CPUXtensaState *env = &cpu->env; - create_pcie(env, 0, 0xf0000000); + create_pcie(machine, env, 0, 0xf0000000); xtensa_sim_load_kernel(cpu, machine); } @@ -127,6 +129,7 @@ static void xtensa_virt_machine_init(MachineClass *mc) mc->init = xtensa_virt_init; mc->max_cpus = 32; mc->default_cpu_type = XTENSA_DEFAULT_CPU_TYPE; + mc->default_nic = "virtio-net-pci"; } DEFINE_MACHINE("virt", xtensa_virt_machine_init) |
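The xen_pt.c hunk above stops manipulating pci_bus->slot_reserved_mask directly and instead goes through the pci_bus_get/set/clear_slot_reserved_mask() helpers, which is also why the hw/pci/pci_bus.h include could be dropped. A rough sketch of the assumed shape of those accessors, for orientation only (the actual definitions live in QEMU's PCI core and may differ in detail):

    /* Assumed accessor shapes; consult hw/pci/pci.c for the real ones. */
    uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus)
    {
        return bus->slot_reserved_mask;
    }

    void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask)
    {
        bus->slot_reserved_mask |= mask;
    }

    void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask)
    {
        bus->slot_reserved_mask &= ~mask;
    }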