Diffstat (limited to 'hw')
87 files changed, 4375 insertions, 981 deletions
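The largest addition in this series is the Freescale i.MX6UL SoC model (hw/arm/fsl-imx6ul.c) and the mcimx6ul-evk board (hw/arm/mcimx6ul-evk.c) that instantiates it. As a rough usage sketch only — the kernel, device tree and SD image file names below are placeholders, and the console name and RAM size are assumptions not taken from this diff — the new machine could be launched along these lines:

    qemu-system-arm -M mcimx6ul-evk -m 512M \
        -kernel zImage -dtb imx6ul-14x14-evk.dtb \
        -append "console=ttymxc0 root=/dev/mmcblk0" \
        -drive file=sd.img,format=raw,if=sd \
        -serial stdio

As the mcimx6ul-evk.c hunk below shows, the board supports up to FSL_IMX6UL_NUM_CPUS Cortex-A7 CPUs, attaches IF_SD drives to the uSDHC controllers, and rejects RAM sizes above FSL_IMX6UL_MMDC_SIZE.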
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs index d51fcecaf2..2902f47b4c 100644 --- a/hw/arm/Makefile.objs +++ b/hw/arm/Makefile.objs @@ -36,3 +36,4 @@ obj-$(CONFIG_MSF2) += msf2-soc.o msf2-som.o obj-$(CONFIG_IOTKIT) += iotkit.o obj-$(CONFIG_FSL_IMX7) += fsl-imx7.o mcimx7d-sabre.o obj-$(CONFIG_ARM_SMMUV3) += smmu-common.o smmuv3.o +obj-$(CONFIG_FSL_IMX6UL) += fsl-imx6ul.o mcimx6ul-evk.o diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c index 6b07666057..4bf9131b81 100644 --- a/hw/arm/armv7m.c +++ b/hw/arm/armv7m.c @@ -202,6 +202,7 @@ static void armv7m_realize(DeviceState *dev, Error **errp) */ qdev_pass_gpios(DEVICE(&s->nvic), dev, NULL); qdev_pass_gpios(DEVICE(&s->nvic), dev, "SYSRESETREQ"); + qdev_pass_gpios(DEVICE(&s->nvic), dev, "NMI"); /* Wire the NVIC up to the CPU */ sbd = SYS_BUS_DEVICE(&s->nvic); @@ -211,25 +212,27 @@ static void armv7m_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(&s->container, 0xe000e000, sysbus_mmio_get_region(sbd, 0)); - for (i = 0; i < ARRAY_SIZE(s->bitband); i++) { - Object *obj = OBJECT(&s->bitband[i]); - SysBusDevice *sbd = SYS_BUS_DEVICE(&s->bitband[i]); - - object_property_set_int(obj, bitband_input_addr[i], "base", &err); - if (err != NULL) { - error_propagate(errp, err); - return; + if (s->enable_bitband) { + for (i = 0; i < ARRAY_SIZE(s->bitband); i++) { + Object *obj = OBJECT(&s->bitband[i]); + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->bitband[i]); + + object_property_set_int(obj, bitband_input_addr[i], "base", &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } + object_property_set_link(obj, OBJECT(s->board_memory), + "source-memory", &error_abort); + object_property_set_bool(obj, true, "realized", &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } + + memory_region_add_subregion(&s->container, bitband_output_addr[i], + sysbus_mmio_get_region(sbd, 0)); } - object_property_set_link(obj, OBJECT(s->board_memory), - "source-memory", &error_abort); - object_property_set_bool(obj, true, "realized", &err); - if (err != NULL) { - error_propagate(errp, err); - return; - } - - memory_region_add_subregion(&s->container, bitband_output_addr[i], - sysbus_mmio_get_region(sbd, 0)); } } @@ -239,6 +242,7 @@ static Property armv7m_properties[] = { MemoryRegion *), DEFINE_PROP_LINK("idau", ARMv7MState, idau, TYPE_IDAU_INTERFACE, Object *), DEFINE_PROP_UINT32("init-svtor", ARMv7MState, init_svtor, 0), + DEFINE_PROP_BOOL("enable-bitband", ARMv7MState, enable_bitband, false), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index bb9d33848d..bb9590f1ae 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -31,6 +31,7 @@ static struct arm_boot_info aspeed_board_binfo = { typedef struct AspeedBoardState { AspeedSoCState soc; MemoryRegion ram; + MemoryRegion max_ram; } AspeedBoardState; typedef struct AspeedBoardConfig { @@ -127,6 +128,27 @@ static const AspeedBoardConfig aspeed_boards[] = { }, }; +/* + * The max ram region is for firmwares that scan the address space + * with load/store to guess how much RAM the SoC has. 
+ */ +static uint64_t max_ram_read(void *opaque, hwaddr offset, unsigned size) +{ + return 0; +} + +static void max_ram_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + /* Discard writes */ +} + +static const MemoryRegionOps max_ram_ops = { + .read = max_ram_read, + .write = max_ram_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + #define FIRMWARE_ADDR 0x0 static void write_boot_rom(DriveInfo *dinfo, hwaddr addr, size_t rom_size, @@ -187,6 +209,7 @@ static void aspeed_board_init(MachineState *machine, AspeedBoardState *bmc; AspeedSoCClass *sc; DriveInfo *drive0 = drive_get(IF_MTD, 0, 0); + ram_addr_t max_ram_size; bmc = g_new0(AspeedBoardState, 1); object_initialize(&bmc->soc, (sizeof(bmc->soc)), cfg->soc_name); @@ -226,6 +249,14 @@ static void aspeed_board_init(MachineState *machine, object_property_add_const_link(OBJECT(&bmc->soc), "ram", OBJECT(&bmc->ram), &error_abort); + max_ram_size = object_property_get_uint(OBJECT(&bmc->soc), "max-ram-size", + &error_abort); + memory_region_init_io(&bmc->max_ram, NULL, &max_ram_ops, NULL, + "max_ram", max_ram_size - ram_size); + memory_region_add_subregion(get_system_memory(), + sc->info->sdram_base + ram_size, + &bmc->max_ram); + aspeed_board_init_flashes(&bmc->soc.fmc, cfg->fmc_model, &error_abort); aspeed_board_init_flashes(&bmc->soc.spi[0], cfg->spi_model, &error_abort); diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c index e68911af0f..a27233d487 100644 --- a/hw/arm/aspeed_soc.c +++ b/hw/arm/aspeed_soc.c @@ -155,6 +155,8 @@ static void aspeed_soc_init(Object *obj) sc->info->silicon_rev); object_property_add_alias(obj, "ram-size", OBJECT(&s->sdmc), "ram-size", &error_abort); + object_property_add_alias(obj, "max-ram-size", OBJECT(&s->sdmc), + "max-ram-size", &error_abort); for (i = 0; i < sc->info->wdts_num; i++) { object_initialize(&s->wdt[i], sizeof(s->wdt[i]), TYPE_ASPEED_WDT); diff --git a/hw/arm/boot.c b/hw/arm/boot.c index e09201cc97..ca9467e583 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -818,9 +818,9 @@ static int do_arm_linux_init(Object *obj, void *opaque) return 0; } -static uint64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry, - uint64_t *lowaddr, uint64_t *highaddr, - int elf_machine, AddressSpace *as) +static int64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, + int elf_machine, AddressSpace *as) { bool elf_is64; union { @@ -829,7 +829,7 @@ static uint64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry, } elf_header; int data_swab = 0; bool big_endian; - uint64_t ret = -1; + int64_t ret = -1; Error *err = NULL; diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c new file mode 100644 index 0000000000..258f470623 --- /dev/null +++ b/hw/arm/fsl-imx6ul.c @@ -0,0 +1,617 @@ +/* + * Copyright (c) 2018 Jean-Christophe Dubois <jcd@tribudubois.net> + * + * i.MX6UL SOC emulation. + * + * Based on hw/arm/fsl-imx7.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "hw/arm/fsl-imx6ul.h" +#include "hw/misc/unimp.h" +#include "sysemu/sysemu.h" +#include "qemu/error-report.h" + +#define NAME_SIZE 20 + +static void fsl_imx6ul_init(Object *obj) +{ + FslIMX6ULState *s = FSL_IMX6UL(obj); + char name[NAME_SIZE]; + int i; + + for (i = 0; i < MIN(smp_cpus, FSL_IMX6UL_NUM_CPUS); i++) { + snprintf(name, NAME_SIZE, "cpu%d", i); + object_initialize_child(obj, name, &s->cpu[i], sizeof(s->cpu[i]), + "cortex-a7-" TYPE_ARM_CPU, &error_abort, NULL); + } + + /* + * A7MPCORE + */ + sysbus_init_child_obj(obj, "a7mpcore", &s->a7mpcore, sizeof(s->a7mpcore), + TYPE_A15MPCORE_PRIV); + + /* + * CCM + */ + sysbus_init_child_obj(obj, "ccm", &s->ccm, sizeof(s->ccm), TYPE_IMX6UL_CCM); + + /* + * SRC + */ + sysbus_init_child_obj(obj, "src", &s->src, sizeof(s->src), TYPE_IMX6_SRC); + + /* + * GPCv2 + */ + sysbus_init_child_obj(obj, "gpcv2", &s->gpcv2, sizeof(s->gpcv2), + TYPE_IMX_GPCV2); + + /* + * SNVS + */ + sysbus_init_child_obj(obj, "snvs", &s->snvs, sizeof(s->snvs), + TYPE_IMX7_SNVS); + + /* + * GPR + */ + sysbus_init_child_obj(obj, "gpr", &s->gpr, sizeof(s->gpr), + TYPE_IMX7_GPR); + + /* + * GPIOs 1 to 5 + */ + for (i = 0; i < FSL_IMX6UL_NUM_GPIOS; i++) { + snprintf(name, NAME_SIZE, "gpio%d", i); + sysbus_init_child_obj(obj, name, &s->gpio[i], sizeof(s->gpio[i]), + TYPE_IMX_GPIO); + } + + /* + * GPT 1, 2 + */ + for (i = 0; i < FSL_IMX6UL_NUM_GPTS; i++) { + snprintf(name, NAME_SIZE, "gpt%d", i); + sysbus_init_child_obj(obj, name, &s->gpt[i], sizeof(s->gpt[i]), + TYPE_IMX7_GPT); + } + + /* + * EPIT 1, 2 + */ + for (i = 0; i < FSL_IMX6UL_NUM_EPITS; i++) { + snprintf(name, NAME_SIZE, "epit%d", i + 1); + sysbus_init_child_obj(obj, name, &s->epit[i], sizeof(s->epit[i]), + TYPE_IMX_EPIT); + } + + /* + * eCSPI + */ + for (i = 0; i < FSL_IMX6UL_NUM_ECSPIS; i++) { + snprintf(name, NAME_SIZE, "spi%d", i + 1); + sysbus_init_child_obj(obj, name, &s->spi[i], sizeof(s->spi[i]), + TYPE_IMX_SPI); + } + + /* + * I2C + */ + for (i = 0; i < FSL_IMX6UL_NUM_I2CS; i++) { + snprintf(name, NAME_SIZE, "i2c%d", i + 1); + sysbus_init_child_obj(obj, name, &s->i2c[i], sizeof(s->i2c[i]), + TYPE_IMX_I2C); + } + + /* + * UART + */ + for (i = 0; i < FSL_IMX6UL_NUM_UARTS; i++) { + snprintf(name, NAME_SIZE, "uart%d", i); + sysbus_init_child_obj(obj, name, &s->uart[i], sizeof(s->uart[i]), + TYPE_IMX_SERIAL); + } + + /* + * Ethernet + */ + for (i = 0; i < FSL_IMX6UL_NUM_ETHS; i++) { + snprintf(name, NAME_SIZE, "eth%d", i); + sysbus_init_child_obj(obj, name, &s->eth[i], sizeof(s->eth[i]), + TYPE_IMX_ENET); + } + + /* + * SDHCI + */ + for (i = 0; i < FSL_IMX6UL_NUM_USDHCS; i++) { + snprintf(name, NAME_SIZE, "usdhc%d", i); + sysbus_init_child_obj(obj, name, &s->usdhc[i], sizeof(s->usdhc[i]), + TYPE_IMX_USDHC); + } + + /* + * Watchdog + */ + for (i = 0; i < FSL_IMX6UL_NUM_WDTS; i++) { + snprintf(name, NAME_SIZE, "wdt%d", i); + sysbus_init_child_obj(obj, name, &s->wdt[i], sizeof(s->wdt[i]), + TYPE_IMX2_WDT); + } +} + +static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) +{ + FslIMX6ULState *s = FSL_IMX6UL(dev); + int i; + qemu_irq irq; + char name[NAME_SIZE]; + + if (smp_cpus > FSL_IMX6UL_NUM_CPUS) { + error_setg(errp, "%s: Only %d CPUs are supported (%d requested)", + TYPE_FSL_IMX6UL, FSL_IMX6UL_NUM_CPUS, smp_cpus); + return; + } + + for (i = 0; i < smp_cpus; i++) { + Object *o = OBJECT(&s->cpu[i]); + + object_property_set_int(o, QEMU_PSCI_CONDUIT_SMC, + "psci-conduit", &error_abort); + + /* On uniprocessor, the CBAR 
is set to 0 */ + if (smp_cpus > 1) { + object_property_set_int(o, FSL_IMX6UL_A7MPCORE_ADDR, + "reset-cbar", &error_abort); + } + + if (i) { + /* Secondary CPUs start in PSCI powered-down state */ + object_property_set_bool(o, true, + "start-powered-off", &error_abort); + } + + object_property_set_bool(o, true, "realized", &error_abort); + } + + /* + * A7MPCORE + */ + object_property_set_int(OBJECT(&s->a7mpcore), smp_cpus, "num-cpu", + &error_abort); + object_property_set_int(OBJECT(&s->a7mpcore), + FSL_IMX6UL_MAX_IRQ + GIC_INTERNAL, + "num-irq", &error_abort); + object_property_set_bool(OBJECT(&s->a7mpcore), true, "realized", + &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->a7mpcore), 0, FSL_IMX6UL_A7MPCORE_ADDR); + + for (i = 0; i < smp_cpus; i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->a7mpcore); + DeviceState *d = DEVICE(qemu_get_cpu(i)); + + irq = qdev_get_gpio_in(d, ARM_CPU_IRQ); + sysbus_connect_irq(sbd, i, irq); + sysbus_connect_irq(sbd, i + smp_cpus, qdev_get_gpio_in(d, ARM_CPU_FIQ)); + } + + /* + * A7MPCORE DAP + */ + create_unimplemented_device("a7mpcore-dap", FSL_IMX6UL_A7MPCORE_DAP_ADDR, + 0x100000); + + /* + * GPT 1, 2 + */ + for (i = 0; i < FSL_IMX6UL_NUM_GPTS; i++) { + static const hwaddr FSL_IMX6UL_GPTn_ADDR[FSL_IMX6UL_NUM_GPTS] = { + FSL_IMX6UL_GPT1_ADDR, + FSL_IMX6UL_GPT2_ADDR, + }; + + static const int FSL_IMX6UL_GPTn_IRQ[FSL_IMX6UL_NUM_GPTS] = { + FSL_IMX6UL_GPT1_IRQ, + FSL_IMX6UL_GPT2_IRQ, + }; + + s->gpt[i].ccm = IMX_CCM(&s->ccm); + object_property_set_bool(OBJECT(&s->gpt[i]), true, "realized", + &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt[i]), 0, + FSL_IMX6UL_GPTn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_GPTn_IRQ[i])); + } + + /* + * EPIT 1, 2 + */ + for (i = 0; i < FSL_IMX6UL_NUM_EPITS; i++) { + static const hwaddr FSL_IMX6UL_EPITn_ADDR[FSL_IMX6UL_NUM_EPITS] = { + FSL_IMX6UL_EPIT1_ADDR, + FSL_IMX6UL_EPIT2_ADDR, + }; + + static const int FSL_IMX6UL_EPITn_IRQ[FSL_IMX6UL_NUM_EPITS] = { + FSL_IMX6UL_EPIT1_IRQ, + FSL_IMX6UL_EPIT2_IRQ, + }; + + s->epit[i].ccm = IMX_CCM(&s->ccm); + object_property_set_bool(OBJECT(&s->epit[i]), true, "realized", + &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->epit[i]), 0, + FSL_IMX6UL_EPITn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->epit[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_EPITn_IRQ[i])); + } + + /* + * GPIO + */ + for (i = 0; i < FSL_IMX6UL_NUM_GPIOS; i++) { + static const hwaddr FSL_IMX6UL_GPIOn_ADDR[FSL_IMX6UL_NUM_GPIOS] = { + FSL_IMX6UL_GPIO1_ADDR, + FSL_IMX6UL_GPIO2_ADDR, + FSL_IMX6UL_GPIO3_ADDR, + FSL_IMX6UL_GPIO4_ADDR, + FSL_IMX6UL_GPIO5_ADDR, + }; + + static const int FSL_IMX6UL_GPIOn_LOW_IRQ[FSL_IMX6UL_NUM_GPIOS] = { + FSL_IMX6UL_GPIO1_LOW_IRQ, + FSL_IMX6UL_GPIO2_LOW_IRQ, + FSL_IMX6UL_GPIO3_LOW_IRQ, + FSL_IMX6UL_GPIO4_LOW_IRQ, + FSL_IMX6UL_GPIO5_LOW_IRQ, + }; + + static const int FSL_IMX6UL_GPIOn_HIGH_IRQ[FSL_IMX6UL_NUM_GPIOS] = { + FSL_IMX6UL_GPIO1_HIGH_IRQ, + FSL_IMX6UL_GPIO2_HIGH_IRQ, + FSL_IMX6UL_GPIO3_HIGH_IRQ, + FSL_IMX6UL_GPIO4_HIGH_IRQ, + FSL_IMX6UL_GPIO5_HIGH_IRQ, + }; + + object_property_set_bool(OBJECT(&s->gpio[i]), true, "realized", + &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, + FSL_IMX6UL_GPIOn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_GPIOn_LOW_IRQ[i])); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + 
FSL_IMX6UL_GPIOn_HIGH_IRQ[i])); + } + + /* + * IOMUXC and IOMUXC_GPR + */ + for (i = 0; i < 1; i++) { + static const hwaddr FSL_IMX6UL_IOMUXCn_ADDR[FSL_IMX6UL_NUM_IOMUXCS] = { + FSL_IMX6UL_IOMUXC_ADDR, + FSL_IMX6UL_IOMUXC_GPR_ADDR, + }; + + snprintf(name, NAME_SIZE, "iomuxc%d", i); + create_unimplemented_device(name, FSL_IMX6UL_IOMUXCn_ADDR[i], 0x4000); + } + + /* + * CCM + */ + object_property_set_bool(OBJECT(&s->ccm), true, "realized", &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccm), 0, FSL_IMX6UL_CCM_ADDR); + + /* + * SRC + */ + object_property_set_bool(OBJECT(&s->src), true, "realized", &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->src), 0, FSL_IMX6UL_SRC_ADDR); + + /* + * GPCv2 + */ + object_property_set_bool(OBJECT(&s->gpcv2), true, + "realized", &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpcv2), 0, FSL_IMX6UL_GPC_ADDR); + + /* Initialize all ECSPI */ + for (i = 0; i < FSL_IMX6UL_NUM_ECSPIS; i++) { + static const hwaddr FSL_IMX6UL_SPIn_ADDR[FSL_IMX6UL_NUM_ECSPIS] = { + FSL_IMX6UL_ECSPI1_ADDR, + FSL_IMX6UL_ECSPI2_ADDR, + FSL_IMX6UL_ECSPI3_ADDR, + FSL_IMX6UL_ECSPI4_ADDR, + }; + + static const int FSL_IMX6UL_SPIn_IRQ[FSL_IMX6UL_NUM_ECSPIS] = { + FSL_IMX6UL_ECSPI1_IRQ, + FSL_IMX6UL_ECSPI2_IRQ, + FSL_IMX6UL_ECSPI3_IRQ, + FSL_IMX6UL_ECSPI4_IRQ, + }; + + /* Initialize the SPI */ + object_property_set_bool(OBJECT(&s->spi[i]), true, "realized", + &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, + FSL_IMX6UL_SPIn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_SPIn_IRQ[i])); + } + + /* + * I2C + */ + for (i = 0; i < FSL_IMX6UL_NUM_I2CS; i++) { + static const hwaddr FSL_IMX6UL_I2Cn_ADDR[FSL_IMX6UL_NUM_I2CS] = { + FSL_IMX6UL_I2C1_ADDR, + FSL_IMX6UL_I2C2_ADDR, + FSL_IMX6UL_I2C3_ADDR, + FSL_IMX6UL_I2C4_ADDR, + }; + + static const int FSL_IMX6UL_I2Cn_IRQ[FSL_IMX6UL_NUM_I2CS] = { + FSL_IMX6UL_I2C1_IRQ, + FSL_IMX6UL_I2C2_IRQ, + FSL_IMX6UL_I2C3_IRQ, + FSL_IMX6UL_I2C4_IRQ, + }; + + object_property_set_bool(OBJECT(&s->i2c[i]), true, "realized", + &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, FSL_IMX6UL_I2Cn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_I2Cn_IRQ[i])); + } + + /* + * UART + */ + for (i = 0; i < FSL_IMX6UL_NUM_UARTS; i++) { + static const hwaddr FSL_IMX6UL_UARTn_ADDR[FSL_IMX6UL_NUM_UARTS] = { + FSL_IMX6UL_UART1_ADDR, + FSL_IMX6UL_UART2_ADDR, + FSL_IMX6UL_UART3_ADDR, + FSL_IMX6UL_UART4_ADDR, + FSL_IMX6UL_UART5_ADDR, + FSL_IMX6UL_UART6_ADDR, + FSL_IMX6UL_UART7_ADDR, + FSL_IMX6UL_UART8_ADDR, + }; + + static const int FSL_IMX6UL_UARTn_IRQ[FSL_IMX6UL_NUM_UARTS] = { + FSL_IMX6UL_UART1_IRQ, + FSL_IMX6UL_UART2_IRQ, + FSL_IMX6UL_UART3_IRQ, + FSL_IMX6UL_UART4_IRQ, + FSL_IMX6UL_UART5_IRQ, + FSL_IMX6UL_UART6_IRQ, + FSL_IMX6UL_UART7_IRQ, + FSL_IMX6UL_UART8_IRQ, + }; + + qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", serial_hd(i)); + + object_property_set_bool(OBJECT(&s->uart[i]), true, "realized", + &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, + FSL_IMX6UL_UARTn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_UARTn_IRQ[i])); + } + + /* + * Ethernet + */ + for (i = 0; i < FSL_IMX6UL_NUM_ETHS; i++) { + static const hwaddr FSL_IMX6UL_ENETn_ADDR[FSL_IMX6UL_NUM_ETHS] = { + FSL_IMX6UL_ENET1_ADDR, + FSL_IMX6UL_ENET2_ADDR, + }; + + static const int FSL_IMX6UL_ENETn_IRQ[FSL_IMX6UL_NUM_ETHS] = { + FSL_IMX6UL_ENET1_IRQ, + 
FSL_IMX6UL_ENET2_IRQ, + }; + + static const int FSL_IMX6UL_ENETn_TIMER_IRQ[FSL_IMX6UL_NUM_ETHS] = { + FSL_IMX6UL_ENET1_TIMER_IRQ, + FSL_IMX6UL_ENET2_TIMER_IRQ, + }; + + object_property_set_uint(OBJECT(&s->eth[i]), + FSL_IMX6UL_ETH_NUM_TX_RINGS, + "tx-ring-num", &error_abort); + qdev_set_nic_properties(DEVICE(&s->eth[i]), &nd_table[i]); + object_property_set_bool(OBJECT(&s->eth[i]), true, "realized", + &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->eth[i]), 0, + FSL_IMX6UL_ENETn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_ENETn_IRQ[i])); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 1, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_ENETn_TIMER_IRQ[i])); + } + + /* + * USDHC + */ + for (i = 0; i < FSL_IMX6UL_NUM_USDHCS; i++) { + static const hwaddr FSL_IMX6UL_USDHCn_ADDR[FSL_IMX6UL_NUM_USDHCS] = { + FSL_IMX6UL_USDHC1_ADDR, + FSL_IMX6UL_USDHC2_ADDR, + }; + + static const int FSL_IMX6UL_USDHCn_IRQ[FSL_IMX6UL_NUM_USDHCS] = { + FSL_IMX6UL_USDHC1_IRQ, + FSL_IMX6UL_USDHC2_IRQ, + }; + + object_property_set_bool(OBJECT(&s->usdhc[i]), true, "realized", + &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->usdhc[i]), 0, + FSL_IMX6UL_USDHCn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usdhc[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_USDHCn_IRQ[i])); + } + + /* + * SNVS + */ + object_property_set_bool(OBJECT(&s->snvs), true, "realized", &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX6UL_SNVS_HP_ADDR); + + /* + * Watchdog + */ + for (i = 0; i < FSL_IMX6UL_NUM_WDTS; i++) { + static const hwaddr FSL_IMX6UL_WDOGn_ADDR[FSL_IMX6UL_NUM_WDTS] = { + FSL_IMX6UL_WDOG1_ADDR, + FSL_IMX6UL_WDOG2_ADDR, + FSL_IMX6UL_WDOG3_ADDR, + }; + + object_property_set_bool(OBJECT(&s->wdt[i]), true, "realized", + &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, + FSL_IMX6UL_WDOGn_ADDR[i]); + } + + /* + * GPR + */ + object_property_set_bool(OBJECT(&s->gpr), true, "realized", + &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpr), 0, FSL_IMX6UL_IOMUXC_GPR_ADDR); + + /* + * SDMA + */ + create_unimplemented_device("sdma", FSL_IMX6UL_SDMA_ADDR, 0x4000); + + /* + * APHB_DMA + */ + create_unimplemented_device("aphb_dma", FSL_IMX6UL_APBH_DMA_ADDR, + FSL_IMX6UL_APBH_DMA_SIZE); + + /* + * ADCs + */ + for (i = 0; i < FSL_IMX6UL_NUM_ADCS; i++) { + static const hwaddr FSL_IMX6UL_ADCn_ADDR[FSL_IMX6UL_NUM_ADCS] = { + FSL_IMX6UL_ADC1_ADDR, + FSL_IMX6UL_ADC2_ADDR, + }; + + snprintf(name, NAME_SIZE, "adc%d", i); + create_unimplemented_device(name, FSL_IMX6UL_ADCn_ADDR[i], 0x4000); + } + + /* + * LCD + */ + create_unimplemented_device("lcdif", FSL_IMX6UL_LCDIF_ADDR, 0x4000); + + /* + * ROM memory + */ + memory_region_init_rom(&s->rom, NULL, "imx6ul.rom", + FSL_IMX6UL_ROM_SIZE, &error_abort); + memory_region_add_subregion(get_system_memory(), FSL_IMX6UL_ROM_ADDR, + &s->rom); + + /* + * CAAM memory + */ + memory_region_init_rom(&s->caam, NULL, "imx6ul.caam", + FSL_IMX6UL_CAAM_MEM_SIZE, &error_abort); + memory_region_add_subregion(get_system_memory(), FSL_IMX6UL_CAAM_MEM_ADDR, + &s->caam); + + /* + * OCRAM memory + */ + memory_region_init_ram(&s->ocram, NULL, "imx6ul.ocram", + FSL_IMX6UL_OCRAM_MEM_SIZE, + &error_abort); + memory_region_add_subregion(get_system_memory(), FSL_IMX6UL_OCRAM_MEM_ADDR, + &s->ocram); + + /* + * internal OCRAM (128 KB) is aliased over 512 KB + */ + memory_region_init_alias(&s->ocram_alias, NULL, "imx6ul.ocram_alias", + &s->ocram, 0, FSL_IMX6UL_OCRAM_ALIAS_SIZE); + 
memory_region_add_subregion(get_system_memory(), + FSL_IMX6UL_OCRAM_ALIAS_ADDR, &s->ocram_alias); +} + +static void fsl_imx6ul_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = fsl_imx6ul_realize; + dc->desc = "i.MX6UL SOC"; + /* Reason: Uses serial_hds and nd_table in realize() directly */ + dc->user_creatable = false; +} + +static const TypeInfo fsl_imx6ul_type_info = { + .name = TYPE_FSL_IMX6UL, + .parent = TYPE_DEVICE, + .instance_size = sizeof(FslIMX6ULState), + .instance_init = fsl_imx6ul_init, + .class_init = fsl_imx6ul_class_init, +}; + +static void fsl_imx6ul_register_types(void) +{ + type_register_static(&fsl_imx6ul_type_info); +} +type_init(fsl_imx6ul_register_types) diff --git a/hw/arm/mcimx6ul-evk.c b/hw/arm/mcimx6ul-evk.c new file mode 100644 index 0000000000..fb2b015bf6 --- /dev/null +++ b/hw/arm/mcimx6ul-evk.c @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2018 Jean-Christophe Dubois <jcd@tribudubois.net> + * + * MCIMX6UL_EVK Board System emulation. + * + * This code is licensed under the GPL, version 2 or later. + * See the file `COPYING' in the top level directory. + * + * It (partially) emulates a mcimx6ul_evk board, with a Freescale + * i.MX6ul SoC + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "hw/arm/fsl-imx6ul.h" +#include "hw/boards.h" +#include "sysemu/sysemu.h" +#include "qemu/error-report.h" +#include "sysemu/qtest.h" + +typedef struct { + FslIMX6ULState soc; + MemoryRegion ram; +} MCIMX6ULEVK; + +static void mcimx6ul_evk_init(MachineState *machine) +{ + static struct arm_boot_info boot_info; + MCIMX6ULEVK *s = g_new0(MCIMX6ULEVK, 1); + int i; + + if (machine->ram_size > FSL_IMX6UL_MMDC_SIZE) { + error_report("RAM size " RAM_ADDR_FMT " above max supported (%08x)", + machine->ram_size, FSL_IMX6UL_MMDC_SIZE); + exit(1); + } + + boot_info = (struct arm_boot_info) { + .loader_start = FSL_IMX6UL_MMDC_ADDR, + .board_id = -1, + .ram_size = machine->ram_size, + .kernel_filename = machine->kernel_filename, + .kernel_cmdline = machine->kernel_cmdline, + .initrd_filename = machine->initrd_filename, + .nb_cpus = smp_cpus, + }; + + object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc), + TYPE_FSL_IMX6UL, &error_fatal, NULL); + + object_property_set_bool(OBJECT(&s->soc), true, "realized", &error_fatal); + + memory_region_allocate_system_memory(&s->ram, NULL, "mcimx6ul-evk.ram", + machine->ram_size); + memory_region_add_subregion(get_system_memory(), + FSL_IMX6UL_MMDC_ADDR, &s->ram); + + for (i = 0; i < FSL_IMX6UL_NUM_USDHCS; i++) { + BusState *bus; + DeviceState *carddev; + DriveInfo *di; + BlockBackend *blk; + + di = drive_get_next(IF_SD); + blk = di ? 
blk_by_legacy_dinfo(di) : NULL; + bus = qdev_get_child_bus(DEVICE(&s->soc.usdhc[i]), "sd-bus"); + carddev = qdev_create(bus, TYPE_SD_CARD); + qdev_prop_set_drive(carddev, "drive", blk, &error_fatal); + object_property_set_bool(OBJECT(carddev), true, + "realized", &error_fatal); + } + + if (!qtest_enabled()) { + arm_load_kernel(&s->soc.cpu[0], &boot_info); + } +} + +static void mcimx6ul_evk_machine_init(MachineClass *mc) +{ + mc->desc = "Freescale i.MX6UL Evaluation Kit (Cortex A7)"; + mc->init = mcimx6ul_evk_init; + mc->max_cpus = FSL_IMX6UL_NUM_CPUS; +} +DEFINE_MACHINE("mcimx6ul-evk", mcimx6ul_evk_machine_init) diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c index 22180c56fb..dc0f34abe5 100644 --- a/hw/arm/mps2-tz.c +++ b/hw/arm/mps2-tz.c @@ -107,16 +107,6 @@ static void make_ram_alias(MemoryRegion *mr, const char *name, memory_region_add_subregion(get_system_memory(), base, mr); } -static void init_sysbus_child(Object *parent, const char *childname, - void *child, size_t childsize, - const char *childtype) -{ - object_initialize(child, childsize, childtype); - object_property_add_child(parent, childname, OBJECT(child), &error_abort); - qdev_set_parent_bus(DEVICE(child), sysbus_get_default()); - -} - /* Most of the devices in the AN505 FPGA image sit behind * Peripheral Protection Controllers. These data structures * define the layout of which devices sit behind which PPCs. @@ -149,9 +139,9 @@ static MemoryRegion *make_unimp_dev(MPS2TZMachineState *mms, */ UnimplementedDeviceState *uds = opaque; - init_sysbus_child(OBJECT(mms), name, uds, - sizeof(UnimplementedDeviceState), - TYPE_UNIMPLEMENTED_DEVICE); + sysbus_init_child_obj(OBJECT(mms), name, uds, + sizeof(UnimplementedDeviceState), + TYPE_UNIMPLEMENTED_DEVICE); qdev_prop_set_string(DEVICE(uds), "name", name); qdev_prop_set_uint64(DEVICE(uds), "size", size); object_property_set_bool(OBJECT(uds), true, "realized", &error_fatal); @@ -170,8 +160,8 @@ static MemoryRegion *make_uart(MPS2TZMachineState *mms, void *opaque, DeviceState *iotkitdev = DEVICE(&mms->iotkit); DeviceState *orgate_dev = DEVICE(&mms->uart_irq_orgate); - init_sysbus_child(OBJECT(mms), name, uart, - sizeof(mms->uart[0]), TYPE_CMSDK_APB_UART); + sysbus_init_child_obj(OBJECT(mms), name, uart, sizeof(mms->uart[0]), + TYPE_CMSDK_APB_UART); qdev_prop_set_chr(DEVICE(uart), "chardev", serial_hd(i)); qdev_prop_set_uint32(DEVICE(uart), "pclk-frq", SYSCLK_FRQ); object_property_set_bool(OBJECT(uart), true, "realized", &error_fatal); @@ -248,8 +238,8 @@ static MemoryRegion *make_mpc(MPS2TZMachineState *mms, void *opaque, memory_region_init_ram(ssram, NULL, name, ramsize[i], &error_fatal); - init_sysbus_child(OBJECT(mms), mpcname, mpc, - sizeof(mms->ssram_mpc[0]), TYPE_TZ_MPC); + sysbus_init_child_obj(OBJECT(mms), mpcname, mpc, sizeof(mms->ssram_mpc[0]), + TYPE_TZ_MPC); object_property_set_link(OBJECT(mpc), OBJECT(ssram), "downstream", &error_fatal); object_property_set_bool(OBJECT(mpc), true, "realized", &error_fatal); @@ -288,8 +278,8 @@ static void mps2tz_common_init(MachineState *machine) exit(1); } - init_sysbus_child(OBJECT(machine), "iotkit", &mms->iotkit, - sizeof(mms->iotkit), TYPE_IOTKIT); + sysbus_init_child_obj(OBJECT(machine), "iotkit", &mms->iotkit, + sizeof(mms->iotkit), TYPE_IOTKIT); iotkitdev = DEVICE(&mms->iotkit); object_property_set_link(OBJECT(&mms->iotkit), OBJECT(system_memory), "memory", &error_abort); @@ -421,8 +411,8 @@ static void mps2tz_common_init(MachineState *machine) int port; char *gpioname; - init_sysbus_child(OBJECT(machine), ppcinfo->name, ppc, - 
sizeof(TZPPC), TYPE_TZ_PPC); + sysbus_init_child_obj(OBJECT(machine), ppcinfo->name, ppc, + sizeof(TZPPC), TYPE_TZ_PPC); ppcdev = DEVICE(ppc); for (port = 0; port < TZ_NUM_PORTS; port++) { diff --git a/hw/arm/mps2.c b/hw/arm/mps2.c index c3946da317..0a0ae867d9 100644 --- a/hw/arm/mps2.c +++ b/hw/arm/mps2.c @@ -186,6 +186,7 @@ static void mps2_common_init(MachineState *machine) g_assert_not_reached(); } qdev_prop_set_string(armv7m, "cpu-type", machine->cpu_type); + qdev_prop_set_bit(armv7m, "enable-bitband", true); object_property_set_link(OBJECT(&mms->armv7m), OBJECT(system_memory), "memory", &error_abort); object_property_set_bool(OBJECT(&mms->armv7m), true, "realized", diff --git a/hw/arm/msf2-soc.c b/hw/arm/msf2-soc.c index dbefade644..2702e90b45 100644 --- a/hw/arm/msf2-soc.c +++ b/hw/arm/msf2-soc.c @@ -117,6 +117,7 @@ static void m2sxxx_soc_realize(DeviceState *dev_soc, Error **errp) armv7m = DEVICE(&s->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 81); qdev_prop_set_string(armv7m, "cpu-type", s->cpu_type); + qdev_prop_set_bit(armv7m, "enable-bitband", true); object_property_set_link(OBJECT(&s->armv7m), OBJECT(get_system_memory()), "memory", &error_abort); object_property_set_bool(OBJECT(&s->armv7m), true, "realized", &err); diff --git a/hw/arm/realview.c b/hw/arm/realview.c index cd585d9469..ab8c14fde3 100644 --- a/hw/arm/realview.c +++ b/hw/arm/realview.c @@ -201,7 +201,13 @@ static void realview_init(MachineState *machine, pl011_create(0x1000c000, pic[15], serial_hd(3)); /* DMA controller is optional, apparently. */ - sysbus_create_simple("pl081", 0x10030000, pic[24]); + dev = qdev_create(NULL, "pl081"); + object_property_set_link(OBJECT(dev), OBJECT(sysmem), "downstream", + &error_fatal); + qdev_init_nofail(dev); + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, 0x10030000); + sysbus_connect_irq(busdev, 0, pic[24]); sysbus_create_simple("sp804", 0x10011000, pic[4]); sysbus_create_simple("sp804", 0x10012000, pic[5]); diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c index dc521b4a5a..6c69ce79b2 100644 --- a/hw/arm/stellaris.c +++ b/hw/arm/stellaris.c @@ -1304,6 +1304,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) nvic = qdev_create(NULL, TYPE_ARMV7M); qdev_prop_set_uint32(nvic, "num-irq", NUM_IRQ_LINES); qdev_prop_set_string(nvic, "cpu-type", ms->cpu_type); + qdev_prop_set_bit(nvic, "enable-bitband", true); object_property_set_link(OBJECT(nvic), OBJECT(get_system_memory()), "memory", &error_abort); /* This will exit with an error if the user passed us a bad cpu_type */ diff --git a/hw/arm/stm32f205_soc.c b/hw/arm/stm32f205_soc.c index c486d06a8b..980e5af13c 100644 --- a/hw/arm/stm32f205_soc.c +++ b/hw/arm/stm32f205_soc.c @@ -109,6 +109,7 @@ static void stm32f205_soc_realize(DeviceState *dev_soc, Error **errp) armv7m = DEVICE(&s->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 96); qdev_prop_set_string(armv7m, "cpu-type", s->cpu_type); + qdev_prop_set_bit(armv7m, "enable-bitband", true); object_property_set_link(OBJECT(&s->armv7m), OBJECT(get_system_memory()), "memory", &error_abort); object_property_set_bool(OBJECT(&s->armv7m), true, "realized", &err); diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c index a5a06b6d40..8b74857059 100644 --- a/hw/arm/versatilepb.c +++ b/hw/arm/versatilepb.c @@ -287,7 +287,14 @@ static void versatile_init(MachineState *machine, int board_id) pl011_create(0x101f3000, pic[14], serial_hd(2)); pl011_create(0x10009000, sic[6], serial_hd(3)); - sysbus_create_simple("pl080", 0x10130000, pic[17]); + 
dev = qdev_create(NULL, "pl080"); + object_property_set_link(OBJECT(dev), OBJECT(sysmem), "downstream", + &error_fatal); + qdev_init_nofail(dev); + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, 0x10130000); + sysbus_connect_irq(busdev, 0, pic[17]); + sysbus_create_simple("sp804", 0x101e2000, pic[4]); sysbus_create_simple("sp804", 0x101e3000, pic[5]); diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index 6ea47e2588..ce31abd62c 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -659,6 +659,8 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) gicc->length = sizeof(*gicc); if (vms->gic_version == 2) { gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base); + gicc->gich_base_address = cpu_to_le64(memmap[VIRT_GIC_HYP].base); + gicc->gicv_base_address = cpu_to_le64(memmap[VIRT_GIC_VCPU].base); } gicc->cpu_interface_number = cpu_to_le32(i); gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity); @@ -668,8 +670,8 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) { gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ)); } - if (vms->virt && vms->gic_version == 3) { - gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GICV3_MAINT_IRQ)); + if (vms->virt) { + gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GIC_MAINT_IRQ)); } } diff --git a/hw/arm/virt.c b/hw/arm/virt.c index 281ddcdf6e..0b57f87abc 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -131,6 +131,8 @@ static const MemMapEntry a15memmap[] = { [VIRT_GIC_DIST] = { 0x08000000, 0x00010000 }, [VIRT_GIC_CPU] = { 0x08010000, 0x00010000 }, [VIRT_GIC_V2M] = { 0x08020000, 0x00001000 }, + [VIRT_GIC_HYP] = { 0x08030000, 0x00010000 }, + [VIRT_GIC_VCPU] = { 0x08040000, 0x00010000 }, /* The space in between here is reserved for GICv3 CPU/vCPU/HYP */ [VIRT_GIC_ITS] = { 0x08080000, 0x00020000 }, /* This redistributor space allows up to 2*64kB*123 CPUs */ @@ -440,18 +442,33 @@ static void fdt_add_gic_node(VirtMachineState *vms) if (vms->virt) { qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupts", - GIC_FDT_IRQ_TYPE_PPI, ARCH_GICV3_MAINT_IRQ, + GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ, GIC_FDT_IRQ_FLAGS_LEVEL_HI); } } else { /* 'cortex-a15-gic' means 'GIC v2' */ qemu_fdt_setprop_string(vms->fdt, nodename, "compatible", "arm,cortex-a15-gic"); - qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg", - 2, vms->memmap[VIRT_GIC_DIST].base, - 2, vms->memmap[VIRT_GIC_DIST].size, - 2, vms->memmap[VIRT_GIC_CPU].base, - 2, vms->memmap[VIRT_GIC_CPU].size); + if (!vms->virt) { + qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg", + 2, vms->memmap[VIRT_GIC_DIST].base, + 2, vms->memmap[VIRT_GIC_DIST].size, + 2, vms->memmap[VIRT_GIC_CPU].base, + 2, vms->memmap[VIRT_GIC_CPU].size); + } else { + qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg", + 2, vms->memmap[VIRT_GIC_DIST].base, + 2, vms->memmap[VIRT_GIC_DIST].size, + 2, vms->memmap[VIRT_GIC_CPU].base, + 2, vms->memmap[VIRT_GIC_CPU].size, + 2, vms->memmap[VIRT_GIC_HYP].base, + 2, vms->memmap[VIRT_GIC_HYP].size, + 2, vms->memmap[VIRT_GIC_VCPU].base, + 2, vms->memmap[VIRT_GIC_VCPU].size); + qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupts", + GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + } } qemu_fdt_setprop_cell(vms->fdt, nodename, "phandle", vms->gic_phandle); @@ -573,6 +590,11 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic) qdev_prop_set_uint32(gicdev, "redist-region-count[1]", MIN(smp_cpus - redist0_count, 
redist1_capacity)); } + } else { + if (!kvm_irqchip_in_kernel()) { + qdev_prop_set_bit(gicdev, "has-virtualization-extensions", + vms->virt); + } } qdev_init_nofail(gicdev); gicbusdev = SYS_BUS_DEVICE(gicdev); @@ -584,6 +606,10 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic) } } else { sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base); + if (vms->virt) { + sysbus_mmio_map(gicbusdev, 2, vms->memmap[VIRT_GIC_HYP].base); + sysbus_mmio_map(gicbusdev, 3, vms->memmap[VIRT_GIC_VCPU].base); + } } /* Wire the outputs from each CPU's generic timer and the GICv3 @@ -610,9 +636,17 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic) ppibase + timer_irq[irq])); } - qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0, - qdev_get_gpio_in(gicdev, ppibase - + ARCH_GICV3_MAINT_IRQ)); + if (type == 3) { + qemu_irq irq = qdev_get_gpio_in(gicdev, + ppibase + ARCH_GIC_MAINT_IRQ); + qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", + 0, irq); + } else if (vms->virt) { + qemu_irq irq = qdev_get_gpio_in(gicdev, + ppibase + ARCH_GIC_MAINT_IRQ); + sysbus_connect_irq(gicbusdev, i + 4 * smp_cpus, irq); + } + qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0, qdev_get_gpio_in(gicdev, ppibase + VIRTUAL_PMU_IRQ)); @@ -1757,10 +1791,7 @@ static void machvirt_machine_init(void) } type_init(machvirt_machine_init); -#define VIRT_COMPAT_2_12 \ - HW_COMPAT_2_12 - -static void virt_3_0_instance_init(Object *obj) +static void virt_3_1_instance_init(Object *obj) { VirtMachineState *vms = VIRT_MACHINE(obj); VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); @@ -1830,10 +1861,24 @@ static void virt_3_0_instance_init(Object *obj) vms->irqmap = a15irqmap; } +static void virt_machine_3_1_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE_AS_LATEST(3, 1) + +static void virt_3_0_instance_init(Object *obj) +{ + virt_3_1_instance_init(obj); +} + static void virt_machine_3_0_options(MachineClass *mc) { + virt_machine_3_1_options(mc); } -DEFINE_VIRT_MACHINE_AS_LATEST(3, 0) +DEFINE_VIRT_MACHINE(3, 0) + +#define VIRT_COMPAT_2_12 \ + HW_COMPAT_2_12 static void virt_2_12_instance_init(Object *obj) { diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c index 8de4868eb9..c195040350 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -29,12 +29,17 @@ #define ARM_PHYS_TIMER_PPI 30 #define ARM_VIRT_TIMER_PPI 27 +#define ARM_HYP_TIMER_PPI 26 +#define ARM_SEC_TIMER_PPI 29 +#define GIC_MAINTENANCE_PPI 25 #define GEM_REVISION 0x40070106 #define GIC_BASE_ADDR 0xf9000000 #define GIC_DIST_ADDR 0xf9010000 #define GIC_CPU_ADDR 0xf9020000 +#define GIC_VIFACE_ADDR 0xf9040000 +#define GIC_VCPU_ADDR 0xf9060000 #define SATA_INTR 133 #define SATA_ADDR 0xFD0C0000 @@ -111,11 +116,54 @@ static const int adma_ch_intr[XLNX_ZYNQMP_NUM_ADMA_CH] = { typedef struct XlnxZynqMPGICRegion { int region_index; uint32_t address; + uint32_t offset; + bool virt; } XlnxZynqMPGICRegion; static const XlnxZynqMPGICRegion xlnx_zynqmp_gic_regions[] = { - { .region_index = 0, .address = GIC_DIST_ADDR, }, - { .region_index = 1, .address = GIC_CPU_ADDR, }, + /* Distributor */ + { + .region_index = 0, + .address = GIC_DIST_ADDR, + .offset = 0, + .virt = false + }, + + /* CPU interface */ + { + .region_index = 1, + .address = GIC_CPU_ADDR, + .offset = 0, + .virt = false + }, + { + .region_index = 1, + .address = GIC_CPU_ADDR + 0x10000, + .offset = 0x1000, + .virt = false + }, + + /* Virtual interface */ + { + .region_index = 2, + .address = GIC_VIFACE_ADDR, + .offset = 0, + .virt = true + }, + + 
/* Virtual CPU interface */ + { + .region_index = 3, + .address = GIC_VCPU_ADDR, + .offset = 0, + .virt = true + }, + { + .region_index = 3, + .address = GIC_VCPU_ADDR + 0x10000, + .offset = 0x1000, + .virt = true + }, }; static inline int arm_gic_ppi_index(int cpu_nr, int ppi_index) @@ -281,6 +329,9 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", GIC_NUM_SPI_INTR + 32); qdev_prop_set_uint32(DEVICE(&s->gic), "revision", 2); qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", num_apus); + qdev_prop_set_bit(DEVICE(&s->gic), "has-security-extensions", s->secure); + qdev_prop_set_bit(DEVICE(&s->gic), + "has-virtualization-extensions", s->virt); /* Realize APUs before realizing the GIC. KVM requires this. */ for (i = 0; i < num_apus; i++) { @@ -325,19 +376,23 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) for (i = 0; i < XLNX_ZYNQMP_GIC_REGIONS; i++) { SysBusDevice *gic = SYS_BUS_DEVICE(&s->gic); const XlnxZynqMPGICRegion *r = &xlnx_zynqmp_gic_regions[i]; - MemoryRegion *mr = sysbus_mmio_get_region(gic, r->region_index); + MemoryRegion *mr; uint32_t addr = r->address; int j; - sysbus_mmio_map(gic, r->region_index, addr); + if (r->virt && !s->virt) { + continue; + } + mr = sysbus_mmio_get_region(gic, r->region_index); for (j = 0; j < XLNX_ZYNQMP_GIC_ALIASES; j++) { MemoryRegion *alias = &s->gic_mr[i][j]; - addr += XLNX_ZYNQMP_GIC_REGION_SIZE; memory_region_init_alias(alias, OBJECT(s), "zynqmp-gic-alias", mr, - 0, XLNX_ZYNQMP_GIC_REGION_SIZE); + r->offset, XLNX_ZYNQMP_GIC_REGION_SIZE); memory_region_add_subregion(system_memory, addr, alias); + + addr += XLNX_ZYNQMP_GIC_REGION_SIZE; } } @@ -347,12 +402,33 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i, qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]), ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus, + qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]), + ARM_CPU_FIQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus * 2, + qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]), + ARM_CPU_VIRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus * 3, + qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]), + ARM_CPU_VFIQ)); irq = qdev_get_gpio_in(DEVICE(&s->gic), arm_gic_ppi_index(i, ARM_PHYS_TIMER_PPI)); - qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), 0, irq); + qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_PHYS, irq); irq = qdev_get_gpio_in(DEVICE(&s->gic), arm_gic_ppi_index(i, ARM_VIRT_TIMER_PPI)); - qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), 1, irq); + qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_VIRT, irq); + irq = qdev_get_gpio_in(DEVICE(&s->gic), + arm_gic_ppi_index(i, ARM_HYP_TIMER_PPI)); + qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_HYP, irq); + irq = qdev_get_gpio_in(DEVICE(&s->gic), + arm_gic_ppi_index(i, ARM_SEC_TIMER_PPI)); + qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_SEC, irq); + + if (s->virt) { + irq = qdev_get_gpio_in(DEVICE(&s->gic), + arm_gic_ppi_index(i, GIC_MAINTENANCE_PPI)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus * 4, irq); + } } if (s->has_rpu) { diff --git a/hw/block/block.c b/hw/block/block.c index b91e2b6d7e..cf0eb826f1 100644 --- a/hw/block/block.c +++ b/hw/block/block.c @@ -15,19 +15,6 @@ #include "qapi/qapi-types-block.h" #include "qemu/error-report.h" -void blkconf_serial(BlockConf *conf, char **serial) -{ - DriveInfo *dinfo; - - if (!*serial) { - /* try to fall back to value set with legacy -drive serial=... 
*/ - dinfo = blk_legacy_dinfo(conf->blk); - if (dinfo) { - *serial = g_strdup(dinfo->serial); - } - } -} - void blkconf_blocksizes(BlockConf *conf) { BlockBackend *blk = conf->blk; @@ -108,20 +95,6 @@ bool blkconf_geometry(BlockConf *conf, int *ptrans, unsigned cyls_max, unsigned heads_max, unsigned secs_max, Error **errp) { - DriveInfo *dinfo; - - if (!conf->cyls && !conf->heads && !conf->secs) { - /* try to fall back to value set with legacy -drive cyls=... */ - dinfo = blk_legacy_dinfo(conf->blk); - if (dinfo) { - conf->cyls = dinfo->cyls; - conf->heads = dinfo->heads; - conf->secs = dinfo->secs; - if (ptrans) { - *ptrans = dinfo->trans; - } - } - } if (!conf->cyls && !conf->heads && !conf->secs) { hd_geometry_guess(conf->blk, &conf->cyls, &conf->heads, &conf->secs, diff --git a/hw/block/nvme.c b/hw/block/nvme.c index 5e508ab1b3..fc7dacb816 100644 --- a/hw/block/nvme.c +++ b/hw/block/nvme.c @@ -1217,7 +1217,6 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp) return; } - blkconf_serial(&n->conf, &n->serial); if (!n->serial) { error_setg(errp, "serial property not set"); return; diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index 50b5c869e3..225fe44b7a 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -935,7 +935,6 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) return; } - blkconf_serial(&conf->conf, &conf->serial); if (!blkconf_apply_backend_options(&conf->conf, blk_is_read_only(conf->conf.blk), true, errp)) { diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c index 0747db9f2b..1e363190e3 100644 --- a/hw/char/imx_serial.c +++ b/hw/char/imx_serial.c @@ -74,8 +74,9 @@ static void imx_update(IMXSerialState *s) mask = (s->ucr1 & UCR1_TXMPTYEN) ? USR2_TXFE : 0; /* * TCEN and TXDC are both bit 3 + * RDR and DREN are both bit 0 */ - mask |= s->ucr4 & UCR4_TCEN; + mask |= s->ucr4 & (UCR4_TCEN | UCR4_DREN); usr2 = s->usr2 & mask; diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c index cb0e68486d..fde32cbda1 100644 --- a/hw/core/generic-loader.c +++ b/hw/core/generic-loader.c @@ -147,6 +147,10 @@ static void generic_loader_realize(DeviceState *dev, Error **errp) size = load_uimage_as(s->file, &entry, NULL, NULL, NULL, NULL, as); } + + if (size < 0) { + size = load_targphys_hex_as(s->file, &entry, as); + } } if (size < 0 || s->force_raw) { diff --git a/hw/core/loader.c b/hw/core/loader.c index bbb6e65bb5..390987a05c 100644 --- a/hw/core/loader.c +++ b/hw/core/loader.c @@ -840,6 +840,8 @@ struct Rom { char *fw_dir; char *fw_file; + bool committed; + hwaddr addr; QTAILQ_ENTRY(Rom) next; }; @@ -847,6 +849,17 @@ struct Rom { static FWCfgState *fw_cfg; static QTAILQ_HEAD(, Rom) roms = QTAILQ_HEAD_INITIALIZER(roms); +/* rom->data must be heap-allocated (do not use with rom_add_elf_program()) */ +static void rom_free(Rom *rom) +{ + g_free(rom->data); + g_free(rom->path); + g_free(rom->name); + g_free(rom->fw_dir); + g_free(rom->fw_file); + g_free(rom); +} + static inline bool rom_order_compare(Rom *rom, Rom *item) { return ((uintptr_t)(void *)rom->as > (uintptr_t)(void *)item->as) || @@ -866,6 +879,8 @@ static void rom_insert(Rom *rom) rom->as = &address_space_memory; } + rom->committed = false; + /* List is ordered by load address in the same address space */ QTAILQ_FOREACH(item, &roms, next) { if (rom_order_compare(rom, item)) { @@ -995,15 +1010,7 @@ err: if (fd != -1) close(fd); - g_free(rom->data); - g_free(rom->path); - g_free(rom->name); - if (fw_dir) { - g_free(rom->fw_dir); - g_free(rom->fw_file); - } - 
g_free(rom); - + rom_free(rom); return -1; } @@ -1165,6 +1172,34 @@ void rom_reset_order_override(void) fw_cfg_reset_order_override(fw_cfg); } +void rom_transaction_begin(void) +{ + Rom *rom; + + /* Ignore ROMs added without the transaction API */ + QTAILQ_FOREACH(rom, &roms, next) { + rom->committed = true; + } +} + +void rom_transaction_end(bool commit) +{ + Rom *rom; + Rom *tmp; + + QTAILQ_FOREACH_SAFE(rom, &roms, next, tmp) { + if (rom->committed) { + continue; + } + if (commit) { + rom->committed = true; + } else { + QTAILQ_REMOVE(&roms, rom, next); + rom_free(rom); + } + } +} + static Rom *find_rom(hwaddr addr, size_t size) { Rom *rom; @@ -1286,3 +1321,252 @@ void hmp_info_roms(Monitor *mon, const QDict *qdict) } } } + +typedef enum HexRecord HexRecord; +enum HexRecord { + DATA_RECORD = 0, + EOF_RECORD, + EXT_SEG_ADDR_RECORD, + START_SEG_ADDR_RECORD, + EXT_LINEAR_ADDR_RECORD, + START_LINEAR_ADDR_RECORD, +}; + +/* Each record contains a 16-bit address which is combined with the upper 16 + * bits of the implicit "next address" to form a 32-bit address. + */ +#define NEXT_ADDR_MASK 0xffff0000 + +#define DATA_FIELD_MAX_LEN 0xff +#define LEN_EXCEPT_DATA 0x5 +/* 0x5 = sizeof(byte_count) + sizeof(address) + sizeof(record_type) + + * sizeof(checksum) */ +typedef struct { + uint8_t byte_count; + uint16_t address; + uint8_t record_type; + uint8_t data[DATA_FIELD_MAX_LEN]; + uint8_t checksum; +} HexLine; + +/* return 0 or -1 if error */ +static bool parse_record(HexLine *line, uint8_t *our_checksum, const uint8_t c, + uint32_t *index, const bool in_process) +{ + /* +-------+---------------+-------+---------------------+--------+ + * | byte | |record | | | + * | count | address | type | data |checksum| + * +-------+---------------+-------+---------------------+--------+ + * ^ ^ ^ ^ ^ ^ + * |1 byte | 2 bytes |1 byte | 0-255 bytes | 1 byte | + */ + uint8_t value = 0; + uint32_t idx = *index; + /* ignore space */ + if (g_ascii_isspace(c)) { + return true; + } + if (!g_ascii_isxdigit(c) || !in_process) { + return false; + } + value = g_ascii_xdigit_value(c); + value = (idx & 0x1) ? 
(value & 0xf) : (value << 4); + if (idx < 2) { + line->byte_count |= value; + } else if (2 <= idx && idx < 6) { + line->address <<= 4; + line->address += g_ascii_xdigit_value(c); + } else if (6 <= idx && idx < 8) { + line->record_type |= value; + } else if (8 <= idx && idx < 8 + 2 * line->byte_count) { + line->data[(idx - 8) >> 1] |= value; + } else if (8 + 2 * line->byte_count <= idx && + idx < 10 + 2 * line->byte_count) { + line->checksum |= value; + } else { + return false; + } + *our_checksum += value; + ++(*index); + return true; +} + +typedef struct { + const char *filename; + HexLine line; + uint8_t *bin_buf; + hwaddr *start_addr; + int total_size; + uint32_t next_address_to_write; + uint32_t current_address; + uint32_t current_rom_index; + uint32_t rom_start_address; + AddressSpace *as; +} HexParser; + +/* return size or -1 if error */ +static int handle_record_type(HexParser *parser) +{ + HexLine *line = &(parser->line); + switch (line->record_type) { + case DATA_RECORD: + parser->current_address = + (parser->next_address_to_write & NEXT_ADDR_MASK) | line->address; + /* verify this is a contiguous block of memory */ + if (parser->current_address != parser->next_address_to_write) { + if (parser->current_rom_index != 0) { + rom_add_blob_fixed_as(parser->filename, parser->bin_buf, + parser->current_rom_index, + parser->rom_start_address, parser->as); + } + parser->rom_start_address = parser->current_address; + parser->current_rom_index = 0; + } + + /* copy from line buffer to output bin_buf */ + memcpy(parser->bin_buf + parser->current_rom_index, line->data, + line->byte_count); + parser->current_rom_index += line->byte_count; + parser->total_size += line->byte_count; + /* save next address to write */ + parser->next_address_to_write = + parser->current_address + line->byte_count; + break; + + case EOF_RECORD: + if (parser->current_rom_index != 0) { + rom_add_blob_fixed_as(parser->filename, parser->bin_buf, + parser->current_rom_index, + parser->rom_start_address, parser->as); + } + return parser->total_size; + case EXT_SEG_ADDR_RECORD: + case EXT_LINEAR_ADDR_RECORD: + if (line->byte_count != 2 && line->address != 0) { + return -1; + } + + if (parser->current_rom_index != 0) { + rom_add_blob_fixed_as(parser->filename, parser->bin_buf, + parser->current_rom_index, + parser->rom_start_address, parser->as); + } + + /* save next address to write, + * in case of non-contiguous block of memory */ + parser->next_address_to_write = (line->data[0] << 12) | + (line->data[1] << 4); + if (line->record_type == EXT_LINEAR_ADDR_RECORD) { + parser->next_address_to_write <<= 12; + } + + parser->rom_start_address = parser->next_address_to_write; + parser->current_rom_index = 0; + break; + + case START_SEG_ADDR_RECORD: + if (line->byte_count != 4 && line->address != 0) { + return -1; + } + + /* x86 16-bit CS:IP segmented addressing */ + *(parser->start_addr) = (((line->data[0] << 8) | line->data[1]) << 4) + + ((line->data[2] << 8) | line->data[3]); + break; + + case START_LINEAR_ADDR_RECORD: + if (line->byte_count != 4 && line->address != 0) { + return -1; + } + + *(parser->start_addr) = ldl_be_p(line->data); + break; + + default: + return -1; + } + + return parser->total_size; +} + +/* return size or -1 if error */ +static int parse_hex_blob(const char *filename, hwaddr *addr, uint8_t *hex_blob, + size_t hex_blob_size, AddressSpace *as) +{ + bool in_process = false; /* avoid re-enter and + * check whether record begin with ':' */ + uint8_t *end = hex_blob + hex_blob_size; + uint8_t our_checksum = 0; + 
uint32_t record_index = 0; + HexParser parser = { + .filename = filename, + .bin_buf = g_malloc(hex_blob_size), + .start_addr = addr, + .as = as, + }; + + rom_transaction_begin(); + + for (; hex_blob < end; ++hex_blob) { + switch (*hex_blob) { + case '\r': + case '\n': + if (!in_process) { + break; + } + + in_process = false; + if ((LEN_EXCEPT_DATA + parser.line.byte_count) * 2 != + record_index || + our_checksum != 0) { + parser.total_size = -1; + goto out; + } + + if (handle_record_type(&parser) == -1) { + parser.total_size = -1; + goto out; + } + break; + + /* start of a new record. */ + case ':': + memset(&parser.line, 0, sizeof(HexLine)); + in_process = true; + record_index = 0; + break; + + /* decoding lines */ + default: + if (!parse_record(&parser.line, &our_checksum, *hex_blob, + &record_index, in_process)) { + parser.total_size = -1; + goto out; + } + break; + } + } + +out: + g_free(parser.bin_buf); + rom_transaction_end(parser.total_size != -1); + return parser.total_size; +} + +/* return size or -1 if error */ +int load_targphys_hex_as(const char *filename, hwaddr *entry, AddressSpace *as) +{ + gsize hex_blob_size; + gchar *hex_blob; + int total_size = 0; + + if (!g_file_get_contents(filename, &hex_blob, &hex_blob_size, NULL)) { + return -1; + } + + total_size = parse_hex_blob(filename, entry, (uint8_t *)hex_blob, + hex_blob_size, as); + + g_free(hex_blob); + return total_size; +} diff --git a/hw/core/machine.c b/hw/core/machine.c index a9aeb22f03..6b68e1218f 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -793,8 +793,9 @@ void machine_run_board_init(MachineState *machine) MachineClass *machine_class = MACHINE_GET_CLASS(machine); numa_complete_configuration(machine); - if (nb_numa_nodes) + if (nb_numa_nodes) { machine_numa_finish_cpu_init(machine); + } /* If the machine supports the valid_cpu_types check and the user * specified a CPU with -cpu check here that the user CPU is supported. 
diff --git a/hw/core/sysbus.c b/hw/core/sysbus.c index 3c8e53b188..7ac36ad3e7 100644 --- a/hw/core/sysbus.c +++ b/hw/core/sysbus.c @@ -293,16 +293,8 @@ static char *sysbus_get_fw_dev_path(DeviceState *dev) { SysBusDevice *s = SYS_BUS_DEVICE(dev); SysBusDeviceClass *sbc = SYS_BUS_DEVICE_GET_CLASS(s); - /* for the explicit unit address fallback case: */ char *addr, *fw_dev_path; - if (s->num_mmio) { - return g_strdup_printf("%s@" TARGET_FMT_plx, qdev_fw_name(dev), - s->mmio[0].addr); - } - if (s->num_pio) { - return g_strdup_printf("%s@i%04x", qdev_fw_name(dev), s->pio[0]); - } if (sbc->explicit_ofw_unit_address) { addr = sbc->explicit_ofw_unit_address(s); if (addr) { @@ -311,6 +303,13 @@ static char *sysbus_get_fw_dev_path(DeviceState *dev) return fw_dev_path; } } + if (s->num_mmio) { + return g_strdup_printf("%s@" TARGET_FMT_plx, qdev_fw_name(dev), + s->mmio[0].addr); + } + if (s->num_pio) { + return g_strdup_printf("%s@i%04x", qdev_fw_name(dev), s->pio[0]); + } return g_strdup(qdev_fw_name(dev)); } diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index ec366f4c35..3ddd29c0de 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -421,6 +421,11 @@ static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id) scanout->height ?: 480, "Guest disabled display."); } + + if (g->disable_scanout) { + g->disable_scanout(g, scanout_id); + } + dpy_gfx_replace_surface(scanout->con, ds); scanout->resource_id = 0; scanout->ds = NULL; diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c index 8d3d9e14a7..701d980872 100644 --- a/hw/display/virtio-vga.c +++ b/hw/display/virtio-vga.c @@ -75,6 +75,16 @@ static void virtio_vga_gl_block(void *opaque, bool block) } } +static void virtio_vga_disable_scanout(VirtIOGPU *g, int scanout_id) +{ + VirtIOVGA *vvga = container_of(g, VirtIOVGA, vdev); + + if (scanout_id == 0) { + /* reset surface if needed */ + vvga->vga.graphic_mode = -1; + } +} + static const GraphicHwOps virtio_vga_ops = { .invalidate = virtio_vga_invalidate_display, .gfx_update = virtio_vga_update_display, @@ -156,6 +166,7 @@ static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp) vvga->vga_mrs, true); vga->con = g->scanout[0].con; + g->disable_scanout = virtio_vga_disable_scanout; graphic_console_set_hwops(vga->con, &virtio_vga_ops, vvga); for (i = 0; i < g->conf.max_outputs; i++) { diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c index 7724c93b8f..ef15d3e628 100644 --- a/hw/dma/pl080.c +++ b/hw/dma/pl080.c @@ -11,8 +11,9 @@ #include "hw/sysbus.h" #include "exec/address-spaces.h" #include "qemu/log.h" +#include "hw/dma/pl080.h" +#include "qapi/error.h" -#define PL080_MAX_CHANNELS 8 #define PL080_CONF_E 0x1 #define PL080_CONF_M1 0x2 #define PL080_CONF_M2 0x4 @@ -30,36 +31,6 @@ #define PL080_CCTRL_D 0x02000000 #define PL080_CCTRL_S 0x01000000 -typedef struct { - uint32_t src; - uint32_t dest; - uint32_t lli; - uint32_t ctrl; - uint32_t conf; -} pl080_channel; - -#define TYPE_PL080 "pl080" -#define PL080(obj) OBJECT_CHECK(PL080State, (obj), TYPE_PL080) - -typedef struct PL080State { - SysBusDevice parent_obj; - - MemoryRegion iomem; - uint8_t tc_int; - uint8_t tc_mask; - uint8_t err_int; - uint8_t err_mask; - uint32_t conf; - uint32_t sync; - uint32_t req_single; - uint32_t req_burst; - pl080_channel chan[PL080_MAX_CHANNELS]; - int nchannels; - /* Flag to avoid recursive DMA invocations. 
*/ - int running; - qemu_irq irq; -} PL080State; - static const VMStateDescription vmstate_pl080_channel = { .name = "pl080_channel", .version_id = 1, @@ -105,11 +76,12 @@ static const unsigned char pl081_id[] = static void pl080_update(PL080State *s) { - if ((s->tc_int & s->tc_mask) - || (s->err_int & s->err_mask)) - qemu_irq_raise(s->irq); - else - qemu_irq_lower(s->irq); + bool tclevel = (s->tc_int & s->tc_mask); + bool errlevel = (s->err_int & s->err_mask); + + qemu_set_irq(s->interr, errlevel); + qemu_set_irq(s->inttc, tclevel); + qemu_set_irq(s->irq, errlevel || tclevel); } static void pl080_run(PL080State *s) @@ -138,7 +110,6 @@ static void pl080_run(PL080State *s) if ((s->conf & PL080_CONF_E) == 0) return; -hw_error("DMA active\n"); /* If we are already in the middle of a DMA operation then indicate that there may be new DMA requests and return immediately. */ if (s->running) { @@ -190,14 +161,16 @@ again: swidth = 1 << ((ch->ctrl >> 18) & 7); dwidth = 1 << ((ch->ctrl >> 21) & 7); for (n = 0; n < dwidth; n+= swidth) { - cpu_physical_memory_read(ch->src, buff + n, swidth); + address_space_read(&s->downstream_as, ch->src, + MEMTXATTRS_UNSPECIFIED, buff + n, swidth); if (ch->ctrl & PL080_CCTRL_SI) ch->src += swidth; } xsize = (dwidth < swidth) ? swidth : dwidth; /* ??? This may pad the value incorrectly for dwidth < 32. */ for (n = 0; n < xsize; n += dwidth) { - cpu_physical_memory_write(ch->dest + n, buff + n, dwidth); + address_space_write(&s->downstream_as, ch->dest + n, + MEMTXATTRS_UNSPECIFIED, buff + n, dwidth); if (ch->ctrl & PL080_CCTRL_DI) ch->dest += swidth; } @@ -207,19 +180,19 @@ again: if (size == 0) { /* Transfer complete. */ if (ch->lli) { - ch->src = address_space_ldl_le(&address_space_memory, + ch->src = address_space_ldl_le(&s->downstream_as, ch->lli, MEMTXATTRS_UNSPECIFIED, NULL); - ch->dest = address_space_ldl_le(&address_space_memory, + ch->dest = address_space_ldl_le(&s->downstream_as, ch->lli + 4, MEMTXATTRS_UNSPECIFIED, NULL); - ch->ctrl = address_space_ldl_le(&address_space_memory, + ch->ctrl = address_space_ldl_le(&s->downstream_as, ch->lli + 12, MEMTXATTRS_UNSPECIFIED, NULL); - ch->lli = address_space_ldl_le(&address_space_memory, + ch->lli = address_space_ldl_le(&s->downstream_as, ch->lli + 8, MEMTXATTRS_UNSPECIFIED, NULL); @@ -255,7 +228,7 @@ static uint64_t pl080_read(void *opaque, hwaddr offset, i = (offset & 0xe0) >> 5; if (i >= s->nchannels) goto bad_offset; - switch (offset >> 2) { + switch ((offset >> 2) & 7) { case 0: /* SrcAddr */ return s->chan[i].src; case 1: /* DestAddr */ @@ -316,7 +289,7 @@ static void pl080_write(void *opaque, hwaddr offset, i = (offset & 0xe0) >> 5; if (i >= s->nchannels) goto bad_offset; - switch (offset >> 2) { + switch ((offset >> 2) & 7) { case 0: /* SrcAddr */ s->chan[i].src = value; break; @@ -334,6 +307,7 @@ static void pl080_write(void *opaque, hwaddr offset, pl080_run(s); break; } + return; } switch (offset >> 2) { case 2: /* IntTCClear */ @@ -374,6 +348,30 @@ static const MemoryRegionOps pl080_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; +static void pl080_reset(DeviceState *dev) +{ + PL080State *s = PL080(dev); + int i; + + s->tc_int = 0; + s->tc_mask = 0; + s->err_int = 0; + s->err_mask = 0; + s->conf = 0; + s->sync = 0; + s->req_single = 0; + s->req_burst = 0; + s->running = 0; + + for (i = 0; i < s->nchannels; i++) { + s->chan[i].src = 0; + s->chan[i].dest = 0; + s->chan[i].lli = 0; + s->chan[i].ctrl = 0; + s->chan[i].conf = 0; + } +} + static void pl080_init(Object *obj) { SysBusDevice *sbd = 
SYS_BUS_DEVICE(obj); @@ -382,9 +380,23 @@ static void pl080_init(Object *obj) memory_region_init_io(&s->iomem, OBJECT(s), &pl080_ops, s, "pl080", 0x1000); sysbus_init_mmio(sbd, &s->iomem); sysbus_init_irq(sbd, &s->irq); + sysbus_init_irq(sbd, &s->interr); + sysbus_init_irq(sbd, &s->inttc); s->nchannels = 8; } +static void pl080_realize(DeviceState *dev, Error **errp) +{ + PL080State *s = PL080(dev); + + if (!s->downstream) { + error_setg(errp, "PL080 'downstream' link not set"); + return; + } + + address_space_init(&s->downstream_as, s->downstream, "pl080-downstream"); +} + static void pl081_init(Object *obj) { PL080State *s = PL080(obj); @@ -392,11 +404,20 @@ static void pl081_init(Object *obj) s->nchannels = 2; } +static Property pl080_properties[] = { + DEFINE_PROP_LINK("downstream", PL080State, downstream, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + static void pl080_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->vmsd = &vmstate_pl080; + dc->realize = pl080_realize; + dc->props = pl080_properties; + dc->reset = pl080_reset; } static const TypeInfo pl080_info = { @@ -408,7 +429,7 @@ static const TypeInfo pl080_info = { }; static const TypeInfo pl081_info = { - .name = "pl081", + .name = TYPE_PL081, .parent = TYPE_PL080, .instance_init = pl081_init, }; diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 9e8350c55d..e1ee8ae9e0 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -2269,7 +2269,16 @@ static void build_srat_hotpluggable_memory(GArray *table_data, uint64_t base, numamem = acpi_data_push(table_data, sizeof *numamem); if (!info) { - build_srat_memory(numamem, cur, end - cur, default_node, + /* + * Entry is required for Windows to enable memory hotplug in OS + * and for Linux to enable SWIOTLB when booted with less than + * 4G of RAM. Windows works better if the entry sets proximity + * to the highest NUMA node in the machine at the end of the + * reserved space. + * Memory devices may override proximity set by this entry, + * providing _PXM method if necessary. + */ + build_srat_memory(numamem, end - 1, 1, default_node, MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); break; } @@ -2392,9 +2401,12 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) mem_len = next_base - pcms->below_4g_mem_size; next_base = mem_base + mem_len; } - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, mem_base, mem_len, i - 1, - MEM_AFFINITY_ENABLED); + + if (mem_len > 0) { + numamem = acpi_data_push(table_data, sizeof *numamem); + build_srat_memory(numamem, mem_base, mem_len, i - 1, + MEM_AFFINITY_ENABLED); + } } slots = (table_data->len - numa_start) / sizeof *numamem; for (; slots < pcms->numa_nodes + 2; slots++) { @@ -2402,14 +2414,6 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) build_srat_memory(numamem, 0, 0, 0, MEM_AFFINITY_NOFLAGS); } - /* - * Entry is required for Windows to enable memory hotplug in OS - * and for Linux to enable SWIOTLB when booted with less than - * 4G of RAM. Windows works better if the entry sets proximity - * to the highest NUMA node in the machine. - * Memory devices may override proximity set by this entry, - * providing _PXM method if necessary. 
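To put the relocated comment above in concrete terms, here is a tiny standalone model of where the fallback SRAT entry now lands: a single byte at the very end of the reserved hotplug window, assigned to the default (highest) NUMA node. The addresses and node number are invented for illustration only.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t base = 0x100000000ULL;   /* start of the hotplug window */
    uint64_t size = 4ULL << 30;       /* reserved hotplug address space */
    uint64_t end  = base + size;
    int default_node = 3;             /* highest node id in this example */

    /* Old fallback: one entry covering the whole window on default_node.
     * New fallback: a single byte at the very end of the window, still
     * flagged hotpluggable and enabled, so the OS learns about the window
     * and its proximity domain without a huge placeholder range. */
    printf("SRAT hotplug entry: base=0x%" PRIx64 " len=1 node=%d\n",
           end - 1, default_node);
    return 0;
}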
- */ if (hotplugabble_address_space_size) { build_srat_hotpluggable_memory(table_data, machine->device_memory->base, hotplugabble_address_space_size, diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c index f395d24592..573b022e1e 100644 --- a/hw/ide/qdev.c +++ b/hw/ide/qdev.c @@ -188,7 +188,6 @@ static void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp) return; } - blkconf_serial(&dev->conf, &dev->serial); if (kind != IDE_CD) { if (!blkconf_geometry(&dev->conf, &dev->chs_trans, 65535, 16, 255, errp)) { diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c index 34dc84ae81..c1b35fc1ee 100644 --- a/hw/intc/arm_gic.c +++ b/hw/intc/arm_gic.c @@ -61,6 +61,11 @@ static inline int gic_get_current_cpu(GICState *s) return 0; } +static inline int gic_get_current_vcpu(GICState *s) +{ + return gic_get_current_cpu(s) + GIC_NCPU; +} + /* Return true if this GIC config has interrupt groups, which is * true if we're a GICv2, or a GICv1 with the security extensions. */ @@ -69,97 +74,288 @@ static inline bool gic_has_groups(GICState *s) return s->revision == 2 || s->security_extn; } +static inline bool gic_cpu_ns_access(GICState *s, int cpu, MemTxAttrs attrs) +{ + return !gic_is_vcpu(cpu) && s->security_extn && !attrs.secure; +} + +static inline void gic_get_best_irq(GICState *s, int cpu, + int *best_irq, int *best_prio, int *group) +{ + int irq; + int cm = 1 << cpu; + + *best_irq = 1023; + *best_prio = 0x100; + + for (irq = 0; irq < s->num_irq; irq++) { + if (GIC_DIST_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) && + (!GIC_DIST_TEST_ACTIVE(irq, cm)) && + (irq < GIC_INTERNAL || GIC_DIST_TARGET(irq) & cm)) { + if (GIC_DIST_GET_PRIORITY(irq, cpu) < *best_prio) { + *best_prio = GIC_DIST_GET_PRIORITY(irq, cpu); + *best_irq = irq; + } + } + } + + if (*best_irq < 1023) { + *group = GIC_DIST_TEST_GROUP(*best_irq, cm); + } +} + +static inline void gic_get_best_virq(GICState *s, int cpu, + int *best_irq, int *best_prio, int *group) +{ + int lr_idx = 0; + + *best_irq = 1023; + *best_prio = 0x100; + + for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) { + uint32_t lr_entry = s->h_lr[lr_idx][cpu]; + int state = GICH_LR_STATE(lr_entry); + + if (state == GICH_LR_STATE_PENDING) { + int prio = GICH_LR_PRIORITY(lr_entry); + + if (prio < *best_prio) { + *best_prio = prio; + *best_irq = GICH_LR_VIRT_ID(lr_entry); + *group = GICH_LR_GROUP(lr_entry); + } + } + } +} + +/* Return true if IRQ signaling is enabled for the given cpu and at least one + * of the given groups: + * - in the non-virt case, the distributor must be enabled for one of the + * given groups + * - in the virt case, the virtual interface must be enabled. + * - in all cases, the (v)CPU interface must be enabled for one of the given + * groups. + */ +static inline bool gic_irq_signaling_enabled(GICState *s, int cpu, bool virt, + int group_mask) +{ + if (!virt && !(s->ctlr & group_mask)) { + return false; + } + + if (virt && !(s->h_hcr[cpu] & R_GICH_HCR_EN_MASK)) { + return false; + } + + if (!(s->cpu_ctlr[cpu] & group_mask)) { + return false; + } + + return true; +} + /* TODO: Many places that call this routine could be optimized. */ /* Update interrupt status after enabled or pending bits have been changed. */ -void gic_update(GICState *s) +static inline void gic_update_internal(GICState *s, bool virt) { int best_irq; int best_prio; - int irq; int irq_level, fiq_level; - int cpu; - int cm; + int cpu, cpu_iface; + int group = 0; + qemu_irq *irq_lines = virt ? s->parent_virq : s->parent_irq; + qemu_irq *fiq_lines = virt ? 
s->parent_vfiq : s->parent_fiq; for (cpu = 0; cpu < s->num_cpu; cpu++) { - cm = 1 << cpu; - s->current_pending[cpu] = 1023; - if (!(s->ctlr & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1)) - || !(s->cpu_ctlr[cpu] & (GICC_CTLR_EN_GRP0 | GICC_CTLR_EN_GRP1))) { - qemu_irq_lower(s->parent_irq[cpu]); - qemu_irq_lower(s->parent_fiq[cpu]); + cpu_iface = virt ? (cpu + GIC_NCPU) : cpu; + + s->current_pending[cpu_iface] = 1023; + if (!gic_irq_signaling_enabled(s, cpu, virt, + GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1)) { + qemu_irq_lower(irq_lines[cpu]); + qemu_irq_lower(fiq_lines[cpu]); continue; } - best_prio = 0x100; - best_irq = 1023; - for (irq = 0; irq < s->num_irq; irq++) { - if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) && - (!GIC_TEST_ACTIVE(irq, cm)) && - (irq < GIC_INTERNAL || GIC_TARGET(irq) & cm)) { - if (GIC_GET_PRIORITY(irq, cpu) < best_prio) { - best_prio = GIC_GET_PRIORITY(irq, cpu); - best_irq = irq; - } - } + + if (virt) { + gic_get_best_virq(s, cpu, &best_irq, &best_prio, &group); + } else { + gic_get_best_irq(s, cpu, &best_irq, &best_prio, &group); } if (best_irq != 1023) { - trace_gic_update_bestirq(cpu, best_irq, best_prio, - s->priority_mask[cpu], s->running_priority[cpu]); + trace_gic_update_bestirq(virt ? "vcpu" : "cpu", cpu, + best_irq, best_prio, + s->priority_mask[cpu_iface], + s->running_priority[cpu_iface]); } irq_level = fiq_level = 0; - if (best_prio < s->priority_mask[cpu]) { - s->current_pending[cpu] = best_irq; - if (best_prio < s->running_priority[cpu]) { - int group = GIC_TEST_GROUP(best_irq, cm); - - if (extract32(s->ctlr, group, 1) && - extract32(s->cpu_ctlr[cpu], group, 1)) { - if (group == 0 && s->cpu_ctlr[cpu] & GICC_CTLR_FIQ_EN) { + if (best_prio < s->priority_mask[cpu_iface]) { + s->current_pending[cpu_iface] = best_irq; + if (best_prio < s->running_priority[cpu_iface]) { + if (gic_irq_signaling_enabled(s, cpu, virt, 1 << group)) { + if (group == 0 && + s->cpu_ctlr[cpu_iface] & GICC_CTLR_FIQ_EN) { DPRINTF("Raised pending FIQ %d (cpu %d)\n", - best_irq, cpu); + best_irq, cpu_iface); fiq_level = 1; - trace_gic_update_set_irq(cpu, "fiq", fiq_level); + trace_gic_update_set_irq(cpu, virt ? "vfiq" : "fiq", + fiq_level); } else { DPRINTF("Raised pending IRQ %d (cpu %d)\n", - best_irq, cpu); + best_irq, cpu_iface); irq_level = 1; - trace_gic_update_set_irq(cpu, "irq", irq_level); + trace_gic_update_set_irq(cpu, virt ? "virq" : "irq", + irq_level); } } } } - qemu_set_irq(s->parent_irq[cpu], irq_level); - qemu_set_irq(s->parent_fiq[cpu], fiq_level); + qemu_set_irq(irq_lines[cpu], irq_level); + qemu_set_irq(fiq_lines[cpu], fiq_level); } } -void gic_set_pending_private(GICState *s, int cpu, int irq) +static void gic_update(GICState *s) { - int cm = 1 << cpu; + gic_update_internal(s, false); +} - if (gic_test_pending(s, irq, cm)) { - return; +/* Return true if this LR is empty, i.e. the corresponding bit + * in ELRSR is set. + */ +static inline bool gic_lr_entry_is_free(uint32_t entry) +{ + return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID) + && (GICH_LR_HW(entry) || !GICH_LR_EOI(entry)); +} + +/* Return true if this LR should trigger an EOI maintenance interrupt, i.e. the + * corrsponding bit in EISR is set. 
+ */ +static inline bool gic_lr_entry_is_eoi(uint32_t entry) +{ + return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID) + && !GICH_LR_HW(entry) && GICH_LR_EOI(entry); +} + +static inline void gic_extract_lr_info(GICState *s, int cpu, + int *num_eoi, int *num_valid, int *num_pending) +{ + int lr_idx; + + *num_eoi = 0; + *num_valid = 0; + *num_pending = 0; + + for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) { + uint32_t *entry = &s->h_lr[lr_idx][cpu]; + + if (gic_lr_entry_is_eoi(*entry)) { + (*num_eoi)++; + } + + if (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID) { + (*num_valid)++; + } + + if (GICH_LR_STATE(*entry) == GICH_LR_STATE_PENDING) { + (*num_pending)++; + } } +} - DPRINTF("Set %d pending cpu %d\n", irq, cpu); - GIC_SET_PENDING(irq, cm); - gic_update(s); +static void gic_compute_misr(GICState *s, int cpu) +{ + uint32_t value = 0; + int vcpu = cpu + GIC_NCPU; + + int num_eoi, num_valid, num_pending; + + gic_extract_lr_info(s, cpu, &num_eoi, &num_valid, &num_pending); + + /* EOI */ + if (num_eoi) { + value |= R_GICH_MISR_EOI_MASK; + } + + /* U: true if only 0 or 1 LR entry is valid */ + if ((s->h_hcr[cpu] & R_GICH_HCR_UIE_MASK) && (num_valid < 2)) { + value |= R_GICH_MISR_U_MASK; + } + + /* LRENP: EOICount is not 0 */ + if ((s->h_hcr[cpu] & R_GICH_HCR_LRENPIE_MASK) && + ((s->h_hcr[cpu] & R_GICH_HCR_EOICount_MASK) != 0)) { + value |= R_GICH_MISR_LRENP_MASK; + } + + /* NP: no pending interrupts */ + if ((s->h_hcr[cpu] & R_GICH_HCR_NPIE_MASK) && (num_pending == 0)) { + value |= R_GICH_MISR_NP_MASK; + } + + /* VGrp0E: group0 virq signaling enabled */ + if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0EIE_MASK) && + (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) { + value |= R_GICH_MISR_VGrp0E_MASK; + } + + /* VGrp0D: group0 virq signaling disabled */ + if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0DIE_MASK) && + !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) { + value |= R_GICH_MISR_VGrp0D_MASK; + } + + /* VGrp1E: group1 virq signaling enabled */ + if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1EIE_MASK) && + (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) { + value |= R_GICH_MISR_VGrp1E_MASK; + } + + /* VGrp1D: group1 virq signaling disabled */ + if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1DIE_MASK) && + !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) { + value |= R_GICH_MISR_VGrp1D_MASK; + } + + s->h_misr[cpu] = value; +} + +static void gic_update_maintenance(GICState *s) +{ + int cpu = 0; + int maint_level; + + for (cpu = 0; cpu < s->num_cpu; cpu++) { + gic_compute_misr(s, cpu); + maint_level = (s->h_hcr[cpu] & R_GICH_HCR_EN_MASK) && s->h_misr[cpu]; + + trace_gic_update_maintenance_irq(cpu, maint_level); + qemu_set_irq(s->maintenance_irq[cpu], maint_level); + } +} + +static void gic_update_virt(GICState *s) +{ + gic_update_internal(s, true); + gic_update_maintenance(s); } static void gic_set_irq_11mpcore(GICState *s, int irq, int level, int cm, int target) { if (level) { - GIC_SET_LEVEL(irq, cm); - if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) { + GIC_DIST_SET_LEVEL(irq, cm); + if (GIC_DIST_TEST_EDGE_TRIGGER(irq) || GIC_DIST_TEST_ENABLED(irq, cm)) { DPRINTF("Set %d pending mask %x\n", irq, target); - GIC_SET_PENDING(irq, target); + GIC_DIST_SET_PENDING(irq, target); } } else { - GIC_CLEAR_LEVEL(irq, cm); + GIC_DIST_CLEAR_LEVEL(irq, cm); } } @@ -167,13 +363,13 @@ static void gic_set_irq_generic(GICState *s, int irq, int level, int cm, int target) { if (level) { - GIC_SET_LEVEL(irq, cm); + GIC_DIST_SET_LEVEL(irq, cm); DPRINTF("Set %d pending mask %x\n", irq, target); - if (GIC_TEST_EDGE_TRIGGER(irq)) { - GIC_SET_PENDING(irq, 
target); + if (GIC_DIST_TEST_EDGE_TRIGGER(irq)) { + GIC_DIST_SET_PENDING(irq, target); } } else { - GIC_CLEAR_LEVEL(irq, cm); + GIC_DIST_CLEAR_LEVEL(irq, cm); } } @@ -192,7 +388,7 @@ static void gic_set_irq(void *opaque, int irq, int level) /* The first external input line is internal interrupt 32. */ cm = ALL_CPU_MASK; irq += GIC_INTERNAL; - target = GIC_TARGET(irq); + target = GIC_DIST_TARGET(irq); } else { int cpu; irq -= (s->num_irq - GIC_INTERNAL); @@ -204,7 +400,7 @@ static void gic_set_irq(void *opaque, int irq, int level) assert(irq >= GIC_NR_SGIS); - if (level == GIC_TEST_LEVEL(irq, cm)) { + if (level == GIC_DIST_TEST_LEVEL(irq, cm)) { return; } @@ -224,11 +420,12 @@ static uint16_t gic_get_current_pending_irq(GICState *s, int cpu, uint16_t pending_irq = s->current_pending[cpu]; if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) { - int group = GIC_TEST_GROUP(pending_irq, (1 << cpu)); + int group = gic_test_group(s, pending_irq, cpu); + /* On a GIC without the security extensions, reading this register * behaves in the same way as a secure access to a GIC with them. */ - bool secure = !s->security_extn || attrs.secure; + bool secure = !gic_cpu_ns_access(s, cpu, attrs); if (group == 0 && !secure) { /* Group0 interrupts hidden from Non-secure access */ @@ -255,7 +452,7 @@ static int gic_get_group_priority(GICState *s, int cpu, int irq) if (gic_has_groups(s) && !(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) && - GIC_TEST_GROUP(irq, (1 << cpu))) { + gic_test_group(s, irq, cpu)) { bpr = s->abpr[cpu] - 1; assert(bpr >= 0); } else { @@ -268,7 +465,7 @@ static int gic_get_group_priority(GICState *s, int cpu, int irq) */ mask = ~0U << ((bpr & 7) + 1); - return GIC_GET_PRIORITY(irq, cpu) & mask; + return gic_get_priority(s, irq, cpu) & mask; } static void gic_activate_irq(GICState *s, int cpu, int irq) @@ -277,18 +474,25 @@ static void gic_activate_irq(GICState *s, int cpu, int irq) * and update the running priority. */ int prio = gic_get_group_priority(s, cpu, irq); - int preemption_level = prio >> (GIC_MIN_BPR + 1); + int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR; + int preemption_level = prio >> (min_bpr + 1); int regno = preemption_level / 32; int bitno = preemption_level % 32; + uint32_t *papr = NULL; - if (gic_has_groups(s) && GIC_TEST_GROUP(irq, (1 << cpu))) { - s->nsapr[regno][cpu] |= (1 << bitno); + if (gic_is_vcpu(cpu)) { + assert(regno == 0); + papr = &s->h_apr[gic_get_vcpu_real_id(cpu)]; + } else if (gic_has_groups(s) && gic_test_group(s, irq, cpu)) { + papr = &s->nsapr[regno][cpu]; } else { - s->apr[regno][cpu] |= (1 << bitno); + papr = &s->apr[regno][cpu]; } + *papr |= (1 << bitno); + s->running_priority[cpu] = prio; - GIC_SET_ACTIVE(irq, 1 << cpu); + gic_set_active(s, irq, cpu); } static int gic_get_prio_from_apr_bits(GICState *s, int cpu) @@ -297,6 +501,16 @@ static int gic_get_prio_from_apr_bits(GICState *s, int cpu) * on the set bits in the Active Priority Registers. */ int i; + + if (gic_is_vcpu(cpu)) { + uint32_t apr = s->h_apr[gic_get_vcpu_real_id(cpu)]; + if (apr) { + return ctz32(apr) << (GIC_VIRT_MIN_BPR + 1); + } else { + return 0x100; + } + } + for (i = 0; i < GIC_NR_APRS; i++) { uint32_t apr = s->apr[i][cpu] | s->nsapr[i][cpu]; if (!apr) { @@ -325,83 +539,111 @@ static void gic_drop_prio(GICState *s, int cpu, int group) * running priority will be wrong, so interrupts that should preempt * might not do so, and interrupts that should not preempt might do so. 
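As a worked example for the binary-point handling in gic_get_group_priority() a few hunks above: with BPR = n, the low n + 1 bits of a priority value are subpriority and play no part in preemption. A standalone sketch with illustrative values:

#include <stdio.h>

static unsigned group_priority(unsigned prio, int bpr)
{
    unsigned mask = ~0U << ((bpr & 7) + 1);   /* same mask as above */
    return prio & mask;
}

int main(void)
{
    /* With BPR = 2, 0x40 and 0x47 share a group priority of 0x40... */
    printf("0x%02x 0x%02x\n", group_priority(0x40, 2), group_priority(0x47, 2));
    /* ...while 0x48 falls into the next, lower-priority group. */
    printf("0x%02x\n", group_priority(0x48, 2));
    return 0;
}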
*/ - int i; + if (gic_is_vcpu(cpu)) { + int rcpu = gic_get_vcpu_real_id(cpu); - for (i = 0; i < GIC_NR_APRS; i++) { - uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu]; - if (!*papr) { - continue; + if (s->h_apr[rcpu]) { + /* Clear lowest set bit */ + s->h_apr[rcpu] &= s->h_apr[rcpu] - 1; + } + } else { + int i; + + for (i = 0; i < GIC_NR_APRS; i++) { + uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu]; + if (!*papr) { + continue; + } + /* Clear lowest set bit */ + *papr &= *papr - 1; + break; } - /* Clear lowest set bit */ - *papr &= *papr - 1; - break; } s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu); } +static inline uint32_t gic_clear_pending_sgi(GICState *s, int irq, int cpu) +{ + int src; + uint32_t ret; + + if (!gic_is_vcpu(cpu)) { + /* Lookup the source CPU for the SGI and clear this in the + * sgi_pending map. Return the src and clear the overall pending + * state on this CPU if the SGI is not pending from any CPUs. + */ + assert(s->sgi_pending[irq][cpu] != 0); + src = ctz32(s->sgi_pending[irq][cpu]); + s->sgi_pending[irq][cpu] &= ~(1 << src); + if (s->sgi_pending[irq][cpu] == 0) { + gic_clear_pending(s, irq, cpu); + } + ret = irq | ((src & 0x7) << 10); + } else { + uint32_t *lr_entry = gic_get_lr_entry(s, irq, cpu); + src = GICH_LR_CPUID(*lr_entry); + + gic_clear_pending(s, irq, cpu); + ret = irq | (src << 10); + } + + return ret; +} + uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs) { - int ret, irq, src; - int cm = 1 << cpu; + int ret, irq; /* gic_get_current_pending_irq() will return 1022 or 1023 appropriately * for the case where this GIC supports grouping and the pending interrupt * is in the wrong group. */ irq = gic_get_current_pending_irq(s, cpu, attrs); - trace_gic_acknowledge_irq(cpu, irq); + trace_gic_acknowledge_irq(gic_is_vcpu(cpu) ? "vcpu" : "cpu", + gic_get_vcpu_real_id(cpu), irq); if (irq >= GIC_MAXIRQ) { DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq); return irq; } - if (GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) { + if (gic_get_priority(s, irq, cpu) >= s->running_priority[cpu]) { DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq); return 1023; } + gic_activate_irq(s, cpu, irq); + if (s->revision == REV_11MPCORE) { /* Clear pending flags for both level and edge triggered interrupts. * Level triggered IRQs will be reasserted once they become inactive. */ - GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm); + gic_clear_pending(s, irq, cpu); ret = irq; } else { if (irq < GIC_NR_SGIS) { - /* Lookup the source CPU for the SGI and clear this in the - * sgi_pending map. Return the src and clear the overall pending - * state on this CPU if the SGI is not pending from any CPUs. - */ - assert(s->sgi_pending[irq][cpu] != 0); - src = ctz32(s->sgi_pending[irq][cpu]); - s->sgi_pending[irq][cpu] &= ~(1 << src); - if (s->sgi_pending[irq][cpu] == 0) { - GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm); - } - ret = irq | ((src & 0x7) << 10); + ret = gic_clear_pending_sgi(s, irq, cpu); } else { - /* Clear pending state for both level and edge triggered - * interrupts. (level triggered interrupts with an active line - * remain pending, see gic_test_pending) - */ - GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? 
ALL_CPU_MASK : cm); + gic_clear_pending(s, irq, cpu); ret = irq; } } - gic_activate_irq(s, cpu, irq); - gic_update(s); + if (gic_is_vcpu(cpu)) { + gic_update_virt(s); + } else { + gic_update(s); + } DPRINTF("ACK %d\n", irq); return ret; } -void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val, +void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val, MemTxAttrs attrs) { if (s->security_extn && !attrs.secure) { - if (!GIC_TEST_GROUP(irq, (1 << cpu))) { + if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) { return; /* Ignore Non-secure access of Group0 IRQ */ } val = 0x80 | (val >> 1); /* Non-secure view */ @@ -414,13 +656,13 @@ void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val, } } -static uint32_t gic_get_priority(GICState *s, int cpu, int irq, +static uint32_t gic_dist_get_priority(GICState *s, int cpu, int irq, MemTxAttrs attrs) { - uint32_t prio = GIC_GET_PRIORITY(irq, cpu); + uint32_t prio = GIC_DIST_GET_PRIORITY(irq, cpu); if (s->security_extn && !attrs.secure) { - if (!GIC_TEST_GROUP(irq, (1 << cpu))) { + if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) { return 0; /* Non-secure access cannot read priority of Group0 IRQ */ } prio = (prio << 1) & 0xff; /* Non-secure view */ @@ -431,7 +673,7 @@ static uint32_t gic_get_priority(GICState *s, int cpu, int irq, static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask, MemTxAttrs attrs) { - if (s->security_extn && !attrs.secure) { + if (gic_cpu_ns_access(s, cpu, attrs)) { if (s->priority_mask[cpu] & 0x80) { /* Priority Mask in upper half */ pmask = 0x80 | (pmask >> 1); @@ -447,7 +689,7 @@ static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs) { uint32_t pmask = s->priority_mask[cpu]; - if (s->security_extn && !attrs.secure) { + if (gic_cpu_ns_access(s, cpu, attrs)) { if (pmask & 0x80) { /* Priority Mask in upper half, return Non-secure view */ pmask = (pmask << 1) & 0xff; @@ -463,7 +705,7 @@ static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs) { uint32_t ret = s->cpu_ctlr[cpu]; - if (s->security_extn && !attrs.secure) { + if (gic_cpu_ns_access(s, cpu, attrs)) { /* Construct the NS banked view of GICC_CTLR from the correct * bits of the S banked view. We don't need to move the bypass * control bits because we don't implement that (IMPDEF) part @@ -479,7 +721,7 @@ static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value, { uint32_t mask; - if (s->security_extn && !attrs.secure) { + if (gic_cpu_ns_access(s, cpu, attrs)) { /* The NS view can only write certain bits in the register; * the rest are unchanged */ @@ -510,7 +752,7 @@ static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs) return 0xff; } - if (s->security_extn && !attrs.secure) { + if (gic_cpu_ns_access(s, cpu, attrs)) { if (s->running_priority[cpu] & 0x80) { /* Running priority in upper half of range: return the Non-secure * view of the priority. 
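The Non-secure priority aliasing used by gic_dist_set_priority() and gic_dist_get_priority() above can be exercised in isolation; this is a standalone illustration of the arithmetic, not the QEMU helpers themselves.

#include <assert.h>

static unsigned char ns_write_to_stored(unsigned char ns_val)
{
    return 0x80 | (ns_val >> 1);    /* as in gic_dist_set_priority() */
}

static unsigned char stored_to_ns_read(unsigned char s_val)
{
    return (s_val << 1) & 0xff;     /* as in gic_dist_get_priority() */
}

int main(void)
{
    /* A Non-secure write of 0x40 is stored as 0xa0 (upper half of the
     * range) and reads back as 0x40 through the Non-secure view. */
    unsigned char stored = ns_write_to_stored(0x40);
    assert(stored == 0xa0);
    assert(stored_to_ns_read(stored) == 0x40);
    return 0;
}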
@@ -534,7 +776,7 @@ static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs) /* Before GICv2 prio-drop and deactivate are not separable */ return false; } - if (s->security_extn && !attrs.secure) { + if (gic_cpu_ns_access(s, cpu, attrs)) { return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE_NS; } return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE; @@ -542,23 +784,21 @@ static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs) static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) { - int cm = 1 << cpu; int group; - if (irq >= s->num_irq) { + if (irq >= GIC_MAXIRQ || (!gic_is_vcpu(cpu) && irq >= s->num_irq)) { /* * This handles two cases: * 1. If software writes the ID of a spurious interrupt [ie 1023] * to the GICC_DIR, the GIC ignores that write. * 2. If software writes the number of a non-existent interrupt * this must be a subcase of "value written is not an active interrupt" - * and so this is UNPREDICTABLE. We choose to ignore it. + * and so this is UNPREDICTABLE. We choose to ignore it. For vCPUs, + * all IRQs potentially exist, so this limit does not apply. */ return; } - group = gic_has_groups(s) && GIC_TEST_GROUP(irq, cm); - if (!gic_eoi_split(s, cpu, attrs)) { /* This is UNPREDICTABLE; we choose to ignore it */ qemu_log_mask(LOG_GUEST_ERROR, @@ -566,20 +806,70 @@ static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) return; } - if (s->security_extn && !attrs.secure && !group) { + if (gic_is_vcpu(cpu) && !gic_virq_is_valid(s, irq, cpu)) { + /* This vIRQ does not have an LR entry which is either active or + * pending and active. Increment EOICount and ignore the write. + */ + int rcpu = gic_get_vcpu_real_id(cpu); + s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT; + + /* Update the virtual interface in case a maintenance interrupt should + * be raised. + */ + gic_update_virt(s); + return; + } + + group = gic_has_groups(s) && gic_test_group(s, irq, cpu); + + if (gic_cpu_ns_access(s, cpu, attrs) && !group) { DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq); return; } - GIC_CLEAR_ACTIVE(irq, cm); + gic_clear_active(s, irq, cpu); } -void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) +static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) { int cm = 1 << cpu; int group; DPRINTF("EOI %d\n", irq); + if (gic_is_vcpu(cpu)) { + /* The call to gic_prio_drop() will clear a bit in GICH_APR iff the + * running prio is < 0x100. + */ + bool prio_drop = s->running_priority[cpu] < 0x100; + + if (irq >= GIC_MAXIRQ) { + /* Ignore spurious interrupt */ + return; + } + + gic_drop_prio(s, cpu, 0); + + if (!gic_eoi_split(s, cpu, attrs)) { + bool valid = gic_virq_is_valid(s, irq, cpu); + if (prio_drop && !valid) { + /* We are in a situation where: + * - V_CTRL.EOIMode is false (no EOI split), + * - The call to gic_drop_prio() cleared a bit in GICH_APR, + * - This vIRQ does not have an LR entry which is either + * active or pending and active. + * In that case, we must increment EOICount. + */ + int rcpu = gic_get_vcpu_real_id(cpu); + s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT; + } else if (valid) { + gic_clear_active(s, irq, cpu); + } + } + + gic_update_virt(s); + return; + } + if (irq >= s->num_irq) { /* This handles two cases: * 1. If software writes the ID of a spurious interrupt [ie 1023] @@ -598,16 +888,17 @@ void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) if (s->revision == REV_11MPCORE) { /* Mark level triggered interrupts as pending if they are still raised. 
*/ - if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm) - && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) { + if (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_ENABLED(irq, cm) + && GIC_DIST_TEST_LEVEL(irq, cm) + && (GIC_DIST_TARGET(irq) & cm) != 0) { DPRINTF("Set %d pending mask %x\n", irq, cm); - GIC_SET_PENDING(irq, cm); + GIC_DIST_SET_PENDING(irq, cm); } } - group = gic_has_groups(s) && GIC_TEST_GROUP(irq, cm); + group = gic_has_groups(s) && gic_test_group(s, irq, cpu); - if (s->security_extn && !attrs.secure && !group) { + if (gic_cpu_ns_access(s, cpu, attrs) && !group) { DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq); return; } @@ -621,7 +912,7 @@ void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) /* In GICv2 the guest can choose to split priority-drop and deactivate */ if (!gic_eoi_split(s, cpu, attrs)) { - GIC_CLEAR_ACTIVE(irq, cm); + gic_clear_active(s, irq, cpu); } gic_update(s); } @@ -669,7 +960,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) goto bad_reg; } for (i = 0; i < 8; i++) { - if (GIC_TEST_GROUP(irq + i, cm)) { + if (GIC_DIST_TEST_GROUP(irq + i, cm)) { res |= (1 << i); } } @@ -689,11 +980,11 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) res = 0; for (i = 0; i < 8; i++) { if (s->security_extn && !attrs.secure && - !GIC_TEST_GROUP(irq + i, 1 << cpu)) { + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { continue; /* Ignore Non-secure access of Group0 IRQ */ } - if (GIC_TEST_ENABLED(irq + i, cm)) { + if (GIC_DIST_TEST_ENABLED(irq + i, cm)) { res |= (1 << i); } } @@ -710,7 +1001,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; for (i = 0; i < 8; i++) { if (s->security_extn && !attrs.secure && - !GIC_TEST_GROUP(irq + i, 1 << cpu)) { + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { continue; /* Ignore Non-secure access of Group0 IRQ */ } @@ -719,19 +1010,27 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) } } } else if (offset < 0x400) { - /* Interrupt Active. */ - irq = (offset - 0x300) * 8 + GIC_BASE_IRQ; + /* Interrupt Set/Clear Active. */ + if (offset < 0x380) { + irq = (offset - 0x300) * 8; + } else if (s->revision == 2) { + irq = (offset - 0x380) * 8; + } else { + goto bad_reg; + } + + irq += GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; for (i = 0; i < 8; i++) { if (s->security_extn && !attrs.secure && - !GIC_TEST_GROUP(irq + i, 1 << cpu)) { + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { continue; /* Ignore Non-secure access of Group0 IRQ */ } - if (GIC_TEST_ACTIVE(irq + i, mask)) { + if (GIC_DIST_TEST_ACTIVE(irq + i, mask)) { res |= (1 << i); } } @@ -740,7 +1039,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) irq = (offset - 0x400) + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; - res = gic_get_priority(s, cpu, irq, attrs); + res = gic_dist_get_priority(s, cpu, irq, attrs); } else if (offset < 0xc00) { /* Interrupt CPU Target. 
*/ if (s->num_cpu == 1 && s->revision != REV_11MPCORE) { @@ -756,7 +1055,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) } else if (irq < GIC_INTERNAL) { res = cm; } else { - res = GIC_TARGET(irq); + res = GIC_DIST_TARGET(irq); } } } else if (offset < 0xf00) { @@ -767,14 +1066,16 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) res = 0; for (i = 0; i < 4; i++) { if (s->security_extn && !attrs.secure && - !GIC_TEST_GROUP(irq + i, 1 << cpu)) { + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { continue; /* Ignore Non-secure access of Group0 IRQ */ } - if (GIC_TEST_MODEL(irq + i)) + if (GIC_DIST_TEST_MODEL(irq + i)) { res |= (1 << (i * 2)); - if (GIC_TEST_EDGE_TRIGGER(irq + i)) + } + if (GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) { res |= (2 << (i * 2)); + } } } else if (offset < 0xf10) { goto bad_reg; @@ -792,7 +1093,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) } if (s->security_extn && !attrs.secure && - !GIC_TEST_GROUP(irq, 1 << cpu)) { + !GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { res = 0; /* Ignore Non-secure access of Group0 IRQ */ } else { res = s->sgi_pending[irq][cpu]; @@ -833,20 +1134,23 @@ static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data, switch (size) { case 1: *data = gic_dist_readb(opaque, offset, attrs); - return MEMTX_OK; + break; case 2: *data = gic_dist_readb(opaque, offset, attrs); *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8; - return MEMTX_OK; + break; case 4: *data = gic_dist_readb(opaque, offset, attrs); *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8; *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16; *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24; - return MEMTX_OK; + break; default: return MEMTX_ERROR; } + + trace_gic_dist_read(offset, size, *data); + return MEMTX_OK; } static void gic_dist_writeb(void *opaque, hwaddr offset, @@ -888,10 +1192,10 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; if (value & (1 << i)) { /* Group1 (Non-secure) */ - GIC_SET_GROUP(irq + i, cm); + GIC_DIST_SET_GROUP(irq + i, cm); } else { /* Group0 (Secure) */ - GIC_CLEAR_GROUP(irq + i, cm); + GIC_DIST_CLEAR_GROUP(irq + i, cm); } } } @@ -910,25 +1214,26 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, for (i = 0; i < 8; i++) { if (value & (1 << i)) { int mask = - (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq + i); + (irq < GIC_INTERNAL) ? (1 << cpu) + : GIC_DIST_TARGET(irq + i); int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; if (s->security_extn && !attrs.secure && - !GIC_TEST_GROUP(irq + i, 1 << cpu)) { + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { continue; /* Ignore Non-secure access of Group0 IRQ */ } - if (!GIC_TEST_ENABLED(irq + i, cm)) { + if (!GIC_DIST_TEST_ENABLED(irq + i, cm)) { DPRINTF("Enabled IRQ %d\n", irq + i); trace_gic_enable_irq(irq + i); } - GIC_SET_ENABLED(irq + i, cm); + GIC_DIST_SET_ENABLED(irq + i, cm); /* If a raised level triggered IRQ enabled then mark is as pending. */ - if (GIC_TEST_LEVEL(irq + i, mask) - && !GIC_TEST_EDGE_TRIGGER(irq + i)) { + if (GIC_DIST_TEST_LEVEL(irq + i, mask) + && !GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) { DPRINTF("Set %d pending mask %x\n", irq + i, mask); - GIC_SET_PENDING(irq + i, mask); + GIC_DIST_SET_PENDING(irq + i, mask); } } } @@ -946,15 +1251,15 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, int cm = (irq < GIC_INTERNAL) ? 
(1 << cpu) : ALL_CPU_MASK; if (s->security_extn && !attrs.secure && - !GIC_TEST_GROUP(irq + i, 1 << cpu)) { + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { continue; /* Ignore Non-secure access of Group0 IRQ */ } - if (GIC_TEST_ENABLED(irq + i, cm)) { + if (GIC_DIST_TEST_ENABLED(irq + i, cm)) { DPRINTF("Disabled IRQ %d\n", irq + i); trace_gic_disable_irq(irq + i); } - GIC_CLEAR_ENABLED(irq + i, cm); + GIC_DIST_CLEAR_ENABLED(irq + i, cm); } } } else if (offset < 0x280) { @@ -969,11 +1274,11 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, for (i = 0; i < 8; i++) { if (value & (1 << i)) { if (s->security_extn && !attrs.secure && - !GIC_TEST_GROUP(irq + i, 1 << cpu)) { + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { continue; /* Ignore Non-secure access of Group0 IRQ */ } - GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i)); + GIC_DIST_SET_PENDING(irq + i, GIC_DIST_TARGET(irq + i)); } } } else if (offset < 0x300) { @@ -987,7 +1292,7 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, for (i = 0; i < 8; i++) { if (s->security_extn && !attrs.secure && - !GIC_TEST_GROUP(irq + i, 1 << cpu)) { + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { continue; /* Ignore Non-secure access of Group0 IRQ */ } @@ -995,18 +1300,63 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, for per-CPU interrupts. It's unclear whether this is the corect behavior. */ if (value & (1 << i)) { - GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK); + GIC_DIST_CLEAR_PENDING(irq + i, ALL_CPU_MASK); + } + } + } else if (offset < 0x380) { + /* Interrupt Set Active. */ + if (s->revision != 2) { + goto bad_reg; + } + + irq = (offset - 0x300) * 8 + GIC_BASE_IRQ; + if (irq >= s->num_irq) { + goto bad_reg; + } + + /* This register is banked per-cpu for PPIs */ + int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK; + + for (i = 0; i < 8; i++) { + if (s->security_extn && !attrs.secure && + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { + continue; /* Ignore Non-secure access of Group0 IRQ */ + } + + if (value & (1 << i)) { + GIC_DIST_SET_ACTIVE(irq + i, cm); } } } else if (offset < 0x400) { - /* Interrupt Active. */ - goto bad_reg; + /* Interrupt Clear Active. */ + if (s->revision != 2) { + goto bad_reg; + } + + irq = (offset - 0x380) * 8 + GIC_BASE_IRQ; + if (irq >= s->num_irq) { + goto bad_reg; + } + + /* This register is banked per-cpu for PPIs */ + int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK; + + for (i = 0; i < 8; i++) { + if (s->security_extn && !attrs.secure && + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { + continue; /* Ignore Non-secure access of Group0 IRQ */ + } + + if (value & (1 << i)) { + GIC_DIST_CLEAR_ACTIVE(irq + i, cm); + } + } } else if (offset < 0x800) { /* Interrupt Priority. */ irq = (offset - 0x400) + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; - gic_set_priority(s, cpu, irq, value, attrs); + gic_dist_set_priority(s, cpu, irq, value, attrs); } else if (offset < 0xc00) { /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the * annoying exception of the 11MPCore's GIC. 
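The new GICD_ISACTIVERn/ICACTIVERn handling above uses the same byte-wise decode as the other distributor bitmaps: each set bit in the written byte selects one interrupt, and the per-CPU banking mask only applies below GIC_INTERNAL. A standalone sketch with local stand-ins for the real state macros:

#include <stdio.h>

#define GIC_INTERNAL 32     /* stand-in for the real constant */
#define ALL_CPU_MASK 0xff   /* stand-in: 8 possible CPUs */

static void set_active_byte(unsigned char active[], unsigned irq_base,
                            unsigned char value, int cpu)
{
    /* SGIs/PPIs are banked per CPU; SPIs use the all-CPUs mask. */
    int cm = (irq_base < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

    for (int i = 0; i < 8; i++) {
        if (value & (1 << i)) {
            active[irq_base + i] |= cm;   /* plays the GIC_DIST_SET_ACTIVE role */
        }
    }
}

int main(void)
{
    unsigned char active[96] = { 0 };

    set_active_byte(active, 64, 0x05, 0);   /* SPIs 64 and 66, all CPUs */
    set_active_byte(active, 16, 0x01, 1);   /* PPI 16, banked for CPU 1 */
    printf("irq64=%02x irq66=%02x irq16=%02x\n",
           active[64], active[66], active[16]);
    return 0;
}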
@@ -1032,21 +1382,21 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, value |= 0xaa; for (i = 0; i < 4; i++) { if (s->security_extn && !attrs.secure && - !GIC_TEST_GROUP(irq + i, 1 << cpu)) { + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { continue; /* Ignore Non-secure access of Group0 IRQ */ } if (s->revision == REV_11MPCORE) { if (value & (1 << (i * 2))) { - GIC_SET_MODEL(irq + i); + GIC_DIST_SET_MODEL(irq + i); } else { - GIC_CLEAR_MODEL(irq + i); + GIC_DIST_CLEAR_MODEL(irq + i); } } if (value & (2 << (i * 2))) { - GIC_SET_EDGE_TRIGGER(irq + i); + GIC_DIST_SET_EDGE_TRIGGER(irq + i); } else { - GIC_CLEAR_EDGE_TRIGGER(irq + i); + GIC_DIST_CLEAR_EDGE_TRIGGER(irq + i); } } } else if (offset < 0xf10) { @@ -1060,10 +1410,10 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, irq = (offset - 0xf10); if (!s->security_extn || attrs.secure || - GIC_TEST_GROUP(irq, 1 << cpu)) { + GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { s->sgi_pending[irq][cpu] &= ~value; if (s->sgi_pending[irq][cpu] == 0) { - GIC_CLEAR_PENDING(irq, 1 << cpu); + GIC_DIST_CLEAR_PENDING(irq, 1 << cpu); } } } else if (offset < 0xf30) { @@ -1074,8 +1424,8 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, irq = (offset - 0xf20); if (!s->security_extn || attrs.secure || - GIC_TEST_GROUP(irq, 1 << cpu)) { - GIC_SET_PENDING(irq, 1 << cpu); + GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { + GIC_DIST_SET_PENDING(irq, 1 << cpu); s->sgi_pending[irq][cpu] |= value; } } else { @@ -1122,7 +1472,7 @@ static void gic_dist_writel(void *opaque, hwaddr offset, mask = ALL_CPU_MASK; break; } - GIC_SET_PENDING(irq, mask); + GIC_DIST_SET_PENDING(irq, mask); target_cpu = ctz32(mask); while (target_cpu < GIC_NCPU) { s->sgi_pending[irq][target_cpu] |= (1 << cpu); @@ -1139,6 +1489,8 @@ static void gic_dist_writel(void *opaque, hwaddr offset, static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data, unsigned size, MemTxAttrs attrs) { + trace_gic_dist_write(offset, size, data); + switch (size) { case 1: gic_dist_writeb(opaque, offset, data, attrs); @@ -1227,7 +1579,7 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, *data = gic_get_priority_mask(s, cpu, attrs); break; case 0x08: /* Binary Point */ - if (s->security_extn && !attrs.secure) { + if (gic_cpu_ns_access(s, cpu, attrs)) { if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) { /* NS view of BPR when CBPR is 1 */ *data = MIN(s->bpr[cpu] + 1, 7); @@ -1254,7 +1606,7 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, * With security extensions, secure access: ABPR (alias of NS BPR) * With security extensions, nonsecure access: RAZ/WI */ - if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) { + if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { *data = 0; } else { *data = s->abpr[cpu]; @@ -1263,10 +1615,13 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, case 0xd0: case 0xd4: case 0xd8: case 0xdc: { int regno = (offset - 0xd0) / 4; + int nr_aprs = gic_is_vcpu(cpu) ? 
GIC_VIRT_NR_APRS : GIC_NR_APRS; - if (regno >= GIC_NR_APRS || s->revision != 2) { + if (regno >= nr_aprs || s->revision != 2) { *data = 0; - } else if (s->security_extn && !attrs.secure) { + } else if (gic_is_vcpu(cpu)) { + *data = s->h_apr[gic_get_vcpu_real_id(cpu)]; + } else if (gic_cpu_ns_access(s, cpu, attrs)) { /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */ *data = gic_apr_ns_view(s, regno, cpu); } else { @@ -1279,7 +1634,7 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, int regno = (offset - 0xe0) / 4; if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) || - (s->security_extn && !attrs.secure)) { + gic_cpu_ns_access(s, cpu, attrs) || gic_is_vcpu(cpu)) { *data = 0; } else { *data = s->nsapr[regno][cpu]; @@ -1292,12 +1647,18 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, *data = 0; break; } + + trace_gic_cpu_read(gic_is_vcpu(cpu) ? "vcpu" : "cpu", + gic_get_vcpu_real_id(cpu), offset, *data); return MEMTX_OK; } static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, uint32_t value, MemTxAttrs attrs) { + trace_gic_cpu_write(gic_is_vcpu(cpu) ? "vcpu" : "cpu", + gic_get_vcpu_real_id(cpu), offset, value); + switch (offset) { case 0x00: /* Control */ gic_set_cpu_control(s, cpu, value, attrs); @@ -1306,7 +1667,7 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, gic_set_priority_mask(s, cpu, value, attrs); break; case 0x08: /* Binary Point */ - if (s->security_extn && !attrs.secure) { + if (gic_cpu_ns_access(s, cpu, attrs)) { if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) { /* WI when CBPR is 1 */ return MEMTX_OK; @@ -1314,14 +1675,15 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); } } else { - s->bpr[cpu] = MAX(value & 0x7, GIC_MIN_BPR); + int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR; + s->bpr[cpu] = MAX(value & 0x7, min_bpr); } break; case 0x10: /* End Of Interrupt */ gic_complete_irq(s, cpu, value & 0x3ff, attrs); return MEMTX_OK; case 0x1c: /* Aliased Binary Point */ - if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) { + if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { /* unimplemented, or NS access: RAZ/WI */ return MEMTX_OK; } else { @@ -1331,11 +1693,14 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, case 0xd0: case 0xd4: case 0xd8: case 0xdc: { int regno = (offset - 0xd0) / 4; + int nr_aprs = gic_is_vcpu(cpu) ? 
GIC_VIRT_NR_APRS : GIC_NR_APRS; - if (regno >= GIC_NR_APRS || s->revision != 2) { + if (regno >= nr_aprs || s->revision != 2) { return MEMTX_OK; } - if (s->security_extn && !attrs.secure) { + if (gic_is_vcpu(cpu)) { + s->h_apr[gic_get_vcpu_real_id(cpu)] = value; + } else if (gic_cpu_ns_access(s, cpu, attrs)) { /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */ gic_apr_write_ns_view(s, regno, cpu, value); } else { @@ -1350,7 +1715,10 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, if (regno >= GIC_NR_APRS || s->revision != 2) { return MEMTX_OK; } - if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) { + if (gic_is_vcpu(cpu)) { + return MEMTX_OK; + } + if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { return MEMTX_OK; } s->nsapr[regno][cpu] = value; @@ -1365,7 +1733,13 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, "gic_cpu_write: Bad offset %x\n", (int)offset); return MEMTX_OK; } - gic_update(s); + + if (gic_is_vcpu(cpu)) { + gic_update_virt(s); + } else { + gic_update(s); + } + return MEMTX_OK; } @@ -1407,6 +1781,222 @@ static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr, return gic_cpu_write(s, id, addr, value, attrs); } +static MemTxResult gic_thisvcpu_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + GICState *s = (GICState *)opaque; + + return gic_cpu_read(s, gic_get_current_vcpu(s), addr, data, attrs); +} + +static MemTxResult gic_thisvcpu_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + GICState *s = (GICState *)opaque; + + return gic_cpu_write(s, gic_get_current_vcpu(s), addr, value, attrs); +} + +static uint32_t gic_compute_eisr(GICState *s, int cpu, int lr_start) +{ + int lr_idx; + uint32_t ret = 0; + + for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) { + uint32_t *entry = &s->h_lr[lr_idx][cpu]; + ret = deposit32(ret, lr_idx - lr_start, 1, + gic_lr_entry_is_eoi(*entry)); + } + + return ret; +} + +static uint32_t gic_compute_elrsr(GICState *s, int cpu, int lr_start) +{ + int lr_idx; + uint32_t ret = 0; + + for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) { + uint32_t *entry = &s->h_lr[lr_idx][cpu]; + ret = deposit32(ret, lr_idx - lr_start, 1, + gic_lr_entry_is_free(*entry)); + } + + return ret; +} + +static void gic_vmcr_write(GICState *s, uint32_t value, MemTxAttrs attrs) +{ + int vcpu = gic_get_current_vcpu(s); + uint32_t ctlr; + uint32_t abpr; + uint32_t bpr; + uint32_t prio_mask; + + ctlr = FIELD_EX32(value, GICH_VMCR, VMCCtlr); + abpr = FIELD_EX32(value, GICH_VMCR, VMABP); + bpr = FIELD_EX32(value, GICH_VMCR, VMBP); + prio_mask = FIELD_EX32(value, GICH_VMCR, VMPriMask) << 3; + + gic_set_cpu_control(s, vcpu, ctlr, attrs); + s->abpr[vcpu] = MAX(abpr, GIC_VIRT_MIN_ABPR); + s->bpr[vcpu] = MAX(bpr, GIC_VIRT_MIN_BPR); + gic_set_priority_mask(s, vcpu, prio_mask, attrs); +} + +static MemTxResult gic_hyp_read(void *opaque, int cpu, hwaddr addr, + uint64_t *data, MemTxAttrs attrs) +{ + GICState *s = ARM_GIC(opaque); + int vcpu = cpu + GIC_NCPU; + + switch (addr) { + case A_GICH_HCR: /* Hypervisor Control */ + *data = s->h_hcr[cpu]; + break; + + case A_GICH_VTR: /* VGIC Type */ + *data = FIELD_DP32(0, GICH_VTR, ListRegs, s->num_lrs - 1); + *data = FIELD_DP32(*data, GICH_VTR, PREbits, + GIC_VIRT_MAX_GROUP_PRIO_BITS - 1); + *data = FIELD_DP32(*data, GICH_VTR, PRIbits, + (7 - GIC_VIRT_MIN_BPR) - 1); + break; + + case A_GICH_VMCR: /* Virtual Machine Control */ + *data = FIELD_DP32(0, GICH_VMCR, VMCCtlr, + 
extract32(s->cpu_ctlr[vcpu], 0, 10)); + *data = FIELD_DP32(*data, GICH_VMCR, VMABP, s->abpr[vcpu]); + *data = FIELD_DP32(*data, GICH_VMCR, VMBP, s->bpr[vcpu]); + *data = FIELD_DP32(*data, GICH_VMCR, VMPriMask, + extract32(s->priority_mask[vcpu], 3, 5)); + break; + + case A_GICH_MISR: /* Maintenance Interrupt Status */ + *data = s->h_misr[cpu]; + break; + + case A_GICH_EISR0: /* End of Interrupt Status 0 and 1 */ + case A_GICH_EISR1: + *data = gic_compute_eisr(s, cpu, (addr - A_GICH_EISR0) * 8); + break; + + case A_GICH_ELRSR0: /* Empty List Status 0 and 1 */ + case A_GICH_ELRSR1: + *data = gic_compute_elrsr(s, cpu, (addr - A_GICH_ELRSR0) * 8); + break; + + case A_GICH_APR: /* Active Priorities */ + *data = s->h_apr[cpu]; + break; + + case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */ + { + int lr_idx = (addr - A_GICH_LR0) / 4; + + if (lr_idx > s->num_lrs) { + *data = 0; + } else { + *data = s->h_lr[lr_idx][cpu]; + } + break; + } + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "gic_hyp_read: Bad offset %" HWADDR_PRIx "\n", addr); + return MEMTX_OK; + } + + trace_gic_hyp_read(addr, *data); + return MEMTX_OK; +} + +static MemTxResult gic_hyp_write(void *opaque, int cpu, hwaddr addr, + uint64_t value, MemTxAttrs attrs) +{ + GICState *s = ARM_GIC(opaque); + int vcpu = cpu + GIC_NCPU; + + trace_gic_hyp_write(addr, value); + + switch (addr) { + case A_GICH_HCR: /* Hypervisor Control */ + s->h_hcr[cpu] = value & GICH_HCR_MASK; + break; + + case A_GICH_VMCR: /* Virtual Machine Control */ + gic_vmcr_write(s, value, attrs); + break; + + case A_GICH_APR: /* Active Priorities */ + s->h_apr[cpu] = value; + s->running_priority[vcpu] = gic_get_prio_from_apr_bits(s, vcpu); + break; + + case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */ + { + int lr_idx = (addr - A_GICH_LR0) / 4; + + if (lr_idx > s->num_lrs) { + return MEMTX_OK; + } + + s->h_lr[lr_idx][cpu] = value & GICH_LR_MASK; + trace_gic_lr_entry(cpu, lr_idx, s->h_lr[lr_idx][cpu]); + break; + } + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "gic_hyp_write: Bad offset %" HWADDR_PRIx "\n", addr); + return MEMTX_OK; + } + + gic_update_virt(s); + return MEMTX_OK; +} + +static MemTxResult gic_thiscpu_hyp_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + GICState *s = (GICState *)opaque; + + return gic_hyp_read(s, gic_get_current_cpu(s), addr, data, attrs); +} + +static MemTxResult gic_thiscpu_hyp_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + GICState *s = (GICState *)opaque; + + return gic_hyp_write(s, gic_get_current_cpu(s), addr, value, attrs); +} + +static MemTxResult gic_do_hyp_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + GICState **backref = (GICState **)opaque; + GICState *s = *backref; + int id = (backref - s->backref); + + return gic_hyp_read(s, id, addr, data, attrs); +} + +static MemTxResult gic_do_hyp_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + GICState **backref = (GICState **)opaque; + GICState *s = *backref; + int id = (backref - s->backref); + + return gic_hyp_write(s, id + GIC_NCPU, addr, value, attrs); + +} + static const MemoryRegionOps gic_ops[2] = { { .read_with_attrs = gic_dist_read, @@ -1426,11 +2016,24 @@ static const MemoryRegionOps gic_cpu_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -/* This function is used by nvic model */ -void gic_init_irqs_and_distributor(GICState *s) -{ - gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops); -} +static const 
MemoryRegionOps gic_virt_ops[2] = { + { + .read_with_attrs = gic_thiscpu_hyp_read, + .write_with_attrs = gic_thiscpu_hyp_write, + .endianness = DEVICE_NATIVE_ENDIAN, + }, + { + .read_with_attrs = gic_thisvcpu_read, + .write_with_attrs = gic_thisvcpu_write, + .endianness = DEVICE_NATIVE_ENDIAN, + } +}; + +static const MemoryRegionOps gic_viface_ops = { + .read_with_attrs = gic_do_hyp_read, + .write_with_attrs = gic_do_hyp_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; static void arm_gic_realize(DeviceState *dev, Error **errp) { @@ -1453,8 +2056,11 @@ static void arm_gic_realize(DeviceState *dev, Error **errp) return; } - /* This creates distributor and main CPU interface (s->cpuiomem[0]) */ - gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops); + /* This creates distributor, main CPU interface (s->cpuiomem[0]) and if + * enabled, virtualization extensions related interfaces (main virtual + * interface (s->vifaceiomem[0]) and virtual CPU interface). + */ + gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops, gic_virt_ops); /* Extra core-specific regions for the CPU interfaces. This is * necessary for "franken-GIC" implementations, for example on @@ -1470,6 +2076,19 @@ static void arm_gic_realize(DeviceState *dev, Error **errp) &s->backref[i], "gic_cpu", 0x100); sysbus_init_mmio(sbd, &s->cpuiomem[i+1]); } + + /* Extra core-specific regions for virtual interfaces. This is required by + * the GICv2 specification. + */ + if (s->virt_extn) { + for (i = 0; i < s->num_cpu; i++) { + memory_region_init_io(&s->vifaceiomem[i + 1], OBJECT(s), + &gic_viface_ops, &s->backref[i], + "gic_viface", 0x1000); + sysbus_init_mmio(sbd, &s->vifaceiomem[i + 1]); + } + } + } static void arm_gic_class_init(ObjectClass *klass, void *data) diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c index aee50a20e0..547dc41185 100644 --- a/hw/intc/arm_gic_common.c +++ b/hw/intc/arm_gic_common.c @@ -46,6 +46,13 @@ static int gic_post_load(void *opaque, int version_id) return 0; } +static bool gic_virt_state_needed(void *opaque) +{ + GICState *s = (GICState *)opaque; + + return s->virt_extn; +} + static const VMStateDescription vmstate_gic_irq_state = { .name = "arm_gic_irq_state", .version_id = 1, @@ -62,6 +69,30 @@ static const VMStateDescription vmstate_gic_irq_state = { } }; +static const VMStateDescription vmstate_gic_virt_state = { + .name = "arm_gic_virt_state", + .version_id = 1, + .minimum_version_id = 1, + .needed = gic_virt_state_needed, + .fields = (VMStateField[]) { + /* Virtual interface */ + VMSTATE_UINT32_ARRAY(h_hcr, GICState, GIC_NCPU), + VMSTATE_UINT32_ARRAY(h_misr, GICState, GIC_NCPU), + VMSTATE_UINT32_2DARRAY(h_lr, GICState, GIC_MAX_LR, GIC_NCPU), + VMSTATE_UINT32_ARRAY(h_apr, GICState, GIC_NCPU), + + /* Virtual CPU interfaces */ + VMSTATE_UINT32_SUB_ARRAY(cpu_ctlr, GICState, GIC_NCPU, GIC_NCPU), + VMSTATE_UINT16_SUB_ARRAY(priority_mask, GICState, GIC_NCPU, GIC_NCPU), + VMSTATE_UINT16_SUB_ARRAY(running_priority, GICState, GIC_NCPU, GIC_NCPU), + VMSTATE_UINT16_SUB_ARRAY(current_pending, GICState, GIC_NCPU, GIC_NCPU), + VMSTATE_UINT8_SUB_ARRAY(bpr, GICState, GIC_NCPU, GIC_NCPU), + VMSTATE_UINT8_SUB_ARRAY(abpr, GICState, GIC_NCPU, GIC_NCPU), + + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_gic = { .name = "arm_gic", .version_id = 12, @@ -70,26 +101,31 @@ static const VMStateDescription vmstate_gic = { .post_load = gic_post_load, .fields = (VMStateField[]) { VMSTATE_UINT32(ctlr, GICState), - VMSTATE_UINT32_ARRAY(cpu_ctlr, GICState, GIC_NCPU), + VMSTATE_UINT32_SUB_ARRAY(cpu_ctlr, 
GICState, 0, GIC_NCPU), VMSTATE_STRUCT_ARRAY(irq_state, GICState, GIC_MAXIRQ, 1, vmstate_gic_irq_state, gic_irq_state), VMSTATE_UINT8_ARRAY(irq_target, GICState, GIC_MAXIRQ), VMSTATE_UINT8_2DARRAY(priority1, GICState, GIC_INTERNAL, GIC_NCPU), VMSTATE_UINT8_ARRAY(priority2, GICState, GIC_MAXIRQ - GIC_INTERNAL), VMSTATE_UINT8_2DARRAY(sgi_pending, GICState, GIC_NR_SGIS, GIC_NCPU), - VMSTATE_UINT16_ARRAY(priority_mask, GICState, GIC_NCPU), - VMSTATE_UINT16_ARRAY(running_priority, GICState, GIC_NCPU), - VMSTATE_UINT16_ARRAY(current_pending, GICState, GIC_NCPU), - VMSTATE_UINT8_ARRAY(bpr, GICState, GIC_NCPU), - VMSTATE_UINT8_ARRAY(abpr, GICState, GIC_NCPU), + VMSTATE_UINT16_SUB_ARRAY(priority_mask, GICState, 0, GIC_NCPU), + VMSTATE_UINT16_SUB_ARRAY(running_priority, GICState, 0, GIC_NCPU), + VMSTATE_UINT16_SUB_ARRAY(current_pending, GICState, 0, GIC_NCPU), + VMSTATE_UINT8_SUB_ARRAY(bpr, GICState, 0, GIC_NCPU), + VMSTATE_UINT8_SUB_ARRAY(abpr, GICState, 0, GIC_NCPU), VMSTATE_UINT32_2DARRAY(apr, GICState, GIC_NR_APRS, GIC_NCPU), VMSTATE_UINT32_2DARRAY(nsapr, GICState, GIC_NR_APRS, GIC_NCPU), VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_gic_virt_state, + NULL } }; void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler, - const MemoryRegionOps *ops) + const MemoryRegionOps *ops, + const MemoryRegionOps *virt_ops) { SysBusDevice *sbd = SYS_BUS_DEVICE(s); int i = s->num_irq - GIC_INTERNAL; @@ -116,6 +152,11 @@ void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler, for (i = 0; i < s->num_cpu; i++) { sysbus_init_irq(sbd, &s->parent_vfiq[i]); } + if (s->virt_extn) { + for (i = 0; i < s->num_cpu; i++) { + sysbus_init_irq(sbd, &s->maintenance_irq[i]); + } + } /* Distributor */ memory_region_init_io(&s->iomem, OBJECT(s), ops, s, "gic_dist", 0x1000); @@ -127,6 +168,17 @@ void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler, memory_region_init_io(&s->cpuiomem[0], OBJECT(s), ops ? &ops[1] : NULL, s, "gic_cpu", s->revision == 2 ? 0x2000 : 0x100); sysbus_init_mmio(sbd, &s->cpuiomem[0]); + + if (s->virt_extn) { + memory_region_init_io(&s->vifaceiomem[0], OBJECT(s), virt_ops, + s, "gic_viface", 0x1000); + sysbus_init_mmio(sbd, &s->vifaceiomem[0]); + + memory_region_init_io(&s->vcpuiomem, OBJECT(s), + virt_ops ? &virt_ops[1] : NULL, + s, "gic_vcpu", 0x2000); + sysbus_init_mmio(sbd, &s->vcpuiomem); + } } static void arm_gic_common_realize(DeviceState *dev, Error **errp) @@ -163,6 +215,48 @@ static void arm_gic_common_realize(DeviceState *dev, Error **errp) "the security extensions"); return; } + + if (s->virt_extn) { + if (s->revision != 2) { + error_setg(errp, "GIC virtualization extensions are only " + "supported by revision 2"); + return; + } + + /* For now, set the number of implemented LRs to 4, as found in most + * real GICv2. This could be promoted as a QOM property if we need to + * emulate a variant with another num_lrs. + */ + s->num_lrs = 4; + } +} + +static inline void arm_gic_common_reset_irq_state(GICState *s, int first_cpu, + int resetprio) +{ + int i, j; + + for (i = first_cpu; i < first_cpu + s->num_cpu; i++) { + if (s->revision == REV_11MPCORE) { + s->priority_mask[i] = 0xf0; + } else { + s->priority_mask[i] = resetprio; + } + s->current_pending[i] = 1023; + s->running_priority[i] = 0x100; + s->cpu_ctlr[i] = 0; + s->bpr[i] = gic_is_vcpu(i) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR; + s->abpr[i] = gic_is_vcpu(i) ? 
GIC_VIRT_MIN_ABPR : GIC_MIN_ABPR; + + if (!gic_is_vcpu(i)) { + for (j = 0; j < GIC_INTERNAL; j++) { + s->priority1[j][i] = resetprio; + } + for (j = 0; j < GIC_NR_SGIS; j++) { + s->sgi_pending[j][i] = 0; + } + } + } } static void arm_gic_common_reset(DeviceState *dev) @@ -185,27 +279,18 @@ static void arm_gic_common_reset(DeviceState *dev) } memset(s->irq_state, 0, GIC_MAXIRQ * sizeof(gic_irq_state)); - for (i = 0 ; i < s->num_cpu; i++) { - if (s->revision == REV_11MPCORE) { - s->priority_mask[i] = 0xf0; - } else { - s->priority_mask[i] = resetprio; - } - s->current_pending[i] = 1023; - s->running_priority[i] = 0x100; - s->cpu_ctlr[i] = 0; - s->bpr[i] = GIC_MIN_BPR; - s->abpr[i] = GIC_MIN_ABPR; - for (j = 0; j < GIC_INTERNAL; j++) { - s->priority1[j][i] = resetprio; - } - for (j = 0; j < GIC_NR_SGIS; j++) { - s->sgi_pending[j][i] = 0; - } + arm_gic_common_reset_irq_state(s, 0, resetprio); + + if (s->virt_extn) { + /* vCPU states are stored at indexes GIC_NCPU .. GIC_NCPU+num_cpu. + * The exposed vCPU interface does not have security extensions. + */ + arm_gic_common_reset_irq_state(s, GIC_NCPU, 0); } + for (i = 0; i < GIC_NR_SGIS; i++) { - GIC_SET_ENABLED(i, ALL_CPU_MASK); - GIC_SET_EDGE_TRIGGER(i); + GIC_DIST_SET_ENABLED(i, ALL_CPU_MASK); + GIC_DIST_SET_EDGE_TRIGGER(i); } for (i = 0; i < ARRAY_SIZE(s->priority2); i++) { @@ -222,7 +307,20 @@ static void arm_gic_common_reset(DeviceState *dev) } if (s->security_extn && s->irq_reset_nonsecure) { for (i = 0; i < GIC_MAXIRQ; i++) { - GIC_SET_GROUP(i, ALL_CPU_MASK); + GIC_DIST_SET_GROUP(i, ALL_CPU_MASK); + } + } + + if (s->virt_extn) { + for (i = 0; i < s->num_lrs; i++) { + for (j = 0; j < s->num_cpu; j++) { + s->h_lr[i][j] = 0; + } + } + + for (i = 0; i < s->num_cpu; i++) { + s->h_hcr[i] = 0; + s->h_misr[i] = 0; } } @@ -255,6 +353,8 @@ static Property arm_gic_common_properties[] = { DEFINE_PROP_UINT32("revision", GICState, revision, 1), /* True if the GIC should implement the security extensions */ DEFINE_PROP_BOOL("has-security-extensions", GICState, security_extn, 0), + /* True if the GIC should implement the virtualization extensions */ + DEFINE_PROP_BOOL("has-virtualization-extensions", GICState, virt_extn, 0), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c index 86665080bd..a611e8ee12 100644 --- a/hw/intc/arm_gic_kvm.c +++ b/hw/intc/arm_gic_kvm.c @@ -140,10 +140,10 @@ static void translate_group(GICState *s, int irq, int cpu, int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; if (to_kernel) { - *field = GIC_TEST_GROUP(irq, cm); + *field = GIC_DIST_TEST_GROUP(irq, cm); } else { if (*field & 1) { - GIC_SET_GROUP(irq, cm); + GIC_DIST_SET_GROUP(irq, cm); } } } @@ -154,10 +154,10 @@ static void translate_enabled(GICState *s, int irq, int cpu, int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; if (to_kernel) { - *field = GIC_TEST_ENABLED(irq, cm); + *field = GIC_DIST_TEST_ENABLED(irq, cm); } else { if (*field & 1) { - GIC_SET_ENABLED(irq, cm); + GIC_DIST_SET_ENABLED(irq, cm); } } } @@ -171,7 +171,7 @@ static void translate_pending(GICState *s, int irq, int cpu, *field = gic_test_pending(s, irq, cm); } else { if (*field & 1) { - GIC_SET_PENDING(irq, cm); + GIC_DIST_SET_PENDING(irq, cm); /* TODO: Capture is level-line is held high in the kernel */ } } @@ -183,10 +183,10 @@ static void translate_active(GICState *s, int irq, int cpu, int cm = (irq < GIC_INTERNAL) ? 
(1 << cpu) : ALL_CPU_MASK; if (to_kernel) { - *field = GIC_TEST_ACTIVE(irq, cm); + *field = GIC_DIST_TEST_ACTIVE(irq, cm); } else { if (*field & 1) { - GIC_SET_ACTIVE(irq, cm); + GIC_DIST_SET_ACTIVE(irq, cm); } } } @@ -195,10 +195,10 @@ static void translate_trigger(GICState *s, int irq, int cpu, uint32_t *field, bool to_kernel) { if (to_kernel) { - *field = (GIC_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0; + *field = (GIC_DIST_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0; } else { if (*field & 0x2) { - GIC_SET_EDGE_TRIGGER(irq); + GIC_DIST_SET_EDGE_TRIGGER(irq); } } } @@ -207,9 +207,10 @@ static void translate_priority(GICState *s, int irq, int cpu, uint32_t *field, bool to_kernel) { if (to_kernel) { - *field = GIC_GET_PRIORITY(irq, cpu) & 0xff; + *field = GIC_DIST_GET_PRIORITY(irq, cpu) & 0xff; } else { - gic_set_priority(s, cpu, irq, *field & 0xff, MEMTXATTRS_UNSPECIFIED); + gic_dist_set_priority(s, cpu, irq, + *field & 0xff, MEMTXATTRS_UNSPECIFIED); } } @@ -510,6 +511,12 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) return; } + if (s->virt_extn) { + error_setg(errp, "the in-kernel VGIC does not implement the " + "virtualization extensions"); + return; + } + if (!kvm_arm_gic_can_save_restore(s)) { error_setg(&s->migration_blocker, "This operating system kernel does " "not support vGICv2 migration"); @@ -521,7 +528,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) } } - gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL); + gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL, NULL); for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) { qemu_irq irq = qdev_get_gpio_in(dev, i); diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c index ff326b374a..52480c3b4c 100644 --- a/hw/intc/arm_gicv3_common.c +++ b/hw/intc/arm_gicv3_common.c @@ -29,6 +29,41 @@ #include "hw/arm/linux-boot-if.h" #include "sysemu/kvm.h" + +static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs) +{ + if (cs->gicd_no_migration_shift_bug) { + return; + } + + /* Older versions of QEMU had a bug in the handling of state save/restore + * to the KVM GICv3: they got the offset in the bitmap arrays wrong, + * so that instead of the data for external interrupts 32 and up + * starting at bit position 32 in the bitmap, it started at bit + * position 64. If we're receiving data from a QEMU with that bug, + * we must move the data down into the right place. + */ + memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8, + sizeof(cs->group) - GIC_INTERNAL / 8); + memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8, + sizeof(cs->grpmod) - GIC_INTERNAL / 8); + memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8, + sizeof(cs->enabled) - GIC_INTERNAL / 8); + memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8, + sizeof(cs->pending) - GIC_INTERNAL / 8); + memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8, + sizeof(cs->active) - GIC_INTERNAL / 8); + memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8, + sizeof(cs->edge_trigger) - GIC_INTERNAL / 8); + + /* + * While this new version QEMU doesn't have this kind of bug as we fix it, + * so it needs to set the flag to true to indicate that and it's necessary + * for next migration to work from this new version QEMU. 
+ */ + cs->gicd_no_migration_shift_bug = true; +} + static int gicv3_pre_save(void *opaque) { GICv3State *s = (GICv3State *)opaque; @@ -46,6 +81,8 @@ static int gicv3_post_load(void *opaque, int version_id) GICv3State *s = (GICv3State *)opaque; ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s); + gicv3_gicd_no_migration_shift_bug_post_load(s); + if (c->post_load) { c->post_load(s); } @@ -73,7 +110,7 @@ static const VMStateDescription vmstate_gicv3_cpu_virt = { } }; -static int icc_sre_el1_reg_pre_load(void *opaque) +static int vmstate_gicv3_cpu_pre_load(void *opaque) { GICv3CPUState *cs = opaque; @@ -97,7 +134,6 @@ const VMStateDescription vmstate_gicv3_cpu_sre_el1 = { .name = "arm_gicv3_cpu/sre_el1", .version_id = 1, .minimum_version_id = 1, - .pre_load = icc_sre_el1_reg_pre_load, .needed = icc_sre_el1_reg_needed, .fields = (VMStateField[]) { VMSTATE_UINT64(icc_sre_el1, GICv3CPUState), @@ -109,6 +145,7 @@ static const VMStateDescription vmstate_gicv3_cpu = { .name = "arm_gicv3_cpu", .version_id = 1, .minimum_version_id = 1, + .pre_load = vmstate_gicv3_cpu_pre_load, .fields = (VMStateField[]) { VMSTATE_UINT32(level, GICv3CPUState), VMSTATE_UINT32(gicr_ctlr, GICv3CPUState), @@ -134,15 +171,12 @@ static const VMStateDescription vmstate_gicv3_cpu = { }, .subsections = (const VMStateDescription * []) { &vmstate_gicv3_cpu_virt, - NULL - }, - .subsections = (const VMStateDescription * []) { &vmstate_gicv3_cpu_sre_el1, NULL } }; -static int gicv3_gicd_no_migration_shift_bug_pre_load(void *opaque) +static int gicv3_pre_load(void *opaque) { GICv3State *cs = opaque; @@ -164,51 +198,16 @@ static int gicv3_gicd_no_migration_shift_bug_pre_load(void *opaque) return 0; } -static int gicv3_gicd_no_migration_shift_bug_post_load(void *opaque, - int version_id) +static bool needed_always(void *opaque) { - GICv3State *cs = opaque; - - if (cs->gicd_no_migration_shift_bug) { - return 0; - } - - /* Older versions of QEMU had a bug in the handling of state save/restore - * to the KVM GICv3: they got the offset in the bitmap arrays wrong, - * so that instead of the data for external interrupts 32 and up - * starting at bit position 32 in the bitmap, it started at bit - * position 64. If we're receiving data from a QEMU with that bug, - * we must move the data down into the right place. - */ - memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8, - sizeof(cs->group) - GIC_INTERNAL / 8); - memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8, - sizeof(cs->grpmod) - GIC_INTERNAL / 8); - memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8, - sizeof(cs->enabled) - GIC_INTERNAL / 8); - memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8, - sizeof(cs->pending) - GIC_INTERNAL / 8); - memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8, - sizeof(cs->active) - GIC_INTERNAL / 8); - memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8, - sizeof(cs->edge_trigger) - GIC_INTERNAL / 8); - - /* - * While this new version QEMU doesn't have this kind of bug as we fix it, - * so it needs to set the flag to true to indicate that and it's necessary - * for next migration to work from this new version QEMU. 
- */ - cs->gicd_no_migration_shift_bug = true; - - return 0; + return true; } const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = { .name = "arm_gicv3/gicd_no_migration_shift_bug", .version_id = 1, .minimum_version_id = 1, - .pre_load = gicv3_gicd_no_migration_shift_bug_pre_load, - .post_load = gicv3_gicd_no_migration_shift_bug_post_load, + .needed = needed_always, .fields = (VMStateField[]) { VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State), VMSTATE_END_OF_LIST() @@ -219,6 +218,7 @@ static const VMStateDescription vmstate_gicv3 = { .name = "arm_gicv3", .version_id = 1, .minimum_version_id = 1, + .pre_load = gicv3_pre_load, .pre_save = gicv3_pre_save, .post_load = gicv3_post_load, .priority = MIG_PRI_GICV3, diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index 2a60568d82..068a8e8e9b 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -85,7 +85,10 @@ static bool icv_access(CPUARMState *env, int hcr_flags) * * access if NS EL1 and either IMO or FMO == 1: * CTLR, DIR, PMR, RPR */ - return (env->cp15.hcr_el2 & hcr_flags) && arm_current_el(env) == 1 + bool flagmatch = ((hcr_flags & HCR_IMO) && arm_hcr_el2_imo(env)) || + ((hcr_flags & HCR_FMO) && arm_hcr_el2_fmo(env)); + + return flagmatch && arm_current_el(env) == 1 && !arm_is_secure_below_el3(env); } @@ -1549,8 +1552,8 @@ static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri, /* No need to include !IsSecure in route_*_to_el2 as it's only * tested in cases where we know !IsSecure is true. */ - route_fiq_to_el2 = env->cp15.hcr_el2 & HCR_FMO; - route_irq_to_el2 = env->cp15.hcr_el2 & HCR_IMO; + route_fiq_to_el2 = arm_hcr_el2_fmo(env); + route_irq_to_el2 = arm_hcr_el2_imo(env); switch (arm_current_el(env)) { case 3: @@ -1893,7 +1896,7 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env, switch (el) { case 1: if (arm_is_secure_below_el3(env) || - ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) == 0)) { + (arm_hcr_el2_imo(env) == 0 && arm_hcr_el2_fmo(env) == 0)) { r = CP_ACCESS_TRAP_EL3; } break; @@ -1933,7 +1936,7 @@ static CPAccessResult gicv3_dir_access(CPUARMState *env, static CPAccessResult gicv3_sgi_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { - if ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) && + if ((arm_hcr_el2_imo(env) || arm_hcr_el2_fmo(env)) && arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) { /* Takes priority over a possible EL3 trap */ return CP_ACCESS_TRAP_EL2; @@ -1958,8 +1961,7 @@ static CPAccessResult gicv3_fiq_access(CPUARMState *env, if (env->cp15.scr_el3 & SCR_FIQ) { switch (el) { case 1: - if (arm_is_secure_below_el3(env) || - ((env->cp15.hcr_el2 & HCR_FMO) == 0)) { + if (arm_is_secure_below_el3(env) || !arm_hcr_el2_fmo(env)) { r = CP_ACCESS_TRAP_EL3; } break; @@ -1998,8 +2000,7 @@ static CPAccessResult gicv3_irq_access(CPUARMState *env, if (env->cp15.scr_el3 & SCR_IRQ) { switch (el) { case 1: - if (arm_is_secure_below_el3(env) || - ((env->cp15.hcr_el2 & HCR_IMO) == 0)) { + if (arm_is_secure_below_el3(env) || !arm_hcr_el2_imo(env)) { r = CP_ACCESS_TRAP_EL3; } break; diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c index 271ebe461c..01573abb48 100644 --- a/hw/intc/arm_gicv3_its_kvm.c +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -211,7 +211,7 @@ static void kvm_arm_its_reset(DeviceState *dev) return; } - error_report("ITS KVM: full reset is not supported by the host kernel"); + warn_report("ITS KVM: full reset is not supported by the host kernel"); if (!kvm_device_check_attr(s->dev_fd, 
KVM_DEV_ARM_VGIC_GRP_ITS_REGS, GITS_CTLR)) { diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index cd1e7f1729..0d816fdd2c 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -420,6 +420,8 @@ static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio) assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */ assert(irq < s->num_irq); + prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits); + if (secure) { assert(exc_is_banked(irq)); s->sec_vectors[irq].prio = prio; @@ -772,6 +774,24 @@ static void set_irq_level(void *opaque, int n, int level) } } +/* callback when external NMI line is changed */ +static void nvic_nmi_trigger(void *opaque, int n, int level) +{ + NVICState *s = opaque; + + trace_nvic_set_nmi_level(level); + + /* + * The architecture doesn't specify whether NMI should share + * the normal-interrupt behaviour of being resampled on + * exception handler return. We choose not to, so just + * set NMI pending here and don't track the current level. + */ + if (level) { + armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false); + } +} + static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) { ARMCPU *cpu = s->cpu; @@ -779,6 +799,9 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) switch (offset) { case 4: /* Interrupt Control Type. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { + goto bad_offset; + } return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1; case 0xc: /* CPPWR */ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { @@ -867,6 +890,9 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) } return val; case 0xd10: /* System Control. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { + goto bad_offset; + } return cpu->env.v7m.scr[attrs.secure]; case 0xd14: /* Configuration Control. */ /* The BFHFNMIGN bit is the only non-banked bit; we @@ -876,6 +902,9 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK; return val; case 0xd24: /* System Handler Control and State (SHCSR) */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { + goto bad_offset; + } val = 0; if (attrs.secure) { if (s->sec_vectors[ARMV7M_EXCP_MEM].active) { @@ -988,12 +1017,21 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) } return val; case 0xd2c: /* Hard Fault Status. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } return cpu->env.v7m.hfsr; case 0xd30: /* Debug Fault Status. */ return cpu->env.v7m.dfsr; case 0xd34: /* MMFAR MemManage Fault Address */ + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } return cpu->env.v7m.mmfar[attrs.secure]; case 0xd38: /* Bus Fault Address. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } return cpu->env.v7m.bfar; case 0xd3c: /* Aux Fault Status. */ /* TODO: Implement fault status registers. 
*/ @@ -1263,9 +1301,12 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, "Setting VECTRESET when not in DEBUG mode " "is UNPREDICTABLE\n"); } - s->prigroup[attrs.secure] = extract32(value, - R_V7M_AIRCR_PRIGROUP_SHIFT, - R_V7M_AIRCR_PRIGROUP_LENGTH); + if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + s->prigroup[attrs.secure] = + extract32(value, + R_V7M_AIRCR_PRIGROUP_SHIFT, + R_V7M_AIRCR_PRIGROUP_LENGTH); + } if (attrs.secure) { /* These bits are only writable by secure */ cpu->env.v7m.aircr = value & @@ -1288,6 +1329,9 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, } break; case 0xd10: /* System Control. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { + goto bad_offset; + } /* We don't implement deep-sleep so these bits are RAZ/WI. * The other bits in the register are banked. * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which @@ -1297,6 +1341,10 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, cpu->env.v7m.scr[attrs.secure] = value; break; case 0xd14: /* Configuration Control. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } + /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */ value &= (R_V7M_CCR_STKALIGN_MASK | R_V7M_CCR_BFHFNMIGN_MASK | @@ -1321,6 +1369,9 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, cpu->env.v7m.ccr[attrs.secure] = value; break; case 0xd24: /* System Handler Control and State (SHCSR) */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { + goto bad_offset; + } if (attrs.secure) { s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0; /* Secure HardFault active bit cannot be written */ @@ -1389,15 +1440,24 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, nvic_irq_update(s); break; case 0xd2c: /* Hard Fault Status. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } cpu->env.v7m.hfsr &= ~value; /* W1C */ break; case 0xd30: /* Debug Fault Status. */ cpu->env.v7m.dfsr &= ~value; /* W1C */ break; case 0xd34: /* Mem Manage Address. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } cpu->env.v7m.mmfar[attrs.secure] = value; return; case 0xd38: /* Bus Fault Address. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } cpu->env.v7m.bfar = value; return; case 0xd3c: /* Aux Fault Status. */ @@ -1627,6 +1687,11 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, case 0xf00: /* Software Triggered Interrupt Register */ { int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ; + + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } + if (excnum < s->num_irq) { armv7m_nvic_set_pending(s, excnum, false); } @@ -1752,6 +1817,11 @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr, break; case 0x300 ... 0x33f: /* NVIC Active */ val = 0; + + if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) { + break; + } + startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */ for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { @@ -1771,7 +1841,13 @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr, } } break; - case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */ + case 0xd18: /* System Handler Priority (SHPR1) */ + if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) { + val = 0; + break; + } + /* fall through */ + case 0xd1c ... 
0xd23: /* System Handler Priority (SHPR2, SHPR3) */ val = 0; for (i = 0; i < size; i++) { unsigned hdlidx = (offset - 0xd14) + i; @@ -1784,6 +1860,10 @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr, } break; case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */ + if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) { + val = 0; + break; + }; /* The BFSR bits [15:8] are shared between security states * and we store them in the NS copy */ @@ -1876,7 +1956,12 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr, } nvic_irq_update(s); return MEMTX_OK; - case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */ + case 0xd18: /* System Handler Priority (SHPR1) */ + if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) { + return MEMTX_OK; + } + /* fall through */ + case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */ for (i = 0; i < size; i++) { unsigned hdlidx = (offset - 0xd14) + i; int newprio = extract32(value, i * 8, 8); @@ -1890,6 +1975,9 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr, nvic_irq_update(s); return MEMTX_OK; case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */ + if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) { + return MEMTX_OK; + } /* All bits are W1C, so construct 32 bit value with 0s in * the parts not written by the access size */ @@ -2203,6 +2291,8 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp) /* include space for internal exception vectors */ s->num_irq += NVIC_FIRST_IRQ; + s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2; + object_property_set_bool(OBJECT(&s->systick[M_REG_NS]), true, "realized", &err); if (err != NULL) { @@ -2310,6 +2400,7 @@ static void armv7m_nvic_instance_init(Object *obj) qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1); qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger", M_REG_NUM_BANKS); + qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1); } static void armv7m_nvic_class_init(ObjectClass *klass, void *data) diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h index 7fe87b13de..45c2af0bf5 100644 --- a/hw/intc/gic_internal.h +++ b/hw/intc/gic_internal.h @@ -21,36 +21,38 @@ #ifndef QEMU_ARM_GIC_INTERNAL_H #define QEMU_ARM_GIC_INTERNAL_H +#include "hw/registerfields.h" #include "hw/intc/arm_gic.h" #define ALL_CPU_MASK ((unsigned)(((1 << GIC_NCPU) - 1))) #define GIC_BASE_IRQ 0 -#define GIC_SET_ENABLED(irq, cm) s->irq_state[irq].enabled |= (cm) -#define GIC_CLEAR_ENABLED(irq, cm) s->irq_state[irq].enabled &= ~(cm) -#define GIC_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0) -#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm) -#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm) -#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm) -#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm) -#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0) -#define GIC_SET_MODEL(irq) s->irq_state[irq].model = true -#define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = false -#define GIC_TEST_MODEL(irq) s->irq_state[irq].model -#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level |= (cm) -#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm) -#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0) -#define GIC_SET_EDGE_TRIGGER(irq) s->irq_state[irq].edge_trigger = true -#define GIC_CLEAR_EDGE_TRIGGER(irq) s->irq_state[irq].edge_trigger = false -#define 
GIC_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger) -#define GIC_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \ +#define GIC_DIST_SET_ENABLED(irq, cm) (s->irq_state[irq].enabled |= (cm)) +#define GIC_DIST_CLEAR_ENABLED(irq, cm) (s->irq_state[irq].enabled &= ~(cm)) +#define GIC_DIST_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0) +#define GIC_DIST_SET_PENDING(irq, cm) (s->irq_state[irq].pending |= (cm)) +#define GIC_DIST_CLEAR_PENDING(irq, cm) (s->irq_state[irq].pending &= ~(cm)) +#define GIC_DIST_SET_ACTIVE(irq, cm) (s->irq_state[irq].active |= (cm)) +#define GIC_DIST_CLEAR_ACTIVE(irq, cm) (s->irq_state[irq].active &= ~(cm)) +#define GIC_DIST_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0) +#define GIC_DIST_SET_MODEL(irq) (s->irq_state[irq].model = true) +#define GIC_DIST_CLEAR_MODEL(irq) (s->irq_state[irq].model = false) +#define GIC_DIST_TEST_MODEL(irq) (s->irq_state[irq].model) +#define GIC_DIST_SET_LEVEL(irq, cm) (s->irq_state[irq].level |= (cm)) +#define GIC_DIST_CLEAR_LEVEL(irq, cm) (s->irq_state[irq].level &= ~(cm)) +#define GIC_DIST_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0) +#define GIC_DIST_SET_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger = true) +#define GIC_DIST_CLEAR_EDGE_TRIGGER(irq) \ + (s->irq_state[irq].edge_trigger = false) +#define GIC_DIST_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger) +#define GIC_DIST_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \ s->priority1[irq][cpu] : \ s->priority2[(irq) - GIC_INTERNAL]) -#define GIC_TARGET(irq) s->irq_target[irq] -#define GIC_CLEAR_GROUP(irq, cm) (s->irq_state[irq].group &= ~(cm)) -#define GIC_SET_GROUP(irq, cm) (s->irq_state[irq].group |= (cm)) -#define GIC_TEST_GROUP(irq, cm) ((s->irq_state[irq].group & (cm)) != 0) +#define GIC_DIST_TARGET(irq) (s->irq_target[irq]) +#define GIC_DIST_CLEAR_GROUP(irq, cm) (s->irq_state[irq].group &= ~(cm)) +#define GIC_DIST_SET_GROUP(irq, cm) (s->irq_state[irq].group |= (cm)) +#define GIC_DIST_TEST_GROUP(irq, cm) ((s->irq_state[irq].group & (cm)) != 0) #define GICD_CTLR_EN_GRP0 (1U << 0) #define GICD_CTLR_EN_GRP1 (1U << 1) @@ -63,6 +65,91 @@ #define GICC_CTLR_EOIMODE (1U << 9) #define GICC_CTLR_EOIMODE_NS (1U << 10) +REG32(GICH_HCR, 0x0) + FIELD(GICH_HCR, EN, 0, 1) + FIELD(GICH_HCR, UIE, 1, 1) + FIELD(GICH_HCR, LRENPIE, 2, 1) + FIELD(GICH_HCR, NPIE, 3, 1) + FIELD(GICH_HCR, VGRP0EIE, 4, 1) + FIELD(GICH_HCR, VGRP0DIE, 5, 1) + FIELD(GICH_HCR, VGRP1EIE, 6, 1) + FIELD(GICH_HCR, VGRP1DIE, 7, 1) + FIELD(GICH_HCR, EOICount, 27, 5) + +#define GICH_HCR_MASK \ + (R_GICH_HCR_EN_MASK | R_GICH_HCR_UIE_MASK | \ + R_GICH_HCR_LRENPIE_MASK | R_GICH_HCR_NPIE_MASK | \ + R_GICH_HCR_VGRP0EIE_MASK | R_GICH_HCR_VGRP0DIE_MASK | \ + R_GICH_HCR_VGRP1EIE_MASK | R_GICH_HCR_VGRP1DIE_MASK | \ + R_GICH_HCR_EOICount_MASK) + +REG32(GICH_VTR, 0x4) + FIELD(GICH_VTR, ListRegs, 0, 6) + FIELD(GICH_VTR, PREbits, 26, 3) + FIELD(GICH_VTR, PRIbits, 29, 3) + +REG32(GICH_VMCR, 0x8) + FIELD(GICH_VMCR, VMCCtlr, 0, 10) + FIELD(GICH_VMCR, VMABP, 18, 3) + FIELD(GICH_VMCR, VMBP, 21, 3) + FIELD(GICH_VMCR, VMPriMask, 27, 5) + +REG32(GICH_MISR, 0x10) + FIELD(GICH_MISR, EOI, 0, 1) + FIELD(GICH_MISR, U, 1, 1) + FIELD(GICH_MISR, LRENP, 2, 1) + FIELD(GICH_MISR, NP, 3, 1) + FIELD(GICH_MISR, VGrp0E, 4, 1) + FIELD(GICH_MISR, VGrp0D, 5, 1) + FIELD(GICH_MISR, VGrp1E, 6, 1) + FIELD(GICH_MISR, VGrp1D, 7, 1) + +REG32(GICH_EISR0, 0x20) +REG32(GICH_EISR1, 0x24) +REG32(GICH_ELRSR0, 0x30) +REG32(GICH_ELRSR1, 0x34) +REG32(GICH_APR, 0xf0) + +REG32(GICH_LR0, 0x100) + FIELD(GICH_LR0, 
VirtualID, 0, 10) + FIELD(GICH_LR0, PhysicalID, 10, 10) + FIELD(GICH_LR0, CPUID, 10, 3) + FIELD(GICH_LR0, EOI, 19, 1) + FIELD(GICH_LR0, Priority, 23, 5) + FIELD(GICH_LR0, State, 28, 2) + FIELD(GICH_LR0, Grp1, 30, 1) + FIELD(GICH_LR0, HW, 31, 1) + +/* Last LR register */ +REG32(GICH_LR63, 0x1fc) + +#define GICH_LR_MASK \ + (R_GICH_LR0_VirtualID_MASK | R_GICH_LR0_PhysicalID_MASK | \ + R_GICH_LR0_CPUID_MASK | R_GICH_LR0_EOI_MASK | \ + R_GICH_LR0_Priority_MASK | R_GICH_LR0_State_MASK | \ + R_GICH_LR0_Grp1_MASK | R_GICH_LR0_HW_MASK) + +#define GICH_LR_STATE_INVALID 0 +#define GICH_LR_STATE_PENDING 1 +#define GICH_LR_STATE_ACTIVE 2 +#define GICH_LR_STATE_ACTIVE_PENDING 3 + +#define GICH_LR_VIRT_ID(entry) (FIELD_EX32(entry, GICH_LR0, VirtualID)) +#define GICH_LR_PHYS_ID(entry) (FIELD_EX32(entry, GICH_LR0, PhysicalID)) +#define GICH_LR_CPUID(entry) (FIELD_EX32(entry, GICH_LR0, CPUID)) +#define GICH_LR_EOI(entry) (FIELD_EX32(entry, GICH_LR0, EOI)) +#define GICH_LR_PRIORITY(entry) (FIELD_EX32(entry, GICH_LR0, Priority) << 3) +#define GICH_LR_STATE(entry) (FIELD_EX32(entry, GICH_LR0, State)) +#define GICH_LR_GROUP(entry) (FIELD_EX32(entry, GICH_LR0, Grp1)) +#define GICH_LR_HW(entry) (FIELD_EX32(entry, GICH_LR0, HW)) + +#define GICH_LR_CLEAR_PENDING(entry) \ + ((entry) &= ~(GICH_LR_STATE_PENDING << R_GICH_LR0_State_SHIFT)) +#define GICH_LR_SET_ACTIVE(entry) \ + ((entry) |= (GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT)) +#define GICH_LR_CLEAR_ACTIVE(entry) \ + ((entry) &= ~(GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT)) + /* Valid bits for GICC_CTLR for GICv1, v1 with security extensions, * GICv2 and GICv2 with security extensions: */ @@ -74,13 +161,9 @@ /* The special cases for the revision property: */ #define REV_11MPCORE 0 -void gic_set_pending_private(GICState *s, int cpu, int irq); uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs); -void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs); -void gic_update(GICState *s); -void gic_init_irqs_and_distributor(GICState *s); -void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val, - MemTxAttrs attrs); +void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val, + MemTxAttrs attrs); static inline bool gic_test_pending(GICState *s, int irq, int cm) { @@ -93,7 +176,148 @@ static inline bool gic_test_pending(GICState *s, int irq, int cm) * GICD_ISPENDR to set the state pending. */ return (s->irq_state[irq].pending & cm) || - (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_LEVEL(irq, cm)); + (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_LEVEL(irq, cm)); + } +} + +static inline bool gic_is_vcpu(int cpu) +{ + return cpu >= GIC_NCPU; +} + +static inline int gic_get_vcpu_real_id(int cpu) +{ + return (cpu >= GIC_NCPU) ? (cpu - GIC_NCPU) : cpu; +} + +/* Return true if the given vIRQ state exists in a LR and is either active or + * pending and active. + * + * This function is used to check that a guest's `end of interrupt' or + * `interrupts deactivation' request is valid, and matches with a LR of an + * already acknowledged vIRQ (i.e. has the active bit set in its state). + */ +static inline bool gic_virq_is_valid(GICState *s, int irq, int vcpu) +{ + int cpu = gic_get_vcpu_real_id(vcpu); + int lr_idx; + + for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) { + uint32_t *entry = &s->h_lr[lr_idx][cpu]; + + if ((GICH_LR_VIRT_ID(*entry) == irq) && + (GICH_LR_STATE(*entry) & GICH_LR_STATE_ACTIVE)) { + return true; + } + } + + return false; +} + +/* Return a pointer on the LR entry matching the given vIRQ. 
+ * + * This function is used to retrieve an LR for which we know for sure that the + * corresponding vIRQ exists in the current context (i.e. its current state is + * not `invalid'): + * - Either the corresponding vIRQ has been validated with gic_virq_is_valid() + * so it is `active' or `active and pending', + * - Or it was pending and has been selected by gic_get_best_virq(). It is now + * `pending', `active' or `active and pending', depending on what the guest + * already did with this vIRQ. + * + * Having multiple LRs with the same VirtualID leads to UNPREDICTABLE + * behaviour in the GIC. We choose to return the first one that matches. + */ +static inline uint32_t *gic_get_lr_entry(GICState *s, int irq, int vcpu) +{ + int cpu = gic_get_vcpu_real_id(vcpu); + int lr_idx; + + for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) { + uint32_t *entry = &s->h_lr[lr_idx][cpu]; + + if ((GICH_LR_VIRT_ID(*entry) == irq) && + (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID)) { + return entry; + } + } + + g_assert_not_reached(); +} + +static inline bool gic_test_group(GICState *s, int irq, int cpu) +{ + if (gic_is_vcpu(cpu)) { + uint32_t *entry = gic_get_lr_entry(s, irq, cpu); + return GICH_LR_GROUP(*entry); + } else { + return GIC_DIST_TEST_GROUP(irq, 1 << cpu); + } +} + +static inline void gic_clear_pending(GICState *s, int irq, int cpu) +{ + if (gic_is_vcpu(cpu)) { + uint32_t *entry = gic_get_lr_entry(s, irq, cpu); + GICH_LR_CLEAR_PENDING(*entry); + } else { + /* Clear pending state for both level and edge triggered + * interrupts. (level triggered interrupts with an active line + * remain pending, see gic_test_pending) + */ + GIC_DIST_CLEAR_PENDING(irq, GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK + : (1 << cpu)); + } +} + +static inline void gic_set_active(GICState *s, int irq, int cpu) +{ + if (gic_is_vcpu(cpu)) { + uint32_t *entry = gic_get_lr_entry(s, irq, cpu); + GICH_LR_SET_ACTIVE(*entry); + } else { + GIC_DIST_SET_ACTIVE(irq, 1 << cpu); + } +} + +static inline void gic_clear_active(GICState *s, int irq, int cpu) +{ + if (gic_is_vcpu(cpu)) { + uint32_t *entry = gic_get_lr_entry(s, irq, cpu); + GICH_LR_CLEAR_ACTIVE(*entry); + + if (GICH_LR_HW(*entry)) { + /* Hardware interrupt. We must forward the deactivation request to + * the distributor. + */ + int phys_irq = GICH_LR_PHYS_ID(*entry); + int rcpu = gic_get_vcpu_real_id(cpu); + + if (phys_irq < GIC_NR_SGIS || phys_irq >= GIC_MAXIRQ) { + /* UNPREDICTABLE behaviour, we choose to ignore the request */ + return; + } + + /* This is equivalent to a NS write to DIR on the physical CPU + * interface. Hence group0 interrupt deactivation is ignored if + * the GIC is secure. 
+ */ + if (!s->security_extn || GIC_DIST_TEST_GROUP(phys_irq, 1 << rcpu)) { + GIC_DIST_CLEAR_ACTIVE(phys_irq, 1 << rcpu); + } + } + } else { + GIC_DIST_CLEAR_ACTIVE(irq, 1 << cpu); + } +} + +static inline int gic_get_priority(GICState *s, int irq, int cpu) +{ + if (gic_is_vcpu(cpu)) { + uint32_t *entry = gic_get_lr_entry(s, irq, cpu); + return GICH_LR_PRIORITY(*entry); + } else { + return GIC_DIST_GET_PRIORITY(irq, cpu); } } diff --git a/hw/intc/trace-events b/hw/intc/trace-events index 5fb18e65c9..7769869a13 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -92,9 +92,17 @@ aspeed_vic_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 gic_enable_irq(int irq) "irq %d enabled" gic_disable_irq(int irq) "irq %d disabled" gic_set_irq(int irq, int level, int cpumask, int target) "irq %d level %d cpumask 0x%x target 0x%x" -gic_update_bestirq(int cpu, int irq, int prio, int priority_mask, int running_priority) "cpu %d irq %d priority %d cpu priority mask %d cpu running priority %d" +gic_update_bestirq(const char *s, int cpu, int irq, int prio, int priority_mask, int running_priority) "%s %d irq %d priority %d cpu priority mask %d cpu running priority %d" gic_update_set_irq(int cpu, const char *name, int level) "cpu[%d]: %s = %d" -gic_acknowledge_irq(int cpu, int irq) "cpu %d acknowledged irq %d" +gic_acknowledge_irq(const char *s, int cpu, int irq) "%s %d acknowledged irq %d" +gic_cpu_write(const char *s, int cpu, int addr, uint32_t val) "%s %d iface write at 0x%08x 0x%08" PRIx32 +gic_cpu_read(const char *s, int cpu, int addr, uint32_t val) "%s %d iface read at 0x%08x: 0x%08" PRIx32 +gic_hyp_read(int addr, uint32_t val) "hyp read at 0x%08x: 0x%08" PRIx32 +gic_hyp_write(int addr, uint32_t val) "hyp write at 0x%08x: 0x%08" PRIx32 +gic_dist_read(int addr, unsigned int size, uint32_t val) "dist read at 0x%08x size %u: 0x%08" PRIx32 +gic_dist_write(int addr, unsigned int size, uint32_t val) "dist write at 0x%08x size %u: 0x%08" PRIx32 +gic_lr_entry(int cpu, int entry, uint32_t val) "cpu %d: new lr entry %d: 0x%08" PRIx32 +gic_update_maintenance_irq(int cpu, int val) "cpu %d: maintenance = %d" # hw/intc/arm_gicv3_cpuif.c gicv3_icc_pmr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_PMR read cpu 0x%x value 0x%" PRIx64 @@ -184,6 +192,7 @@ nvic_acknowledge_irq(int irq, int prio) "NVIC acknowledge IRQ: %d now active (pr nvic_get_pending_irq_info(int irq, bool secure) "NVIC next IRQ %d: targets_secure: %d" nvic_complete_irq(int irq, bool secure) "NVIC complete IRQ %d (secure %d)" nvic_set_irq_level(int irq, int level) "NVIC external irq %d level set to %d" +nvic_set_nmi_level(int level) "NVIC external NMI level set to %d" nvic_sysreg_read(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" nvic_sysreg_write(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs index 9350900845..22714b0851 100644 --- a/hw/misc/Makefile.objs +++ b/hw/misc/Makefile.objs @@ -36,6 +36,7 @@ obj-$(CONFIG_IMX) += imx_ccm.o obj-$(CONFIG_IMX) += imx31_ccm.o obj-$(CONFIG_IMX) += imx25_ccm.o obj-$(CONFIG_IMX) += imx6_ccm.o +obj-$(CONFIG_IMX) += imx6ul_ccm.o obj-$(CONFIG_IMX) += imx6_src.o obj-$(CONFIG_IMX) += imx7_ccm.o obj-$(CONFIG_IMX) += imx2_wdt.o @@ -70,5 +71,4 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o obj-$(CONFIG_AUX) += auxbus.o obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o aspeed_sdmc.o 
-obj-y += mmio_interface.o obj-$(CONFIG_MSF2) += msf2-sysreg.o diff --git a/hw/misc/aspeed_sdmc.c b/hw/misc/aspeed_sdmc.c index 0df008e52a..eec77f2435 100644 --- a/hw/misc/aspeed_sdmc.c +++ b/hw/misc/aspeed_sdmc.c @@ -23,6 +23,14 @@ /* Configuration Register */ #define R_CONF (0x04 / 4) +/* Control/Status Register #1 (ast2500) */ +#define R_STATUS1 (0x60 / 4) +#define PHY_BUSY_STATE BIT(0) + +#define R_ECC_TEST_CTRL (0x70 / 4) +#define ECC_TEST_FINISHED BIT(12) +#define ECC_TEST_FAIL BIT(13) + /* * Configuration register Ox4 (for Aspeed AST2400 SOC) * @@ -126,15 +134,33 @@ static void aspeed_sdmc_write(void *opaque, hwaddr addr, uint64_t data, case AST2400_A0_SILICON_REV: case AST2400_A1_SILICON_REV: data &= ~ASPEED_SDMC_READONLY_MASK; + data |= s->fixed_conf; break; case AST2500_A0_SILICON_REV: case AST2500_A1_SILICON_REV: data &= ~ASPEED_SDMC_AST2500_READONLY_MASK; + data |= s->fixed_conf; break; default: g_assert_not_reached(); } } + if (s->silicon_rev == AST2500_A0_SILICON_REV || + s->silicon_rev == AST2500_A1_SILICON_REV) { + switch (addr) { + case R_STATUS1: + /* Will never return 'busy' */ + data &= ~PHY_BUSY_STATE; + break; + case R_ECC_TEST_CTRL: + /* Always done, always happy */ + data |= ECC_TEST_FINISHED; + data &= ~ECC_TEST_FAIL; + break; + default: + break; + } + } s->regs[addr] = data; } @@ -198,25 +224,7 @@ static void aspeed_sdmc_reset(DeviceState *dev) memset(s->regs, 0, sizeof(s->regs)); /* Set ram size bit and defaults values */ - switch (s->silicon_rev) { - case AST2400_A0_SILICON_REV: - case AST2400_A1_SILICON_REV: - s->regs[R_CONF] |= - ASPEED_SDMC_VGA_COMPAT | - ASPEED_SDMC_DRAM_SIZE(s->ram_bits); - break; - - case AST2500_A0_SILICON_REV: - case AST2500_A1_SILICON_REV: - s->regs[R_CONF] |= - ASPEED_SDMC_HW_VERSION(1) | - ASPEED_SDMC_VGA_APERTURE(ASPEED_SDMC_VGA_64MB) | - ASPEED_SDMC_DRAM_SIZE(s->ram_bits); - break; - - default: - g_assert_not_reached(); - } + s->regs[R_CONF] = s->fixed_conf; } static void aspeed_sdmc_realize(DeviceState *dev, Error **errp) @@ -234,10 +242,18 @@ static void aspeed_sdmc_realize(DeviceState *dev, Error **errp) case AST2400_A0_SILICON_REV: case AST2400_A1_SILICON_REV: s->ram_bits = ast2400_rambits(s); + s->max_ram_size = 512 << 20; + s->fixed_conf = ASPEED_SDMC_VGA_COMPAT | + ASPEED_SDMC_DRAM_SIZE(s->ram_bits); break; case AST2500_A0_SILICON_REV: case AST2500_A1_SILICON_REV: s->ram_bits = ast2500_rambits(s); + s->max_ram_size = 1024 << 20; + s->fixed_conf = ASPEED_SDMC_HW_VERSION(1) | + ASPEED_SDMC_VGA_APERTURE(ASPEED_SDMC_VGA_64MB) | + ASPEED_SDMC_CACHE_INITIAL_DONE | + ASPEED_SDMC_DRAM_SIZE(s->ram_bits); break; default: g_assert_not_reached(); @@ -261,6 +277,7 @@ static const VMStateDescription vmstate_aspeed_sdmc = { static Property aspeed_sdmc_properties[] = { DEFINE_PROP_UINT32("silicon-rev", AspeedSDMCState, silicon_rev, 0), DEFINE_PROP_UINT64("ram-size", AspeedSDMCState, ram_size, 0), + DEFINE_PROP_UINT64("max-ram-size", AspeedSDMCState, max_ram_size, 0), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/misc/imx6ul_ccm.c b/hw/misc/imx6ul_ccm.c new file mode 100644 index 0000000000..90bc374271 --- /dev/null +++ b/hw/misc/imx6ul_ccm.c @@ -0,0 +1,886 @@ +/* + * IMX6UL Clock Control Module + * + * Copyright (c) 2018 Jean-Christophe Dubois <jcd@tribudubois.net> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * To get the timer frequencies right, we need to emulate at least part of + * the CCM. 
+ */ + +#include "qemu/osdep.h" +#include "hw/registerfields.h" +#include "hw/misc/imx6ul_ccm.h" +#include "qemu/log.h" + +#include "trace.h" + +static const char *imx6ul_ccm_reg_name(uint32_t reg) +{ + static char unknown[20]; + + switch (reg) { + case CCM_CCR: + return "CCR"; + case CCM_CCDR: + return "CCDR"; + case CCM_CSR: + return "CSR"; + case CCM_CCSR: + return "CCSR"; + case CCM_CACRR: + return "CACRR"; + case CCM_CBCDR: + return "CBCDR"; + case CCM_CBCMR: + return "CBCMR"; + case CCM_CSCMR1: + return "CSCMR1"; + case CCM_CSCMR2: + return "CSCMR2"; + case CCM_CSCDR1: + return "CSCDR1"; + case CCM_CS1CDR: + return "CS1CDR"; + case CCM_CS2CDR: + return "CS2CDR"; + case CCM_CDCDR: + return "CDCDR"; + case CCM_CHSCCDR: + return "CHSCCDR"; + case CCM_CSCDR2: + return "CSCDR2"; + case CCM_CSCDR3: + return "CSCDR3"; + case CCM_CDHIPR: + return "CDHIPR"; + case CCM_CTOR: + return "CTOR"; + case CCM_CLPCR: + return "CLPCR"; + case CCM_CISR: + return "CISR"; + case CCM_CIMR: + return "CIMR"; + case CCM_CCOSR: + return "CCOSR"; + case CCM_CGPR: + return "CGPR"; + case CCM_CCGR0: + return "CCGR0"; + case CCM_CCGR1: + return "CCGR1"; + case CCM_CCGR2: + return "CCGR2"; + case CCM_CCGR3: + return "CCGR3"; + case CCM_CCGR4: + return "CCGR4"; + case CCM_CCGR5: + return "CCGR5"; + case CCM_CCGR6: + return "CCGR6"; + case CCM_CMEOR: + return "CMEOR"; + default: + sprintf(unknown, "%d ?", reg); + return unknown; + } +} + +static const char *imx6ul_analog_reg_name(uint32_t reg) +{ + static char unknown[20]; + + switch (reg) { + case CCM_ANALOG_PLL_ARM: + return "PLL_ARM"; + case CCM_ANALOG_PLL_ARM_SET: + return "PLL_ARM_SET"; + case CCM_ANALOG_PLL_ARM_CLR: + return "PLL_ARM_CLR"; + case CCM_ANALOG_PLL_ARM_TOG: + return "PLL_ARM_TOG"; + case CCM_ANALOG_PLL_USB1: + return "PLL_USB1"; + case CCM_ANALOG_PLL_USB1_SET: + return "PLL_USB1_SET"; + case CCM_ANALOG_PLL_USB1_CLR: + return "PLL_USB1_CLR"; + case CCM_ANALOG_PLL_USB1_TOG: + return "PLL_USB1_TOG"; + case CCM_ANALOG_PLL_USB2: + return "PLL_USB2"; + case CCM_ANALOG_PLL_USB2_SET: + return "PLL_USB2_SET"; + case CCM_ANALOG_PLL_USB2_CLR: + return "PLL_USB2_CLR"; + case CCM_ANALOG_PLL_USB2_TOG: + return "PLL_USB2_TOG"; + case CCM_ANALOG_PLL_SYS: + return "PLL_SYS"; + case CCM_ANALOG_PLL_SYS_SET: + return "PLL_SYS_SET"; + case CCM_ANALOG_PLL_SYS_CLR: + return "PLL_SYS_CLR"; + case CCM_ANALOG_PLL_SYS_TOG: + return "PLL_SYS_TOG"; + case CCM_ANALOG_PLL_SYS_SS: + return "PLL_SYS_SS"; + case CCM_ANALOG_PLL_SYS_NUM: + return "PLL_SYS_NUM"; + case CCM_ANALOG_PLL_SYS_DENOM: + return "PLL_SYS_DENOM"; + case CCM_ANALOG_PLL_AUDIO: + return "PLL_AUDIO"; + case CCM_ANALOG_PLL_AUDIO_SET: + return "PLL_AUDIO_SET"; + case CCM_ANALOG_PLL_AUDIO_CLR: + return "PLL_AUDIO_CLR"; + case CCM_ANALOG_PLL_AUDIO_TOG: + return "PLL_AUDIO_TOG"; + case CCM_ANALOG_PLL_AUDIO_NUM: + return "PLL_AUDIO_NUM"; + case CCM_ANALOG_PLL_AUDIO_DENOM: + return "PLL_AUDIO_DENOM"; + case CCM_ANALOG_PLL_VIDEO: + return "PLL_VIDEO"; + case CCM_ANALOG_PLL_VIDEO_SET: + return "PLL_VIDEO_SET"; + case CCM_ANALOG_PLL_VIDEO_CLR: + return "PLL_VIDEO_CLR"; + case CCM_ANALOG_PLL_VIDEO_TOG: + return "PLL_VIDEO_TOG"; + case CCM_ANALOG_PLL_VIDEO_NUM: + return "PLL_VIDEO_NUM"; + case CCM_ANALOG_PLL_VIDEO_DENOM: + return "PLL_VIDEO_DENOM"; + case CCM_ANALOG_PLL_ENET: + return "PLL_ENET"; + case CCM_ANALOG_PLL_ENET_SET: + return "PLL_ENET_SET"; + case CCM_ANALOG_PLL_ENET_CLR: + return "PLL_ENET_CLR"; + case CCM_ANALOG_PLL_ENET_TOG: + return "PLL_ENET_TOG"; + case CCM_ANALOG_PFD_480: + return "PFD_480"; + case 
CCM_ANALOG_PFD_480_SET: + return "PFD_480_SET"; + case CCM_ANALOG_PFD_480_CLR: + return "PFD_480_CLR"; + case CCM_ANALOG_PFD_480_TOG: + return "PFD_480_TOG"; + case CCM_ANALOG_PFD_528: + return "PFD_528"; + case CCM_ANALOG_PFD_528_SET: + return "PFD_528_SET"; + case CCM_ANALOG_PFD_528_CLR: + return "PFD_528_CLR"; + case CCM_ANALOG_PFD_528_TOG: + return "PFD_528_TOG"; + case CCM_ANALOG_MISC0: + return "MISC0"; + case CCM_ANALOG_MISC0_SET: + return "MISC0_SET"; + case CCM_ANALOG_MISC0_CLR: + return "MISC0_CLR"; + case CCM_ANALOG_MISC0_TOG: + return "MISC0_TOG"; + case CCM_ANALOG_MISC2: + return "MISC2"; + case CCM_ANALOG_MISC2_SET: + return "MISC2_SET"; + case CCM_ANALOG_MISC2_CLR: + return "MISC2_CLR"; + case CCM_ANALOG_MISC2_TOG: + return "MISC2_TOG"; + case PMU_REG_1P1: + return "PMU_REG_1P1"; + case PMU_REG_3P0: + return "PMU_REG_3P0"; + case PMU_REG_2P5: + return "PMU_REG_2P5"; + case PMU_REG_CORE: + return "PMU_REG_CORE"; + case PMU_MISC1: + return "PMU_MISC1"; + case PMU_MISC1_SET: + return "PMU_MISC1_SET"; + case PMU_MISC1_CLR: + return "PMU_MISC1_CLR"; + case PMU_MISC1_TOG: + return "PMU_MISC1_TOG"; + case USB_ANALOG_DIGPROG: + return "USB_ANALOG_DIGPROG"; + default: + sprintf(unknown, "%d ?", reg); + return unknown; + } +} + +#define CKIH_FREQ 24000000 /* 24MHz crystal input */ + +static const VMStateDescription vmstate_imx6ul_ccm = { + .name = TYPE_IMX6UL_CCM, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(ccm, IMX6ULCCMState, CCM_MAX), + VMSTATE_UINT32_ARRAY(analog, IMX6ULCCMState, CCM_ANALOG_MAX), + VMSTATE_END_OF_LIST() + }, +}; + +static uint64_t imx6ul_analog_get_osc_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = CKIH_FREQ; + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_analog_get_pll2_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = imx6ul_analog_get_osc_clk(dev); + + if (FIELD_EX32(dev->analog[CCM_ANALOG_PLL_SYS], + ANALOG_PLL_SYS, DIV_SELECT)) { + freq *= 22; + } else { + freq *= 20; + } + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_analog_get_pll3_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = imx6ul_analog_get_osc_clk(dev) * 20; + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_analog_get_pll2_pfd0_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6ul_analog_get_pll2_clk(dev) * 18 + / FIELD_EX32(dev->analog[CCM_ANALOG_PFD_528], + ANALOG_PFD_528, PFD0_FRAC); + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_analog_get_pll2_pfd2_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6ul_analog_get_pll2_clk(dev) * 18 + / FIELD_EX32(dev->analog[CCM_ANALOG_PFD_528], + ANALOG_PFD_528, PFD2_FRAC); + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_analog_pll2_bypass_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_periph_clk2_sel_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + switch (FIELD_EX32(dev->ccm[CCM_CBCMR], CBCMR, PERIPH_CLK2_SEL)) { + case 0: + freq = imx6ul_analog_get_pll3_clk(dev); + break; + case 1: + freq = imx6ul_analog_get_osc_clk(dev); + break; + case 2: + freq = imx6ul_analog_pll2_bypass_clk(dev); + break; + case 3: + /* We should never get there as 3 is a reserved value */ + qemu_log_mask(LOG_GUEST_ERROR, + "[%s]%s: unsupported PERIPH_CLK2_SEL value 3\n", + TYPE_IMX6UL_CCM, __func__); + /* freq is set to 0 as we don't know what it 
should be */ + break; + default: + g_assert_not_reached(); + } + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_periph_clk_sel_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + switch (FIELD_EX32(dev->ccm[CCM_CBCMR], CBCMR, PRE_PERIPH_CLK_SEL)) { + case 0: + freq = imx6ul_analog_get_pll2_clk(dev); + break; + case 1: + freq = imx6ul_analog_get_pll2_pfd2_clk(dev); + break; + case 2: + freq = imx6ul_analog_get_pll2_pfd0_clk(dev); + break; + case 3: + freq = imx6ul_analog_get_pll2_pfd2_clk(dev) / 2; + break; + default: + g_assert_not_reached(); + } + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_periph_clk2_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6ul_ccm_get_periph_clk2_sel_clk(dev) + / (1 + FIELD_EX32(dev->ccm[CCM_CBCDR], CBCDR, PERIPH_CLK2_PODF)); + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_periph_sel_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + switch (FIELD_EX32(dev->ccm[CCM_CBCDR], CBCDR, PERIPH_CLK_SEL)) { + case 0: + freq = imx6ul_ccm_get_periph_clk_sel_clk(dev); + break; + case 1: + freq = imx6ul_ccm_get_periph_clk2_clk(dev); + break; + default: + g_assert_not_reached(); + } + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_ahb_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6ul_ccm_get_periph_sel_clk(dev) + / (1 + FIELD_EX32(dev->ccm[CCM_CBCDR], CBCDR, AHB_PODF)); + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_ipg_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6ul_ccm_get_ahb_clk(dev) + / (1 + FIELD_EX32(dev->ccm[CCM_CBCDR], CBCDR, IPG_PODF)); + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_per_sel_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + switch (FIELD_EX32(dev->ccm[CCM_CSCMR1], CSCMR1, PERCLK_CLK_SEL)) { + case 0: + freq = imx6ul_ccm_get_ipg_clk(dev); + break; + case 1: + freq = imx6ul_analog_get_osc_clk(dev); + break; + default: + g_assert_not_reached(); + } + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_per_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6ul_ccm_get_per_sel_clk(dev) + / (1 + FIELD_EX32(dev->ccm[CCM_CSCMR1], CSCMR1, PERCLK_PODF)); + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint32_t imx6ul_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock) +{ + uint32_t freq = 0; + IMX6ULCCMState *s = IMX6UL_CCM(dev); + + switch (clock) { + case CLK_NONE: + break; + case CLK_IPG: + freq = imx6ul_ccm_get_ipg_clk(s); + break; + case CLK_IPG_HIGH: + freq = imx6ul_ccm_get_per_clk(s); + break; + case CLK_32k: + freq = CKIL_FREQ; + break; + case CLK_HIGH: + freq = CKIH_FREQ; + break; + case CLK_HIGH_DIV: + freq = CKIH_FREQ / 8; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: unsupported clock %d\n", + TYPE_IMX6UL_CCM, __func__, clock); + break; + } + + trace_ccm_clock_freq(clock, freq); + + return freq; +} + +static void imx6ul_ccm_reset(DeviceState *dev) +{ + IMX6ULCCMState *s = IMX6UL_CCM(dev); + + trace_ccm_entry(); + + s->ccm[CCM_CCR] = 0x0401167F; + s->ccm[CCM_CCDR] = 0x00000000; + s->ccm[CCM_CSR] = 0x00000010; + s->ccm[CCM_CCSR] = 0x00000100; + s->ccm[CCM_CACRR] = 0x00000000; + s->ccm[CCM_CBCDR] = 0x00018D00; + s->ccm[CCM_CBCMR] = 0x24860324; + s->ccm[CCM_CSCMR1] = 0x04900080; + s->ccm[CCM_CSCMR2] = 0x03192F06; + s->ccm[CCM_CSCDR1] = 0x00490B00; + 
s->ccm[CCM_CS1CDR] = 0x0EC102C1; + s->ccm[CCM_CS2CDR] = 0x000336C1; + s->ccm[CCM_CDCDR] = 0x33F71F92; + s->ccm[CCM_CHSCCDR] = 0x000248A4; + s->ccm[CCM_CSCDR2] = 0x00029B48; + s->ccm[CCM_CSCDR3] = 0x00014841; + s->ccm[CCM_CDHIPR] = 0x00000000; + s->ccm[CCM_CTOR] = 0x00000000; + s->ccm[CCM_CLPCR] = 0x00000079; + s->ccm[CCM_CISR] = 0x00000000; + s->ccm[CCM_CIMR] = 0xFFFFFFFF; + s->ccm[CCM_CCOSR] = 0x000A0001; + s->ccm[CCM_CGPR] = 0x0000FE62; + s->ccm[CCM_CCGR0] = 0xFFFFFFFF; + s->ccm[CCM_CCGR1] = 0xFFFFFFFF; + s->ccm[CCM_CCGR2] = 0xFC3FFFFF; + s->ccm[CCM_CCGR3] = 0xFFFFFFFF; + s->ccm[CCM_CCGR4] = 0xFFFFFFFF; + s->ccm[CCM_CCGR5] = 0xFFFFFFFF; + s->ccm[CCM_CCGR6] = 0xFFFFFFFF; + s->ccm[CCM_CMEOR] = 0xFFFFFFFF; + + s->analog[CCM_ANALOG_PLL_ARM] = 0x00013063; + s->analog[CCM_ANALOG_PLL_USB1] = 0x00012000; + s->analog[CCM_ANALOG_PLL_USB2] = 0x00012000; + s->analog[CCM_ANALOG_PLL_SYS] = 0x00013001; + s->analog[CCM_ANALOG_PLL_SYS_SS] = 0x00000000; + s->analog[CCM_ANALOG_PLL_SYS_NUM] = 0x00000000; + s->analog[CCM_ANALOG_PLL_SYS_DENOM] = 0x00000012; + s->analog[CCM_ANALOG_PLL_AUDIO] = 0x00011006; + s->analog[CCM_ANALOG_PLL_AUDIO_NUM] = 0x05F5E100; + s->analog[CCM_ANALOG_PLL_AUDIO_DENOM] = 0x2964619C; + s->analog[CCM_ANALOG_PLL_VIDEO] = 0x0001100C; + s->analog[CCM_ANALOG_PLL_VIDEO_NUM] = 0x05F5E100; + s->analog[CCM_ANALOG_PLL_VIDEO_DENOM] = 0x10A24447; + s->analog[CCM_ANALOG_PLL_ENET] = 0x00011001; + s->analog[CCM_ANALOG_PFD_480] = 0x1311100C; + s->analog[CCM_ANALOG_PFD_528] = 0x1018101B; + + s->analog[PMU_REG_1P1] = 0x00001073; + s->analog[PMU_REG_3P0] = 0x00000F74; + s->analog[PMU_REG_2P5] = 0x00001073; + s->analog[PMU_REG_CORE] = 0x00482012; + s->analog[PMU_MISC0] = 0x04000000; + s->analog[PMU_MISC1] = 0x00000000; + s->analog[PMU_MISC2] = 0x00272727; + s->analog[PMU_LOWPWR_CTRL] = 0x00004009; + + s->analog[USB_ANALOG_USB1_VBUS_DETECT] = 0x01000004; + s->analog[USB_ANALOG_USB1_CHRG_DETECT] = 0x00000000; + s->analog[USB_ANALOG_USB1_VBUS_DETECT_STAT] = 0x00000000; + s->analog[USB_ANALOG_USB1_CHRG_DETECT_STAT] = 0x00000000; + s->analog[USB_ANALOG_USB1_MISC] = 0x00000002; + s->analog[USB_ANALOG_USB2_VBUS_DETECT] = 0x01000004; + s->analog[USB_ANALOG_USB2_CHRG_DETECT] = 0x00000000; + s->analog[USB_ANALOG_USB2_MISC] = 0x00000002; + s->analog[USB_ANALOG_DIGPROG] = 0x00640000; + + /* all PLLs need to be locked */ + s->analog[CCM_ANALOG_PLL_ARM] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_USB1] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_USB2] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_SYS] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_AUDIO] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_VIDEO] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_ENET] |= CCM_ANALOG_PLL_LOCK; + + s->analog[TEMPMON_TEMPSENSE0] = 0x00000001; + s->analog[TEMPMON_TEMPSENSE1] = 0x00000001; + s->analog[TEMPMON_TEMPSENSE2] = 0x00000000; +} + +static uint64_t imx6ul_ccm_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t value = 0; + uint32_t index = offset >> 2; + IMX6ULCCMState *s = (IMX6ULCCMState *)opaque; + + assert(index < CCM_MAX); + + value = s->ccm[index]; + + trace_ccm_read_reg(imx6ul_ccm_reg_name(index), (uint32_t)value); + + return (uint64_t)value; +} + +static void imx6ul_ccm_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + uint32_t index = offset >> 2; + IMX6ULCCMState *s = (IMX6ULCCMState *)opaque; + + assert(index < CCM_MAX); + + trace_ccm_write_reg(imx6ul_ccm_reg_name(index), (uint32_t)value); + + /* + * We will do a better implementation later. 
In particular some bits + * cannot be written to. + */ + s->ccm[index] = (uint32_t)value; +} + +static uint64_t imx6ul_analog_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t value; + uint32_t index = offset >> 2; + IMX6ULCCMState *s = (IMX6ULCCMState *)opaque; + + assert(index < CCM_ANALOG_MAX); + + switch (index) { + case CCM_ANALOG_PLL_ARM_SET: + case CCM_ANALOG_PLL_USB1_SET: + case CCM_ANALOG_PLL_USB2_SET: + case CCM_ANALOG_PLL_SYS_SET: + case CCM_ANALOG_PLL_AUDIO_SET: + case CCM_ANALOG_PLL_VIDEO_SET: + case CCM_ANALOG_PLL_ENET_SET: + case CCM_ANALOG_PFD_480_SET: + case CCM_ANALOG_PFD_528_SET: + case CCM_ANALOG_MISC0_SET: + case PMU_MISC1_SET: + case CCM_ANALOG_MISC2_SET: + case USB_ANALOG_USB1_VBUS_DETECT_SET: + case USB_ANALOG_USB1_CHRG_DETECT_SET: + case USB_ANALOG_USB1_MISC_SET: + case USB_ANALOG_USB2_VBUS_DETECT_SET: + case USB_ANALOG_USB2_CHRG_DETECT_SET: + case USB_ANALOG_USB2_MISC_SET: + case TEMPMON_TEMPSENSE0_SET: + case TEMPMON_TEMPSENSE1_SET: + case TEMPMON_TEMPSENSE2_SET: + /* + * All REG_NAME_SET register access are in fact targeting + * the REG_NAME register. + */ + value = s->analog[index - 1]; + break; + case CCM_ANALOG_PLL_ARM_CLR: + case CCM_ANALOG_PLL_USB1_CLR: + case CCM_ANALOG_PLL_USB2_CLR: + case CCM_ANALOG_PLL_SYS_CLR: + case CCM_ANALOG_PLL_AUDIO_CLR: + case CCM_ANALOG_PLL_VIDEO_CLR: + case CCM_ANALOG_PLL_ENET_CLR: + case CCM_ANALOG_PFD_480_CLR: + case CCM_ANALOG_PFD_528_CLR: + case CCM_ANALOG_MISC0_CLR: + case PMU_MISC1_CLR: + case CCM_ANALOG_MISC2_CLR: + case USB_ANALOG_USB1_VBUS_DETECT_CLR: + case USB_ANALOG_USB1_CHRG_DETECT_CLR: + case USB_ANALOG_USB1_MISC_CLR: + case USB_ANALOG_USB2_VBUS_DETECT_CLR: + case USB_ANALOG_USB2_CHRG_DETECT_CLR: + case USB_ANALOG_USB2_MISC_CLR: + case TEMPMON_TEMPSENSE0_CLR: + case TEMPMON_TEMPSENSE1_CLR: + case TEMPMON_TEMPSENSE2_CLR: + /* + * All REG_NAME_CLR register access are in fact targeting + * the REG_NAME register. + */ + value = s->analog[index - 2]; + break; + case CCM_ANALOG_PLL_ARM_TOG: + case CCM_ANALOG_PLL_USB1_TOG: + case CCM_ANALOG_PLL_USB2_TOG: + case CCM_ANALOG_PLL_SYS_TOG: + case CCM_ANALOG_PLL_AUDIO_TOG: + case CCM_ANALOG_PLL_VIDEO_TOG: + case CCM_ANALOG_PLL_ENET_TOG: + case CCM_ANALOG_PFD_480_TOG: + case CCM_ANALOG_PFD_528_TOG: + case CCM_ANALOG_MISC0_TOG: + case PMU_MISC1_TOG: + case CCM_ANALOG_MISC2_TOG: + case USB_ANALOG_USB1_VBUS_DETECT_TOG: + case USB_ANALOG_USB1_CHRG_DETECT_TOG: + case USB_ANALOG_USB1_MISC_TOG: + case USB_ANALOG_USB2_VBUS_DETECT_TOG: + case USB_ANALOG_USB2_CHRG_DETECT_TOG: + case USB_ANALOG_USB2_MISC_TOG: + case TEMPMON_TEMPSENSE0_TOG: + case TEMPMON_TEMPSENSE1_TOG: + case TEMPMON_TEMPSENSE2_TOG: + /* + * All REG_NAME_TOG register access are in fact targeting + * the REG_NAME register. 
+ */ + value = s->analog[index - 3]; + break; + default: + value = s->analog[index]; + break; + } + + trace_ccm_read_reg(imx6ul_analog_reg_name(index), (uint32_t)value); + + return (uint64_t)value; +} + +static void imx6ul_analog_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + uint32_t index = offset >> 2; + IMX6ULCCMState *s = (IMX6ULCCMState *)opaque; + + assert(index < CCM_ANALOG_MAX); + + trace_ccm_write_reg(imx6ul_analog_reg_name(index), (uint32_t)value); + + switch (index) { + case CCM_ANALOG_PLL_ARM_SET: + case CCM_ANALOG_PLL_USB1_SET: + case CCM_ANALOG_PLL_USB2_SET: + case CCM_ANALOG_PLL_SYS_SET: + case CCM_ANALOG_PLL_AUDIO_SET: + case CCM_ANALOG_PLL_VIDEO_SET: + case CCM_ANALOG_PLL_ENET_SET: + case CCM_ANALOG_PFD_480_SET: + case CCM_ANALOG_PFD_528_SET: + case CCM_ANALOG_MISC0_SET: + case PMU_MISC1_SET: + case CCM_ANALOG_MISC2_SET: + case USB_ANALOG_USB1_VBUS_DETECT_SET: + case USB_ANALOG_USB1_CHRG_DETECT_SET: + case USB_ANALOG_USB1_MISC_SET: + case USB_ANALOG_USB2_VBUS_DETECT_SET: + case USB_ANALOG_USB2_CHRG_DETECT_SET: + case USB_ANALOG_USB2_MISC_SET: + /* + * All REG_NAME_SET register access are in fact targeting + * the REG_NAME register. So we change the value of the + * REG_NAME register, setting bits passed in the value. + */ + s->analog[index - 1] |= value; + break; + case CCM_ANALOG_PLL_ARM_CLR: + case CCM_ANALOG_PLL_USB1_CLR: + case CCM_ANALOG_PLL_USB2_CLR: + case CCM_ANALOG_PLL_SYS_CLR: + case CCM_ANALOG_PLL_AUDIO_CLR: + case CCM_ANALOG_PLL_VIDEO_CLR: + case CCM_ANALOG_PLL_ENET_CLR: + case CCM_ANALOG_PFD_480_CLR: + case CCM_ANALOG_PFD_528_CLR: + case CCM_ANALOG_MISC0_CLR: + case PMU_MISC1_CLR: + case CCM_ANALOG_MISC2_CLR: + case USB_ANALOG_USB1_VBUS_DETECT_CLR: + case USB_ANALOG_USB1_CHRG_DETECT_CLR: + case USB_ANALOG_USB1_MISC_CLR: + case USB_ANALOG_USB2_VBUS_DETECT_CLR: + case USB_ANALOG_USB2_CHRG_DETECT_CLR: + case USB_ANALOG_USB2_MISC_CLR: + /* + * All REG_NAME_CLR register access are in fact targeting + * the REG_NAME register. So we change the value of the + * REG_NAME register, unsetting bits passed in the value. + */ + s->analog[index - 2] &= ~value; + break; + case CCM_ANALOG_PLL_ARM_TOG: + case CCM_ANALOG_PLL_USB1_TOG: + case CCM_ANALOG_PLL_USB2_TOG: + case CCM_ANALOG_PLL_SYS_TOG: + case CCM_ANALOG_PLL_AUDIO_TOG: + case CCM_ANALOG_PLL_VIDEO_TOG: + case CCM_ANALOG_PLL_ENET_TOG: + case CCM_ANALOG_PFD_480_TOG: + case CCM_ANALOG_PFD_528_TOG: + case CCM_ANALOG_MISC0_TOG: + case PMU_MISC1_TOG: + case CCM_ANALOG_MISC2_TOG: + case USB_ANALOG_USB1_VBUS_DETECT_TOG: + case USB_ANALOG_USB1_CHRG_DETECT_TOG: + case USB_ANALOG_USB1_MISC_TOG: + case USB_ANALOG_USB2_VBUS_DETECT_TOG: + case USB_ANALOG_USB2_CHRG_DETECT_TOG: + case USB_ANALOG_USB2_MISC_TOG: + /* + * All REG_NAME_TOG register access are in fact targeting + * the REG_NAME register. So we change the value of the + * REG_NAME register, toggling bits passed in the value. + */ + s->analog[index - 3] ^= value; + break; + default: + /* + * We will do a better implementation later. In particular some bits + * cannot be written to. + */ + s->analog[index] = value; + break; + } +} + +static const struct MemoryRegionOps imx6ul_ccm_ops = { + .read = imx6ul_ccm_read, + .write = imx6ul_ccm_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. 
+ */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static const struct MemoryRegionOps imx6ul_analog_ops = { + .read = imx6ul_analog_read, + .write = imx6ul_analog_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. + */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx6ul_ccm_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + IMX6ULCCMState *s = IMX6UL_CCM(obj); + + /* initialize a container for the all memory range */ + memory_region_init(&s->container, OBJECT(dev), TYPE_IMX6UL_CCM, 0x8000); + + /* We initialize an IO memory region for the CCM part */ + memory_region_init_io(&s->ioccm, OBJECT(dev), &imx6ul_ccm_ops, s, + TYPE_IMX6UL_CCM ".ccm", CCM_MAX * sizeof(uint32_t)); + + /* Add the CCM as a subregion at offset 0 */ + memory_region_add_subregion(&s->container, 0, &s->ioccm); + + /* We initialize an IO memory region for the ANALOG part */ + memory_region_init_io(&s->ioanalog, OBJECT(dev), &imx6ul_analog_ops, s, + TYPE_IMX6UL_CCM ".analog", + CCM_ANALOG_MAX * sizeof(uint32_t)); + + /* Add the ANALOG as a subregion at offset 0x4000 */ + memory_region_add_subregion(&s->container, 0x4000, &s->ioanalog); + + sysbus_init_mmio(sd, &s->container); +} + +static void imx6ul_ccm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IMXCCMClass *ccm = IMX_CCM_CLASS(klass); + + dc->reset = imx6ul_ccm_reset; + dc->vmsd = &vmstate_imx6ul_ccm; + dc->desc = "i.MX6UL Clock Control Module"; + + ccm->get_clock_frequency = imx6ul_ccm_get_clock_frequency; +} + +static const TypeInfo imx6ul_ccm_info = { + .name = TYPE_IMX6UL_CCM, + .parent = TYPE_IMX_CCM, + .instance_size = sizeof(IMX6ULCCMState), + .instance_init = imx6ul_ccm_init, + .class_init = imx6ul_ccm_class_init, +}; + +static void imx6ul_ccm_register_types(void) +{ + type_register_static(&imx6ul_ccm_info); +} + +type_init(imx6ul_ccm_register_types) diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c index 9651ed9744..c4f7a2f39b 100644 --- a/hw/misc/macio/cuda.c +++ b/hw/misc/macio/cuda.c @@ -554,9 +554,8 @@ static void cuda_init(Object *obj) CUDAState *s = CUDA(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - object_initialize(&s->mos6522_cuda, sizeof(s->mos6522_cuda), - TYPE_MOS6522_CUDA); - qdev_set_parent_bus(DEVICE(&s->mos6522_cuda), sysbus_get_default()); + sysbus_init_child_obj(obj, "mos6522-cuda", &s->mos6522_cuda, + sizeof(s->mos6522_cuda), TYPE_MOS6522_CUDA); memory_region_init_io(&s->mem, obj, &mos6522_cuda_ops, s, "cuda", 0x2000); sysbus_init_mmio(sbd, &s->mem); diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c index d135e3bc2b..52aa3775f4 100644 --- a/hw/misc/macio/macio.c +++ b/hw/misc/macio/macio.c @@ -209,14 +209,11 @@ static void macio_oldworld_realize(PCIDevice *d, Error **errp) static void macio_init_ide(MacIOState *s, MACIOIDEState *ide, size_t ide_size, int index) { - gchar *name; + gchar *name = g_strdup_printf("ide[%i]", index); - object_initialize(ide, ide_size, TYPE_MACIO_IDE); - qdev_set_parent_bus(DEVICE(ide), sysbus_get_default()); + sysbus_init_child_obj(OBJECT(s), name, ide, ide_size, TYPE_MACIO_IDE); memory_region_add_subregion(&s->bar, 0x1f000 + ((index + 1) * 0x1000), &ide->mem); - name = 
g_strdup_printf("ide[%i]", index); - object_property_add_child(OBJECT(s), name, OBJECT(ide), NULL); g_free(name); } @@ -232,9 +229,7 @@ static void macio_oldworld_init(Object *obj) qdev_prop_allow_set_link_before_realize, 0, NULL); - object_initialize(&s->cuda, sizeof(s->cuda), TYPE_CUDA); - qdev_set_parent_bus(DEVICE(&s->cuda), sysbus_get_default()); - object_property_add_child(obj, "cuda", OBJECT(&s->cuda), NULL); + sysbus_init_child_obj(obj, "cuda", &s->cuda, sizeof(s->cuda), TYPE_CUDA); object_initialize(&os->nvram, sizeof(os->nvram), TYPE_MACIO_NVRAM); dev = DEVICE(&os->nvram); @@ -390,8 +385,8 @@ static void macio_newworld_init(Object *obj) qdev_prop_allow_set_link_before_realize, 0, NULL); - object_initialize(&ns->gpio, sizeof(ns->gpio), TYPE_MACIO_GPIO); - qdev_set_parent_bus(DEVICE(&ns->gpio), sysbus_get_default()); + sysbus_init_child_obj(obj, "gpio", &ns->gpio, sizeof(ns->gpio), + TYPE_MACIO_GPIO); for (i = 0; i < 2; i++) { macio_init_ide(s, &ns->ide[i], sizeof(ns->ide[i]), i); @@ -404,13 +399,10 @@ static void macio_instance_init(Object *obj) memory_region_init(&s->bar, obj, "macio", 0x80000); - object_initialize(&s->dbdma, sizeof(s->dbdma), TYPE_MAC_DBDMA); - qdev_set_parent_bus(DEVICE(&s->dbdma), sysbus_get_default()); - object_property_add_child(obj, "dbdma", OBJECT(&s->dbdma), NULL); + sysbus_init_child_obj(obj, "dbdma", &s->dbdma, sizeof(s->dbdma), + TYPE_MAC_DBDMA); - object_initialize(&s->escc, sizeof(s->escc), TYPE_ESCC); - qdev_set_parent_bus(DEVICE(&s->escc), sysbus_get_default()); - object_property_add_child(obj, "escc", OBJECT(&s->escc), NULL); + sysbus_init_child_obj(obj, "escc", &s->escc, sizeof(s->escc), TYPE_ESCC); } static const VMStateDescription vmstate_macio_oldworld = { diff --git a/hw/misc/macio/pmu.c b/hw/misc/macio/pmu.c index e246b0fd41..d25344f888 100644 --- a/hw/misc/macio/pmu.c +++ b/hw/misc/macio/pmu.c @@ -770,9 +770,8 @@ static void pmu_init(Object *obj) qdev_prop_allow_set_link_before_realize, 0, NULL); - object_initialize(&s->mos6522_pmu, sizeof(s->mos6522_pmu), - TYPE_MOS6522_PMU); - qdev_set_parent_bus(DEVICE(&s->mos6522_pmu), sysbus_get_default()); + sysbus_init_child_obj(obj, "mos6522-pmu", &s->mos6522_pmu, + sizeof(s->mos6522_pmu), TYPE_MOS6522_PMU); memory_region_init_io(&s->mem, obj, &mos6522_pmu_ops, s, "via-pmu", 0x2000); diff --git a/hw/misc/mmio_interface.c b/hw/misc/mmio_interface.c deleted file mode 100644 index 3b0e2039a3..0000000000 --- a/hw/misc/mmio_interface.c +++ /dev/null @@ -1,135 +0,0 @@ -/* - * mmio_interface.c - * - * Copyright (C) 2017 : GreenSocs - * http://www.greensocs.com/ , email: info@greensocs.com - * - * Developed by : - * Frederic Konrad <fred.konrad@greensocs.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option)any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see <http://www.gnu.org/licenses/>. 
- * - */ - -#include "qemu/osdep.h" -#include "qemu/log.h" -#include "trace.h" -#include "hw/qdev-properties.h" -#include "hw/misc/mmio_interface.h" -#include "qapi/error.h" - -#ifndef DEBUG_MMIO_INTERFACE -#define DEBUG_MMIO_INTERFACE 0 -#endif - -static uint64_t mmio_interface_counter; - -#define DPRINTF(fmt, ...) do { \ - if (DEBUG_MMIO_INTERFACE) { \ - qemu_log("mmio_interface: 0x%" PRIX64 ": " fmt, s->id, ## __VA_ARGS__);\ - } \ -} while (0) - -static void mmio_interface_init(Object *obj) -{ - MMIOInterface *s = MMIO_INTERFACE(obj); - - if (DEBUG_MMIO_INTERFACE) { - s->id = mmio_interface_counter++; - } - - DPRINTF("interface created\n"); - s->host_ptr = 0; - s->subregion = 0; -} - -static void mmio_interface_realize(DeviceState *dev, Error **errp) -{ - MMIOInterface *s = MMIO_INTERFACE(dev); - - DPRINTF("realize from 0x%" PRIX64 " to 0x%" PRIX64 " map host pointer" - " %p\n", s->start, s->end, s->host_ptr); - - if (!s->host_ptr) { - error_setg(errp, "host_ptr property must be set"); - return; - } - - if (!s->subregion) { - error_setg(errp, "subregion property must be set"); - return; - } - - memory_region_init_ram_ptr(&s->ram_mem, OBJECT(s), "ram", - s->end - s->start + 1, s->host_ptr); - memory_region_set_readonly(&s->ram_mem, s->ro); - memory_region_add_subregion(s->subregion, s->start, &s->ram_mem); -} - -static void mmio_interface_unrealize(DeviceState *dev, Error **errp) -{ - MMIOInterface *s = MMIO_INTERFACE(dev); - - DPRINTF("unrealize from 0x%" PRIX64 " to 0x%" PRIX64 " map host pointer" - " %p\n", s->start, s->end, s->host_ptr); - memory_region_del_subregion(s->subregion, &s->ram_mem); -} - -static void mmio_interface_finalize(Object *obj) -{ - MMIOInterface *s = MMIO_INTERFACE(obj); - - DPRINTF("finalize from 0x%" PRIX64 " to 0x%" PRIX64 " map host pointer" - " %p\n", s->start, s->end, s->host_ptr); - object_unparent(OBJECT(&s->ram_mem)); -} - -static Property mmio_interface_properties[] = { - DEFINE_PROP_UINT64("start", MMIOInterface, start, 0), - DEFINE_PROP_UINT64("end", MMIOInterface, end, 0), - DEFINE_PROP_PTR("host_ptr", MMIOInterface, host_ptr), - DEFINE_PROP_BOOL("ro", MMIOInterface, ro, false), - DEFINE_PROP_MEMORY_REGION("subregion", MMIOInterface, subregion), - DEFINE_PROP_END_OF_LIST(), -}; - -static void mmio_interface_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - dc->realize = mmio_interface_realize; - dc->unrealize = mmio_interface_unrealize; - dc->props = mmio_interface_properties; - /* Reason: pointer property "host_ptr", and this device - * is an implementation detail of the memory subsystem, - * not intended to be created directly by the user. 
- */ - dc->user_creatable = false; -} - -static const TypeInfo mmio_interface_info = { - .name = TYPE_MMIO_INTERFACE, - .parent = TYPE_DEVICE, - .instance_size = sizeof(MMIOInterface), - .instance_init = mmio_interface_init, - .instance_finalize = mmio_interface_finalize, - .class_init = mmio_interface_class_init, -}; - -static void mmio_interface_register_types(void) -{ - type_register_static(&mmio_interface_info); -} - -type_init(mmio_interface_register_types) diff --git a/hw/misc/trace-events b/hw/misc/trace-events index c956e1419b..1341508b33 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -109,3 +109,10 @@ iotkit_secctl_s_write(uint32_t offset, uint64_t data, unsigned size) "IoTKit Sec iotkit_secctl_ns_read(uint32_t offset, uint64_t data, unsigned size) "IoTKit SecCtl NS regs read: offset 0x%x data 0x%" PRIx64 " size %u" iotkit_secctl_ns_write(uint32_t offset, uint64_t data, unsigned size) "IoTKit SecCtl NS regs write: offset 0x%x data 0x%" PRIx64 " size %u" iotkit_secctl_reset(void) "IoTKit SecCtl: reset" + +# hw/misc/imx6ul_ccm.c +ccm_entry(void) "\n" +ccm_freq(uint32_t freq) "freq = %d\n" +ccm_clock_freq(uint32_t clock, uint32_t freq) "(Clock = %d) = %d\n" +ccm_read_reg(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32 "\n" +ccm_write_reg(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32 "\n" diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index b23e7f64a8..d79a568f54 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -861,7 +861,7 @@ static void fw_cfg_machine_reset(void *opaque) void *ptr; size_t len; FWCfgState *s = opaque; - char *bootindex = get_boot_devices_list(&len, false); + char *bootindex = get_boot_devices_list(&len); ptr = fw_cfg_modify_file(s, "bootorder", (uint8_t *)bootindex, len); g_free(ptr); diff --git a/hw/ppc/ppc440_pcix.c b/hw/ppc/ppc440_pcix.c index d8af04b70f..64ed07afa6 100644 --- a/hw/ppc/ppc440_pcix.c +++ b/hw/ppc/ppc440_pcix.c @@ -57,7 +57,7 @@ typedef struct PPC440PCIXState { struct PLBOutMap pom[PPC440_PCIX_NR_POMS]; struct PLBInMap pim[PPC440_PCIX_NR_PIMS]; uint32_t sts; - qemu_irq irq[PCI_NUM_PINS]; + qemu_irq irq; AddressSpace bm_as; MemoryRegion bm; @@ -418,21 +418,20 @@ static void ppc440_pcix_reset(DeviceState *dev) * This may need further refactoring for other boards. 
*/ static int ppc440_pcix_map_irq(PCIDevice *pci_dev, int irq_num) { - int slot = pci_dev->devfn >> 3; - trace_ppc440_pcix_map_irq(pci_dev->devfn, irq_num, slot); - return slot - 1; + trace_ppc440_pcix_map_irq(pci_dev->devfn, irq_num, 0); + return 0; } static void ppc440_pcix_set_irq(void *opaque, int irq_num, int level) { - qemu_irq *pci_irqs = opaque; + qemu_irq *pci_irq = opaque; trace_ppc440_pcix_set_irq(irq_num); if (irq_num < 0) { error_report("%s: PCI irq %d", __func__, irq_num); return; } - qemu_set_irq(pci_irqs[irq_num], level); + qemu_set_irq(*pci_irq, level); } static AddressSpace *ppc440_pcix_set_iommu(PCIBus *b, void *opaque, int devfn) @@ -471,19 +470,15 @@ static int ppc440_pcix_initfn(SysBusDevice *dev) { PPC440PCIXState *s; PCIHostState *h; - int i; h = PCI_HOST_BRIDGE(dev); s = PPC440_PCIX_HOST_BRIDGE(dev); - for (i = 0; i < ARRAY_SIZE(s->irq); i++) { - sysbus_init_irq(dev, &s->irq[i]); - } - + sysbus_init_irq(dev, &s->irq); memory_region_init(&s->busmem, OBJECT(dev), "pci bus memory", UINT64_MAX); h->bus = pci_register_root_bus(DEVICE(dev), NULL, ppc440_pcix_set_irq, - ppc440_pcix_map_irq, s->irq, &s->busmem, - get_system_io(), PCI_DEVFN(0, 0), 4, TYPE_PCI_BUS); + ppc440_pcix_map_irq, &s->irq, &s->busmem, + get_system_io(), PCI_DEVFN(0, 0), 1, TYPE_PCI_BUS); s->dev = pci_create_simple(h->bus, PCI_DEVFN(0, 0), "ppc4xx-host-bridge"); diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c index 0999efcc1e..9c77183006 100644 --- a/hw/ppc/sam460ex.c +++ b/hw/ppc/sam460ex.c @@ -515,10 +515,8 @@ static void sam460ex_init(MachineState *machine) /* PCI bus */ ppc460ex_pcie_init(env); - /* FIXME: is this correct? */ - dev = sysbus_create_varargs("ppc440-pcix-host", 0xc0ec00000, - uic[1][0], uic[1][20], uic[1][21], uic[1][22], - NULL); + /* All PCI irqs are connected to the same UIC pin (cf. UBoot source) */ + dev = sysbus_create_simple("ppc440-pcix-host", 0xc0ec00000, uic[1][0]); pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci.0"); if (!pci_bus) { error_report("couldn't create PCI controller!"); diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 421b2dd09b..e5d825374e 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1160,7 +1160,7 @@ static void spapr_dt_chosen(sPAPRMachineState *spapr, void *fdt) const char *boot_device = machine->boot_order; char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus); size_t cb = 0; - char *bootlist = get_boot_devices_list(&cb, true); + char *bootlist = get_boot_devices_list(&cb); _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen")); @@ -3949,6 +3949,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc); mc->desc = "pSeries Logical Partition (PAPR compliant)"; + mc->ignore_boot_device_suffixes = true; /* * We set up the default / latest behaviour here. 
The class_init diff --git a/hw/rdma/Makefile.objs b/hw/rdma/Makefile.objs index 3504c39d21..bd36cbf51c 100644 --- a/hw/rdma/Makefile.objs +++ b/hw/rdma/Makefile.objs @@ -1,4 +1,4 @@ -ifeq ($(CONFIG_RDMA),y) +ifeq ($(CONFIG_PVRDMA),y) obj-$(CONFIG_PCI) += rdma_utils.o rdma_backend.o rdma_rm.o obj-$(CONFIG_PCI) += vmw/pvrdma_dev_ring.o vmw/pvrdma_cmd.o \ vmw/pvrdma_qp_ops.o vmw/pvrdma_main.o diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c index e9ced6f9ef..d7a4bbd91f 100644 --- a/hw/rdma/rdma_backend.c +++ b/hw/rdma/rdma_backend.c @@ -35,6 +35,7 @@ #define VENDOR_ERR_MR_SMALL 0x208 #define THR_NAME_LEN 16 +#define THR_POLL_TO 5000 typedef struct BackendCtx { uint64_t req_id; @@ -91,35 +92,82 @@ static void *comp_handler_thread(void *arg) int rc; struct ibv_cq *ev_cq; void *ev_ctx; + int flags; + GPollFD pfds[1]; + + /* Change to non-blocking mode */ + flags = fcntl(backend_dev->channel->fd, F_GETFL); + rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK); + if (rc < 0) { + pr_dbg("Fail to change to non-blocking mode\n"); + return NULL; + } pr_dbg("Starting\n"); + pfds[0].fd = backend_dev->channel->fd; + pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR; + + backend_dev->comp_thread.is_running = true; + while (backend_dev->comp_thread.run) { - pr_dbg("Waiting for completion on channel %p\n", backend_dev->channel); - rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx); - pr_dbg("ibv_get_cq_event=%d\n", rc); - if (unlikely(rc)) { - pr_dbg("---> ibv_get_cq_event (%d)\n", rc); - continue; - } + do { + rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS); + } while (!rc && backend_dev->comp_thread.run); + + if (backend_dev->comp_thread.run) { + pr_dbg("Waiting for completion on channel %p\n", backend_dev->channel); + rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx); + pr_dbg("ibv_get_cq_event=%d\n", rc); + if (unlikely(rc)) { + pr_dbg("---> ibv_get_cq_event (%d)\n", rc); + continue; + } - rc = ibv_req_notify_cq(ev_cq, 0); - if (unlikely(rc)) { - pr_dbg("Error %d from ibv_req_notify_cq\n", rc); - } + rc = ibv_req_notify_cq(ev_cq, 0); + if (unlikely(rc)) { + pr_dbg("Error %d from ibv_req_notify_cq\n", rc); + } - poll_cq(backend_dev->rdma_dev_res, ev_cq); + poll_cq(backend_dev->rdma_dev_res, ev_cq); - ibv_ack_cq_events(ev_cq, 1); + ibv_ack_cq_events(ev_cq, 1); + } } pr_dbg("Going down\n"); /* TODO: Post cqe for all remaining buffs that were posted */ + backend_dev->comp_thread.is_running = false; + + qemu_thread_exit(0); + return NULL; } +static void stop_backend_thread(RdmaBackendThread *thread) +{ + thread->run = false; + while (thread->is_running) { + pr_dbg("Waiting for thread to complete\n"); + sleep(THR_POLL_TO / SCALE_US / 2); + } +} + +static void start_comp_thread(RdmaBackendDev *backend_dev) +{ + char thread_name[THR_NAME_LEN] = {0}; + + stop_backend_thread(&backend_dev->comp_thread); + + snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s", + ibv_get_device_name(backend_dev->ib_dev)); + backend_dev->comp_thread.run = true; + qemu_thread_create(&backend_dev->comp_thread.thread, thread_name, + comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED); +} + void rdma_backend_register_comp_handler(void (*handler)(int status, unsigned int vendor_err, void *ctx)) { @@ -223,8 +271,7 @@ static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res, return VENDOR_ERR_INVLKEY | ssge[ssge_idx].lkey; } - dsge->addr = (uintptr_t)mr->user_mr.host_virt + ssge[ssge_idx].addr - - mr->user_mr.guest_start; + dsge->addr = (uintptr_t)mr->virt + 
ssge[ssge_idx].addr - mr->start; dsge->length = ssge[ssge_idx].length; dsge->lkey = rdma_backend_mr_lkey(&mr->backend_mr); @@ -697,7 +744,7 @@ static int init_device_caps(RdmaBackendDev *backend_dev, return 0; } -int rdma_backend_init(RdmaBackendDev *backend_dev, +int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev, RdmaDeviceResources *rdma_dev_res, const char *backend_device_name, uint8_t port_num, uint8_t backend_gid_idx, struct ibv_device_attr *dev_attr, @@ -706,10 +753,13 @@ int rdma_backend_init(RdmaBackendDev *backend_dev, int i; int ret = 0; int num_ibv_devices; - char thread_name[THR_NAME_LEN] = {0}; struct ibv_device **dev_list; struct ibv_port_attr port_attr; + memset(backend_dev, 0, sizeof(*backend_dev)); + + backend_dev->dev = pdev; + backend_dev->backend_gid_idx = backend_gid_idx; backend_dev->port_num = port_num; backend_dev->rdma_dev_res = rdma_dev_res; @@ -800,11 +850,8 @@ int rdma_backend_init(RdmaBackendDev *backend_dev, pr_dbg("interface_id=0x%" PRIx64 "\n", be64_to_cpu(backend_dev->gid.global.interface_id)); - snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s", - ibv_get_device_name(backend_dev->ib_dev)); - backend_dev->comp_thread.run = true; - qemu_thread_create(&backend_dev->comp_thread.thread, thread_name, - comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED); + backend_dev->comp_thread.run = false; + backend_dev->comp_thread.is_running = false; ah_cache_init(); @@ -823,8 +870,22 @@ out: return ret; } + +void rdma_backend_start(RdmaBackendDev *backend_dev) +{ + pr_dbg("Starting rdma_backend\n"); + start_comp_thread(backend_dev); +} + +void rdma_backend_stop(RdmaBackendDev *backend_dev) +{ + pr_dbg("Stopping rdma_backend\n"); + stop_backend_thread(&backend_dev->comp_thread); +} + void rdma_backend_fini(RdmaBackendDev *backend_dev) { + rdma_backend_stop(backend_dev); g_hash_table_destroy(ah_hash); ibv_destroy_comp_channel(backend_dev->channel); ibv_close_device(backend_dev->context); diff --git a/hw/rdma/rdma_backend.h b/hw/rdma/rdma_backend.h index 3cd636dd88..86e8fe8ab6 100644 --- a/hw/rdma/rdma_backend.h +++ b/hw/rdma/rdma_backend.h @@ -46,12 +46,14 @@ static inline uint32_t rdma_backend_mr_rkey(const RdmaBackendMR *mr) return mr->ibmr ? 
mr->ibmr->rkey : 0; } -int rdma_backend_init(RdmaBackendDev *backend_dev, +int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev, RdmaDeviceResources *rdma_dev_res, const char *backend_device_name, uint8_t port_num, uint8_t backend_gid_idx, struct ibv_device_attr *dev_attr, Error **errp); void rdma_backend_fini(RdmaBackendDev *backend_dev); +void rdma_backend_start(RdmaBackendDev *backend_dev); +void rdma_backend_stop(RdmaBackendDev *backend_dev); void rdma_backend_register_comp_handler(void (*handler)(int status, unsigned int vendor_err, void *ctx)); void rdma_backend_unregister_comp_handler(void); diff --git a/hw/rdma/rdma_backend_defs.h b/hw/rdma/rdma_backend_defs.h index ff5cfc26eb..7404f64002 100644 --- a/hw/rdma/rdma_backend_defs.h +++ b/hw/rdma/rdma_backend_defs.h @@ -24,7 +24,8 @@ typedef struct RdmaDeviceResources RdmaDeviceResources; typedef struct RdmaBackendThread { QemuThread thread; QemuMutex mutex; - bool run; + bool run; /* Set by thread manager to let thread know it should exit */ + bool is_running; /* Set by the thread to report its status */ } RdmaBackendThread; typedef struct RdmaBackendDev { diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c index 415da15efe..8d59a42cd1 100644 --- a/hw/rdma/rdma_rm.c +++ b/hw/rdma/rdma_rm.c @@ -144,8 +144,6 @@ int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle, RdmaRmMR *mr; int ret = 0; RdmaRmPD *pd; - void *addr; - size_t length; pd = rdma_rm_get_pd(dev_res, pd_handle); if (!pd) { @@ -158,40 +156,30 @@ int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle, pr_dbg("Failed to allocate obj in table\n"); return -ENOMEM; } - - if (!host_virt) { - /* TODO: This is my guess but not so sure that this needs to be - * done */ - length = TARGET_PAGE_SIZE; - addr = g_malloc(length); - } else { - mr->user_mr.host_virt = host_virt; - pr_dbg("host_virt=0x%p\n", mr->user_mr.host_virt); - mr->user_mr.length = guest_length; - pr_dbg("length=%zu\n", guest_length); - mr->user_mr.guest_start = guest_start; - pr_dbg("guest_start=0x%" PRIx64 "\n", mr->user_mr.guest_start); - - length = mr->user_mr.length; - addr = mr->user_mr.host_virt; + pr_dbg("mr_handle=%d\n", *mr_handle); + + pr_dbg("host_virt=0x%p\n", host_virt); + pr_dbg("guest_start=0x%" PRIx64 "\n", guest_start); + pr_dbg("length=%zu\n", guest_length); + + if (host_virt) { + mr->virt = host_virt; + mr->start = guest_start; + mr->length = guest_length; + mr->virt += (mr->start & (TARGET_PAGE_SIZE - 1)); + + ret = rdma_backend_create_mr(&mr->backend_mr, &pd->backend_pd, mr->virt, + mr->length, access_flags); + if (ret) { + pr_dbg("Fail in rdma_backend_create_mr, err=%d\n", ret); + ret = -EIO; + goto out_dealloc_mr; + } } - ret = rdma_backend_create_mr(&mr->backend_mr, &pd->backend_pd, addr, length, - access_flags); - if (ret) { - pr_dbg("Fail in rdma_backend_create_mr, err=%d\n", ret); - ret = -EIO; - goto out_dealloc_mr; - } - - if (!host_virt) { - *lkey = mr->lkey = rdma_backend_mr_lkey(&mr->backend_mr); - *rkey = mr->rkey = rdma_backend_mr_rkey(&mr->backend_mr); - } else { - /* We keep mr_handle in lkey so send and recv get get mr ptr */ - *lkey = *mr_handle; - *rkey = -1; - } + /* We keep mr_handle in lkey so send and recv get get mr ptr */ + *lkey = *mr_handle; + *rkey = -1; mr->pd_handle = pd_handle; @@ -214,7 +202,11 @@ void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle) if (mr) { rdma_backend_destroy_mr(&mr->backend_mr); - munmap(mr->user_mr.host_virt, mr->user_mr.length); + pr_dbg("start=0x%" PRIx64 "\n", mr->start); + if 
(mr->start) { + mr->virt -= (mr->start & (TARGET_PAGE_SIZE - 1)); + munmap(mr->virt, mr->length); + } res_tbl_dealloc(&dev_res->mr_tbl, mr_handle); } } @@ -399,7 +391,7 @@ int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, RdmaRmQP *qp; int ret; - pr_dbg("qpn=%d\n", qp_handle); + pr_dbg("qpn=0x%x\n", qp_handle); qp = rdma_rm_get_qp(dev_res, qp_handle); if (!qp) { @@ -457,7 +449,7 @@ int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, { RdmaRmQP *qp; - pr_dbg("qpn=%d\n", qp_handle); + pr_dbg("qpn=0x%x\n", qp_handle); qp = rdma_rm_get_qp(dev_res, qp_handle); if (!qp) { @@ -553,8 +545,9 @@ void rdma_rm_fini(RdmaDeviceResources *dev_res) res_tbl_free(&dev_res->uc_tbl); res_tbl_free(&dev_res->cqe_ctx_tbl); res_tbl_free(&dev_res->qp_tbl); - res_tbl_free(&dev_res->cq_tbl); res_tbl_free(&dev_res->mr_tbl); + res_tbl_free(&dev_res->cq_tbl); res_tbl_free(&dev_res->pd_tbl); + g_hash_table_destroy(dev_res->qp_hash); } diff --git a/hw/rdma/rdma_rm_defs.h b/hw/rdma/rdma_rm_defs.h index 226011176d..7228151239 100644 --- a/hw/rdma/rdma_rm_defs.h +++ b/hw/rdma/rdma_rm_defs.h @@ -55,16 +55,12 @@ typedef struct RdmaRmCQ { bool notify; } RdmaRmCQ; -typedef struct RdmaRmUserMR { - void *host_virt; - uint64_t guest_start; - size_t length; -} RdmaRmUserMR; - /* MR (DMA region) */ typedef struct RdmaRmMR { RdmaBackendMR backend_mr; - RdmaRmUserMR user_mr; + void *virt; + uint64_t start; + size_t length; uint32_t pd_handle; uint32_t lkey; uint32_t rkey; diff --git a/hw/rdma/rdma_utils.c b/hw/rdma/rdma_utils.c index d713f635f1..dc23f158f3 100644 --- a/hw/rdma/rdma_utils.c +++ b/hw/rdma/rdma_utils.c @@ -15,6 +15,10 @@ #include "rdma_utils.h" +#ifdef PVRDMA_DEBUG +unsigned long pr_dbg_cnt; +#endif + void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen) { void *p; diff --git a/hw/rdma/rdma_utils.h b/hw/rdma/rdma_utils.h index 3dc07891bc..04c7c2ef5b 100644 --- a/hw/rdma/rdma_utils.h +++ b/hw/rdma/rdma_utils.h @@ -22,18 +22,26 @@ #include "sysemu/dma.h" #define pr_info(fmt, ...) \ - fprintf(stdout, "%s: %-20s (%3d): " fmt, "pvrdma", __func__, __LINE__,\ + fprintf(stdout, "%s: %-20s (%3d): " fmt, "rdma", __func__, __LINE__,\ ## __VA_ARGS__) #define pr_err(fmt, ...) \ - fprintf(stderr, "%s: Error at %-20s (%3d): " fmt, "pvrdma", __func__, \ + fprintf(stderr, "%s: Error at %-20s (%3d): " fmt, "rdma", __func__, \ __LINE__, ## __VA_ARGS__) #ifdef PVRDMA_DEBUG +extern unsigned long pr_dbg_cnt; + +#define init_pr_dbg(void) \ +{ \ + pr_dbg_cnt = 0; \ +} + #define pr_dbg(fmt, ...) \ - fprintf(stdout, "%s: %-20s (%3d): " fmt, "pvrdma", __func__, __LINE__,\ - ## __VA_ARGS__) + fprintf(stdout, "%lx %ld: %-20s (%3d): " fmt, pthread_self(), pr_dbg_cnt++, \ + __func__, __LINE__, ## __VA_ARGS__) #else +#define init_pr_dbg(void) #define pr_dbg(fmt, ...) 
#endif diff --git a/hw/rdma/vmw/pvrdma.h b/hw/rdma/vmw/pvrdma.h index 81e0e0e99c..e2d9f93cdf 100644 --- a/hw/rdma/vmw/pvrdma.h +++ b/hw/rdma/vmw/pvrdma.h @@ -50,6 +50,9 @@ #define PVRDMA_HW_VERSION 17 #define PVRDMA_FW_VERSION 14 +/* Some defaults */ +#define PVRDMA_PKEY 0x7FFF + typedef struct DSRInfo { dma_addr_t dma; struct pvrdma_device_shared_region *dsr; diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c index 14255d609f..4faeb21631 100644 --- a/hw/rdma/vmw/pvrdma_cmd.c +++ b/hw/rdma/vmw/pvrdma_cmd.c @@ -16,7 +16,6 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "cpu.h" -#include <linux/types.h> #include "hw/hw.h" #include "hw/pci/pci.h" #include "hw/pci/pci_ids.h" @@ -59,6 +58,7 @@ static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma, } host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE); + pr_dbg("mremap %p -> %p\n", curr_page, host_virt); if (host_virt == MAP_FAILED) { host_virt = NULL; error_report("PVRDMA: Failed to remap memory for host_virt"); @@ -166,7 +166,7 @@ static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req, resp->hdr.ack = PVRDMA_CMD_QUERY_PKEY_RESP; resp->hdr.err = 0; - resp->pkey = 0x7FFF; + resp->pkey = PVRDMA_PKEY; pr_dbg("pkey=0x%x\n", resp->pkey); return 0; @@ -524,6 +524,7 @@ static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, struct ibv_qp_init_attr init_attr; pr_dbg("qp_handle=%d\n", cmd->qp_handle); + pr_dbg("attr_mask=0x%x\n", cmd->attr_mask); memset(rsp, 0, sizeof(*rsp)); rsp->hdr.response = cmd->hdr.response; @@ -531,8 +532,8 @@ static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, rsp->hdr.err = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle, - (struct ibv_qp_attr *)&resp->attrs, -1, - &init_attr); + (struct ibv_qp_attr *)&resp->attrs, + cmd->attr_mask, &init_attr); pr_dbg("ret=%d\n", rsp->hdr.err); return rsp->hdr.err; diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c index 3ed7409763..ca5fa8d981 100644 --- a/hw/rdma/vmw/pvrdma_main.c +++ b/hw/rdma/vmw/pvrdma_main.c @@ -286,8 +286,78 @@ static void init_ports(PVRDMADev *dev, Error **errp) } } +static void uninit_msix(PCIDevice *pdev, int used_vectors) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + int i; + + for (i = 0; i < used_vectors; i++) { + msix_vector_unuse(pdev, i); + } + + msix_uninit(pdev, &dev->msix, &dev->msix); +} + +static int init_msix(PCIDevice *pdev, Error **errp) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + int i; + int rc; + + rc = msix_init(pdev, RDMA_MAX_INTRS, &dev->msix, RDMA_MSIX_BAR_IDX, + RDMA_MSIX_TABLE, &dev->msix, RDMA_MSIX_BAR_IDX, + RDMA_MSIX_PBA, 0, NULL); + + if (rc < 0) { + error_setg(errp, "Failed to initialize MSI-X"); + return rc; + } + + for (i = 0; i < RDMA_MAX_INTRS; i++) { + rc = msix_vector_use(PCI_DEVICE(dev), i); + if (rc < 0) { + error_setg(errp, "Fail mark MSI-X vector %d", i); + uninit_msix(pdev, i); + return rc; + } + } + + return 0; +} + +static void pvrdma_fini(PCIDevice *pdev) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + + pr_dbg("Closing device %s %x.%x\n", pdev->name, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + pvrdma_qp_ops_fini(); + + rdma_rm_fini(&dev->rdma_dev_res); + + rdma_backend_fini(&dev->backend_dev); + + free_dsr(dev); + + if (msix_enabled(pdev)) { + uninit_msix(pdev, RDMA_MAX_INTRS); + } +} + +static void pvrdma_stop(PVRDMADev *dev) +{ + rdma_backend_stop(&dev->backend_dev); +} + +static void pvrdma_start(PVRDMADev *dev) +{ + rdma_backend_start(&dev->backend_dev); +} + static void activate_device(PVRDMADev *dev) { + 
pvrdma_start(dev); set_reg_val(dev, PVRDMA_REG_ERR, 0); pr_dbg("Device activated\n"); } @@ -300,7 +370,10 @@ static int unquiesce_device(PVRDMADev *dev) static int reset_device(PVRDMADev *dev) { + pvrdma_stop(dev); + pr_dbg("Device reset complete\n"); + return 0; } @@ -357,7 +430,7 @@ static void regs_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) reset_device(dev); break; } - break; + break; case PVRDMA_REG_IMR: pr_dbg("Interrupt mask=0x%" PRIx64 "\n", val); dev->interrupt_mask = val; @@ -366,7 +439,7 @@ static void regs_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) if (val == 0) { execute_command(dev); } - break; + break; default: break; } @@ -469,45 +542,6 @@ static void init_regs(PCIDevice *pdev) set_reg_val(dev, PVRDMA_REG_ERR, 0xFFFF); } -static void uninit_msix(PCIDevice *pdev, int used_vectors) -{ - PVRDMADev *dev = PVRDMA_DEV(pdev); - int i; - - for (i = 0; i < used_vectors; i++) { - msix_vector_unuse(pdev, i); - } - - msix_uninit(pdev, &dev->msix, &dev->msix); -} - -static int init_msix(PCIDevice *pdev, Error **errp) -{ - PVRDMADev *dev = PVRDMA_DEV(pdev); - int i; - int rc; - - rc = msix_init(pdev, RDMA_MAX_INTRS, &dev->msix, RDMA_MSIX_BAR_IDX, - RDMA_MSIX_TABLE, &dev->msix, RDMA_MSIX_BAR_IDX, - RDMA_MSIX_PBA, 0, NULL); - - if (rc < 0) { - error_setg(errp, "Failed to initialize MSI-X"); - return rc; - } - - for (i = 0; i < RDMA_MAX_INTRS; i++) { - rc = msix_vector_use(PCI_DEVICE(dev), i); - if (rc < 0) { - error_setg(errp, "Fail mark MSI-X vercor %d", i); - uninit_msix(pdev, i); - return rc; - } - } - - return 0; -} - static void init_dev_caps(PVRDMADev *dev) { size_t pg_tbl_bytes = TARGET_PAGE_SIZE * @@ -543,6 +577,8 @@ static void pvrdma_realize(PCIDevice *pdev, Error **errp) Object *memdev_root; bool ram_shared = false; + init_pr_dbg(); + pr_dbg("Initializing device %s %x.%x\n", pdev->name, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); @@ -575,7 +611,7 @@ static void pvrdma_realize(PCIDevice *pdev, Error **errp) goto out; } - rc = rdma_backend_init(&dev->backend_dev, &dev->rdma_dev_res, + rc = rdma_backend_init(&dev->backend_dev, pdev, &dev->rdma_dev_res, dev->backend_device_name, dev->backend_port_num, dev->backend_gid_idx, &dev->dev_attr, errp); if (rc) { @@ -602,22 +638,7 @@ out: static void pvrdma_exit(PCIDevice *pdev) { - PVRDMADev *dev = PVRDMA_DEV(pdev); - - pr_dbg("Closing device %s %x.%x\n", pdev->name, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn)); - - pvrdma_qp_ops_fini(); - - rdma_rm_fini(&dev->rdma_dev_res); - - rdma_backend_fini(&dev->backend_dev); - - free_dsr(dev); - - if (msix_enabled(pdev)) { - uninit_msix(pdev, RDMA_MAX_INTRS); - } + pvrdma_fini(pdev); } static void pvrdma_class_init(ObjectClass *klass, void *data) diff --git a/hw/rdma/vmw/pvrdma_qp_ops.c b/hw/rdma/vmw/pvrdma_qp_ops.c index 99bb51111e..c668afd0ed 100644 --- a/hw/rdma/vmw/pvrdma_qp_ops.c +++ b/hw/rdma/vmw/pvrdma_qp_ops.c @@ -69,6 +69,7 @@ static int pvrdma_post_cqe(PVRDMADev *dev, uint32_t cq_handle, return -EINVAL; } + memset(cqe1, 0, sizeof(*cqe1)); cqe1->wr_id = cqe->wr_id; cqe1->qp = cqe->qp; cqe1->opcode = cqe->opcode; @@ -129,7 +130,7 @@ int pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle) PvrdmaSqWqe *wqe; PvrdmaRing *ring; - pr_dbg("qp_handle=%d\n", qp_handle); + pr_dbg("qp_handle=0x%x\n", qp_handle); qp = rdma_rm_get_qp(&dev->rdma_dev_res, qp_handle); if (unlikely(!qp)) { @@ -173,7 +174,7 @@ int pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle) PvrdmaRqWqe *wqe; PvrdmaRing *ring; - pr_dbg("qp_handle=%d\n", qp_handle); + pr_dbg("qp_handle=0x%x\n", 
qp_handle); qp = rdma_rm_get_qp(&dev->rdma_dev_res, qp_handle); if (unlikely(!qp)) { diff --git a/hw/s390x/3270-ccw.c b/hw/s390x/3270-ccw.c index 3af13ea027..2c8d16ccf7 100644 --- a/hw/s390x/3270-ccw.c +++ b/hw/s390x/3270-ccw.c @@ -98,13 +98,10 @@ static void emulated_ccw_3270_realize(DeviceState *ds, Error **errp) EmulatedCcw3270Class *ck = EMULATED_CCW_3270_GET_CLASS(dev); CcwDevice *cdev = CCW_DEVICE(ds); CCWDeviceClass *cdk = CCW_DEVICE_GET_CLASS(cdev); - DeviceState *parent = DEVICE(cdev); - BusState *qbus = qdev_get_parent_bus(parent); - VirtualCssBus *cbus = VIRTUAL_CSS_BUS(qbus); SubchDev *sch; Error *err = NULL; - sch = css_create_sch(cdev->devno, cbus->squash_mcss, errp); + sch = css_create_sch(cdev->devno, errp); if (!sch) { return; } diff --git a/hw/s390x/css-bridge.c b/hw/s390x/css-bridge.c index a02d708239..1bd6c8b458 100644 --- a/hw/s390x/css-bridge.c +++ b/hw/s390x/css-bridge.c @@ -106,7 +106,6 @@ VirtualCssBus *virtual_css_bus_init(void) /* Create bus on bridge device */ bus = qbus_create(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css"); cbus = VIRTUAL_CSS_BUS(bus); - cbus->squash_mcss = s390_get_squash_mcss(); /* Enable hotplugging */ qbus_set_hotplug_handler(bus, dev, &error_abort); diff --git a/hw/s390x/css.c b/hw/s390x/css.c index 5424ea4562..5a9fe45ce8 100644 --- a/hw/s390x/css.c +++ b/hw/s390x/css.c @@ -2359,15 +2359,13 @@ const PropertyInfo css_devid_ro_propinfo = { .get = get_css_devid, }; -SubchDev *css_create_sch(CssDevId bus_id, bool squash_mcss, Error **errp) +SubchDev *css_create_sch(CssDevId bus_id, Error **errp) { uint16_t schid = 0; SubchDev *sch; if (bus_id.valid) { - if (squash_mcss) { - bus_id.cssid = channel_subsys.default_cssid; - } else if (!channel_subsys.css[bus_id.cssid]) { + if (!channel_subsys.css[bus_id.cssid]) { css_create_css_image(bus_id.cssid, false); } diff --git a/hw/s390x/s390-ccw.c b/hw/s390x/s390-ccw.c index 214c940593..cad91ee626 100644 --- a/hw/s390x/s390-ccw.c +++ b/hw/s390x/s390-ccw.c @@ -67,8 +67,6 @@ static void s390_ccw_realize(S390CCWDevice *cdev, char *sysfsdev, Error **errp) CcwDevice *ccw_dev = CCW_DEVICE(cdev); CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev); DeviceState *parent = DEVICE(ccw_dev); - BusState *qbus = qdev_get_parent_bus(parent); - VirtualCssBus *cbus = VIRTUAL_CSS_BUS(qbus); SubchDev *sch; int ret; Error *err = NULL; @@ -78,7 +76,7 @@ static void s390_ccw_realize(S390CCWDevice *cdev, char *sysfsdev, Error **errp) goto out_err_propagate; } - sch = css_create_sch(ccw_dev->devno, cbus->squash_mcss, &err); + sch = css_create_sch(ccw_dev->devno, &err); if (!sch) { goto out_mdevid_free; } diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index 7983185d04..f0f7fdcadd 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -282,19 +282,8 @@ static void ccw_init(MachineState *machine) virtio_ccw_register_hcalls(); s390_enable_css_support(s390_cpu_addr2state(0)); - /* - * Non mcss-e enabled guests only see the devices from the default - * css, which is determined by the value of the squash_mcss property. 
- */ - if (css_bus->squash_mcss) { - ret = css_create_css_image(0, true); - } else { - ret = css_create_css_image(VIRTUAL_CSSID, true); - } - if (qemu_opt_get(qemu_get_machine_opts(), "s390-squash-mcss")) { - warn_report("The machine property 's390-squash-mcss' is deprecated" - " (obsoleted by lifting the cssid restrictions)."); - } + + ret = css_create_css_image(VIRTUAL_CSSID, true); assert(ret == 0); if (css_migration_enabled()) { @@ -575,21 +564,6 @@ static void machine_set_loadparm(Object *obj, const char *val, Error **errp) ms->loadparm[i] = ' '; /* pad right with spaces */ } } -static inline bool machine_get_squash_mcss(Object *obj, Error **errp) -{ - S390CcwMachineState *ms = S390_CCW_MACHINE(obj); - - return ms->s390_squash_mcss; -} - -static inline void machine_set_squash_mcss(Object *obj, bool value, - Error **errp) -{ - S390CcwMachineState *ms = S390_CCW_MACHINE(obj); - - ms->s390_squash_mcss = value; -} - static inline void s390_machine_initfn(Object *obj) { object_property_add_bool(obj, "aes-key-wrap", @@ -614,13 +588,6 @@ static inline void s390_machine_initfn(Object *obj) " to upper case) to pass to machine loader, boot manager," " and guest kernel", NULL); - object_property_add_bool(obj, "s390-squash-mcss", - machine_get_squash_mcss, - machine_set_squash_mcss, NULL); - object_property_set_description(obj, "s390-squash-mcss", "(deprecated) " - "enable/disable squashing subchannels into the default css", - NULL); - object_property_set_bool(obj, false, "s390-squash-mcss", NULL); } static const TypeInfo ccw_machine_info = { @@ -673,6 +640,9 @@ bool css_migration_enabled(void) } \ type_init(ccw_machine_register_##suffix) +#define CCW_COMPAT_3_0 \ + HW_COMPAT_3_0 + #define CCW_COMPAT_2_12 \ HW_COMPAT_2_12 @@ -761,14 +731,26 @@ bool css_migration_enabled(void) .value = "0",\ }, +static void ccw_machine_3_1_instance_options(MachineState *machine) +{ +} + +static void ccw_machine_3_1_class_options(MachineClass *mc) +{ +} +DEFINE_CCW_MACHINE(3_1, "3.1", true); + static void ccw_machine_3_0_instance_options(MachineState *machine) { + ccw_machine_3_1_instance_options(machine); } static void ccw_machine_3_0_class_options(MachineClass *mc) { + ccw_machine_3_1_class_options(mc); + SET_MACHINE_COMPAT(mc, CCW_COMPAT_3_0); } -DEFINE_CCW_MACHINE(3_0, "3.0", true); +DEFINE_CCW_MACHINE(3_0, "3.0", false); static void ccw_machine_2_12_instance_options(MachineState *machine) { diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index 7ddb378d52..2713b7feaa 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -694,13 +694,10 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp) VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev); CcwDevice *ccw_dev = CCW_DEVICE(dev); CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev); - DeviceState *parent = DEVICE(ccw_dev); - BusState *qbus = qdev_get_parent_bus(parent); - VirtualCssBus *cbus = VIRTUAL_CSS_BUS(qbus); SubchDev *sch; Error *err = NULL; - sch = css_create_sch(ccw_dev->devno, cbus->squash_mcss, errp); + sch = css_create_sch(ccw_dev->devno, errp); if (!sch) { return; } diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index 5bb390773b..5ae7baa082 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -2379,7 +2379,6 @@ static void scsi_realize(SCSIDevice *dev, Error **errp) return; } - blkconf_serial(&s->qdev.conf, &s->serial); blkconf_blocksizes(&s->qdev.conf); if (s->qdev.conf.logical_block_size > diff --git a/hw/sd/sdhci-internal.h b/hw/sd/sdhci-internal.h index 756ef3f3c2..19665fd401 
100644 --- a/hw/sd/sdhci-internal.h +++ b/hw/sd/sdhci-internal.h @@ -302,4 +302,6 @@ extern const VMStateDescription sdhci_vmstate; #define ESDHC_CTRL_4BITBUS (0x1 << 1) #define ESDHC_CTRL_8BITBUS (0x2 << 1) +#define ESDHC_PRNSTS_SDSTB (1 << 3) + #endif diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index 8f58c31265..81bbf03279 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -1651,6 +1651,14 @@ static uint64_t usdhc_read(void *opaque, hwaddr offset, unsigned size) break; + case SDHC_PRNSTS: + /* Add SDSTB (SD Clock Stable) bit to PRNSTS */ + ret = sdhci_read(opaque, offset, size) & ~ESDHC_PRNSTS_SDSTB; + if (s->clkcon & SDHC_CLOCK_INT_STABLE) { + ret |= ESDHC_PRNSTS_SDSTB; + } + break; + case ESDHC_DLL_CTRL: case ESDHC_TUNE_CTRL_STATUS: case ESDHC_UNDOCUMENTED_REG27: diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c index d981de1841..3c29b68e67 100644 --- a/hw/sparc/sun4m.c +++ b/hw/sparc/sun4m.c @@ -1035,7 +1035,17 @@ static void sun4m_hw_init(const struct sun4m_hwdef *hwdef, ecc_init(hwdef->ecc_base, slavio_irq[28], hwdef->ecc_version); - fw_cfg = fw_cfg_init_mem(CFG_ADDR, CFG_ADDR + 2); + dev = qdev_create(NULL, TYPE_FW_CFG_MEM); + fw_cfg = FW_CFG(dev); + qdev_prop_set_uint32(dev, "data_width", 1); + qdev_prop_set_bit(dev, "dma_enabled", false); + object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG, + OBJECT(fw_cfg), NULL); + qdev_init_nofail(dev); + s = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(s, 0, CFG_ADDR); + sysbus_mmio_map(s, 1, CFG_ADDR + 2); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus); fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c index 74b748497e..d16843b30e 100644 --- a/hw/sparc64/sun4u.c +++ b/hw/sparc64/sun4u.c @@ -139,7 +139,7 @@ static uint64_t sun4u_load_kernel(const char *kernel_filename, unsigned int i; long kernel_size; uint8_t *ptr; - uint64_t kernel_top; + uint64_t kernel_top = 0; linux_boot = (kernel_filename != NULL); @@ -172,7 +172,7 @@ static uint64_t sun4u_load_kernel(const char *kernel_filename, } /* load initrd above kernel */ *initrd_size = 0; - if (initrd_filename) { + if (initrd_filename && kernel_top) { *initrd_addr = TARGET_PAGE_ALIGN(kernel_top); *initrd_size = load_image_targphys(initrd_filename, diff --git a/hw/ssi/imx_spi.c b/hw/ssi/imx_spi.c index b66505ca49..02c38c9e47 100644 --- a/hw/ssi/imx_spi.c +++ b/hw/ssi/imx_spi.c @@ -208,8 +208,6 @@ static void imx_spi_flush_txfifo(IMXSPIState *s) } if (s->burst_length <= 0) { - s->regs[ECSPI_CONREG] &= ~ECSPI_CONREG_XCH; - if (!imx_spi_is_multiple_master_burst(s)) { s->regs[ECSPI_STATREG] |= ECSPI_STATREG_TC; break; @@ -219,6 +217,7 @@ static void imx_spi_flush_txfifo(IMXSPIState *s) if (fifo32_is_empty(&s->tx_fifo)) { s->regs[ECSPI_STATREG] |= ECSPI_STATREG_TC; + s->regs[ECSPI_CONREG] &= ~ECSPI_CONREG_XCH; } /* TODO: We should also use TDR and RDR bits */ diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c index c052bfc4b3..16f88f7402 100644 --- a/hw/ssi/xilinx_spips.c +++ b/hw/ssi/xilinx_spips.c @@ -1031,14 +1031,6 @@ static const MemoryRegionOps spips_ops = { static void xilinx_qspips_invalidate_mmio_ptr(XilinxQSPIPS *q) { - XilinxSPIPS *s = &q->parent_obj; - - if ((q->mmio_execution_enabled) && (q->lqspi_cached_addr != ~0ULL)) { - /* Invalidate the current mapped mmio */ - memory_region_invalidate_mmio_ptr(&s->mmlqspi, q->lqspi_cached_addr, - LQSPI_CACHE_SIZE); - } - q->lqspi_cached_addr = ~0ULL; } @@ -1207,23 +1199,6 @@ static void 
lqspi_load_cache(void *opaque, hwaddr addr) } } -static void *lqspi_request_mmio_ptr(void *opaque, hwaddr addr, unsigned *size, - unsigned *offset) -{ - XilinxQSPIPS *q = opaque; - hwaddr offset_within_the_region; - - if (!q->mmio_execution_enabled) { - return NULL; - } - - offset_within_the_region = addr & ~(LQSPI_CACHE_SIZE - 1); - lqspi_load_cache(opaque, offset_within_the_region); - *size = LQSPI_CACHE_SIZE; - *offset = offset_within_the_region; - return q->lqspi_buf; -} - static uint64_t lqspi_read(void *opaque, hwaddr addr, unsigned int size) { @@ -1245,7 +1220,6 @@ lqspi_read(void *opaque, hwaddr addr, unsigned int size) static const MemoryRegionOps lqspi_ops = { .read = lqspi_read, - .request_ptr = lqspi_request_mmio_ptr, .endianness = DEVICE_NATIVE_ENDIAN, .valid = { .min_access_size = 1, @@ -1322,15 +1296,6 @@ static void xilinx_qspips_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->mmlqspi); q->lqspi_cached_addr = ~0ULL; - - /* mmio_execution breaks migration better aborting than having strange - * bugs. - */ - if (q->mmio_execution_enabled) { - error_setg(&q->migration_blocker, - "enabling mmio_execution breaks migration"); - migrate_add_blocker(q->migration_blocker, &error_fatal); - } } static void xlnx_zynqmp_qspips_realize(DeviceState *dev, Error **errp) @@ -1427,16 +1392,6 @@ static Property xilinx_zynqmp_qspips_properties[] = { DEFINE_PROP_END_OF_LIST(), }; -static Property xilinx_qspips_properties[] = { - /* We had to turn this off for 2.10 as it is not compatible with migration. - * It can be enabled but will prevent the device to be migrated. - * This will go aways when a fix will be released. - */ - DEFINE_PROP_BOOL("x-mmio-exec", XilinxQSPIPS, mmio_execution_enabled, - false), - DEFINE_PROP_END_OF_LIST(), -}; - static Property xilinx_spips_properties[] = { DEFINE_PROP_UINT8("num-busses", XilinxSPIPS, num_busses, 1), DEFINE_PROP_UINT8("num-ss-bits", XilinxSPIPS, num_cs, 4), @@ -1450,7 +1405,6 @@ static void xilinx_qspips_class_init(ObjectClass *klass, void * data) XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass); dc->realize = xilinx_qspips_realize; - dc->props = xilinx_qspips_properties; xsc->reg_ops = &qspips_ops; xsc->rx_fifo_size = RXFF_A_Q; xsc->tx_fifo_size = TXFF_A_Q; diff --git a/hw/timer/m48t59.c b/hw/timer/m48t59.c index f2991762ab..ca3ed445de 100644 --- a/hw/timer/m48t59.c +++ b/hw/timer/m48t59.c @@ -493,66 +493,29 @@ static uint64_t NVRAM_readb(void *opaque, hwaddr addr, unsigned size) return retval; } -static void nvram_writeb (void *opaque, hwaddr addr, uint32_t value) -{ - M48t59State *NVRAM = opaque; - - m48t59_write(NVRAM, addr, value & 0xff); -} - -static void nvram_writew (void *opaque, hwaddr addr, uint32_t value) -{ - M48t59State *NVRAM = opaque; - - m48t59_write(NVRAM, addr, (value >> 8) & 0xff); - m48t59_write(NVRAM, addr + 1, value & 0xff); -} - -static void nvram_writel (void *opaque, hwaddr addr, uint32_t value) -{ - M48t59State *NVRAM = opaque; - - m48t59_write(NVRAM, addr, (value >> 24) & 0xff); - m48t59_write(NVRAM, addr + 1, (value >> 16) & 0xff); - m48t59_write(NVRAM, addr + 2, (value >> 8) & 0xff); - m48t59_write(NVRAM, addr + 3, value & 0xff); -} - -static uint32_t nvram_readb (void *opaque, hwaddr addr) +static uint64_t nvram_read(void *opaque, hwaddr addr, unsigned size) { M48t59State *NVRAM = opaque; return m48t59_read(NVRAM, addr); } -static uint32_t nvram_readw (void *opaque, hwaddr addr) -{ - M48t59State *NVRAM = opaque; - uint32_t retval; - - retval = m48t59_read(NVRAM, addr) << 8; - retval |= 
m48t59_read(NVRAM, addr + 1); - return retval; -} - -static uint32_t nvram_readl (void *opaque, hwaddr addr) +static void nvram_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) { M48t59State *NVRAM = opaque; - uint32_t retval; - retval = m48t59_read(NVRAM, addr) << 24; - retval |= m48t59_read(NVRAM, addr + 1) << 16; - retval |= m48t59_read(NVRAM, addr + 2) << 8; - retval |= m48t59_read(NVRAM, addr + 3); - return retval; + return m48t59_write(NVRAM, addr, value); } static const MemoryRegionOps nvram_ops = { - .old_mmio = { - .read = { nvram_readb, nvram_readw, nvram_readl, }, - .write = { nvram_writeb, nvram_writew, nvram_writel, }, - }, - .endianness = DEVICE_NATIVE_ENDIAN, + .read = nvram_read, + .write = nvram_write, + .impl.min_access_size = 1, + .impl.max_access_size = 1, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_BIG_ENDIAN, }; static const VMStateDescription vmstate_m48t59 = { diff --git a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c index 45a9487cdb..cd5551d94f 100644 --- a/hw/usb/dev-storage.c +++ b/hw/usb/dev-storage.c @@ -599,7 +599,6 @@ static void usb_msd_storage_realize(USBDevice *dev, Error **errp) return; } - blkconf_serial(&s->conf, &dev->serial); blkconf_blocksizes(&s->conf); if (!blkconf_apply_backend_options(&s->conf, blk_is_read_only(blk), true, errp)) { diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c index 351b305e1a..e96bbdc78b 100644 --- a/hw/vfio/ccw.c +++ b/hw/vfio/ccw.c @@ -349,6 +349,15 @@ static void vfio_ccw_get_device(VFIOGroup *group, VFIOCCWDevice *vcdev, } } + /* + * All vfio-ccw devices are believed to operate in a way compatible with + * memory ballooning, ie. pages pinned in the host are in the current + * working set of the guest driver and therefore never overlap with pages + * available to the guest balloon driver. This needs to be set before + * vfio_get_device() for vfio common to handle the balloon inhibitor. + */ + vcdev->vdev.balloon_allowed = true; + if (vfio_get_device(group, vcdev->cdev.mdevid, &vcdev->vdev, errp)) { goto out_err; } diff --git a/hw/vfio/common.c b/hw/vfio/common.c index fb396cf00a..cd1f4af18a 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -32,6 +32,7 @@ #include "hw/hw.h" #include "qemu/error-report.h" #include "qemu/range.h" +#include "sysemu/balloon.h" #include "sysemu/kvm.h" #include "trace.h" #include "qapi/error.h" @@ -1044,6 +1045,33 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, space = vfio_get_address_space(as); + /* + * VFIO is currently incompatible with memory ballooning insofar as the + * madvise to purge (zap) the page from QEMU's address space does not + * interact with the memory API and therefore leaves stale virtual to + * physical mappings in the IOMMU if the page was previously pinned. We + * therefore add a balloon inhibit for each group added to a container, + * whether the container is used individually or shared. This provides + * us with options to allow devices within a group to opt-in and allow + * ballooning, so long as it is done consistently for a group (for instance + * if the device is an mdev device where it is known that the host vendor + * driver will never pin pages outside of the working set of the guest + * driver, which would thus not be ballooning candidates). + * + * The first opportunity to induce pinning occurs here where we attempt to + * attach the group to existing containers within the AddressSpace. 
If any + * pages are already zapped from the virtual address space, such as from a + * previous ballooning opt-in, new pinning will cause valid mappings to be + * re-established. Likewise, when the overall MemoryListener for a new + * container is registered, a replay of mappings within the AddressSpace + * will occur, re-establishing any previously zapped pages as well. + * + * NB. Balloon inhibiting does not currently block operation of the + * balloon driver or revoke previously pinned pages, it only prevents + * calling madvise to modify the virtual mapping of ballooned pages. + */ + qemu_balloon_inhibit(true); + QLIST_FOREACH(container, &space->containers, next) { if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { group->container = container; @@ -1232,6 +1260,7 @@ close_fd_exit: close(fd); put_space_exit: + qemu_balloon_inhibit(false); vfio_put_address_space(space); return ret; @@ -1352,6 +1381,9 @@ void vfio_put_group(VFIOGroup *group) return; } + if (!group->balloon_allowed) { + qemu_balloon_inhibit(false); + } vfio_kvm_device_del_group(group); vfio_disconnect_container(group); QLIST_REMOVE(group, next); @@ -1387,6 +1419,25 @@ int vfio_get_device(VFIOGroup *group, const char *name, return ret; } + /* + * Clear the balloon inhibitor for this group if the driver knows the + * device operates compatibly with ballooning. Setting must be consistent + * per group, but since compatibility is really only possible with mdev + * currently, we expect singleton groups. + */ + if (vbasedev->balloon_allowed != group->balloon_allowed) { + if (!QLIST_EMPTY(&group->device_list)) { + error_setg(errp, + "Inconsistent device balloon setting within group"); + return -1; + } + + if (!group->balloon_allowed) { + group->balloon_allowed = true; + qemu_balloon_inhibit(false); + } + } + vbasedev->fd = fd; vbasedev->group = group; QLIST_INSERT_HEAD(&group->device_list, vbasedev, next); diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index 6cbb8fa054..056f3a887a 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -2804,12 +2804,13 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); VFIODevice *vbasedev_iter; VFIOGroup *group; - char *tmp, group_path[PATH_MAX], *group_name; + char *tmp, *subsys, group_path[PATH_MAX], *group_name; Error *err = NULL; ssize_t len; struct stat st; int groupid; int i, ret; + bool is_mdev; if (!vdev->vbasedev.sysfsdev) { if (!(~vdev->host.domain || ~vdev->host.bus || @@ -2869,6 +2870,27 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) } } + /* + * Mediated devices *might* operate compatibly with memory ballooning, but + * we cannot know for certain, it depends on whether the mdev vendor driver + * stays in sync with the active working set of the guest driver. Prevent + * the x-balloon-allowed option unless this is minimally an mdev device. 
+ */ + tmp = g_strdup_printf("%s/subsystem", vdev->vbasedev.sysfsdev); + subsys = realpath(tmp, NULL); + g_free(tmp); + is_mdev = (strcmp(subsys, "/sys/bus/mdev") == 0); + free(subsys); + + trace_vfio_mdev(vdev->vbasedev.name, is_mdev); + + if (vdev->vbasedev.balloon_allowed && !is_mdev) { + error_setg(errp, "x-balloon-allowed only potentially compatible " + "with mdev devices"); + vfio_put_group(group); + goto error; + } + ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp); if (ret) { vfio_put_group(group); @@ -3170,6 +3192,8 @@ static Property vfio_pci_dev_properties[] = { DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features, VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false), DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false), + DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice, + vbasedev.balloon_allowed, false), DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false), DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false), DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false), diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index d2a74952e3..a85e8662ea 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -39,6 +39,7 @@ vfio_pci_hot_reset_result(const char *name, const char *result) "%s hot reset: % vfio_populate_device_config(const char *name, unsigned long size, unsigned long offset, unsigned long flags) "Device %s config:\n size: 0x%lx, offset: 0x%lx, flags: 0x%lx" vfio_populate_device_get_irq_info_failure(void) "VFIO_DEVICE_GET_IRQ_INFO failure: %m" vfio_realize(const char *name, int group_id) " (%s) group %d" +vfio_mdev(const char *name, bool is_mdev) " (%s) is_mdev %d" vfio_add_ext_cap_dropped(const char *name, uint16_t cap, uint16_t offset) "%s 0x%x@0x%x" vfio_pci_reset(const char *name) " (%s)" vfio_pci_reset_flr(const char *name) "%s FLR/VFIO_DEVICE_RESET" diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index b129cb9ddd..d4cb5894a8 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -663,12 +663,14 @@ static void vhost_iommu_region_add(MemoryListener *listener, struct vhost_iommu *iommu; Int128 end; int iommu_idx; - IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); + IOMMUMemoryRegion *iommu_mr; if (!memory_region_is_iommu(section->mr)) { return; } + iommu_mr = IOMMU_MEMORY_REGION(section->mr); + iommu = g_malloc0(sizeof(*iommu)); end = int128_add(int128_make64(section->offset_within_region), section->size); diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index 1f7a87f094..b5425080c5 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -21,7 +21,6 @@ #include "hw/mem/pc-dimm.h" #include "sysemu/balloon.h" #include "hw/virtio/virtio-balloon.h" -#include "sysemu/kvm.h" #include "exec/address-spaces.h" #include "qapi/error.h" #include "qapi/qapi-events-misc.h" @@ -36,8 +35,7 @@ static void balloon_page(void *addr, int deflate) { - if (!qemu_balloon_is_inhibited() && (!kvm_enabled() || - kvm_has_sync_mmu())) { + if (!qemu_balloon_is_inhibited()) { qemu_madvise(addr, BALLOON_PAGE_SIZE, deflate ? 
QEMU_MADV_WILLNEED : QEMU_MADV_DONTNEED); } diff --git a/hw/watchdog/Makefile.objs b/hw/watchdog/Makefile.objs index 9589bed63a..3f536d1cad 100644 --- a/hw/watchdog/Makefile.objs +++ b/hw/watchdog/Makefile.objs @@ -1,4 +1,5 @@ common-obj-y += watchdog.o +common-obj-$(CONFIG_CMSDK_APB_WATCHDOG) += cmsdk-apb-watchdog.o common-obj-$(CONFIG_WDT_IB6300ESB) += wdt_i6300esb.o common-obj-$(CONFIG_WDT_IB700) += wdt_ib700.o common-obj-$(CONFIG_WDT_DIAG288) += wdt_diag288.o diff --git a/hw/watchdog/cmsdk-apb-watchdog.c b/hw/watchdog/cmsdk-apb-watchdog.c new file mode 100644 index 0000000000..eb79a73fa6 --- /dev/null +++ b/hw/watchdog/cmsdk-apb-watchdog.c @@ -0,0 +1,326 @@ +/* + * ARM CMSDK APB watchdog emulation + * + * Copyright (c) 2018 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* + * This is a model of the "APB watchdog" which is part of the Cortex-M + * System Design Kit (CMSDK) and documented in the Cortex-M System + * Design Kit Technical Reference Manual (ARM DDI0479C): + * https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "qapi/error.h" +#include "qemu/main-loop.h" +#include "sysemu/watchdog.h" +#include "hw/sysbus.h" +#include "hw/registerfields.h" +#include "hw/watchdog/cmsdk-apb-watchdog.h" + +REG32(WDOGLOAD, 0x0) +REG32(WDOGVALUE, 0x4) +REG32(WDOGCONTROL, 0x8) + FIELD(WDOGCONTROL, INTEN, 0, 1) + FIELD(WDOGCONTROL, RESEN, 1, 1) +#define R_WDOGCONTROL_VALID_MASK (R_WDOGCONTROL_INTEN_MASK | \ + R_WDOGCONTROL_RESEN_MASK) +REG32(WDOGINTCLR, 0xc) +REG32(WDOGRIS, 0x10) + FIELD(WDOGRIS, INT, 0, 1) +REG32(WDOGMIS, 0x14) +REG32(WDOGLOCK, 0xc00) +#define WDOG_UNLOCK_VALUE 0x1ACCE551 +REG32(WDOGITCR, 0xf00) + FIELD(WDOGITCR, ENABLE, 0, 1) +#define R_WDOGITCR_VALID_MASK R_WDOGITCR_ENABLE_MASK +REG32(WDOGITOP, 0xf04) + FIELD(WDOGITOP, WDOGRES, 0, 1) + FIELD(WDOGITOP, WDOGINT, 1, 1) +#define R_WDOGITOP_VALID_MASK (R_WDOGITOP_WDOGRES_MASK | \ + R_WDOGITOP_WDOGINT_MASK) +REG32(PID4, 0xfd0) +REG32(PID5, 0xfd4) +REG32(PID6, 0xfd8) +REG32(PID7, 0xfdc) +REG32(PID0, 0xfe0) +REG32(PID1, 0xfe4) +REG32(PID2, 0xfe8) +REG32(PID3, 0xfec) +REG32(CID0, 0xff0) +REG32(CID1, 0xff4) +REG32(CID2, 0xff8) +REG32(CID3, 0xffc) + +/* PID/CID values */ +static const int watchdog_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x24, 0xb8, 0x1b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static bool cmsdk_apb_watchdog_intstatus(CMSDKAPBWatchdog *s) +{ + /* Return masked interrupt status */ + return s->intstatus && (s->control & R_WDOGCONTROL_INTEN_MASK); +} + +static bool cmsdk_apb_watchdog_resetstatus(CMSDKAPBWatchdog *s) +{ + /* Return masked reset status */ + return s->resetstatus && (s->control & R_WDOGCONTROL_RESEN_MASK); +} + +static void cmsdk_apb_watchdog_update(CMSDKAPBWatchdog *s) +{ + bool wdogint; + bool wdogres; + + if (s->itcr) { + wdogint = s->itop & R_WDOGITOP_WDOGINT_MASK; + wdogres = s->itop & R_WDOGITOP_WDOGRES_MASK; + } else { + wdogint = cmsdk_apb_watchdog_intstatus(s); + wdogres = cmsdk_apb_watchdog_resetstatus(s); + } + + qemu_set_irq(s->wdogint, wdogint); + if (wdogres) { + watchdog_perform_action(); + } +} + +static uint64_t cmsdk_apb_watchdog_read(void *opaque, hwaddr offset, + unsigned size) +{ + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(opaque); + 
uint64_t r; + + switch (offset) { + case A_WDOGLOAD: + r = ptimer_get_limit(s->timer); + break; + case A_WDOGVALUE: + r = ptimer_get_count(s->timer); + break; + case A_WDOGCONTROL: + r = s->control; + break; + case A_WDOGRIS: + r = s->intstatus; + break; + case A_WDOGMIS: + r = cmsdk_apb_watchdog_intstatus(s); + break; + case A_WDOGLOCK: + r = s->lock; + break; + case A_WDOGITCR: + r = s->itcr; + break; + case A_PID4 ... A_CID3: + r = watchdog_id[(offset - A_PID4) / 4]; + break; + case A_WDOGINTCLR: + case A_WDOGITOP: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB watchdog read: read of WO offset %x\n", + (int)offset); + r = 0; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB watchdog read: bad offset %x\n", (int)offset); + r = 0; + break; + } + trace_cmsdk_apb_watchdog_read(offset, r, size); + return r; +} + +static void cmsdk_apb_watchdog_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(opaque); + + trace_cmsdk_apb_watchdog_write(offset, value, size); + + if (s->lock && offset != A_WDOGLOCK) { + /* Write access is disabled via WDOGLOCK */ + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB watchdog write: write to locked watchdog\n"); + return; + } + + switch (offset) { + case A_WDOGLOAD: + /* + * Reset the load value and the current count, and make sure + * we're counting. + */ + ptimer_set_limit(s->timer, value, 1); + ptimer_run(s->timer, 0); + break; + case A_WDOGCONTROL: + s->control = value & R_WDOGCONTROL_VALID_MASK; + cmsdk_apb_watchdog_update(s); + break; + case A_WDOGINTCLR: + s->intstatus = 0; + ptimer_set_count(s->timer, ptimer_get_limit(s->timer)); + cmsdk_apb_watchdog_update(s); + break; + case A_WDOGLOCK: + s->lock = (value != WDOG_UNLOCK_VALUE); + break; + case A_WDOGITCR: + s->itcr = value & R_WDOGITCR_VALID_MASK; + cmsdk_apb_watchdog_update(s); + break; + case A_WDOGITOP: + s->itop = value & R_WDOGITOP_VALID_MASK; + cmsdk_apb_watchdog_update(s); + break; + case A_WDOGVALUE: + case A_WDOGRIS: + case A_WDOGMIS: + case A_PID4 ... 
A_CID3: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB watchdog write: write to RO offset 0x%x\n", + (int)offset); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB watchdog write: bad offset 0x%x\n", + (int)offset); + break; + } +} + +static const MemoryRegionOps cmsdk_apb_watchdog_ops = { + .read = cmsdk_apb_watchdog_read, + .write = cmsdk_apb_watchdog_write, + .endianness = DEVICE_LITTLE_ENDIAN, + /* byte/halfword accesses are just zero-padded on reads and writes */ + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, +}; + +static void cmsdk_apb_watchdog_tick(void *opaque) +{ + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(opaque); + + if (!s->intstatus) { + /* Count expired for the first time: raise interrupt */ + s->intstatus = R_WDOGRIS_INT_MASK; + } else { + /* Count expired for the second time: raise reset and stop clock */ + s->resetstatus = 1; + ptimer_stop(s->timer); + } + cmsdk_apb_watchdog_update(s); +} + +static void cmsdk_apb_watchdog_reset(DeviceState *dev) +{ + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(dev); + + trace_cmsdk_apb_watchdog_reset(); + s->control = 0; + s->intstatus = 0; + s->lock = 0; + s->itcr = 0; + s->itop = 0; + s->resetstatus = 0; + /* Set the limit and the count */ + ptimer_set_limit(s->timer, 0xffffffff, 1); + ptimer_run(s->timer, 0); +} + +static void cmsdk_apb_watchdog_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(obj); + + memory_region_init_io(&s->iomem, obj, &cmsdk_apb_watchdog_ops, + s, "cmsdk-apb-watchdog", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->wdogint); +} + +static void cmsdk_apb_watchdog_realize(DeviceState *dev, Error **errp) +{ + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(dev); + QEMUBH *bh; + + if (s->wdogclk_frq == 0) { + error_setg(errp, + "CMSDK APB watchdog: wdogclk-frq property must be set"); + return; + } + + bh = qemu_bh_new(cmsdk_apb_watchdog_tick, s); + s->timer = ptimer_init(bh, + PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD | + PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT | + PTIMER_POLICY_NO_IMMEDIATE_RELOAD | + PTIMER_POLICY_NO_COUNTER_ROUND_DOWN); + + ptimer_set_freq(s->timer, s->wdogclk_frq); +} + +static const VMStateDescription cmsdk_apb_watchdog_vmstate = { + .name = "cmsdk-apb-watchdog", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PTIMER(timer, CMSDKAPBWatchdog), + VMSTATE_UINT32(control, CMSDKAPBWatchdog), + VMSTATE_UINT32(intstatus, CMSDKAPBWatchdog), + VMSTATE_UINT32(lock, CMSDKAPBWatchdog), + VMSTATE_UINT32(itcr, CMSDKAPBWatchdog), + VMSTATE_UINT32(itop, CMSDKAPBWatchdog), + VMSTATE_UINT32(resetstatus, CMSDKAPBWatchdog), + VMSTATE_END_OF_LIST() + } +}; + +static Property cmsdk_apb_watchdog_properties[] = { + DEFINE_PROP_UINT32("wdogclk-frq", CMSDKAPBWatchdog, wdogclk_frq, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void cmsdk_apb_watchdog_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = cmsdk_apb_watchdog_realize; + dc->vmsd = &cmsdk_apb_watchdog_vmstate; + dc->reset = cmsdk_apb_watchdog_reset; + dc->props = cmsdk_apb_watchdog_properties; +} + +static const TypeInfo cmsdk_apb_watchdog_info = { + .name = TYPE_CMSDK_APB_WATCHDOG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CMSDKAPBWatchdog), + .instance_init = cmsdk_apb_watchdog_init, + .class_init = cmsdk_apb_watchdog_class_init, +}; + +static void cmsdk_apb_watchdog_register_types(void) +{ + 
type_register_static(&cmsdk_apb_watchdog_info); +} + +type_init(cmsdk_apb_watchdog_register_types); diff --git a/hw/watchdog/trace-events b/hw/watchdog/trace-events new file mode 100644 index 0000000000..fee95847df --- /dev/null +++ b/hw/watchdog/trace-events @@ -0,0 +1,6 @@ +# See docs/devel/tracing.txt for syntax documentation. + +# hw/watchdog/cmsdk-apb-watchdog.c +cmsdk_apb_watchdog_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +cmsdk_apb_watchdog_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +cmsdk_apb_watchdog_reset(void) "CMSDK APB watchdog: reset"
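
As a rough illustration of how a board model might consume the new watchdog device (this sketch is not part of the series above): it creates the device, sets the mandatory wdogclk-frq property, maps MMIO region 0 and routes the WDOGINT output. The base address, clock rate and interrupt destination are invented placeholders; only the type name, the property name and the region/IRQ indices come from the code above.

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/watchdog/cmsdk-apb-watchdog.h"

#define BOARD_WDOG_BASE   0x40008000  /* placeholder APB base address */
#define BOARD_WDOGCLK_FRQ 25000000    /* placeholder 25 MHz WDOGCLK */

static void board_create_watchdog(qemu_irq wdogint_target)
{
    DeviceState *dev = qdev_create(NULL, TYPE_CMSDK_APB_WATCHDOG);

    /* Realize fails if wdogclk-frq is left at its default of zero */
    qdev_prop_set_uint32(dev, "wdogclk-frq", BOARD_WDOGCLK_FRQ);
    qdev_init_nofail(dev);

    /* Region 0 is the 4KB register frame, with WDOGLOAD at offset 0 */
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, BOARD_WDOG_BASE);
    /*
     * The first expiry of the down-counter raises WDOGINT (when INTEN is
     * set); a second expiry with RESEN set calls watchdog_perform_action().
     */
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, wdogint_target);
}

Note that after device reset the counter is already running from WDOGLOAD's reset value of 0xffffffff with the lock open, but INTEN and RESEN are clear, so nothing visible happens until the guest programs WDOGCONTROL (re-opening the lock with 0x1ACCE551 first if it has locked WDOGLOCK in the meantime).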
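
On the vfio side, the new x-balloon-allowed option is an experimental per-device opt-out of the balloon inhibitor added above, and the realize-time check rejects it for anything that is not a mediated device. A plausible invocation (illustrative only; the sysfs path and UUID are placeholders) would therefore pair a virtio balloon with an mdev passed by sysfs path:

    -device virtio-balloon-pci \
    -device vfio-pci,sysfsdev=/sys/bus/mdev/devices/<mdev-uuid>,x-balloon-allowed=on

With the option left at its default (off), attaching the group still inhibits ballooning for the whole VM, as arranged by the qemu_balloon_inhibit() calls above.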