143 files changed, 5307 insertions, 3102 deletions
diff --git a/Makefile b/Makefile
index bfc4b2c8e9..676a4a54f4 100644
--- a/Makefile
+++ b/Makefile
@@ -283,6 +283,13 @@ include $(SRC_PATH)/tests/vm/Makefile.include
 print-help-run = printf "  %-30s - %s\n" "$1" "$2"
 print-help = @$(call print-help-run,$1,$2)
 
+.PHONY: update-linux-vdso
+update-linux-vdso:
+	@for m in $(SRC_PATH)/linux-user/*/Makefile.vdso; do \
+	    $(MAKE) $(SUBDIR_MAKEFLAGS) -C $$(dirname $$m) -f Makefile.vdso \
+	        SRC_PATH=$(SRC_PATH) BUILD_DIR=$(BUILD_DIR); \
+	done
+
 .PHONY: help
 help:
 	@echo 'Generic targets:'
@@ -303,6 +310,9 @@ endif
 	$(call print-help,distclean,Remove all generated files)
 	$(call print-help,dist,Build a distributable tarball)
 	@echo ''
+	@echo 'Linux-user targets:'
+	$(call print-help,update-linux-vdso,Build linux-user vdso images)
+	@echo ''
 	@echo 'Test targets:'
 	$(call print-help,check,Run all tests (check-help for details))
 	$(call print-help,bench,Run all benchmarks)
diff --git a/bsd-user/arm/target_arch.h b/bsd-user/arm/target_arch.h
index 561934bbd2..d80cb85c64 100644
--- a/bsd-user/arm/target_arch.h
+++ b/bsd-user/arm/target_arch.h
@@ -21,6 +21,7 @@
 #define TARGET_ARCH_H
 
 #include "qemu.h"
+#include "target/arm/cpu-features.h"
 
 void target_cpu_set_tls(CPUARMState *env, target_ulong newtls);
 target_ulong target_cpu_get_tls(CPUARMState *env);
diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst
index e1697ac8f4..7c4c80180c 100644
--- a/docs/system/arm/virt.rst
+++ b/docs/system/arm/virt.rst
@@ -63,6 +63,7 @@ Supported guest CPU types:
 - ``host`` (with KVM only)
 - ``neoverse-n1`` (64-bit)
 - ``neoverse-v1`` (64-bit)
+- ``neoverse-n2`` (64-bit)
 - ``max`` (same as ``host`` for KVM; best possible emulation with TCG)
 
 Note that the default is ``cortex-a15``, so for an AArch64 guest you must
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
index 1f78e18872..d10abb36a8 100644
--- a/hw/arm/armv7m.c
+++ b/hw/arm/armv7m.c
@@ -21,6 +21,7 @@
 #include "qemu/module.h"
 #include "qemu/log.h"
 #include "target/arm/idau.h"
+#include "target/arm/cpu-features.h"
 #include "migration/vmstate.h"
 
 /* Bitbanded IO.  Each word corresponds to a single bit. */
diff --git a/hw/arm/bananapi_m2u.c b/hw/arm/bananapi_m2u.c
index 74121d8966..8f24b18d8c 100644
--- a/hw/arm/bananapi_m2u.c
+++ b/hw/arm/bananapi_m2u.c
@@ -26,6 +26,7 @@
 #include "hw/i2c/i2c.h"
 #include "hw/qdev-properties.h"
 #include "hw/arm/allwinner-r40.h"
+#include "hw/arm/boot.h"
 
 static struct arm_boot_info bpim2u_binfo;
 
@@ -127,7 +128,7 @@ static void bpim2u_init(MachineState *machine)
     bpim2u_binfo.loader_start = r40->memmap[AW_R40_DEV_SDRAM];
     bpim2u_binfo.ram_size = machine->ram_size;
     bpim2u_binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC;
-    arm_load_kernel(ARM_CPU(first_cpu), machine, &bpim2u_binfo);
+    arm_load_kernel(&r40->cpus[0], machine, &bpim2u_binfo);
 }
 
 static void bpim2u_machine_init(MachineClass *mc)
diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c
index 8c7fa91529..29146f5018 100644
--- a/hw/arm/cubieboard.c
+++ b/hw/arm/cubieboard.c
@@ -21,6 +21,7 @@
 #include "hw/boards.h"
 #include "hw/qdev-properties.h"
 #include "hw/arm/allwinner-a10.h"
+#include "hw/arm/boot.h"
 #include "hw/i2c/i2c.h"
 
 static struct arm_boot_info cubieboard_binfo = {
diff --git a/hw/arm/exynos4_boards.c b/hw/arm/exynos4_boards.c
index ef5bcbc212..b0e13eb4f0 100644
--- a/hw/arm/exynos4_boards.c
+++ b/hw/arm/exynos4_boards.c
@@ -134,9 +134,10 @@ exynos4_boards_init_common(MachineState *machine,
 
 static void nuri_init(MachineState *machine)
 {
-    exynos4_boards_init_common(machine, EXYNOS4_BOARD_NURI);
+    Exynos4BoardState *s = exynos4_boards_init_common(machine,
+                                                      EXYNOS4_BOARD_NURI);
 
-    arm_load_kernel(ARM_CPU(first_cpu), machine, &exynos4_board_binfo);
+    arm_load_kernel(s->soc.cpu[0], machine, &exynos4_board_binfo);
 }
 
 static void smdkc210_init(MachineState *machine)
@@ -146,7 +147,7 @@ static void smdkc210_init(MachineState *machine)
 
     lan9215_init(SMDK_LAN9118_BASE_ADDR,
             qemu_irq_invert(s->soc.irq_table[exynos4210_get_irq(37, 1)]));
-    arm_load_kernel(ARM_CPU(first_cpu), machine, &exynos4_board_binfo);
+    arm_load_kernel(s->soc.cpu[0], machine, &exynos4_board_binfo);
 }
 
 static void nuri_class_init(ObjectClass *oc, void *data)
diff --git a/hw/arm/imx25_pdk.c b/hw/arm/imx25_pdk.c
index b4f7f4e8a7..7dfddd49e2 100644
--- a/hw/arm/imx25_pdk.c
+++ b/hw/arm/imx25_pdk.c
@@ -27,6 +27,7 @@
 #include "qapi/error.h"
 #include "hw/qdev-properties.h"
 #include "hw/arm/fsl-imx25.h"
+#include "hw/arm/boot.h"
 #include "hw/boards.h"
 #include "qemu/error-report.h"
 #include "sysemu/qtest.h"
diff --git a/hw/arm/kzm.c b/hw/arm/kzm.c
index b1b281c9ac..9be91ebeaa 100644
--- a/hw/arm/kzm.c
+++ b/hw/arm/kzm.c
@@ -16,6 +16,7 @@
 #include "qemu/osdep.h"
 #include "qapi/error.h"
 #include "hw/arm/fsl-imx31.h"
+#include "hw/arm/boot.h"
 #include "hw/boards.h"
 #include "qemu/error-report.h"
 #include "exec/address-spaces.h"
diff --git a/hw/arm/mcimx6ul-evk.c b/hw/arm/mcimx6ul-evk.c
index 3ac1e2ea9b..500427e94b 100644
--- a/hw/arm/mcimx6ul-evk.c
+++ b/hw/arm/mcimx6ul-evk.c
@@ -13,6 +13,7 @@
 #include "qemu/osdep.h"
 #include "qapi/error.h"
 #include "hw/arm/fsl-imx6ul.h"
+#include "hw/arm/boot.h"
 #include "hw/boards.h"
 #include "hw/qdev-properties.h"
 #include "qemu/error-report.h"
diff --git a/hw/arm/mcimx7d-sabre.c b/hw/arm/mcimx7d-sabre.c
index d1778122b6..693a1023b6 100644
--- a/hw/arm/mcimx7d-sabre.c
+++ b/hw/arm/mcimx7d-sabre.c
@@ -15,6 +15,7 @@
 #include "qemu/osdep.h"
 #include "qapi/error.h"
 #include "hw/arm/fsl-imx7.h"
+#include "hw/arm/boot.h"
 #include "hw/boards.h"
 #include "hw/qdev-properties.h"
 #include "qemu/error-report.h"
diff --git a/hw/arm/orangepi.c b/hw/arm/orangepi.c
index 10653361ed..f3784d45ca 100644
--- a/hw/arm/orangepi.c
+++ b/hw/arm/orangepi.c
@@ -25,6 +25,7 @@
 #include "hw/boards.h"
 #include "hw/qdev-properties.h"
 #include "hw/arm/allwinner-h3.h"
+#include "hw/arm/boot.h"
 
 static struct arm_boot_info orangepi_binfo;
 
@@ -105,7 +106,7 @@ static void orangepi_init(MachineState *machine)
     orangepi_binfo.loader_start = h3->memmap[AW_H3_DEV_SDRAM];
     orangepi_binfo.ram_size = machine->ram_size;
     orangepi_binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC;
-    arm_load_kernel(ARM_CPU(first_cpu), machine, &orangepi_binfo);
+    arm_load_kernel(&h3->cpus[0], machine, &orangepi_binfo);
 }
 
 static void orangepi_machine_init(MachineClass *mc)
diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
index 07d5dd8691..f0bf407e66 100644
--- a/hw/arm/pxa2xx.c
+++ b/hw/arm/pxa2xx.c
@@ -1513,14 +1513,15 @@ PXA2xxI2CState *pxa2xx_i2c_init(hwaddr base,
     qdev_prop_set_uint32(dev, "size", region_size + 1);
     qdev_prop_set_uint32(dev, "offset", base & region_size);
 
+    /* FIXME: Should the slave device really be on a separate bus? */
+    i2cbus = i2c_init_bus(dev, "dummy");
+
     i2c_dev = SYS_BUS_DEVICE(dev);
     sysbus_realize_and_unref(i2c_dev, &error_fatal);
     sysbus_mmio_map(i2c_dev, 0, base & ~region_size);
     sysbus_connect_irq(i2c_dev, 0, irq);
 
     s = PXA2XX_I2C(i2c_dev);
-    /* FIXME: Should the slave device really be on a separate bus? */
-    i2cbus = i2c_init_bus(dev, "dummy");
     s->slave = PXA2XX_I2C_SLAVE(i2c_slave_create_simple(i2cbus,
                                                         TYPE_PXA2XX_I2C_SLAVE,
                                                         0));
@@ -2205,8 +2206,10 @@ PXA2xxState *pxa270_init(unsigned int sdram_size, const char *cpu_type)
     sysbus_create_simple("sysbus-ohci", 0x4c000000,
                          qdev_get_gpio_in(s->pic, PXA2XX_PIC_USBH1));
 
-    s->pcmcia[0] = pxa2xx_pcmcia_init(address_space, 0x20000000);
-    s->pcmcia[1] = pxa2xx_pcmcia_init(address_space, 0x30000000);
+    s->pcmcia[0] = PXA2XX_PCMCIA(sysbus_create_simple(TYPE_PXA2XX_PCMCIA,
+                                                      0x20000000, NULL));
+    s->pcmcia[1] = PXA2XX_PCMCIA(sysbus_create_simple(TYPE_PXA2XX_PCMCIA,
+                                                      0x30000000, NULL));
 
     sysbus_create_simple(TYPE_PXA2XX_RTC, 0x40900000,
                          qdev_get_gpio_in(s->pic, PXA2XX_PIC_RTCALARM));
@@ -2338,8 +2341,10 @@ PXA2xxState *pxa255_init(unsigned int sdram_size)
         s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, "ssi");
     }
 
-    s->pcmcia[0] = pxa2xx_pcmcia_init(address_space, 0x20000000);
-    s->pcmcia[1] = pxa2xx_pcmcia_init(address_space, 0x30000000);
+    s->pcmcia[0] = PXA2XX_PCMCIA(sysbus_create_simple(TYPE_PXA2XX_PCMCIA,
+                                                      0x20000000, NULL));
+    s->pcmcia[1] = PXA2XX_PCMCIA(sysbus_create_simple(TYPE_PXA2XX_PCMCIA,
+                                                      0x30000000, NULL));
 
     sysbus_create_simple(TYPE_PXA2XX_RTC, 0x40900000,
                          qdev_get_gpio_in(s->pic, PXA2XX_PIC_RTCALARM));
diff --git a/hw/arm/pxa2xx_pic.c b/hw/arm/pxa2xx_pic.c
index 47132ab982..1373a0d275 100644
--- a/hw/arm/pxa2xx_pic.c
+++ b/hw/arm/pxa2xx_pic.c
@@ -15,6 +15,7 @@
 #include "cpu.h"
 #include "hw/arm/pxa.h"
 #include "hw/sysbus.h"
+#include "hw/qdev-properties.h"
 #include "migration/vmstate.h"
 #include "qom/object.h"
 #include "target/arm/cpregs.h"
@@ -271,12 +272,9 @@ static int pxa2xx_pic_post_load(void *opaque, int version_id)
     return 0;
 }
 
-DeviceState *pxa2xx_pic_init(hwaddr base, ARMCPU *cpu)
+static void pxa2xx_pic_reset_hold(Object *obj)
 {
-    DeviceState *dev = qdev_new(TYPE_PXA2XX_PIC);
-    PXA2xxPICState *s = PXA2XX_PIC(dev);
-
-    s->cpu = cpu;
+    PXA2xxPICState *s = PXA2XX_PIC(obj);
 
     s->int_pending[0] = 0;
     s->int_pending[1] = 0;
@@ -284,8 +282,23 @@ DeviceState *pxa2xx_pic_init(hwaddr base, ARMCPU *cpu)
     s->int_enabled[1] = 0;
     s->is_fiq[0] = 0;
     s->is_fiq[1] = 0;
+}
 
+DeviceState *pxa2xx_pic_init(hwaddr base, ARMCPU *cpu)
+{
+    DeviceState *dev = qdev_new(TYPE_PXA2XX_PIC);
+
+    object_property_set_link(OBJECT(dev), "arm-cpu",
+                             OBJECT(cpu), &error_abort);
     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
+
+    return dev;
+}
+
+static void pxa2xx_pic_realize(DeviceState *dev, Error **errp)
+{
+    PXA2xxPICState *s = PXA2XX_PIC(dev);
 
     qdev_init_gpio_in(dev, pxa2xx_pic_set_irq, PXA2XX_PIC_SRCS);
@@ -293,12 +306,9 @@
     memory_region_init_io(&s->iomem, OBJECT(s), &pxa2xx_pic_ops, s,
                           "pxa2xx-pic", 0x00100000);
     sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
-    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
 
     /* Enable IC coprocessor access. */
-    define_arm_cp_regs_with_opaque(cpu, pxa_pic_cp_reginfo, s);
-
-    return dev;
+    define_arm_cp_regs_with_opaque(s->cpu, pxa_pic_cp_reginfo, s);
 }
 
 static const VMStateDescription vmstate_pxa2xx_pic_regs = {
@@ -316,12 +326,22 @@ static const VMStateDescription vmstate_pxa2xx_pic_regs = {
     },
 };
 
+static Property pxa2xx_pic_properties[] = {
+    DEFINE_PROP_LINK("arm-cpu", PXA2xxPICState, cpu,
+                     TYPE_ARM_CPU, ARMCPU *),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
 static void pxa2xx_pic_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
+    ResettableClass *rc = RESETTABLE_CLASS(klass);
 
+    device_class_set_props(dc, pxa2xx_pic_properties);
+    dc->realize = pxa2xx_pic_realize;
     dc->desc = "PXA2xx PIC";
     dc->vmsd = &vmstate_pxa2xx_pic_regs;
+    rc->phases.hold = pxa2xx_pic_reset_hold;
 }
 
 static const TypeInfo pxa2xx_pic_info = {
diff --git a/hw/arm/realview.c b/hw/arm/realview.c
index 8f89526596..132217b2ed 100644
--- a/hw/arm/realview.c
+++ b/hw/arm/realview.c
@@ -384,7 +384,7 @@ static void realview_init(MachineState *machine,
     realview_binfo.ram_size = ram_size;
     realview_binfo.board_id = realview_board_id[board_type];
     realview_binfo.loader_start = (board_type == BOARD_PB_A8 ? 0x70000000 : 0);
-    arm_load_kernel(ARM_CPU(first_cpu), machine, &realview_binfo);
+    arm_load_kernel(cpu, machine, &realview_binfo);
 }
 
 static void realview_eb_init(MachineState *machine)
diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c
index 41191245b8..56f184b9ae 100644
--- a/hw/arm/sabrelite.c
+++ b/hw/arm/sabrelite.c
@@ -13,6 +13,7 @@
 #include "qemu/osdep.h"
 #include "qapi/error.h"
 #include "hw/arm/fsl-imx6.h"
+#include "hw/arm/boot.h"
 #include "hw/boards.h"
 #include "hw/qdev-properties.h"
 #include "qemu/error-report.h"
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index e8a82618f0..bce44690e5 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -149,6 +149,7 @@ static const char * const valid_cpus[] = {
     ARM_CPU_TYPE_NAME("cortex-a72"),
     ARM_CPU_TYPE_NAME("neoverse-n1"),
     ARM_CPU_TYPE_NAME("neoverse-v1"),
+    ARM_CPU_TYPE_NAME("neoverse-n2"),
     ARM_CPU_TYPE_NAME("max"),
 };
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 529f1c089c..92085d2d8f 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -215,6 +215,7 @@ static const char *valid_cpus[] = {
     ARM_CPU_TYPE_NAME("a64fx"),
     ARM_CPU_TYPE_NAME("neoverse-n1"),
     ARM_CPU_TYPE_NAME("neoverse-v1"),
+    ARM_CPU_TYPE_NAME("neoverse-n2"),
 #endif
     ARM_CPU_TYPE_NAME("cortex-a53"),
     ARM_CPU_TYPE_NAME("cortex-a57"),
diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c
index 8dc2ea83a9..dbb9793aa1 100644
--- a/hw/arm/xilinx_zynq.c
+++ b/hw/arm/xilinx_zynq.c
@@ -349,7 +349,7 @@ static void zynq_init(MachineState *machine)
     zynq_binfo.board_setup_addr = BOARD_SETUP_ADDR;
     zynq_binfo.write_board_setup = zynq_write_board_setup;
 
-    arm_load_kernel(ARM_CPU(first_cpu), machine, &zynq_binfo);
+    arm_load_kernel(cpu, machine, &zynq_binfo);
 }
 
 static void zynq_machine_class_init(ObjectClass *oc, void *data)
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
index 88c561ff63..537118224f 100644
--- a/hw/arm/xlnx-versal-virt.c
+++ b/hw/arm/xlnx-versal-virt.c
@@ -19,6 +19,7 @@
 #include "cpu.h"
 #include "hw/qdev-properties.h"
 #include "hw/arm/xlnx-versal.h"
+#include "hw/arm/boot.h"
 #include "qom/object.h"
 
 #define TYPE_XLNX_VERSAL_VIRT_MACHINE MACHINE_TYPE_NAME("xlnx-versal-virt")
diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c
index c5a07cfe19..4667cb333c 100644
--- a/hw/arm/xlnx-zcu102.c
+++ b/hw/arm/xlnx-zcu102.c
@@ -18,6 +18,7 @@
 #include "qemu/osdep.h"
 #include "qapi/error.h"
 #include "hw/arm/xlnx-zynqmp.h"
+#include "hw/arm/boot.h"
 #include "hw/boards.h"
 #include "qemu/error-report.h"
 #include "qemu/log.h"
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 03b6b8c986..942be7bd11 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -21,6 +21,7 @@
 #include "sysemu/tcg.h"
 #include "sysemu/runstate.h"
 #include "target/arm/cpu.h"
+#include "target/arm/cpu-features.h"
 #include "exec/exec-all.h"
 #include "exec/memop.h"
 #include "qemu/log.h"
diff --git a/hw/misc/led.c b/hw/misc/led.c
index f6d6d68bce..42bb43a39a 100644
--- a/hw/misc/led.c
+++ b/hw/misc/led.c
@@ -63,7 +63,7 @@ static void led_set_state_gpio_handler(void *opaque, int line, int new_state)
     LEDState *s = LED(opaque);
 
     assert(line == 0);
-    led_set_state(s, !!new_state != s->gpio_active_high);
+    led_set_state(s, !!new_state == s->gpio_active_high);
 }
 
 static void led_reset(DeviceState *dev)
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
index 37e209cda6..5b989f5b52 100644
--- a/hw/net/cadence_gem.c
+++ b/hw/net/cadence_gem.c
@@ -28,6 +28,7 @@
 #include "hw/irq.h"
 #include "hw/net/cadence_gem.h"
 #include "hw/qdev-properties.h"
+#include
"hw/registerfields.h" #include "migration/vmstate.h" #include "qapi/error.h" #include "qemu/log.h" @@ -44,210 +45,310 @@ } \ } while (0) -#define GEM_NWCTRL (0x00000000 / 4) /* Network Control reg */ -#define GEM_NWCFG (0x00000004 / 4) /* Network Config reg */ -#define GEM_NWSTATUS (0x00000008 / 4) /* Network Status reg */ -#define GEM_USERIO (0x0000000C / 4) /* User IO reg */ -#define GEM_DMACFG (0x00000010 / 4) /* DMA Control reg */ -#define GEM_TXSTATUS (0x00000014 / 4) /* TX Status reg */ -#define GEM_RXQBASE (0x00000018 / 4) /* RX Q Base address reg */ -#define GEM_TXQBASE (0x0000001C / 4) /* TX Q Base address reg */ -#define GEM_RXSTATUS (0x00000020 / 4) /* RX Status reg */ -#define GEM_ISR (0x00000024 / 4) /* Interrupt Status reg */ -#define GEM_IER (0x00000028 / 4) /* Interrupt Enable reg */ -#define GEM_IDR (0x0000002C / 4) /* Interrupt Disable reg */ -#define GEM_IMR (0x00000030 / 4) /* Interrupt Mask reg */ -#define GEM_PHYMNTNC (0x00000034 / 4) /* Phy Maintenance reg */ -#define GEM_RXPAUSE (0x00000038 / 4) /* RX Pause Time reg */ -#define GEM_TXPAUSE (0x0000003C / 4) /* TX Pause Time reg */ -#define GEM_TXPARTIALSF (0x00000040 / 4) /* TX Partial Store and Forward */ -#define GEM_RXPARTIALSF (0x00000044 / 4) /* RX Partial Store and Forward */ -#define GEM_JUMBO_MAX_LEN (0x00000048 / 4) /* Max Jumbo Frame Size */ -#define GEM_HASHLO (0x00000080 / 4) /* Hash Low address reg */ -#define GEM_HASHHI (0x00000084 / 4) /* Hash High address reg */ -#define GEM_SPADDR1LO (0x00000088 / 4) /* Specific addr 1 low reg */ -#define GEM_SPADDR1HI (0x0000008C / 4) /* Specific addr 1 high reg */ -#define GEM_SPADDR2LO (0x00000090 / 4) /* Specific addr 2 low reg */ -#define GEM_SPADDR2HI (0x00000094 / 4) /* Specific addr 2 high reg */ -#define GEM_SPADDR3LO (0x00000098 / 4) /* Specific addr 3 low reg */ -#define GEM_SPADDR3HI (0x0000009C / 4) /* Specific addr 3 high reg */ -#define GEM_SPADDR4LO (0x000000A0 / 4) /* Specific addr 4 low reg */ -#define GEM_SPADDR4HI (0x000000A4 / 4) /* Specific addr 4 high reg */ -#define GEM_TIDMATCH1 (0x000000A8 / 4) /* Type ID1 Match reg */ -#define GEM_TIDMATCH2 (0x000000AC / 4) /* Type ID2 Match reg */ -#define GEM_TIDMATCH3 (0x000000B0 / 4) /* Type ID3 Match reg */ -#define GEM_TIDMATCH4 (0x000000B4 / 4) /* Type ID4 Match reg */ -#define GEM_WOLAN (0x000000B8 / 4) /* Wake on LAN reg */ -#define GEM_IPGSTRETCH (0x000000BC / 4) /* IPG Stretch reg */ -#define GEM_SVLAN (0x000000C0 / 4) /* Stacked VLAN reg */ -#define GEM_MODID (0x000000FC / 4) /* Module ID reg */ -#define GEM_OCTTXLO (0x00000100 / 4) /* Octets transmitted Low reg */ -#define GEM_OCTTXHI (0x00000104 / 4) /* Octets transmitted High reg */ -#define GEM_TXCNT (0x00000108 / 4) /* Error-free Frames transmitted */ -#define GEM_TXBCNT (0x0000010C / 4) /* Error-free Broadcast Frames */ -#define GEM_TXMCNT (0x00000110 / 4) /* Error-free Multicast Frame */ -#define GEM_TXPAUSECNT (0x00000114 / 4) /* Pause Frames Transmitted */ -#define GEM_TX64CNT (0x00000118 / 4) /* Error-free 64 TX */ -#define GEM_TX65CNT (0x0000011C / 4) /* Error-free 65-127 TX */ -#define GEM_TX128CNT (0x00000120 / 4) /* Error-free 128-255 TX */ -#define GEM_TX256CNT (0x00000124 / 4) /* Error-free 256-511 */ -#define GEM_TX512CNT (0x00000128 / 4) /* Error-free 512-1023 TX */ -#define GEM_TX1024CNT (0x0000012C / 4) /* Error-free 1024-1518 TX */ -#define GEM_TX1519CNT (0x00000130 / 4) /* Error-free larger than 1519 TX */ -#define GEM_TXURUNCNT (0x00000134 / 4) /* TX under run error counter */ -#define GEM_SINGLECOLLCNT (0x00000138 / 4) /* 
Single Collision Frames */ -#define GEM_MULTCOLLCNT (0x0000013C / 4) /* Multiple Collision Frames */ -#define GEM_EXCESSCOLLCNT (0x00000140 / 4) /* Excessive Collision Frames */ -#define GEM_LATECOLLCNT (0x00000144 / 4) /* Late Collision Frames */ -#define GEM_DEFERTXCNT (0x00000148 / 4) /* Deferred Transmission Frames */ -#define GEM_CSENSECNT (0x0000014C / 4) /* Carrier Sense Error Counter */ -#define GEM_OCTRXLO (0x00000150 / 4) /* Octets Received register Low */ -#define GEM_OCTRXHI (0x00000154 / 4) /* Octets Received register High */ -#define GEM_RXCNT (0x00000158 / 4) /* Error-free Frames Received */ -#define GEM_RXBROADCNT (0x0000015C / 4) /* Error-free Broadcast Frames RX */ -#define GEM_RXMULTICNT (0x00000160 / 4) /* Error-free Multicast Frames RX */ -#define GEM_RXPAUSECNT (0x00000164 / 4) /* Pause Frames Received Counter */ -#define GEM_RX64CNT (0x00000168 / 4) /* Error-free 64 byte Frames RX */ -#define GEM_RX65CNT (0x0000016C / 4) /* Error-free 65-127B Frames RX */ -#define GEM_RX128CNT (0x00000170 / 4) /* Error-free 128-255B Frames RX */ -#define GEM_RX256CNT (0x00000174 / 4) /* Error-free 256-512B Frames RX */ -#define GEM_RX512CNT (0x00000178 / 4) /* Error-free 512-1023B Frames RX */ -#define GEM_RX1024CNT (0x0000017C / 4) /* Error-free 1024-1518B Frames RX */ -#define GEM_RX1519CNT (0x00000180 / 4) /* Error-free 1519-max Frames RX */ -#define GEM_RXUNDERCNT (0x00000184 / 4) /* Undersize Frames Received */ -#define GEM_RXOVERCNT (0x00000188 / 4) /* Oversize Frames Received */ -#define GEM_RXJABCNT (0x0000018C / 4) /* Jabbers Received Counter */ -#define GEM_RXFCSCNT (0x00000190 / 4) /* Frame Check seq. Error Counter */ -#define GEM_RXLENERRCNT (0x00000194 / 4) /* Length Field Error Counter */ -#define GEM_RXSYMERRCNT (0x00000198 / 4) /* Symbol Error Counter */ -#define GEM_RXALIGNERRCNT (0x0000019C / 4) /* Alignment Error Counter */ -#define GEM_RXRSCERRCNT (0x000001A0 / 4) /* Receive Resource Error Counter */ -#define GEM_RXORUNCNT (0x000001A4 / 4) /* Receive Overrun Counter */ -#define GEM_RXIPCSERRCNT (0x000001A8 / 4) /* IP header Checksum Err Counter */ -#define GEM_RXTCPCCNT (0x000001AC / 4) /* TCP Checksum Error Counter */ -#define GEM_RXUDPCCNT (0x000001B0 / 4) /* UDP Checksum Error Counter */ - -#define GEM_1588S (0x000001D0 / 4) /* 1588 Timer Seconds */ -#define GEM_1588NS (0x000001D4 / 4) /* 1588 Timer Nanoseconds */ -#define GEM_1588ADJ (0x000001D8 / 4) /* 1588 Timer Adjust */ -#define GEM_1588INC (0x000001DC / 4) /* 1588 Timer Increment */ -#define GEM_PTPETXS (0x000001E0 / 4) /* PTP Event Frame Transmitted (s) */ -#define GEM_PTPETXNS (0x000001E4 / 4) /* - * PTP Event Frame Transmitted (ns) - */ -#define GEM_PTPERXS (0x000001E8 / 4) /* PTP Event Frame Received (s) */ -#define GEM_PTPERXNS (0x000001EC / 4) /* PTP Event Frame Received (ns) */ -#define GEM_PTPPTXS (0x000001E0 / 4) /* PTP Peer Frame Transmitted (s) */ -#define GEM_PTPPTXNS (0x000001E4 / 4) /* PTP Peer Frame Transmitted (ns) */ -#define GEM_PTPPRXS (0x000001E8 / 4) /* PTP Peer Frame Received (s) */ -#define GEM_PTPPRXNS (0x000001EC / 4) /* PTP Peer Frame Received (ns) */ +REG32(NWCTRL, 0x0) /* Network Control reg */ + FIELD(NWCTRL, LOOPBACK , 0, 1) + FIELD(NWCTRL, LOOPBACK_LOCAL , 1, 1) + FIELD(NWCTRL, ENABLE_RECEIVE, 2, 1) + FIELD(NWCTRL, ENABLE_TRANSMIT, 3, 1) + FIELD(NWCTRL, MAN_PORT_EN , 4, 1) + FIELD(NWCTRL, CLEAR_ALL_STATS_REGS , 5, 1) + FIELD(NWCTRL, INC_ALL_STATS_REGS, 6, 1) + FIELD(NWCTRL, STATS_WRITE_EN, 7, 1) + FIELD(NWCTRL, BACK_PRESSURE, 8, 1) + FIELD(NWCTRL, TRANSMIT_START , 9, 1) + 
FIELD(NWCTRL, TRANSMIT_HALT, 10, 1) + FIELD(NWCTRL, TX_PAUSE_FRAME_RE, 11, 1) + FIELD(NWCTRL, TX_PAUSE_FRAME_ZE, 12, 1) + FIELD(NWCTRL, STATS_TAKE_SNAP, 13, 1) + FIELD(NWCTRL, STATS_READ_SNAP, 14, 1) + FIELD(NWCTRL, STORE_RX_TS, 15, 1) + FIELD(NWCTRL, PFC_ENABLE, 16, 1) + FIELD(NWCTRL, PFC_PRIO_BASED, 17, 1) + FIELD(NWCTRL, FLUSH_RX_PKT_PCLK , 18, 1) + FIELD(NWCTRL, TX_LPI_EN, 19, 1) + FIELD(NWCTRL, PTP_UNICAST_ENA, 20, 1) + FIELD(NWCTRL, ALT_SGMII_MODE, 21, 1) + FIELD(NWCTRL, STORE_UDP_OFFSET, 22, 1) + FIELD(NWCTRL, EXT_TSU_PORT_EN, 23, 1) + FIELD(NWCTRL, ONE_STEP_SYNC_MO, 24, 1) + FIELD(NWCTRL, PFC_CTRL , 25, 1) + FIELD(NWCTRL, EXT_RXQ_SEL_EN , 26, 1) + FIELD(NWCTRL, OSS_CORRECTION_FIELD, 27, 1) + FIELD(NWCTRL, SEL_MII_ON_RGMII, 28, 1) + FIELD(NWCTRL, TWO_PT_FIVE_GIG, 29, 1) + FIELD(NWCTRL, IFG_EATS_QAV_CREDIT, 30, 1) + +REG32(NWCFG, 0x4) /* Network Config reg */ + FIELD(NWCFG, SPEED, 0, 1) + FIELD(NWCFG, FULL_DUPLEX, 1, 1) + FIELD(NWCFG, DISCARD_NON_VLAN_FRAMES, 2, 1) + FIELD(NWCFG, JUMBO_FRAMES, 3, 1) + FIELD(NWCFG, PROMISC, 4, 1) + FIELD(NWCFG, NO_BROADCAST, 5, 1) + FIELD(NWCFG, MULTICAST_HASH_EN, 6, 1) + FIELD(NWCFG, UNICAST_HASH_EN, 7, 1) + FIELD(NWCFG, RECV_1536_BYTE_FRAMES, 8, 1) + FIELD(NWCFG, EXTERNAL_ADDR_MATCH_EN, 9, 1) + FIELD(NWCFG, GIGABIT_MODE_ENABLE, 10, 1) + FIELD(NWCFG, PCS_SELECT, 11, 1) + FIELD(NWCFG, RETRY_TEST, 12, 1) + FIELD(NWCFG, PAUSE_ENABLE, 13, 1) + FIELD(NWCFG, RECV_BUF_OFFSET, 14, 2) + FIELD(NWCFG, LEN_ERR_DISCARD, 16, 1) + FIELD(NWCFG, FCS_REMOVE, 17, 1) + FIELD(NWCFG, MDC_CLOCK_DIV, 18, 3) + FIELD(NWCFG, DATA_BUS_WIDTH, 21, 2) + FIELD(NWCFG, DISABLE_COPY_PAUSE_FRAMES, 23, 1) + FIELD(NWCFG, RECV_CSUM_OFFLOAD_EN, 24, 1) + FIELD(NWCFG, EN_HALF_DUPLEX_RX, 25, 1) + FIELD(NWCFG, IGNORE_RX_FCS, 26, 1) + FIELD(NWCFG, SGMII_MODE_ENABLE, 27, 1) + FIELD(NWCFG, IPG_STRETCH_ENABLE, 28, 1) + FIELD(NWCFG, NSP_ACCEPT, 29, 1) + FIELD(NWCFG, IGNORE_IPG_RX_ER, 30, 1) + FIELD(NWCFG, UNI_DIRECTION_ENABLE, 31, 1) + +REG32(NWSTATUS, 0x8) /* Network Status reg */ +REG32(USERIO, 0xc) /* User IO reg */ + +REG32(DMACFG, 0x10) /* DMA Control reg */ + FIELD(DMACFG, SEND_BCAST_TO_ALL_QS, 31, 1) + FIELD(DMACFG, DMA_ADDR_BUS_WIDTH, 30, 1) + FIELD(DMACFG, TX_BD_EXT_MODE_EN , 29, 1) + FIELD(DMACFG, RX_BD_EXT_MODE_EN , 28, 1) + FIELD(DMACFG, FORCE_MAX_AMBA_BURST_TX, 26, 1) + FIELD(DMACFG, FORCE_MAX_AMBA_BURST_RX, 25, 1) + FIELD(DMACFG, FORCE_DISCARD_ON_ERR, 24, 1) + FIELD(DMACFG, RX_BUF_SIZE, 16, 8) + FIELD(DMACFG, CRC_ERROR_REPORT, 13, 1) + FIELD(DMACFG, INF_LAST_DBUF_SIZE_EN, 12, 1) + FIELD(DMACFG, TX_PBUF_CSUM_OFFLOAD, 11, 1) + FIELD(DMACFG, TX_PBUF_SIZE, 10, 1) + FIELD(DMACFG, RX_PBUF_SIZE, 8, 2) + FIELD(DMACFG, ENDIAN_SWAP_PACKET, 7, 1) + FIELD(DMACFG, ENDIAN_SWAP_MGNT, 6, 1) + FIELD(DMACFG, HDR_DATA_SPLIT_EN, 5, 1) + FIELD(DMACFG, AMBA_BURST_LEN , 0, 5) +#define GEM_DMACFG_RBUFSZ_MUL 64 /* DMA RX Buffer Size multiplier */ + +REG32(TXSTATUS, 0x14) /* TX Status reg */ + FIELD(TXSTATUS, TX_USED_BIT_READ_MIDFRAME, 12, 1) + FIELD(TXSTATUS, TX_FRAME_TOO_LARGE, 11, 1) + FIELD(TXSTATUS, TX_DMA_LOCKUP, 10, 1) + FIELD(TXSTATUS, TX_MAC_LOCKUP, 9, 1) + FIELD(TXSTATUS, RESP_NOT_OK, 8, 1) + FIELD(TXSTATUS, LATE_COLLISION, 7, 1) + FIELD(TXSTATUS, TRANSMIT_UNDER_RUN, 6, 1) + FIELD(TXSTATUS, TRANSMIT_COMPLETE, 5, 1) + FIELD(TXSTATUS, AMBA_ERROR, 4, 1) + FIELD(TXSTATUS, TRANSMIT_GO, 3, 1) + FIELD(TXSTATUS, RETRY_LIMIT, 2, 1) + FIELD(TXSTATUS, COLLISION, 1, 1) + FIELD(TXSTATUS, USED_BIT_READ, 0, 1) + +REG32(RXQBASE, 0x18) /* RX Q Base address reg */ +REG32(TXQBASE, 0x1c) /* TX Q Base address reg */ 
+REG32(RXSTATUS, 0x20) /* RX Status reg */ + FIELD(RXSTATUS, RX_DMA_LOCKUP, 5, 1) + FIELD(RXSTATUS, RX_MAC_LOCKUP, 4, 1) + FIELD(RXSTATUS, RESP_NOT_OK, 3, 1) + FIELD(RXSTATUS, RECEIVE_OVERRUN, 2, 1) + FIELD(RXSTATUS, FRAME_RECEIVED, 1, 1) + FIELD(RXSTATUS, BUF_NOT_AVAILABLE, 0, 1) + +REG32(ISR, 0x24) /* Interrupt Status reg */ + FIELD(ISR, TX_LOCKUP, 31, 1) + FIELD(ISR, RX_LOCKUP, 30, 1) + FIELD(ISR, TSU_TIMER, 29, 1) + FIELD(ISR, WOL, 28, 1) + FIELD(ISR, RECV_LPI, 27, 1) + FIELD(ISR, TSU_SEC_INCR, 26, 1) + FIELD(ISR, PTP_PDELAY_RESP_XMIT, 25, 1) + FIELD(ISR, PTP_PDELAY_REQ_XMIT, 24, 1) + FIELD(ISR, PTP_PDELAY_RESP_RECV, 23, 1) + FIELD(ISR, PTP_PDELAY_REQ_RECV, 22, 1) + FIELD(ISR, PTP_SYNC_XMIT, 21, 1) + FIELD(ISR, PTP_DELAY_REQ_XMIT, 20, 1) + FIELD(ISR, PTP_SYNC_RECV, 19, 1) + FIELD(ISR, PTP_DELAY_REQ_RECV, 18, 1) + FIELD(ISR, PCS_LP_PAGE_RECV, 17, 1) + FIELD(ISR, PCS_AN_COMPLETE, 16, 1) + FIELD(ISR, EXT_IRQ, 15, 1) + FIELD(ISR, PAUSE_FRAME_XMIT, 14, 1) + FIELD(ISR, PAUSE_TIME_ELAPSED, 13, 1) + FIELD(ISR, PAUSE_FRAME_RECV, 12, 1) + FIELD(ISR, RESP_NOT_OK, 11, 1) + FIELD(ISR, RECV_OVERRUN, 10, 1) + FIELD(ISR, LINK_CHANGE, 9, 1) + FIELD(ISR, USXGMII_INT, 8, 1) + FIELD(ISR, XMIT_COMPLETE, 7, 1) + FIELD(ISR, AMBA_ERROR, 6, 1) + FIELD(ISR, RETRY_EXCEEDED, 5, 1) + FIELD(ISR, XMIT_UNDER_RUN, 4, 1) + FIELD(ISR, TX_USED, 3, 1) + FIELD(ISR, RX_USED, 2, 1) + FIELD(ISR, RECV_COMPLETE, 1, 1) + FIELD(ISR, MGNT_FRAME_SENT, 0, 1) +REG32(IER, 0x28) /* Interrupt Enable reg */ +REG32(IDR, 0x2c) /* Interrupt Disable reg */ +REG32(IMR, 0x30) /* Interrupt Mask reg */ + +REG32(PHYMNTNC, 0x34) /* Phy Maintenance reg */ + FIELD(PHYMNTNC, DATA, 0, 16) + FIELD(PHYMNTNC, REG_ADDR, 18, 5) + FIELD(PHYMNTNC, PHY_ADDR, 23, 5) + FIELD(PHYMNTNC, OP, 28, 2) + FIELD(PHYMNTNC, ST, 30, 2) +#define MDIO_OP_READ 0x3 +#define MDIO_OP_WRITE 0x2 + +REG32(RXPAUSE, 0x38) /* RX Pause Time reg */ +REG32(TXPAUSE, 0x3c) /* TX Pause Time reg */ +REG32(TXPARTIALSF, 0x40) /* TX Partial Store and Forward */ +REG32(RXPARTIALSF, 0x44) /* RX Partial Store and Forward */ +REG32(JUMBO_MAX_LEN, 0x48) /* Max Jumbo Frame Size */ +REG32(HASHLO, 0x80) /* Hash Low address reg */ +REG32(HASHHI, 0x84) /* Hash High address reg */ +REG32(SPADDR1LO, 0x88) /* Specific addr 1 low reg */ +REG32(SPADDR1HI, 0x8c) /* Specific addr 1 high reg */ +REG32(SPADDR2LO, 0x90) /* Specific addr 2 low reg */ +REG32(SPADDR2HI, 0x94) /* Specific addr 2 high reg */ +REG32(SPADDR3LO, 0x98) /* Specific addr 3 low reg */ +REG32(SPADDR3HI, 0x9c) /* Specific addr 3 high reg */ +REG32(SPADDR4LO, 0xa0) /* Specific addr 4 low reg */ +REG32(SPADDR4HI, 0xa4) /* Specific addr 4 high reg */ +REG32(TIDMATCH1, 0xa8) /* Type ID1 Match reg */ +REG32(TIDMATCH2, 0xac) /* Type ID2 Match reg */ +REG32(TIDMATCH3, 0xb0) /* Type ID3 Match reg */ +REG32(TIDMATCH4, 0xb4) /* Type ID4 Match reg */ +REG32(WOLAN, 0xb8) /* Wake on LAN reg */ +REG32(IPGSTRETCH, 0xbc) /* IPG Stretch reg */ +REG32(SVLAN, 0xc0) /* Stacked VLAN reg */ +REG32(MODID, 0xfc) /* Module ID reg */ +REG32(OCTTXLO, 0x100) /* Octects transmitted Low reg */ +REG32(OCTTXHI, 0x104) /* Octects transmitted High reg */ +REG32(TXCNT, 0x108) /* Error-free Frames transmitted */ +REG32(TXBCNT, 0x10c) /* Error-free Broadcast Frames */ +REG32(TXMCNT, 0x110) /* Error-free Multicast Frame */ +REG32(TXPAUSECNT, 0x114) /* Pause Frames Transmitted */ +REG32(TX64CNT, 0x118) /* Error-free 64 TX */ +REG32(TX65CNT, 0x11c) /* Error-free 65-127 TX */ +REG32(TX128CNT, 0x120) /* Error-free 128-255 TX */ +REG32(TX256CNT, 0x124) /* Error-free 256-511 */ 
+REG32(TX512CNT, 0x128) /* Error-free 512-1023 TX */ +REG32(TX1024CNT, 0x12c) /* Error-free 1024-1518 TX */ +REG32(TX1519CNT, 0x130) /* Error-free larger than 1519 TX */ +REG32(TXURUNCNT, 0x134) /* TX under run error counter */ +REG32(SINGLECOLLCNT, 0x138) /* Single Collision Frames */ +REG32(MULTCOLLCNT, 0x13c) /* Multiple Collision Frames */ +REG32(EXCESSCOLLCNT, 0x140) /* Excessive Collision Frames */ +REG32(LATECOLLCNT, 0x144) /* Late Collision Frames */ +REG32(DEFERTXCNT, 0x148) /* Deferred Transmission Frames */ +REG32(CSENSECNT, 0x14c) /* Carrier Sense Error Counter */ +REG32(OCTRXLO, 0x150) /* Octects Received register Low */ +REG32(OCTRXHI, 0x154) /* Octects Received register High */ +REG32(RXCNT, 0x158) /* Error-free Frames Received */ +REG32(RXBROADCNT, 0x15c) /* Error-free Broadcast Frames RX */ +REG32(RXMULTICNT, 0x160) /* Error-free Multicast Frames RX */ +REG32(RXPAUSECNT, 0x164) /* Pause Frames Received Counter */ +REG32(RX64CNT, 0x168) /* Error-free 64 byte Frames RX */ +REG32(RX65CNT, 0x16c) /* Error-free 65-127B Frames RX */ +REG32(RX128CNT, 0x170) /* Error-free 128-255B Frames RX */ +REG32(RX256CNT, 0x174) /* Error-free 256-512B Frames RX */ +REG32(RX512CNT, 0x178) /* Error-free 512-1023B Frames RX */ +REG32(RX1024CNT, 0x17c) /* Error-free 1024-1518B Frames RX */ +REG32(RX1519CNT, 0x180) /* Error-free 1519-max Frames RX */ +REG32(RXUNDERCNT, 0x184) /* Undersize Frames Received */ +REG32(RXOVERCNT, 0x188) /* Oversize Frames Received */ +REG32(RXJABCNT, 0x18c) /* Jabbers Received Counter */ +REG32(RXFCSCNT, 0x190) /* Frame Check seq. Error Counter */ +REG32(RXLENERRCNT, 0x194) /* Length Field Error Counter */ +REG32(RXSYMERRCNT, 0x198) /* Symbol Error Counter */ +REG32(RXALIGNERRCNT, 0x19c) /* Alignment Error Counter */ +REG32(RXRSCERRCNT, 0x1a0) /* Receive Resource Error Counter */ +REG32(RXORUNCNT, 0x1a4) /* Receive Overrun Counter */ +REG32(RXIPCSERRCNT, 0x1a8) /* IP header Checksum Err Counter */ +REG32(RXTCPCCNT, 0x1ac) /* TCP Checksum Error Counter */ +REG32(RXUDPCCNT, 0x1b0) /* UDP Checksum Error Counter */ + +REG32(1588S, 0x1d0) /* 1588 Timer Seconds */ +REG32(1588NS, 0x1d4) /* 1588 Timer Nanoseconds */ +REG32(1588ADJ, 0x1d8) /* 1588 Timer Adjust */ +REG32(1588INC, 0x1dc) /* 1588 Timer Increment */ +REG32(PTPETXS, 0x1e0) /* PTP Event Frame Transmitted (s) */ +REG32(PTPETXNS, 0x1e4) /* PTP Event Frame Transmitted (ns) */ +REG32(PTPERXS, 0x1e8) /* PTP Event Frame Received (s) */ +REG32(PTPERXNS, 0x1ec) /* PTP Event Frame Received (ns) */ +REG32(PTPPTXS, 0x1e0) /* PTP Peer Frame Transmitted (s) */ +REG32(PTPPTXNS, 0x1e4) /* PTP Peer Frame Transmitted (ns) */ +REG32(PTPPRXS, 0x1e8) /* PTP Peer Frame Received (s) */ +REG32(PTPPRXNS, 0x1ec) /* PTP Peer Frame Received (ns) */ /* Design Configuration Registers */ -#define GEM_DESCONF (0x00000280 / 4) -#define GEM_DESCONF2 (0x00000284 / 4) -#define GEM_DESCONF3 (0x00000288 / 4) -#define GEM_DESCONF4 (0x0000028C / 4) -#define GEM_DESCONF5 (0x00000290 / 4) -#define GEM_DESCONF6 (0x00000294 / 4) -#define GEM_DESCONF6_64B_MASK (1U << 23) -#define GEM_DESCONF7 (0x00000298 / 4) - -#define GEM_INT_Q1_STATUS (0x00000400 / 4) -#define GEM_INT_Q1_MASK (0x00000640 / 4) - -#define GEM_TRANSMIT_Q1_PTR (0x00000440 / 4) -#define GEM_TRANSMIT_Q7_PTR (GEM_TRANSMIT_Q1_PTR + 6) - -#define GEM_RECEIVE_Q1_PTR (0x00000480 / 4) -#define GEM_RECEIVE_Q7_PTR (GEM_RECEIVE_Q1_PTR + 6) - -#define GEM_TBQPH (0x000004C8 / 4) -#define GEM_RBQPH (0x000004D4 / 4) - -#define GEM_INT_Q1_ENABLE (0x00000600 / 4) -#define GEM_INT_Q7_ENABLE (GEM_INT_Q1_ENABLE + 
6) - -#define GEM_INT_Q1_DISABLE (0x00000620 / 4) -#define GEM_INT_Q7_DISABLE (GEM_INT_Q1_DISABLE + 6) - -#define GEM_INT_Q1_MASK (0x00000640 / 4) -#define GEM_INT_Q7_MASK (GEM_INT_Q1_MASK + 6) - -#define GEM_SCREENING_TYPE1_REGISTER_0 (0x00000500 / 4) - -#define GEM_ST1R_UDP_PORT_MATCH_ENABLE (1 << 29) -#define GEM_ST1R_DSTC_ENABLE (1 << 28) -#define GEM_ST1R_UDP_PORT_MATCH_SHIFT (12) -#define GEM_ST1R_UDP_PORT_MATCH_WIDTH (27 - GEM_ST1R_UDP_PORT_MATCH_SHIFT + 1) -#define GEM_ST1R_DSTC_MATCH_SHIFT (4) -#define GEM_ST1R_DSTC_MATCH_WIDTH (11 - GEM_ST1R_DSTC_MATCH_SHIFT + 1) -#define GEM_ST1R_QUEUE_SHIFT (0) -#define GEM_ST1R_QUEUE_WIDTH (3 - GEM_ST1R_QUEUE_SHIFT + 1) - -#define GEM_SCREENING_TYPE2_REGISTER_0 (0x00000540 / 4) - -#define GEM_ST2R_COMPARE_A_ENABLE (1 << 18) -#define GEM_ST2R_COMPARE_A_SHIFT (13) -#define GEM_ST2R_COMPARE_WIDTH (17 - GEM_ST2R_COMPARE_A_SHIFT + 1) -#define GEM_ST2R_ETHERTYPE_ENABLE (1 << 12) -#define GEM_ST2R_ETHERTYPE_INDEX_SHIFT (9) -#define GEM_ST2R_ETHERTYPE_INDEX_WIDTH (11 - GEM_ST2R_ETHERTYPE_INDEX_SHIFT \ - + 1) -#define GEM_ST2R_QUEUE_SHIFT (0) -#define GEM_ST2R_QUEUE_WIDTH (3 - GEM_ST2R_QUEUE_SHIFT + 1) - -#define GEM_SCREENING_TYPE2_ETHERTYPE_REG_0 (0x000006e0 / 4) -#define GEM_TYPE2_COMPARE_0_WORD_0 (0x00000700 / 4) - -#define GEM_T2CW1_COMPARE_OFFSET_SHIFT (7) -#define GEM_T2CW1_COMPARE_OFFSET_WIDTH (8 - GEM_T2CW1_COMPARE_OFFSET_SHIFT + 1) -#define GEM_T2CW1_OFFSET_VALUE_SHIFT (0) -#define GEM_T2CW1_OFFSET_VALUE_WIDTH (6 - GEM_T2CW1_OFFSET_VALUE_SHIFT + 1) +REG32(DESCONF, 0x280) +REG32(DESCONF2, 0x284) +REG32(DESCONF3, 0x288) +REG32(DESCONF4, 0x28c) +REG32(DESCONF5, 0x290) +REG32(DESCONF6, 0x294) + FIELD(DESCONF6, DMA_ADDR_64B, 23, 1) +REG32(DESCONF7, 0x298) + +REG32(INT_Q1_STATUS, 0x400) +REG32(INT_Q1_MASK, 0x640) + +REG32(TRANSMIT_Q1_PTR, 0x440) +REG32(TRANSMIT_Q7_PTR, 0x458) + +REG32(RECEIVE_Q1_PTR, 0x480) +REG32(RECEIVE_Q7_PTR, 0x498) + +REG32(TBQPH, 0x4c8) +REG32(RBQPH, 0x4d4) + +REG32(INT_Q1_ENABLE, 0x600) +REG32(INT_Q7_ENABLE, 0x618) + +REG32(INT_Q1_DISABLE, 0x620) +REG32(INT_Q7_DISABLE, 0x638) + +REG32(SCREENING_TYPE1_REG0, 0x500) + FIELD(SCREENING_TYPE1_REG0, QUEUE_NUM, 0, 4) + FIELD(SCREENING_TYPE1_REG0, DSTC_MATCH, 4, 8) + FIELD(SCREENING_TYPE1_REG0, UDP_PORT_MATCH, 12, 16) + FIELD(SCREENING_TYPE1_REG0, DSTC_ENABLE, 28, 1) + FIELD(SCREENING_TYPE1_REG0, UDP_PORT_MATCH_EN, 29, 1) + FIELD(SCREENING_TYPE1_REG0, DROP_ON_MATCH, 30, 1) + +REG32(SCREENING_TYPE2_REG0, 0x540) + FIELD(SCREENING_TYPE2_REG0, QUEUE_NUM, 0, 4) + FIELD(SCREENING_TYPE2_REG0, VLAN_PRIORITY, 4, 3) + FIELD(SCREENING_TYPE2_REG0, VLAN_ENABLE, 8, 1) + FIELD(SCREENING_TYPE2_REG0, ETHERTYPE_REG_INDEX, 9, 3) + FIELD(SCREENING_TYPE2_REG0, ETHERTYPE_ENABLE, 12, 1) + FIELD(SCREENING_TYPE2_REG0, COMPARE_A, 13, 5) + FIELD(SCREENING_TYPE2_REG0, COMPARE_A_ENABLE, 18, 1) + FIELD(SCREENING_TYPE2_REG0, COMPARE_B, 19, 5) + FIELD(SCREENING_TYPE2_REG0, COMPARE_B_ENABLE, 24, 1) + FIELD(SCREENING_TYPE2_REG0, COMPARE_C, 25, 5) + FIELD(SCREENING_TYPE2_REG0, COMPARE_C_ENABLE, 30, 1) + FIELD(SCREENING_TYPE2_REG0, DROP_ON_MATCH, 31, 1) + +REG32(SCREENING_TYPE2_ETHERTYPE_REG0, 0x6e0) + +REG32(TYPE2_COMPARE_0_WORD_0, 0x700) + FIELD(TYPE2_COMPARE_0_WORD_0, MASK_VALUE, 0, 16) + FIELD(TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE, 16, 16) + +REG32(TYPE2_COMPARE_0_WORD_1, 0x704) + FIELD(TYPE2_COMPARE_0_WORD_1, OFFSET_VALUE, 0, 7) + FIELD(TYPE2_COMPARE_0_WORD_1, COMPARE_OFFSET, 7, 2) + FIELD(TYPE2_COMPARE_0_WORD_1, DISABLE_MASK, 9, 1) + FIELD(TYPE2_COMPARE_0_WORD_1, COMPARE_VLAN_ID, 10, 1) 
/*****************************************/ -#define GEM_NWCTRL_TXSTART 0x00000200 /* Transmit Enable */ -#define GEM_NWCTRL_TXENA 0x00000008 /* Transmit Enable */ -#define GEM_NWCTRL_RXENA 0x00000004 /* Receive Enable */ -#define GEM_NWCTRL_LOCALLOOP 0x00000002 /* Local Loopback */ - -#define GEM_NWCFG_STRIP_FCS 0x00020000 /* Strip FCS field */ -#define GEM_NWCFG_LERR_DISC 0x00010000 /* Discard RX frames with len err */ -#define GEM_NWCFG_BUFF_OFST_M 0x0000C000 /* Receive buffer offset mask */ -#define GEM_NWCFG_BUFF_OFST_S 14 /* Receive buffer offset shift */ -#define GEM_NWCFG_RCV_1538 0x00000100 /* Receive 1538 bytes frame */ -#define GEM_NWCFG_UCAST_HASH 0x00000080 /* accept unicast if hash match */ -#define GEM_NWCFG_MCAST_HASH 0x00000040 /* accept multicast if hash match */ -#define GEM_NWCFG_BCAST_REJ 0x00000020 /* Reject broadcast packets */ -#define GEM_NWCFG_PROMISC 0x00000010 /* Accept all packets */ -#define GEM_NWCFG_JUMBO_FRAME 0x00000008 /* Jumbo Frames enable */ - -#define GEM_DMACFG_ADDR_64B (1U << 30) -#define GEM_DMACFG_TX_BD_EXT (1U << 29) -#define GEM_DMACFG_RX_BD_EXT (1U << 28) -#define GEM_DMACFG_RBUFSZ_M 0x00FF0000 /* DMA RX Buffer Size mask */ -#define GEM_DMACFG_RBUFSZ_S 16 /* DMA RX Buffer Size shift */ -#define GEM_DMACFG_RBUFSZ_MUL 64 /* DMA RX Buffer Size multiplier */ -#define GEM_DMACFG_TXCSUM_OFFL 0x00000800 /* Transmit checksum offload */ - -#define GEM_TXSTATUS_TXCMPL 0x00000020 /* Transmit Complete */ -#define GEM_TXSTATUS_USED 0x00000001 /* sw owned descriptor encountered */ -#define GEM_RXSTATUS_FRMRCVD 0x00000002 /* Frame received */ -#define GEM_RXSTATUS_NOBUF 0x00000001 /* Buffer unavailable */ -/* GEM_ISR GEM_IER GEM_IDR GEM_IMR */ -#define GEM_INT_TXCMPL 0x00000080 /* Transmit Complete */ -#define GEM_INT_AMBA_ERR 0x00000040 -#define GEM_INT_TXUSED 0x00000008 -#define GEM_INT_RXUSED 0x00000004 -#define GEM_INT_RXCMPL 0x00000002 - -#define GEM_PHYMNTNC_OP_R 0x20000000 /* read operation */ -#define GEM_PHYMNTNC_OP_W 0x10000000 /* write operation */ -#define GEM_PHYMNTNC_ADDR 0x0F800000 /* Address bits */ -#define GEM_PHYMNTNC_ADDR_SHFT 23 -#define GEM_PHYMNTNC_REG 0x007C0000 /* register bits */ -#define GEM_PHYMNTNC_REG_SHIFT 18 /* Marvell PHY definitions */ #define BOARD_PHY_ADDRESS 0 /* PHY address we will emulate a device at */ @@ -325,7 +426,7 @@ static inline uint64_t tx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc) { uint64_t ret = desc[0]; - if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) { + if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) { ret |= (uint64_t)desc[2] << 32; } return ret; @@ -370,7 +471,7 @@ static inline uint64_t rx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc) { uint64_t ret = desc[0] & ~0x3UL; - if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) { + if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) { ret |= (uint64_t)desc[2] << 32; } return ret; @@ -380,11 +481,11 @@ static inline int gem_get_desc_len(CadenceGEMState *s, bool rx_n_tx) { int ret = 2; - if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) { + if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) { ret += 2; } - if (s->regs[GEM_DMACFG] & (rx_n_tx ? GEM_DMACFG_RX_BD_EXT - : GEM_DMACFG_TX_BD_EXT)) { + if (s->regs[R_DMACFG] & (rx_n_tx ? 
R_DMACFG_RX_BD_EXT_MODE_EN_MASK + : R_DMACFG_TX_BD_EXT_MODE_EN_MASK)) { ret += 2; } @@ -456,8 +557,8 @@ static const uint8_t broadcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; static uint32_t gem_get_max_buf_len(CadenceGEMState *s, bool tx) { uint32_t size; - if (s->regs[GEM_NWCFG] & GEM_NWCFG_JUMBO_FRAME) { - size = s->regs[GEM_JUMBO_MAX_LEN]; + if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, JUMBO_FRAMES)) { + size = s->regs[R_JUMBO_MAX_LEN]; if (size > s->jumbo_max_len) { size = s->jumbo_max_len; qemu_log_mask(LOG_GUEST_ERROR, "GEM_JUMBO_MAX_LEN reg cannot be" @@ -466,7 +567,8 @@ static uint32_t gem_get_max_buf_len(CadenceGEMState *s, bool tx) } else if (tx) { size = 1518; } else { - size = s->regs[GEM_NWCFG] & GEM_NWCFG_RCV_1538 ? 1538 : 1518; + size = FIELD_EX32(s->regs[R_NWCFG], + NWCFG, RECV_1536_BYTE_FRAMES) ? 1538 : 1518; } return size; } @@ -474,10 +576,10 @@ static uint32_t gem_get_max_buf_len(CadenceGEMState *s, bool tx) static void gem_set_isr(CadenceGEMState *s, int q, uint32_t flag) { if (q == 0) { - s->regs[GEM_ISR] |= flag & ~(s->regs[GEM_IMR]); + s->regs[R_ISR] |= flag & ~(s->regs[R_IMR]); } else { - s->regs[GEM_INT_Q1_STATUS + q - 1] |= flag & - ~(s->regs[GEM_INT_Q1_MASK + q - 1]); + s->regs[R_INT_Q1_STATUS + q - 1] |= flag & + ~(s->regs[R_INT_Q1_MASK + q - 1]); } } @@ -491,43 +593,43 @@ static void gem_init_register_masks(CadenceGEMState *s) unsigned int i; /* Mask of register bits which are read only */ memset(&s->regs_ro[0], 0, sizeof(s->regs_ro)); - s->regs_ro[GEM_NWCTRL] = 0xFFF80000; - s->regs_ro[GEM_NWSTATUS] = 0xFFFFFFFF; - s->regs_ro[GEM_DMACFG] = 0x8E00F000; - s->regs_ro[GEM_TXSTATUS] = 0xFFFFFE08; - s->regs_ro[GEM_RXQBASE] = 0x00000003; - s->regs_ro[GEM_TXQBASE] = 0x00000003; - s->regs_ro[GEM_RXSTATUS] = 0xFFFFFFF0; - s->regs_ro[GEM_ISR] = 0xFFFFFFFF; - s->regs_ro[GEM_IMR] = 0xFFFFFFFF; - s->regs_ro[GEM_MODID] = 0xFFFFFFFF; + s->regs_ro[R_NWCTRL] = 0xFFF80000; + s->regs_ro[R_NWSTATUS] = 0xFFFFFFFF; + s->regs_ro[R_DMACFG] = 0x8E00F000; + s->regs_ro[R_TXSTATUS] = 0xFFFFFE08; + s->regs_ro[R_RXQBASE] = 0x00000003; + s->regs_ro[R_TXQBASE] = 0x00000003; + s->regs_ro[R_RXSTATUS] = 0xFFFFFFF0; + s->regs_ro[R_ISR] = 0xFFFFFFFF; + s->regs_ro[R_IMR] = 0xFFFFFFFF; + s->regs_ro[R_MODID] = 0xFFFFFFFF; for (i = 0; i < s->num_priority_queues; i++) { - s->regs_ro[GEM_INT_Q1_STATUS + i] = 0xFFFFFFFF; - s->regs_ro[GEM_INT_Q1_ENABLE + i] = 0xFFFFF319; - s->regs_ro[GEM_INT_Q1_DISABLE + i] = 0xFFFFF319; - s->regs_ro[GEM_INT_Q1_MASK + i] = 0xFFFFFFFF; + s->regs_ro[R_INT_Q1_STATUS + i] = 0xFFFFFFFF; + s->regs_ro[R_INT_Q1_ENABLE + i] = 0xFFFFF319; + s->regs_ro[R_INT_Q1_DISABLE + i] = 0xFFFFF319; + s->regs_ro[R_INT_Q1_MASK + i] = 0xFFFFFFFF; } /* Mask of register bits which are clear on read */ memset(&s->regs_rtc[0], 0, sizeof(s->regs_rtc)); - s->regs_rtc[GEM_ISR] = 0xFFFFFFFF; + s->regs_rtc[R_ISR] = 0xFFFFFFFF; for (i = 0; i < s->num_priority_queues; i++) { - s->regs_rtc[GEM_INT_Q1_STATUS + i] = 0x00000CE6; + s->regs_rtc[R_INT_Q1_STATUS + i] = 0x00000CE6; } /* Mask of register bits which are write 1 to clear */ memset(&s->regs_w1c[0], 0, sizeof(s->regs_w1c)); - s->regs_w1c[GEM_TXSTATUS] = 0x000001F7; - s->regs_w1c[GEM_RXSTATUS] = 0x0000000F; + s->regs_w1c[R_TXSTATUS] = 0x000001F7; + s->regs_w1c[R_RXSTATUS] = 0x0000000F; /* Mask of register bits which are write only */ memset(&s->regs_wo[0], 0, sizeof(s->regs_wo)); - s->regs_wo[GEM_NWCTRL] = 0x00073E60; - s->regs_wo[GEM_IER] = 0x07FFFFFF; - s->regs_wo[GEM_IDR] = 0x07FFFFFF; + s->regs_wo[R_NWCTRL] = 0x00073E60; + s->regs_wo[R_IER] = 
0x07FFFFFF; + s->regs_wo[R_IDR] = 0x07FFFFFF; for (i = 0; i < s->num_priority_queues; i++) { - s->regs_wo[GEM_INT_Q1_ENABLE + i] = 0x00000CE6; - s->regs_wo[GEM_INT_Q1_DISABLE + i] = 0x00000CE6; + s->regs_wo[R_INT_Q1_ENABLE + i] = 0x00000CE6; + s->regs_wo[R_INT_Q1_DISABLE + i] = 0x00000CE6; } } @@ -561,7 +663,7 @@ static bool gem_can_receive(NetClientState *nc) s = qemu_get_nic_opaque(nc); /* Do nothing if receive is not enabled. */ - if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_RXENA)) { + if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_RECEIVE)) { if (s->can_rx_state != 1) { s->can_rx_state = 1; DB_PRINT("can't receive - no enable\n"); @@ -598,10 +700,10 @@ static void gem_update_int_status(CadenceGEMState *s) { int i; - qemu_set_irq(s->irq[0], !!s->regs[GEM_ISR]); + qemu_set_irq(s->irq[0], !!s->regs[R_ISR]); for (i = 1; i < s->num_priority_queues; ++i) { - qemu_set_irq(s->irq[i], !!s->regs[GEM_INT_Q1_STATUS + i - 1]); + qemu_set_irq(s->irq[i], !!s->regs[R_INT_Q1_STATUS + i - 1]); } } @@ -615,39 +717,39 @@ static void gem_receive_updatestats(CadenceGEMState *s, const uint8_t *packet, uint64_t octets; /* Total octets (bytes) received */ - octets = ((uint64_t)(s->regs[GEM_OCTRXLO]) << 32) | - s->regs[GEM_OCTRXHI]; + octets = ((uint64_t)(s->regs[R_OCTRXLO]) << 32) | + s->regs[R_OCTRXHI]; octets += bytes; - s->regs[GEM_OCTRXLO] = octets >> 32; - s->regs[GEM_OCTRXHI] = octets; + s->regs[R_OCTRXLO] = octets >> 32; + s->regs[R_OCTRXHI] = octets; /* Error-free Frames received */ - s->regs[GEM_RXCNT]++; + s->regs[R_RXCNT]++; /* Error-free Broadcast Frames counter */ if (!memcmp(packet, broadcast_addr, 6)) { - s->regs[GEM_RXBROADCNT]++; + s->regs[R_RXBROADCNT]++; } /* Error-free Multicast Frames counter */ if (packet[0] == 0x01) { - s->regs[GEM_RXMULTICNT]++; + s->regs[R_RXMULTICNT]++; } if (bytes <= 64) { - s->regs[GEM_RX64CNT]++; + s->regs[R_RX64CNT]++; } else if (bytes <= 127) { - s->regs[GEM_RX65CNT]++; + s->regs[R_RX65CNT]++; } else if (bytes <= 255) { - s->regs[GEM_RX128CNT]++; + s->regs[R_RX128CNT]++; } else if (bytes <= 511) { - s->regs[GEM_RX256CNT]++; + s->regs[R_RX256CNT]++; } else if (bytes <= 1023) { - s->regs[GEM_RX512CNT]++; + s->regs[R_RX512CNT]++; } else if (bytes <= 1518) { - s->regs[GEM_RX1024CNT]++; + s->regs[R_RX1024CNT]++; } else { - s->regs[GEM_RX1519CNT]++; + s->regs[R_RX1519CNT]++; } } @@ -706,13 +808,13 @@ static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet) int i, is_mc; /* Promiscuous mode? */ - if (s->regs[GEM_NWCFG] & GEM_NWCFG_PROMISC) { + if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, PROMISC)) { return GEM_RX_PROMISCUOUS_ACCEPT; } if (!memcmp(packet, broadcast_addr, 6)) { /* Reject broadcast packets? */ - if (s->regs[GEM_NWCFG] & GEM_NWCFG_BCAST_REJ) { + if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, NO_BROADCAST)) { return GEM_RX_REJECT; } return GEM_RX_BROADCAST_ACCEPT; @@ -720,13 +822,13 @@ static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet) /* Accept packets -w- hash match? 
*/ is_mc = is_multicast_ether_addr(packet); - if ((is_mc && (s->regs[GEM_NWCFG] & GEM_NWCFG_MCAST_HASH)) || - (!is_mc && (s->regs[GEM_NWCFG] & GEM_NWCFG_UCAST_HASH))) { + if ((is_mc && (FIELD_EX32(s->regs[R_NWCFG], NWCFG, MULTICAST_HASH_EN))) || + (!is_mc && FIELD_EX32(s->regs[R_NWCFG], NWCFG, UNICAST_HASH_EN))) { uint64_t buckets; unsigned hash_index; hash_index = calc_mac_hash(packet); - buckets = ((uint64_t)s->regs[GEM_HASHHI] << 32) | s->regs[GEM_HASHLO]; + buckets = ((uint64_t)s->regs[R_HASHHI] << 32) | s->regs[R_HASHLO]; if ((buckets >> hash_index) & 1) { return is_mc ? GEM_RX_MULTICAST_HASH_ACCEPT : GEM_RX_UNICAST_HASH_ACCEPT; @@ -734,7 +836,7 @@ static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet) } /* Check all 4 specific addresses */ - gem_spaddr = (uint8_t *)&(s->regs[GEM_SPADDR1LO]); + gem_spaddr = (uint8_t *)&(s->regs[R_SPADDR1LO]); for (i = 3; i >= 0; i--) { if (s->sar_active[i] && !memcmp(packet, gem_spaddr + 8 * i, 6)) { return GEM_RX_SAR_ACCEPT + i; @@ -754,15 +856,14 @@ static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr, int i, j; for (i = 0; i < s->num_type1_screeners; i++) { - reg = s->regs[GEM_SCREENING_TYPE1_REGISTER_0 + i]; + reg = s->regs[R_SCREENING_TYPE1_REG0 + i]; matched = false; mismatched = false; /* Screening is based on UDP Port */ - if (reg & GEM_ST1R_UDP_PORT_MATCH_ENABLE) { + if (FIELD_EX32(reg, SCREENING_TYPE1_REG0, UDP_PORT_MATCH_EN)) { uint16_t udp_port = rxbuf_ptr[14 + 22] << 8 | rxbuf_ptr[14 + 23]; - if (udp_port == extract32(reg, GEM_ST1R_UDP_PORT_MATCH_SHIFT, - GEM_ST1R_UDP_PORT_MATCH_WIDTH)) { + if (udp_port == FIELD_EX32(reg, SCREENING_TYPE1_REG0, UDP_PORT_MATCH)) { matched = true; } else { mismatched = true; @@ -770,10 +871,9 @@ static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr, } /* Screening is based on DS/TC */ - if (reg & GEM_ST1R_DSTC_ENABLE) { + if (FIELD_EX32(reg, SCREENING_TYPE1_REG0, DSTC_ENABLE)) { uint8_t dscp = rxbuf_ptr[14 + 1]; - if (dscp == extract32(reg, GEM_ST1R_DSTC_MATCH_SHIFT, - GEM_ST1R_DSTC_MATCH_WIDTH)) { + if (dscp == FIELD_EX32(reg, SCREENING_TYPE1_REG0, DSTC_MATCH)) { matched = true; } else { mismatched = true; @@ -781,25 +881,25 @@ static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr, } if (matched && !mismatched) { - return extract32(reg, GEM_ST1R_QUEUE_SHIFT, GEM_ST1R_QUEUE_WIDTH); + return FIELD_EX32(reg, SCREENING_TYPE1_REG0, QUEUE_NUM); } } for (i = 0; i < s->num_type2_screeners; i++) { - reg = s->regs[GEM_SCREENING_TYPE2_REGISTER_0 + i]; + reg = s->regs[R_SCREENING_TYPE2_REG0 + i]; matched = false; mismatched = false; - if (reg & GEM_ST2R_ETHERTYPE_ENABLE) { + if (FIELD_EX32(reg, SCREENING_TYPE2_REG0, ETHERTYPE_ENABLE)) { uint16_t type = rxbuf_ptr[12] << 8 | rxbuf_ptr[13]; - int et_idx = extract32(reg, GEM_ST2R_ETHERTYPE_INDEX_SHIFT, - GEM_ST2R_ETHERTYPE_INDEX_WIDTH); + int et_idx = FIELD_EX32(reg, SCREENING_TYPE2_REG0, + ETHERTYPE_REG_INDEX); if (et_idx > s->num_type2_screeners) { qemu_log_mask(LOG_GUEST_ERROR, "Out of range ethertype " "register index: %d\n", et_idx); } - if (type == s->regs[GEM_SCREENING_TYPE2_ETHERTYPE_REG_0 + + if (type == s->regs[R_SCREENING_TYPE2_ETHERTYPE_REG0 + et_idx]) { matched = true; } else { @@ -809,27 +909,27 @@ static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr, /* Compare A, B, C */ for (j = 0; j < 3; j++) { - uint32_t cr0, cr1, mask; + uint32_t cr0, cr1, mask, compare; uint16_t rx_cmp; int offset; - int cr_idx = extract32(reg, GEM_ST2R_COMPARE_A_SHIFT + j * 6, - 
GEM_ST2R_COMPARE_WIDTH); + int cr_idx = extract32(reg, R_SCREENING_TYPE2_REG0_COMPARE_A_SHIFT + j * 6, + R_SCREENING_TYPE2_REG0_COMPARE_A_LENGTH); - if (!(reg & (GEM_ST2R_COMPARE_A_ENABLE << (j * 6)))) { + if (!extract32(reg, R_SCREENING_TYPE2_REG0_COMPARE_A_ENABLE_SHIFT + j * 6, + R_SCREENING_TYPE2_REG0_COMPARE_A_ENABLE_LENGTH)) { continue; } + if (cr_idx > s->num_type2_screeners) { qemu_log_mask(LOG_GUEST_ERROR, "Out of range compare " "register index: %d\n", cr_idx); } - cr0 = s->regs[GEM_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2]; - cr1 = s->regs[GEM_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2 + 1]; - offset = extract32(cr1, GEM_T2CW1_OFFSET_VALUE_SHIFT, - GEM_T2CW1_OFFSET_VALUE_WIDTH); + cr0 = s->regs[R_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2]; + cr1 = s->regs[R_TYPE2_COMPARE_0_WORD_1 + cr_idx * 2]; + offset = FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, OFFSET_VALUE); - switch (extract32(cr1, GEM_T2CW1_COMPARE_OFFSET_SHIFT, - GEM_T2CW1_COMPARE_OFFSET_WIDTH)) { + switch (FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, COMPARE_OFFSET)) { case 3: /* Skip UDP header */ qemu_log_mask(LOG_UNIMP, "TCP compare offsets" "unimplemented - assuming UDP\n"); @@ -847,9 +947,10 @@ static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr, } rx_cmp = rxbuf_ptr[offset] << 8 | rxbuf_ptr[offset]; - mask = extract32(cr0, 0, 16); + mask = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, MASK_VALUE); + compare = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE); - if ((rx_cmp & mask) == (extract32(cr0, 16, 16) & mask)) { + if ((rx_cmp & mask) == (compare & mask)) { matched = true; } else { mismatched = true; @@ -857,7 +958,7 @@ static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr, } if (matched && !mismatched) { - return extract32(reg, GEM_ST2R_QUEUE_SHIFT, GEM_ST2R_QUEUE_WIDTH); + return FIELD_EX32(reg, SCREENING_TYPE2_REG0, QUEUE_NUM); } } @@ -871,11 +972,11 @@ static uint32_t gem_get_queue_base_addr(CadenceGEMState *s, bool tx, int q) switch (q) { case 0: - base_addr = s->regs[tx ? GEM_TXQBASE : GEM_RXQBASE]; + base_addr = s->regs[tx ? R_TXQBASE : R_RXQBASE]; break; case 1 ... (MAX_PRIORITY_QUEUES - 1): - base_addr = s->regs[(tx ? GEM_TRANSMIT_Q1_PTR : - GEM_RECEIVE_Q1_PTR) + q - 1]; + base_addr = s->regs[(tx ? R_TRANSMIT_Q1_PTR : + R_RECEIVE_Q1_PTR) + q - 1]; break; default: g_assert_not_reached(); @@ -898,8 +999,8 @@ static hwaddr gem_get_desc_addr(CadenceGEMState *s, bool tx, int q) { hwaddr desc_addr = 0; - if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) { - desc_addr = s->regs[tx ? GEM_TBQPH : GEM_RBQPH]; + if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) { + desc_addr = s->regs[tx ? R_TBQPH : R_RBQPH]; } desc_addr <<= 32; desc_addr |= tx ? s->tx_desc_addr[q] : s->rx_desc_addr[q]; @@ -930,8 +1031,8 @@ static void gem_get_rx_desc(CadenceGEMState *s, int q) /* Descriptor owned by software ? */ if (rx_desc_get_ownership(s->rx_desc[q]) == 1) { DB_PRINT("descriptor 0x%" HWADDR_PRIx " owned by sw.\n", desc_addr); - s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_NOBUF; - gem_set_isr(s, q, GEM_INT_RXUSED); + s->regs[R_RXSTATUS] |= R_RXSTATUS_BUF_NOT_AVAILABLE_MASK; + gem_set_isr(s, q, R_ISR_RX_USED_MASK); /* Handle interrupt consequences */ gem_update_int_status(s); } @@ -958,7 +1059,7 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size) } /* Discard packets with receive length error enabled ? 
*/ - if (s->regs[GEM_NWCFG] & GEM_NWCFG_LERR_DISC) { + if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, LEN_ERR_DISCARD)) { unsigned type_len; /* Fish the ethertype / length field out of the RX packet */ @@ -975,14 +1076,14 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size) /* * Determine configured receive buffer offset (probably 0) */ - rxbuf_offset = (s->regs[GEM_NWCFG] & GEM_NWCFG_BUFF_OFST_M) >> - GEM_NWCFG_BUFF_OFST_S; + rxbuf_offset = FIELD_EX32(s->regs[R_NWCFG], NWCFG, RECV_BUF_OFFSET); /* The configure size of each receive buffer. Determines how many * buffers needed to hold this packet. */ - rxbufsize = ((s->regs[GEM_DMACFG] & GEM_DMACFG_RBUFSZ_M) >> - GEM_DMACFG_RBUFSZ_S) * GEM_DMACFG_RBUFSZ_MUL; + rxbufsize = FIELD_EX32(s->regs[R_DMACFG], DMACFG, RX_BUF_SIZE); + rxbufsize *= GEM_DMACFG_RBUFSZ_MUL; + bytes_to_copy = size; /* Hardware allows a zero value here but warns against it. To avoid QEMU @@ -1001,10 +1102,10 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size) } /* Strip of FCS field ? (usually yes) */ - if (s->regs[GEM_NWCFG] & GEM_NWCFG_STRIP_FCS) { + if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, FCS_REMOVE)) { rxbuf_ptr = (void *)buf; } else { - unsigned crc_val; + uint32_t crc_val; if (size > MAX_FRAME_SIZE - sizeof(crc_val)) { size = MAX_FRAME_SIZE - sizeof(crc_val); @@ -1031,7 +1132,7 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size) if (size > gem_get_max_buf_len(s, false)) { qemu_log_mask(LOG_GUEST_ERROR, "rx frame too long\n"); - gem_set_isr(s, q, GEM_INT_AMBA_ERR); + gem_set_isr(s, q, R_ISR_AMBA_ERROR_MASK); return -1; } @@ -1107,8 +1208,8 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size) /* Count it */ gem_receive_updatestats(s, buf, size); - s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_FRMRCVD; - gem_set_isr(s, q, GEM_INT_RXCMPL); + s->regs[R_RXSTATUS] |= R_RXSTATUS_FRAME_RECEIVED_MASK; + gem_set_isr(s, q, R_ISR_RECV_COMPLETE_MASK); /* Handle interrupt consequences */ gem_update_int_status(s); @@ -1126,39 +1227,39 @@ static void gem_transmit_updatestats(CadenceGEMState *s, const uint8_t *packet, uint64_t octets; /* Total octets (bytes) transmitted */ - octets = ((uint64_t)(s->regs[GEM_OCTTXLO]) << 32) | - s->regs[GEM_OCTTXHI]; + octets = ((uint64_t)(s->regs[R_OCTTXLO]) << 32) | + s->regs[R_OCTTXHI]; octets += bytes; - s->regs[GEM_OCTTXLO] = octets >> 32; - s->regs[GEM_OCTTXHI] = octets; + s->regs[R_OCTTXLO] = octets >> 32; + s->regs[R_OCTTXHI] = octets; /* Error-free Frames transmitted */ - s->regs[GEM_TXCNT]++; + s->regs[R_TXCNT]++; /* Error-free Broadcast Frames counter */ if (!memcmp(packet, broadcast_addr, 6)) { - s->regs[GEM_TXBCNT]++; + s->regs[R_TXBCNT]++; } /* Error-free Multicast Frames counter */ if (packet[0] == 0x01) { - s->regs[GEM_TXMCNT]++; + s->regs[R_TXMCNT]++; } if (bytes <= 64) { - s->regs[GEM_TX64CNT]++; + s->regs[R_TX64CNT]++; } else if (bytes <= 127) { - s->regs[GEM_TX65CNT]++; + s->regs[R_TX65CNT]++; } else if (bytes <= 255) { - s->regs[GEM_TX128CNT]++; + s->regs[R_TX128CNT]++; } else if (bytes <= 511) { - s->regs[GEM_TX256CNT]++; + s->regs[R_TX256CNT]++; } else if (bytes <= 1023) { - s->regs[GEM_TX512CNT]++; + s->regs[R_TX512CNT]++; } else if (bytes <= 1518) { - s->regs[GEM_TX1024CNT]++; + s->regs[R_TX1024CNT]++; } else { - s->regs[GEM_TX1519CNT]++; + s->regs[R_TX1519CNT]++; } } @@ -1175,7 +1276,7 @@ static void gem_transmit(CadenceGEMState *s) int q = 0; /* Do nothing if transmit is not enabled. 
*/ - if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) { + if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_TRANSMIT)) { return; } @@ -1200,7 +1301,7 @@ static void gem_transmit(CadenceGEMState *s) while (tx_desc_get_used(desc) == 0) { /* Do nothing if transmit is not enabled. */ - if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) { + if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_TRANSMIT)) { return; } print_gem_tx_desc(desc, q); @@ -1221,7 +1322,7 @@ static void gem_transmit(CadenceGEMState *s) HWADDR_PRIx " too large: size 0x%x space 0x%zx\n", packet_desc_addr, tx_desc_get_length(desc), gem_get_max_buf_len(s, true) - (p - s->tx_packet)); - gem_set_isr(s, q, GEM_INT_AMBA_ERR); + gem_set_isr(s, q, R_ISR_AMBA_ERROR_MASK); break; } @@ -1258,14 +1359,14 @@ static void gem_transmit(CadenceGEMState *s) } DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]); - s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL; - gem_set_isr(s, q, GEM_INT_TXCMPL); + s->regs[R_TXSTATUS] |= R_TXSTATUS_TRANSMIT_COMPLETE_MASK; + gem_set_isr(s, q, R_ISR_XMIT_COMPLETE_MASK); /* Handle interrupt consequences */ gem_update_int_status(s); /* Is checksum offload enabled? */ - if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) { + if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, TX_PBUF_CSUM_OFFLOAD)) { net_checksum_calculate(s->tx_packet, total_bytes, CSUM_ALL); } @@ -1273,8 +1374,8 @@ static void gem_transmit(CadenceGEMState *s) gem_transmit_updatestats(s, s->tx_packet, total_bytes); /* Send the packet somewhere */ - if (s->phy_loop || (s->regs[GEM_NWCTRL] & - GEM_NWCTRL_LOCALLOOP)) { + if (s->phy_loop || FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, + LOOPBACK_LOCAL)) { qemu_receive_packet(qemu_get_queue(s->nic), s->tx_packet, total_bytes); } else { @@ -1289,9 +1390,8 @@ static void gem_transmit(CadenceGEMState *s) /* read next descriptor */ if (tx_desc_get_wrap(desc)) { - - if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) { - packet_desc_addr = s->regs[GEM_TBQPH]; + if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) { + packet_desc_addr = s->regs[R_TBQPH]; packet_desc_addr <<= 32; } else { packet_desc_addr = 0; @@ -1307,10 +1407,10 @@ static void gem_transmit(CadenceGEMState *s) } if (tx_desc_get_used(desc)) { - s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED; + s->regs[R_TXSTATUS] |= R_TXSTATUS_USED_BIT_READ_MASK; /* IRQ TXUSED is defined only for queue 0 */ if (q == 0) { - gem_set_isr(s, 0, GEM_INT_TXUSED); + gem_set_isr(s, 0, R_ISR_TX_USED_MASK); } gem_update_int_status(s); } @@ -1353,30 +1453,30 @@ static void gem_reset(DeviceState *d) /* Set post reset register values */ memset(&s->regs[0], 0, sizeof(s->regs)); - s->regs[GEM_NWCFG] = 0x00080000; - s->regs[GEM_NWSTATUS] = 0x00000006; - s->regs[GEM_DMACFG] = 0x00020784; - s->regs[GEM_IMR] = 0x07ffffff; - s->regs[GEM_TXPAUSE] = 0x0000ffff; - s->regs[GEM_TXPARTIALSF] = 0x000003ff; - s->regs[GEM_RXPARTIALSF] = 0x000003ff; - s->regs[GEM_MODID] = s->revision; - s->regs[GEM_DESCONF] = 0x02D00111; - s->regs[GEM_DESCONF2] = 0x2ab10000 | s->jumbo_max_len; - s->regs[GEM_DESCONF5] = 0x002f2045; - s->regs[GEM_DESCONF6] = GEM_DESCONF6_64B_MASK; - s->regs[GEM_INT_Q1_MASK] = 0x00000CE6; - s->regs[GEM_JUMBO_MAX_LEN] = s->jumbo_max_len; + s->regs[R_NWCFG] = 0x00080000; + s->regs[R_NWSTATUS] = 0x00000006; + s->regs[R_DMACFG] = 0x00020784; + s->regs[R_IMR] = 0x07ffffff; + s->regs[R_TXPAUSE] = 0x0000ffff; + s->regs[R_TXPARTIALSF] = 0x000003ff; + s->regs[R_RXPARTIALSF] = 0x000003ff; + s->regs[R_MODID] = s->revision; + s->regs[R_DESCONF] = 0x02D00111; + s->regs[R_DESCONF2] = 0x2ab10000 | 
s->jumbo_max_len; + s->regs[R_DESCONF5] = 0x002f2045; + s->regs[R_DESCONF6] = R_DESCONF6_DMA_ADDR_64B_MASK; + s->regs[R_INT_Q1_MASK] = 0x00000CE6; + s->regs[R_JUMBO_MAX_LEN] = s->jumbo_max_len; if (s->num_priority_queues > 1) { queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1); - s->regs[GEM_DESCONF6] |= queues_mask; + s->regs[R_DESCONF6] |= queues_mask; } /* Set MAC address */ a = &s->conf.macaddr.a[0]; - s->regs[GEM_SPADDR1LO] = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24); - s->regs[GEM_SPADDR1HI] = a[4] | (a[5] << 8); + s->regs[R_SPADDR1LO] = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24); + s->regs[R_SPADDR1HI] = a[4] | (a[5] << 8); for (i = 0; i < 4; i++) { s->sar_active[i] = false; @@ -1421,6 +1521,38 @@ static void gem_phy_write(CadenceGEMState *s, unsigned reg_num, uint16_t val) s->phy_regs[reg_num] = val; } +static void gem_handle_phy_access(CadenceGEMState *s) +{ + uint32_t val = s->regs[R_PHYMNTNC]; + uint32_t phy_addr, reg_num; + + phy_addr = FIELD_EX32(val, PHYMNTNC, PHY_ADDR); + + if (phy_addr != s->phy_addr) { + /* no phy at this address */ + if (FIELD_EX32(val, PHYMNTNC, OP) == MDIO_OP_READ) { + s->regs[R_PHYMNTNC] = FIELD_DP32(val, PHYMNTNC, DATA, 0xffff); + } + return; + } + + reg_num = FIELD_EX32(val, PHYMNTNC, REG_ADDR); + + switch (FIELD_EX32(val, PHYMNTNC, OP)) { + case MDIO_OP_READ: + s->regs[R_PHYMNTNC] = FIELD_DP32(val, PHYMNTNC, DATA, + gem_phy_read(s, reg_num)); + break; + + case MDIO_OP_WRITE: + gem_phy_write(s, reg_num, val); + break; + + default: + break; /* only clause 22 operations are supported */ + } +} + /* * gem_read32: * Read a GEM register. @@ -1437,24 +1569,10 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size) DB_PRINT("offset: 0x%04x read: 0x%08x\n", (unsigned)offset*4, retval); switch (offset) { - case GEM_ISR: + case R_ISR: DB_PRINT("lowering irqs on ISR read\n"); /* The interrupts get updated at the end of the function. */ break; - case GEM_PHYMNTNC: - if (retval & GEM_PHYMNTNC_OP_R) { - uint32_t phy_addr, reg_num; - - phy_addr = (retval & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT; - if (phy_addr == s->phy_addr) { - reg_num = (retval & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT; - retval &= 0xFFFF0000; - retval |= gem_phy_read(s, reg_num); - } else { - retval |= 0xFFFF; /* No device at this address */ - } - } - break; } /* Squash read to clear bits */ @@ -1495,16 +1613,16 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val, /* Handle register write side effects */ switch (offset) { - case GEM_NWCTRL: - if (val & GEM_NWCTRL_RXENA) { + case R_NWCTRL: + if (FIELD_EX32(val, NWCTRL, ENABLE_RECEIVE)) { for (i = 0; i < s->num_priority_queues; ++i) { gem_get_rx_desc(s, i); } } - if (val & GEM_NWCTRL_TXSTART) { + if (FIELD_EX32(val, NWCTRL, TRANSMIT_START)) { gem_transmit(s); } - if (!(val & GEM_NWCTRL_TXENA)) { + if (!(FIELD_EX32(val, NWCTRL, ENABLE_TRANSMIT))) { /* Reset to start of Q when transmit disabled. */ for (i = 0; i < s->num_priority_queues; i++) { s->tx_desc_addr[i] = gem_get_tx_queue_base_addr(s, i); @@ -1515,65 +1633,57 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val, } break; - case GEM_TXSTATUS: + case R_TXSTATUS: gem_update_int_status(s); break; - case GEM_RXQBASE: + case R_RXQBASE: s->rx_desc_addr[0] = val; break; - case GEM_RECEIVE_Q1_PTR ... GEM_RECEIVE_Q7_PTR: - s->rx_desc_addr[offset - GEM_RECEIVE_Q1_PTR + 1] = val; + case R_RECEIVE_Q1_PTR ... 
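/*
 * Illustrative sketch (not from the patch) of the clause-22 MDIO flow that
 * the new gem_handle_phy_access() above models: the guest writes a single PHY
 * maintenance word carrying the opcode, PHY address and register address,
 * then reads the same register back to collect the data of a read operation.
 * Register and field names follow the patch; the helper below is hypothetical.
 */
static uint16_t example_mdio_read(CadenceGEMState *s, uint8_t phy, uint8_t reg)
{
    uint32_t cmd = 0;

    cmd = FIELD_DP32(cmd, PHYMNTNC, OP, MDIO_OP_READ);
    cmd = FIELD_DP32(cmd, PHYMNTNC, PHY_ADDR, phy);
    cmd = FIELD_DP32(cmd, PHYMNTNC, REG_ADDR, reg);
    s->regs[R_PHYMNTNC] = cmd;
    gem_handle_phy_access(s);   /* what gem_write() triggers on R_PHYMNTNC */

    /* the result is latched into the DATA field of the same register */
    return FIELD_EX32(s->regs[R_PHYMNTNC], PHYMNTNC, DATA);
}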
R_RECEIVE_Q7_PTR: + s->rx_desc_addr[offset - R_RECEIVE_Q1_PTR + 1] = val; break; - case GEM_TXQBASE: + case R_TXQBASE: s->tx_desc_addr[0] = val; break; - case GEM_TRANSMIT_Q1_PTR ... GEM_TRANSMIT_Q7_PTR: - s->tx_desc_addr[offset - GEM_TRANSMIT_Q1_PTR + 1] = val; + case R_TRANSMIT_Q1_PTR ... R_TRANSMIT_Q7_PTR: + s->tx_desc_addr[offset - R_TRANSMIT_Q1_PTR + 1] = val; break; - case GEM_RXSTATUS: + case R_RXSTATUS: gem_update_int_status(s); break; - case GEM_IER: - s->regs[GEM_IMR] &= ~val; + case R_IER: + s->regs[R_IMR] &= ~val; gem_update_int_status(s); break; - case GEM_JUMBO_MAX_LEN: - s->regs[GEM_JUMBO_MAX_LEN] = val & MAX_JUMBO_FRAME_SIZE_MASK; + case R_JUMBO_MAX_LEN: + s->regs[R_JUMBO_MAX_LEN] = val & MAX_JUMBO_FRAME_SIZE_MASK; break; - case GEM_INT_Q1_ENABLE ... GEM_INT_Q7_ENABLE: - s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_ENABLE] &= ~val; + case R_INT_Q1_ENABLE ... R_INT_Q7_ENABLE: + s->regs[R_INT_Q1_MASK + offset - R_INT_Q1_ENABLE] &= ~val; gem_update_int_status(s); break; - case GEM_IDR: - s->regs[GEM_IMR] |= val; + case R_IDR: + s->regs[R_IMR] |= val; gem_update_int_status(s); break; - case GEM_INT_Q1_DISABLE ... GEM_INT_Q7_DISABLE: - s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_DISABLE] |= val; + case R_INT_Q1_DISABLE ... R_INT_Q7_DISABLE: + s->regs[R_INT_Q1_MASK + offset - R_INT_Q1_DISABLE] |= val; gem_update_int_status(s); break; - case GEM_SPADDR1LO: - case GEM_SPADDR2LO: - case GEM_SPADDR3LO: - case GEM_SPADDR4LO: - s->sar_active[(offset - GEM_SPADDR1LO) / 2] = false; + case R_SPADDR1LO: + case R_SPADDR2LO: + case R_SPADDR3LO: + case R_SPADDR4LO: + s->sar_active[(offset - R_SPADDR1LO) / 2] = false; break; - case GEM_SPADDR1HI: - case GEM_SPADDR2HI: - case GEM_SPADDR3HI: - case GEM_SPADDR4HI: - s->sar_active[(offset - GEM_SPADDR1HI) / 2] = true; + case R_SPADDR1HI: + case R_SPADDR2HI: + case R_SPADDR3HI: + case R_SPADDR4HI: + s->sar_active[(offset - R_SPADDR1HI) / 2] = true; break; - case GEM_PHYMNTNC: - if (val & GEM_PHYMNTNC_OP_W) { - uint32_t phy_addr, reg_num; - - phy_addr = (val & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT; - if (phy_addr == s->phy_addr) { - reg_num = (val & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT; - gem_phy_write(s, reg_num, val); - } - } + case R_PHYMNTNC: + gem_handle_phy_access(s); break; } diff --git a/hw/pcmcia/pxa2xx.c b/hw/pcmcia/pxa2xx.c index fcca7e571b..e3111fdf1a 100644 --- a/hw/pcmcia/pxa2xx.c +++ b/hw/pcmcia/pxa2xx.c @@ -138,21 +138,6 @@ static void pxa2xx_pcmcia_set_irq(void *opaque, int line, int level) qemu_set_irq(s->irq, level); } -PXA2xxPCMCIAState *pxa2xx_pcmcia_init(MemoryRegion *sysmem, - hwaddr base) -{ - DeviceState *dev; - PXA2xxPCMCIAState *s; - - dev = qdev_new(TYPE_PXA2XX_PCMCIA); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); - s = PXA2XX_PCMCIA(dev); - - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - - return s; -} - static void pxa2xx_pcmcia_initfn(Object *obj) { SysBusDevice *sbd = SYS_BUS_DEVICE(obj); diff --git a/hw/sd/pxa2xx_mmci.c b/hw/sd/pxa2xx_mmci.c index 124fbf8bbd..4749e935d8 100644 --- a/hw/sd/pxa2xx_mmci.c +++ b/hw/sd/pxa2xx_mmci.c @@ -479,15 +479,10 @@ PXA2xxMMCIState *pxa2xx_mmci_init(MemoryRegion *sysmem, qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma) { DeviceState *dev; - SysBusDevice *sbd; - dev = qdev_new(TYPE_PXA2XX_MMCI); - sbd = SYS_BUS_DEVICE(dev); - sysbus_mmio_map(sbd, 0, base); - sysbus_connect_irq(sbd, 0, irq); + dev = sysbus_create_simple(TYPE_PXA2XX_MMCI, base, irq); qdev_connect_gpio_out_named(dev, "rx-dma", 0, rx_dma); qdev_connect_gpio_out_named(dev, "tx-dma", 0, 
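/*
 * Illustrative note on the pxa2xx_mmci change above: sysbus_create_simple()
 * (declared in include/hw/sysbus.h) bundles the qdev_new() + realize + MMIO
 * map + IRQ wiring that the old code open-coded.  Roughly (a sketch, not the
 * actual implementation):
 */
static DeviceState *example_sysbus_create_simple(const char *name,
                                                 hwaddr addr, qemu_irq irq)
{
    DeviceState *dev = qdev_new(name);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    sysbus_realize_and_unref(sbd, &error_fatal);
    sysbus_mmio_map(sbd, 0, addr);
    sysbus_connect_irq(sbd, 0, irq);
    return dev;
}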
tx_dma); - sysbus_realize_and_unref(sbd, &error_fatal); return PXA2XX_MMCI(dev); } diff --git a/hw/ufs/lu.c b/hw/ufs/lu.c index 13b5e37b53..81bfff9b4e 100644 --- a/hw/ufs/lu.c +++ b/hw/ufs/lu.c @@ -19,57 +19,117 @@ #include "trace.h" #include "ufs.h" -/* - * The code below handling SCSI commands is copied from hw/scsi/scsi-disk.c, - * with minor adjustments to make it work for UFS. - */ +#define SCSI_COMMAND_FAIL (-1) -#define SCSI_DMA_BUF_SIZE (128 * KiB) -#define SCSI_MAX_INQUIRY_LEN 256 -#define SCSI_INQUIRY_DATA_SIZE 36 -#define SCSI_MAX_MODE_LEN 256 - -typedef struct UfsSCSIReq { - SCSIRequest req; - /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */ - uint64_t sector; - uint32_t sector_count; - uint32_t buflen; - bool started; - bool need_fua_emulation; - struct iovec iov; - QEMUIOVector qiov; - BlockAcctCookie acct; -} UfsSCSIReq; - -static void ufs_scsi_free_request(SCSIRequest *req) +static void ufs_build_upiu_sense_data(UfsRequest *req, uint8_t *sense, + uint32_t sense_len) { - UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); + req->rsp_upiu.sr.sense_data_len = cpu_to_be16(sense_len); + assert(sense_len <= SCSI_SENSE_LEN); + memcpy(req->rsp_upiu.sr.sense_data, sense, sense_len); +} + +static void ufs_build_scsi_response_upiu(UfsRequest *req, uint8_t *sense, + uint32_t sense_len, + uint32_t transfered_len, + int16_t status) +{ + uint32_t expected_len = be32_to_cpu(req->req_upiu.sc.exp_data_transfer_len); + uint8_t flags = 0, response = UFS_COMMAND_RESULT_SUCCESS; + uint16_t data_segment_length; + + if (expected_len > transfered_len) { + req->rsp_upiu.sr.residual_transfer_count = + cpu_to_be32(expected_len - transfered_len); + flags |= UFS_UPIU_FLAG_UNDERFLOW; + } else if (expected_len < transfered_len) { + req->rsp_upiu.sr.residual_transfer_count = + cpu_to_be32(transfered_len - expected_len); + flags |= UFS_UPIU_FLAG_OVERFLOW; + } - qemu_vfree(r->iov.iov_base); + if (status != 0) { + ufs_build_upiu_sense_data(req, sense, sense_len); + response = UFS_COMMAND_RESULT_FAIL; + } + + data_segment_length = + cpu_to_be16(sense_len + sizeof(req->rsp_upiu.sr.sense_data_len)); + ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_RESPONSE, flags, response, + status, data_segment_length); } -static void scsi_check_condition(UfsSCSIReq *r, SCSISense sense) +static void ufs_scsi_command_complete(SCSIRequest *scsi_req, size_t resid) { - trace_ufs_scsi_check_condition(r->req.tag, sense.key, sense.asc, - sense.ascq); - scsi_req_build_sense(&r->req, sense); - scsi_req_complete(&r->req, CHECK_CONDITION); + UfsRequest *req = scsi_req->hba_private; + int16_t status = scsi_req->status; + + uint32_t transfered_len = scsi_req->cmd.xfer - resid; + + ufs_build_scsi_response_upiu(req, scsi_req->sense, scsi_req->sense_len, + transfered_len, status); + + ufs_complete_req(req, UFS_REQUEST_SUCCESS); + + scsi_req->hba_private = NULL; + scsi_req_unref(scsi_req); } -static int ufs_scsi_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf, - uint32_t outbuf_len) +static QEMUSGList *ufs_get_sg_list(SCSIRequest *scsi_req) { - UfsHc *u = UFS(req->bus->qbus.parent); - UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev); - uint8_t page_code = req->cmd.buf[2]; - int start, buflen = 0; + UfsRequest *req = scsi_req->hba_private; + return req->sg; +} + +static const struct SCSIBusInfo ufs_scsi_info = { + .tcq = true, + .max_target = 0, + .max_lun = UFS_MAX_LUS, + .max_channel = 0, + + .get_sg_list = ufs_get_sg_list, + .complete = ufs_scsi_command_complete, +}; + +static int ufs_emulate_report_luns(UfsRequest 
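/*
 * Worked example for ufs_build_scsi_response_upiu() above: if the initiator
 * requested exp_data_transfer_len == 4096 bytes but only 512 were moved,
 * residual_transfer_count is set to 3584 and UFS_UPIU_FLAG_UNDERFLOW is
 * raised; the opposite ordering raises UFS_UPIU_FLAG_OVERFLOW instead, and a
 * non-GOOD SCSI status additionally copies the sense data into the UPIU.
 */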
*req, uint8_t *outbuf, + uint32_t outbuf_len) +{ + UfsHc *u = req->hc; + int len = 0; - if (outbuf_len < SCSI_INQUIRY_DATA_SIZE) { - return -1; + /* TODO: Support for cases where SELECT REPORT is 1 and 2 */ + if (req->req_upiu.sc.cdb[2] != 0) { + return SCSI_COMMAND_FAIL; } - outbuf[buflen++] = lu->qdev.type & 0x1f; + len += 8; + + for (uint8_t lun = 0; lun < UFS_MAX_LUS; ++lun) { + if (u->lus[lun]) { + if (len + 8 > outbuf_len) { + break; + } + + memset(outbuf + len, 0, 8); + outbuf[len] = 0; + outbuf[len + 1] = lun; + len += 8; + } + } + + /* store the LUN list length */ + stl_be_p(outbuf, len - 8); + + return len; +} + +static int ufs_scsi_emulate_vpd_page(UfsRequest *req, uint8_t *outbuf, + uint32_t outbuf_len) +{ + uint8_t page_code = req->req_upiu.sc.cdb[2]; + int start, buflen = 0; + + outbuf[buflen++] = TYPE_WLUN; outbuf[buflen++] = page_code; outbuf[buflen++] = 0x00; outbuf[buflen++] = 0x00; @@ -78,36 +138,12 @@ static int ufs_scsi_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf, switch (page_code) { case 0x00: /* Supported page codes, mandatory */ { - trace_ufs_scsi_emulate_vpd_page_00(req->cmd.xfer); outbuf[buflen++] = 0x00; /* list of supported pages (this page) */ - if (u->params.serial) { - outbuf[buflen++] = 0x80; /* unit serial number */ - } outbuf[buflen++] = 0x87; /* mode page policy */ break; } - case 0x80: /* Device serial number, optional */ - { - int l; - - if (!u->params.serial) { - trace_ufs_scsi_emulate_vpd_page_80_not_supported(); - return -1; - } - - l = strlen(u->params.serial); - if (l > SCSI_INQUIRY_DATA_SIZE) { - l = SCSI_INQUIRY_DATA_SIZE; - } - - trace_ufs_scsi_emulate_vpd_page_80(req->cmd.xfer); - memcpy(outbuf + buflen, u->params.serial, l); - buflen += l; - break; - } case 0x87: /* Mode Page Policy, mandatory */ { - trace_ufs_scsi_emulate_vpd_page_87(req->cmd.xfer); outbuf[buflen++] = 0x3f; /* apply to all mode pages and subpages */ outbuf[buflen++] = 0xff; outbuf[buflen++] = 0; /* shared */ @@ -115,7 +151,7 @@ static int ufs_scsi_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf, break; } default: - return -1; + return SCSI_COMMAND_FAIL; } /* done with EVPD */ assert(buflen - start <= 255); @@ -123,1150 +159,130 @@ static int ufs_scsi_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf, return buflen; } -static int ufs_scsi_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf, +static int ufs_emulate_wlun_inquiry(UfsRequest *req, uint8_t *outbuf, uint32_t outbuf_len) { - int buflen = 0; - - if (outbuf_len < SCSI_INQUIRY_DATA_SIZE) { - return -1; + if (outbuf_len < SCSI_INQUIRY_LEN) { + return 0; } - if (req->cmd.buf[1] & 0x1) { + if (req->req_upiu.sc.cdb[1] & 0x1) { /* Vital product data */ return ufs_scsi_emulate_vpd_page(req, outbuf, outbuf_len); } /* Standard INQUIRY data */ - if (req->cmd.buf[2] != 0) { - return -1; + if (req->req_upiu.sc.cdb[2] != 0) { + return SCSI_COMMAND_FAIL; } - /* PAGE CODE == 0 */ - buflen = req->cmd.xfer; - if (buflen > SCSI_MAX_INQUIRY_LEN) { - buflen = SCSI_MAX_INQUIRY_LEN; - } - - if (is_wlun(req->lun)) { - outbuf[0] = TYPE_WLUN; - } else { - outbuf[0] = 0; - } + outbuf[0] = TYPE_WLUN; outbuf[1] = 0; - - strpadcpy((char *)&outbuf[16], 16, "QEMU UFS", ' '); + outbuf[2] = 0x6; /* SPC-4 */ + outbuf[3] = 0x2; + outbuf[4] = 31; + outbuf[5] = 0; + outbuf[6] = 0; + outbuf[7] = 0x2; strpadcpy((char *)&outbuf[8], 8, "QEMU", ' '); - + strpadcpy((char *)&outbuf[16], 16, "QEMU UFS", ' '); memset(&outbuf[32], 0, 4); - outbuf[2] = 0x06; /* SPC-4 */ - outbuf[3] = 0x2; - - if (buflen > SCSI_INQUIRY_DATA_SIZE) { - outbuf[4] = buflen - 5; 
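/*
 * Illustrative layout of the REPORT LUNS parameter data built above (per
 * SPC): an 8-byte header whose first 4 bytes hold the big-endian LUN list
 * length in bytes (entries only, header excluded -- hence the 'len - 8'),
 * followed by one 8-byte entry per LUN with the LUN in byte 1.  For LUNs 0
 * and 2 the buffer would look like:
 *
 *   00 00 00 10  00 00 00 00   header: list length 16, reserved
 *   00 00 00 00  00 00 00 00   LUN 0
 *   00 02 00 00  00 00 00 00   LUN 2
 */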
/* Additional Length = (Len - 1) - 4 */ - } else { - /* - * If the allocation length of CDB is too small, the additional - * length is not adjusted - */ - outbuf[4] = SCSI_INQUIRY_DATA_SIZE - 5; - } - - /* Support TCQ. */ - outbuf[7] = req->bus->info->tcq ? 0x02 : 0; - return buflen; -} - -static int mode_sense_page(UfsLu *lu, int page, uint8_t **p_outbuf, - int page_control) -{ - static const int mode_sense_valid[0x3f] = { - [MODE_PAGE_CACHING] = 1, - [MODE_PAGE_R_W_ERROR] = 1, - [MODE_PAGE_CONTROL] = 1, - }; - - uint8_t *p = *p_outbuf + 2; - int length; - - assert(page < ARRAY_SIZE(mode_sense_valid)); - if ((mode_sense_valid[page]) == 0) { - return -1; - } - - /* - * If Changeable Values are requested, a mask denoting those mode parameters - * that are changeable shall be returned. As we currently don't support - * parameter changes via MODE_SELECT all bits are returned set to zero. - * The buffer was already memset to zero by the caller of this function. - */ - switch (page) { - case MODE_PAGE_CACHING: - length = 0x12; - if (page_control == 1 || /* Changeable Values */ - blk_enable_write_cache(lu->qdev.conf.blk)) { - p[0] = 4; /* WCE */ - } - break; - - case MODE_PAGE_R_W_ERROR: - length = 10; - if (page_control == 1) { /* Changeable Values */ - break; - } - p[0] = 0x80; /* Automatic Write Reallocation Enabled */ - break; - - case MODE_PAGE_CONTROL: - length = 10; - if (page_control == 1) { /* Changeable Values */ - break; - } - p[1] = 0x10; /* Queue Algorithm modifier */ - p[8] = 0xff; /* Busy Timeout Period */ - p[9] = 0xff; - break; - - default: - return -1; - } - - assert(length < 256); - (*p_outbuf)[0] = page; - (*p_outbuf)[1] = length; - *p_outbuf += length + 2; - return length + 2; + return SCSI_INQUIRY_LEN; } -static int ufs_scsi_emulate_mode_sense(UfsSCSIReq *r, uint8_t *outbuf) +static UfsReqResult ufs_emulate_scsi_cmd(UfsLu *lu, UfsRequest *req) { - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - bool dbd; - int page, buflen, ret, page_control; - uint8_t *p; - uint8_t dev_specific_param = 0; - - dbd = (r->req.cmd.buf[1] & 0x8) != 0; - if (!dbd) { - return -1; - } + uint8_t lun = lu->lun; + uint8_t outbuf[4096]; + uint8_t sense_buf[UFS_SENSE_SIZE]; + uint8_t scsi_status; + int len = 0; - page = r->req.cmd.buf[2] & 0x3f; - page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; - - trace_ufs_scsi_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 : - 10, - page, r->req.cmd.xfer, page_control); - memset(outbuf, 0, r->req.cmd.xfer); - p = outbuf; - - if (!blk_is_writable(lu->qdev.conf.blk)) { - dev_specific_param |= 0x80; /* Readonly. */ - } - - p[2] = 0; /* Medium type. */ - p[3] = dev_specific_param; - p[6] = p[7] = 0; /* Block descriptor length. */ - p += 8; - - if (page_control == 3) { - /* Saved Values */ - scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); - return -1; - } - - if (page == 0x3f) { - for (page = 0; page <= 0x3e; page++) { - mode_sense_page(lu, page, &p, page_control); - } - } else { - ret = mode_sense_page(lu, page, &p, page_control); - if (ret == -1) { - return -1; - } - } - - buflen = p - outbuf; - /* - * The mode data length field specifies the length in bytes of the - * following data that is available to be transferred. The mode data - * length does not include itself. - */ - outbuf[0] = ((buflen - 2) >> 8) & 0xff; - outbuf[1] = (buflen - 2) & 0xff; - return buflen; -} - -/* - * scsi_handle_rw_error has two return values. 
False means that the error - * must be ignored, true means that the error has been processed and the - * caller should not do anything else for this request. Note that - * scsi_handle_rw_error always manages its reference counts, independent - * of the return value. - */ -static bool scsi_handle_rw_error(UfsSCSIReq *r, int ret, bool acct_failed) -{ - bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - SCSISense sense = SENSE_CODE(NO_SENSE); - int error = 0; - bool req_has_sense = false; - BlockErrorAction action; - int status; - - if (ret < 0) { - status = scsi_sense_from_errno(-ret, &sense); - error = -ret; - } else { - /* A passthrough command has completed with nonzero status. */ - status = ret; - if (status == CHECK_CONDITION) { - req_has_sense = true; - error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); + switch (req->req_upiu.sc.cdb[0]) { + case REPORT_LUNS: + len = ufs_emulate_report_luns(req, outbuf, sizeof(outbuf)); + if (len == SCSI_COMMAND_FAIL) { + scsi_build_sense(sense_buf, SENSE_CODE(INVALID_FIELD)); + scsi_status = CHECK_CONDITION; } else { - error = EINVAL; + scsi_status = GOOD; } - } - - /* - * Check whether the error has to be handled by the guest or should - * rather follow the rerror=/werror= settings. Guest-handled errors - * are usually retried immediately, so do not post them to QMP and - * do not account them as failed I/O. - */ - if (req_has_sense && scsi_sense_buf_is_guest_recoverable( - r->req.sense, sizeof(r->req.sense))) { - action = BLOCK_ERROR_ACTION_REPORT; - acct_failed = false; - } else { - action = blk_get_error_action(lu->qdev.conf.blk, is_read, error); - blk_error_action(lu->qdev.conf.blk, action, is_read, error); - } - - switch (action) { - case BLOCK_ERROR_ACTION_REPORT: - if (acct_failed) { - block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct); - } - if (!req_has_sense && status == CHECK_CONDITION) { - scsi_req_build_sense(&r->req, sense); - } - scsi_req_complete(&r->req, status); - return true; - - case BLOCK_ERROR_ACTION_IGNORE: - return false; - - case BLOCK_ERROR_ACTION_STOP: - scsi_req_retry(&r->req); - return true; - - default: - g_assert_not_reached(); - } -} - -static bool ufs_scsi_req_check_error(UfsSCSIReq *r, int ret, bool acct_failed) -{ - if (r->req.io_canceled) { - scsi_req_cancel_complete(&r->req); - return true; - } - - if (ret < 0) { - return scsi_handle_rw_error(r, ret, acct_failed); - } - - return false; -} - -static void scsi_aio_complete(void *opaque, int ret) -{ - UfsSCSIReq *r = (UfsSCSIReq *)opaque; - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - - assert(r->req.aiocb != NULL); - r->req.aiocb = NULL; - aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk)); - if (ufs_scsi_req_check_error(r, ret, true)) { - goto done; - } - - block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct); - scsi_req_complete(&r->req, GOOD); - -done: - aio_context_release(blk_get_aio_context(lu->qdev.conf.blk)); - scsi_req_unref(&r->req); -} - -static int32_t ufs_scsi_emulate_command(SCSIRequest *req, uint8_t *buf) -{ - UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); - UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev); - uint32_t last_block = 0; - uint8_t *outbuf; - int buflen; - - switch (req->cmd.buf[0]) { - case INQUIRY: - case MODE_SENSE_10: - case START_STOP: - case REQUEST_SENSE: - break; - - default: - if (!blk_is_available(lu->qdev.conf.blk)) { - scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); - return 0; - } - break; - } - - /* - * FIXME: we shouldn't 
return anything bigger than 4k, but the code - * requires the buffer to be as big as req->cmd.xfer in several - * places. So, do not allow CDBs with a very large ALLOCATION - * LENGTH. The real fix would be to modify scsi_read_data and - * dma_buf_read, so that they return data beyond the buflen - * as all zeros. - */ - if (req->cmd.xfer > 65536) { - goto illegal_request; - } - r->buflen = MAX(4096, req->cmd.xfer); - - if (!r->iov.iov_base) { - r->iov.iov_base = blk_blockalign(lu->qdev.conf.blk, r->buflen); - } - - outbuf = r->iov.iov_base; - memset(outbuf, 0, r->buflen); - switch (req->cmd.buf[0]) { - case TEST_UNIT_READY: - assert(blk_is_available(lu->qdev.conf.blk)); break; case INQUIRY: - buflen = ufs_scsi_emulate_inquiry(req, outbuf, r->buflen); - if (buflen < 0) { - goto illegal_request; - } - break; - case MODE_SENSE_10: - buflen = ufs_scsi_emulate_mode_sense(r, outbuf); - if (buflen < 0) { - goto illegal_request; + len = ufs_emulate_wlun_inquiry(req, outbuf, sizeof(outbuf)); + if (len == SCSI_COMMAND_FAIL) { + scsi_build_sense(sense_buf, SENSE_CODE(INVALID_FIELD)); + scsi_status = CHECK_CONDITION; + } else { + scsi_status = GOOD; } break; - case READ_CAPACITY_10: - /* The normal LEN field for this command is zero. */ - memset(outbuf, 0, 8); - if (lu->qdev.max_lba > 0) { - last_block = lu->qdev.max_lba - 1; - }; - outbuf[0] = (last_block >> 24) & 0xff; - outbuf[1] = (last_block >> 16) & 0xff; - outbuf[2] = (last_block >> 8) & 0xff; - outbuf[3] = last_block & 0xff; - outbuf[4] = (lu->qdev.blocksize >> 24) & 0xff; - outbuf[5] = (lu->qdev.blocksize >> 16) & 0xff; - outbuf[6] = (lu->qdev.blocksize >> 8) & 0xff; - outbuf[7] = lu->qdev.blocksize & 0xff; - break; case REQUEST_SENSE: - /* Just return "NO SENSE". */ - buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, - (req->cmd.buf[1] & 1) == 0); - if (buflen < 0) { - goto illegal_request; - } - break; - case SYNCHRONIZE_CACHE: - /* The request is used as the AIO opaque value, so add a ref. */ - scsi_req_ref(&r->req); - block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0, - BLOCK_ACCT_FLUSH); - r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r); - return 0; - case VERIFY_10: - trace_ufs_scsi_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3); - if (req->cmd.buf[1] & 6) { - goto illegal_request; - } - break; - case SERVICE_ACTION_IN_16: - /* Service Action In subcommands. */ - if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { - trace_ufs_scsi_emulate_command_SAI_16(); - memset(outbuf, 0, req->cmd.xfer); - - if (lu->qdev.max_lba > 0) { - last_block = lu->qdev.max_lba - 1; - }; - outbuf[0] = 0; - outbuf[1] = 0; - outbuf[2] = 0; - outbuf[3] = 0; - outbuf[4] = (last_block >> 24) & 0xff; - outbuf[5] = (last_block >> 16) & 0xff; - outbuf[6] = (last_block >> 8) & 0xff; - outbuf[7] = last_block & 0xff; - outbuf[8] = (lu->qdev.blocksize >> 24) & 0xff; - outbuf[9] = (lu->qdev.blocksize >> 16) & 0xff; - outbuf[10] = (lu->qdev.blocksize >> 8) & 0xff; - outbuf[11] = lu->qdev.blocksize & 0xff; - outbuf[12] = 0; - outbuf[13] = get_physical_block_exp(&lu->qdev.conf); - - if (lu->unit_desc.provisioning_type == 2 || - lu->unit_desc.provisioning_type == 3) { - outbuf[14] = 0x80; - } - /* Protection, exponent and lowest lba field left blank. 
*/ - break; - } - trace_ufs_scsi_emulate_command_SAI_unsupported(); - goto illegal_request; - case MODE_SELECT_10: - trace_ufs_scsi_emulate_command_MODE_SELECT_10(r->req.cmd.xfer); + /* Just return no sense data */ + len = scsi_build_sense_buf(outbuf, sizeof(outbuf), SENSE_CODE(NO_SENSE), + true); + scsi_status = GOOD; break; case START_STOP: - /* - * TODO: START_STOP is not yet implemented. It always returns success. - * Revisit it when ufs power management is implemented. - */ - trace_ufs_scsi_emulate_command_START_STOP(); - break; - case FORMAT_UNIT: - trace_ufs_scsi_emulate_command_FORMAT_UNIT(); - break; - case SEND_DIAGNOSTIC: - trace_ufs_scsi_emulate_command_SEND_DIAGNOSTIC(); - break; - default: - trace_ufs_scsi_emulate_command_UNKNOWN(buf[0], - scsi_command_name(buf[0])); - scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); - return 0; - } - assert(!r->req.aiocb); - r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); - if (r->iov.iov_len == 0) { - scsi_req_complete(&r->req, GOOD); - } - if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { - assert(r->iov.iov_len == req->cmd.xfer); - return -r->iov.iov_len; - } else { - return r->iov.iov_len; - } - -illegal_request: - if (r->req.status == -1) { - scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); - } - return 0; -} - -static void ufs_scsi_emulate_read_data(SCSIRequest *req) -{ - UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); - int buflen = r->iov.iov_len; - - if (buflen) { - trace_ufs_scsi_emulate_read_data(buflen); - r->iov.iov_len = 0; - r->started = true; - scsi_req_data(&r->req, buflen); - return; - } - - /* This also clears the sense buffer for REQUEST SENSE. */ - scsi_req_complete(&r->req, GOOD); -} - -static int ufs_scsi_check_mode_select(UfsLu *lu, int page, uint8_t *inbuf, - int inlen) -{ - uint8_t mode_current[SCSI_MAX_MODE_LEN]; - uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; - uint8_t *p; - int len, expected_len, changeable_len, i; - - /* - * The input buffer does not include the page header, so it is - * off by 2 bytes. - */ - expected_len = inlen + 2; - if (expected_len > SCSI_MAX_MODE_LEN) { - return -1; - } - - /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */ - if (page == MODE_PAGE_ALLS) { - return -1; - } - - p = mode_current; - memset(mode_current, 0, inlen + 2); - len = mode_sense_page(lu, page, &p, 0); - if (len < 0 || len != expected_len) { - return -1; - } - - p = mode_changeable; - memset(mode_changeable, 0, inlen + 2); - changeable_len = mode_sense_page(lu, page, &p, 1); - assert(changeable_len == len); - - /* - * Check that unchangeable bits are the same as what MODE SENSE - * would return. 
- */ - for (i = 2; i < len; i++) { - if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { - return -1; + /* TODO: Revisit it when Power Management is implemented */ + if (lun == UFS_UPIU_UFS_DEVICE_WLUN) { + scsi_status = GOOD; + break; } - } - return 0; -} - -static void ufs_scsi_apply_mode_select(UfsLu *lu, int page, uint8_t *p) -{ - switch (page) { - case MODE_PAGE_CACHING: - blk_set_enable_write_cache(lu->qdev.conf.blk, (p[0] & 4) != 0); - break; - + /* fallthrough */ default: - break; + scsi_build_sense(sense_buf, SENSE_CODE(INVALID_OPCODE)); + scsi_status = CHECK_CONDITION; } -} - -static int mode_select_pages(UfsSCSIReq *r, uint8_t *p, int len, bool change) -{ - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - - while (len > 0) { - int page, page_len; - page = p[0] & 0x3f; - if (p[0] & 0x40) { - goto invalid_param; - } else { - if (len < 2) { - goto invalid_param_len; - } - page_len = p[1]; - p += 2; - len -= 2; - } - - if (page_len > len) { - goto invalid_param_len; - } - - if (!change) { - if (ufs_scsi_check_mode_select(lu, page, p, page_len) < 0) { - goto invalid_param; - } - } else { - ufs_scsi_apply_mode_select(lu, page, p); - } - - p += page_len; - len -= page_len; + len = MIN(len, (int)req->data_len); + if (scsi_status == GOOD && len > 0 && + dma_buf_read(outbuf, len, NULL, req->sg, MEMTXATTRS_UNSPECIFIED) != + MEMTX_OK) { + return UFS_REQUEST_FAIL; } - return 0; - -invalid_param: - scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); - return -1; -invalid_param_len: - scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); - return -1; + ufs_build_scsi_response_upiu(req, sense_buf, sizeof(sense_buf), len, + scsi_status); + return UFS_REQUEST_SUCCESS; } -static void ufs_scsi_emulate_mode_select(UfsSCSIReq *r, uint8_t *inbuf) +static UfsReqResult ufs_process_scsi_cmd(UfsLu *lu, UfsRequest *req) { - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - uint8_t *p = inbuf; - int len = r->req.cmd.xfer; - int hdr_len = 8; - int bd_len; - int pass; - - /* We only support PF=1, SP=0. */ - if ((r->req.cmd.buf[1] & 0x11) != 0x10) { - goto invalid_field; - } - - if (len < hdr_len) { - goto invalid_param_len; - } - - bd_len = lduw_be_p(&p[6]); - if (bd_len != 0) { - goto invalid_param; - } - - len -= hdr_len; - p += hdr_len; - - /* Ensure no change is made if there is an error! */ - for (pass = 0; pass < 2; pass++) { - if (mode_select_pages(r, p, len, pass == 1) < 0) { - assert(pass == 0); - return; - } - } - - if (!blk_enable_write_cache(lu->qdev.conf.blk)) { - /* The request is used as the AIO opaque value, so add a ref. */ - scsi_req_ref(&r->req); - block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0, - BLOCK_ACCT_FLUSH); - r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r); - return; - } - - scsi_req_complete(&r->req, GOOD); - return; - -invalid_param: - scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); - return; + uint8_t task_tag = req->req_upiu.header.task_tag; -invalid_param_len: - scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); - return; - -invalid_field: - scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); -} - -/* block_num and nb_blocks expected to be in qdev blocksize */ -static inline bool check_lba_range(UfsLu *lu, uint64_t block_num, - uint32_t nb_blocks) -{ /* - * The first line tests that no overflow happens when computing the last - * block. The second line tests that the last accessed block is in - * range. 
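/*
 * Note on the dma_buf_read() call in the new ufs_emulate_scsi_cmd() above:
 * the emulated response (LUN list, INQUIRY data, sense) is clamped to
 * req->data_len -- the byte total accumulated from the PRDT entries -- and
 * copied into the guest buffers described by req->sg.  The helper is named
 * after the direction of the guest's I/O (a read returns data to the host),
 * not after the direction of the memcpy.
 */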
- * - * Careful, the computations should not underflow for nb_blocks == 0, - * and a 0-block read to the first LBA beyond the end of device is - * valid. + * Each ufs-lu has its own independent virtual SCSI bus. Therefore, we can't + * use scsi_target_emulate_report_luns() which gets all lu information over + * the SCSI bus. Therefore, we use ufs_emulate_scsi_cmd() like the + * well-known lu. */ - return (block_num <= block_num + nb_blocks && - block_num + nb_blocks <= lu->qdev.max_lba + 1); -} - -static void ufs_scsi_emulate_write_data(SCSIRequest *req) -{ - UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); - - if (r->iov.iov_len) { - int buflen = r->iov.iov_len; - trace_ufs_scsi_emulate_write_data(buflen); - r->iov.iov_len = 0; - scsi_req_data(&r->req, buflen); - return; - } - - switch (req->cmd.buf[0]) { - case MODE_SELECT_10: - /* This also clears the sense buffer for REQUEST SENSE. */ - ufs_scsi_emulate_mode_select(r, r->iov.iov_base); - break; - default: - abort(); - } -} - -/* Return a pointer to the data buffer. */ -static uint8_t *ufs_scsi_get_buf(SCSIRequest *req) -{ - UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); - - return (uint8_t *)r->iov.iov_base; -} - -static int32_t ufs_scsi_dma_command(SCSIRequest *req, uint8_t *buf) -{ - UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); - UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev); - uint32_t len; - uint8_t command; - - command = buf[0]; - - if (!blk_is_available(lu->qdev.conf.blk)) { - scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); - return 0; - } - - len = scsi_data_cdb_xfer(r->req.cmd.buf); - switch (command) { - case READ_6: - case READ_10: - trace_ufs_scsi_dma_command_READ(r->req.cmd.lba, len); - if (r->req.cmd.buf[1] & 0xe0) { - goto illegal_request; - } - if (!check_lba_range(lu, r->req.cmd.lba, len)) { - goto illegal_lba; - } - r->sector = r->req.cmd.lba * (lu->qdev.blocksize / BDRV_SECTOR_SIZE); - r->sector_count = len * (lu->qdev.blocksize / BDRV_SECTOR_SIZE); - break; - case WRITE_6: - case WRITE_10: - trace_ufs_scsi_dma_command_WRITE(r->req.cmd.lba, len); - if (!blk_is_writable(lu->qdev.conf.blk)) { - scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); - return 0; - } - if (r->req.cmd.buf[1] & 0xe0) { - goto illegal_request; - } - if (!check_lba_range(lu, r->req.cmd.lba, len)) { - goto illegal_lba; - } - r->sector = r->req.cmd.lba * (lu->qdev.blocksize / BDRV_SECTOR_SIZE); - r->sector_count = len * (lu->qdev.blocksize / BDRV_SECTOR_SIZE); - break; - default: - abort(); - illegal_request: - scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); - return 0; - illegal_lba: - scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); - return 0; - } - r->need_fua_emulation = ((r->req.cmd.buf[1] & 8) != 0); - if (r->sector_count == 0) { - scsi_req_complete(&r->req, GOOD); - } - assert(r->iov.iov_len == 0); - if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { - return -r->sector_count * BDRV_SECTOR_SIZE; - } else { - return r->sector_count * BDRV_SECTOR_SIZE; - } -} - -static void scsi_write_do_fua(UfsSCSIReq *r) -{ - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - - assert(r->req.aiocb == NULL); - assert(!r->req.io_canceled); - - if (r->need_fua_emulation) { - block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0, - BLOCK_ACCT_FLUSH); - r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r); - return; - } - - scsi_req_complete(&r->req, GOOD); - scsi_req_unref(&r->req); -} - -static void scsi_dma_complete_noio(UfsSCSIReq *r, int ret) -{ - assert(r->req.aiocb == NULL); - if (ufs_scsi_req_check_error(r, ret, false)) 
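/*
 * Note on ufs_process_scsi_cmd() above: ordinary LUs forward the CDB to the
 * wrapped scsi-hd (lu->scsi_dev) via scsi_req_new()/scsi_req_enqueue(), while
 * REPORT LUNS is emulated locally -- each ufs-lu owns a private SCSI bus, so
 * the generic target-level REPORT LUNS emulation could not see the other LUs
 * attached to the controller.
 */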
{ - goto done; - } - - r->sector += r->sector_count; - r->sector_count = 0; - if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { - scsi_write_do_fua(r); - return; - } else { - scsi_req_complete(&r->req, GOOD); - } - -done: - scsi_req_unref(&r->req); -} - -static void scsi_dma_complete(void *opaque, int ret) -{ - UfsSCSIReq *r = (UfsSCSIReq *)opaque; - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - - assert(r->req.aiocb != NULL); - r->req.aiocb = NULL; - - aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk)); - if (ret < 0) { - block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct); - } else { - block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct); - } - scsi_dma_complete_noio(r, ret); - aio_context_release(blk_get_aio_context(lu->qdev.conf.blk)); -} - -static BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, - BlockCompletionFunc *cb, void *cb_opaque, - void *opaque) -{ - UfsSCSIReq *r = opaque; - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - return blk_aio_preadv(lu->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); -} - -static void scsi_init_iovec(UfsSCSIReq *r, size_t size) -{ - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - - if (!r->iov.iov_base) { - r->buflen = size; - r->iov.iov_base = blk_blockalign(lu->qdev.conf.blk, r->buflen); + if (req->req_upiu.sc.cdb[0] == REPORT_LUNS) { + return ufs_emulate_scsi_cmd(lu, req); } - r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen); - qemu_iovec_init_external(&r->qiov, &r->iov, 1); -} -static void scsi_read_complete_noio(UfsSCSIReq *r, int ret) -{ - uint32_t n; + SCSIRequest *scsi_req = + scsi_req_new(lu->scsi_dev, task_tag, lu->lun, req->req_upiu.sc.cdb, + UFS_CDB_SIZE, req); - assert(r->req.aiocb == NULL); - if (ufs_scsi_req_check_error(r, ret, false)) { - goto done; + uint32_t len = scsi_req_enqueue(scsi_req); + if (len) { + scsi_req_continue(scsi_req); } - n = r->qiov.size / BDRV_SECTOR_SIZE; - r->sector += n; - r->sector_count -= n; - scsi_req_data(&r->req, r->qiov.size); - -done: - scsi_req_unref(&r->req); -} - -static void scsi_read_complete(void *opaque, int ret) -{ - UfsSCSIReq *r = (UfsSCSIReq *)opaque; - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - - assert(r->req.aiocb != NULL); - r->req.aiocb = NULL; - trace_ufs_scsi_read_data_count(r->sector_count); - aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk)); - if (ret < 0) { - block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct); - } else { - block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct); - trace_ufs_scsi_read_complete(r->req.tag, r->qiov.size); - } - scsi_read_complete_noio(r, ret); - aio_context_release(blk_get_aio_context(lu->qdev.conf.blk)); -} - -/* Actually issue a read to the block device. */ -static void scsi_do_read(UfsSCSIReq *r, int ret) -{ - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - - assert(r->req.aiocb == NULL); - if (ufs_scsi_req_check_error(r, ret, false)) { - goto done; - } - - /* The request is used as the AIO opaque value, so add a ref. 
*/ - scsi_req_ref(&r->req); - - if (r->req.sg) { - dma_acct_start(lu->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); - r->req.residual -= r->req.sg->size; - r->req.aiocb = dma_blk_io( - blk_get_aio_context(lu->qdev.conf.blk), r->req.sg, - r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, scsi_dma_readv, r, - scsi_dma_complete, r, DMA_DIRECTION_FROM_DEVICE); - } else { - scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); - block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, - r->qiov.size, BLOCK_ACCT_READ); - r->req.aiocb = scsi_dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov, - scsi_read_complete, r, r); - } - -done: - scsi_req_unref(&r->req); -} - -static void scsi_do_read_cb(void *opaque, int ret) -{ - UfsSCSIReq *r = (UfsSCSIReq *)opaque; - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - - assert(r->req.aiocb != NULL); - r->req.aiocb = NULL; - - aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk)); - if (ret < 0) { - block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct); - } else { - block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct); - } - scsi_do_read(opaque, ret); - aio_context_release(blk_get_aio_context(lu->qdev.conf.blk)); -} - -/* Read more data from scsi device into buffer. */ -static void scsi_read_data(SCSIRequest *req) -{ - UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - bool first; - - trace_ufs_scsi_read_data_count(r->sector_count); - if (r->sector_count == 0) { - /* This also clears the sense buffer for REQUEST SENSE. */ - scsi_req_complete(&r->req, GOOD); - return; - } - - /* No data transfer may already be in progress */ - assert(r->req.aiocb == NULL); - - /* The request is used as the AIO opaque value, so add a ref. */ - scsi_req_ref(&r->req); - if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { - trace_ufs_scsi_read_data_invalid(); - scsi_read_complete_noio(r, -EINVAL); - return; - } - - if (!blk_is_available(req->dev->conf.blk)) { - scsi_read_complete_noio(r, -ENOMEDIUM); - return; - } - - first = !r->started; - r->started = true; - if (first && r->need_fua_emulation) { - block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0, - BLOCK_ACCT_FLUSH); - r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_do_read_cb, r); - } else { - scsi_do_read(r, 0); - } -} - -static void scsi_write_complete_noio(UfsSCSIReq *r, int ret) -{ - uint32_t n; - - assert(r->req.aiocb == NULL); - if (ufs_scsi_req_check_error(r, ret, false)) { - goto done; - } - - n = r->qiov.size / BDRV_SECTOR_SIZE; - r->sector += n; - r->sector_count -= n; - if (r->sector_count == 0) { - scsi_write_do_fua(r); - return; - } else { - scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); - trace_ufs_scsi_write_complete_noio(r->req.tag, r->qiov.size); - scsi_req_data(&r->req, r->qiov.size); - } - -done: - scsi_req_unref(&r->req); -} - -static void scsi_write_complete(void *opaque, int ret) -{ - UfsSCSIReq *r = (UfsSCSIReq *)opaque; - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - - assert(r->req.aiocb != NULL); - r->req.aiocb = NULL; - - aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk)); - if (ret < 0) { - block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct); - } else { - block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct); - } - scsi_write_complete_noio(r, ret); - aio_context_release(blk_get_aio_context(lu->qdev.conf.blk)); -} - -static BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, - BlockCompletionFunc *cb, void *cb_opaque, - void *opaque) -{ - UfsSCSIReq *r = opaque; - UfsLu *lu = 
DO_UPCAST(UfsLu, qdev, r->req.dev); - return blk_aio_pwritev(lu->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); -} - -static void scsi_write_data(SCSIRequest *req) -{ - UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); - UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); - - /* No data transfer may already be in progress */ - assert(r->req.aiocb == NULL); - - /* The request is used as the AIO opaque value, so add a ref. */ - scsi_req_ref(&r->req); - if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { - trace_ufs_scsi_write_data_invalid(); - scsi_write_complete_noio(r, -EINVAL); - return; - } - - if (!r->req.sg && !r->qiov.size) { - /* Called for the first time. Ask the driver to send us more data. */ - r->started = true; - scsi_write_complete_noio(r, 0); - return; - } - if (!blk_is_available(req->dev->conf.blk)) { - scsi_write_complete_noio(r, -ENOMEDIUM); - return; - } - - if (r->req.sg) { - dma_acct_start(lu->qdev.conf.blk, &r->acct, r->req.sg, - BLOCK_ACCT_WRITE); - r->req.residual -= r->req.sg->size; - r->req.aiocb = dma_blk_io( - blk_get_aio_context(lu->qdev.conf.blk), r->req.sg, - r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, scsi_dma_writev, r, - scsi_dma_complete, r, DMA_DIRECTION_TO_DEVICE); - } else { - block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, - r->qiov.size, BLOCK_ACCT_WRITE); - r->req.aiocb = scsi_dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov, - scsi_write_complete, r, r); - } -} - -static const SCSIReqOps ufs_scsi_emulate_reqops = { - .size = sizeof(UfsSCSIReq), - .free_req = ufs_scsi_free_request, - .send_command = ufs_scsi_emulate_command, - .read_data = ufs_scsi_emulate_read_data, - .write_data = ufs_scsi_emulate_write_data, - .get_buf = ufs_scsi_get_buf, -}; - -static const SCSIReqOps ufs_scsi_dma_reqops = { - .size = sizeof(UfsSCSIReq), - .free_req = ufs_scsi_free_request, - .send_command = ufs_scsi_dma_command, - .read_data = scsi_read_data, - .write_data = scsi_write_data, - .get_buf = ufs_scsi_get_buf, -}; - -/* - * Following commands are not yet supported - * PRE_FETCH(10), - * UNMAP, - * WRITE_BUFFER, READ_BUFFER, - * SECURITY_PROTOCOL_IN, SECURITY_PROTOCOL_OUT - */ -static const SCSIReqOps *const ufs_scsi_reqops_dispatch[256] = { - [TEST_UNIT_READY] = &ufs_scsi_emulate_reqops, - [INQUIRY] = &ufs_scsi_emulate_reqops, - [MODE_SENSE_10] = &ufs_scsi_emulate_reqops, - [START_STOP] = &ufs_scsi_emulate_reqops, - [READ_CAPACITY_10] = &ufs_scsi_emulate_reqops, - [REQUEST_SENSE] = &ufs_scsi_emulate_reqops, - [SYNCHRONIZE_CACHE] = &ufs_scsi_emulate_reqops, - [MODE_SELECT_10] = &ufs_scsi_emulate_reqops, - [VERIFY_10] = &ufs_scsi_emulate_reqops, - [FORMAT_UNIT] = &ufs_scsi_emulate_reqops, - [SERVICE_ACTION_IN_16] = &ufs_scsi_emulate_reqops, - [SEND_DIAGNOSTIC] = &ufs_scsi_emulate_reqops, - - [READ_6] = &ufs_scsi_dma_reqops, - [READ_10] = &ufs_scsi_dma_reqops, - [WRITE_6] = &ufs_scsi_dma_reqops, - [WRITE_10] = &ufs_scsi_dma_reqops, -}; - -static SCSIRequest *scsi_new_request(SCSIDevice *dev, uint32_t tag, - uint32_t lun, uint8_t *buf, - void *hba_private) -{ - UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev); - SCSIRequest *req; - const SCSIReqOps *ops; - uint8_t command; - - command = buf[0]; - ops = ufs_scsi_reqops_dispatch[command]; - if (!ops) { - ops = &ufs_scsi_emulate_reqops; - } - req = scsi_req_alloc(ops, &lu->qdev, tag, lun, hba_private); - - return req; + return UFS_REQUEST_NO_COMPLETE; } static Property ufs_lu_props[] = { - DEFINE_PROP_DRIVE("drive", UfsLu, qdev.conf.blk), + DEFINE_PROP_DRIVE("drive", UfsLu, conf.blk), + DEFINE_PROP_UINT8("lun", UfsLu, lun, 0), 
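/*
 * Illustrative command line for the reworked ufs-lu above (property names
 * "drive" and "lun" are the ones defined by this patch; the image name is an
 * example, and with a single controller the LU attaches to its UFS bus
 * automatically -- add bus= to pick a specific one):
 *
 *   -device ufs,id=ufs0 \
 *   -drive file=ufs-lu0.img,if=none,id=drv0 \
 *   -device ufs-lu,drive=drv0,lun=0
 */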
DEFINE_PROP_END_OF_LIST(), }; -static bool ufs_lu_brdv_init(UfsLu *lu, Error **errp) -{ - SCSIDevice *dev = &lu->qdev; - bool read_only; - - if (!lu->qdev.conf.blk) { - error_setg(errp, "drive property not set"); - return false; - } - - if (!blkconf_blocksizes(&lu->qdev.conf, errp)) { - return false; - } - - if (blk_get_aio_context(lu->qdev.conf.blk) != qemu_get_aio_context() && - !lu->qdev.hba_supports_iothread) { - error_setg(errp, "HBA does not support iothreads"); - return false; - } - - read_only = !blk_supports_write_perm(lu->qdev.conf.blk); - - if (!blkconf_apply_backend_options(&dev->conf, read_only, - dev->type == TYPE_DISK, errp)) { - return false; - } - - if (blk_is_sg(lu->qdev.conf.blk)) { - error_setg(errp, "unwanted /dev/sg*"); - return false; - } - - blk_iostatus_enable(lu->qdev.conf.blk); - return true; -} - static bool ufs_add_lu(UfsHc *u, UfsLu *lu, Error **errp) { - BlockBackend *blk = lu->qdev.conf.blk; + BlockBackend *blk = lu->conf.blk; int64_t brdv_len = blk_getlength(blk); uint64_t raw_dev_cap = be64_to_cpu(u->geometry_desc.total_raw_device_capacity); @@ -1288,156 +304,143 @@ static bool ufs_add_lu(UfsHc *u, UfsLu *lu, Error **errp) return true; } -static inline uint8_t ufs_log2(uint64_t input) +void ufs_init_wlu(UfsLu *wlu, uint8_t wlun) { - int log = 0; - while (input >>= 1) { - log++; - } - return log; + wlu->lun = wlun; + wlu->scsi_op = &ufs_emulate_scsi_cmd; } static void ufs_init_lu(UfsLu *lu) { - BlockBackend *blk = lu->qdev.conf.blk; + BlockBackend *blk = lu->conf.blk; int64_t brdv_len = blk_getlength(blk); - lu->lun = lu->qdev.lun; memset(&lu->unit_desc, 0, sizeof(lu->unit_desc)); lu->unit_desc.length = sizeof(UnitDescriptor); lu->unit_desc.descriptor_idn = UFS_QUERY_DESC_IDN_UNIT; lu->unit_desc.lu_enable = 0x01; - lu->unit_desc.logical_block_size = ufs_log2(lu->qdev.blocksize); - lu->unit_desc.unit_index = lu->qdev.lun; + lu->unit_desc.logical_block_size = UFS_BLOCK_SIZE_SHIFT; + lu->unit_desc.unit_index = lu->lun; lu->unit_desc.logical_block_count = cpu_to_be64(brdv_len / (1 << lu->unit_desc.logical_block_size)); + + lu->scsi_op = &ufs_process_scsi_cmd; } static bool ufs_lu_check_constraints(UfsLu *lu, Error **errp) { - if (!lu->qdev.conf.blk) { + if (!lu->conf.blk) { error_setg(errp, "drive property not set"); return false; } - if (lu->qdev.channel != 0) { - error_setg(errp, "ufs logical unit does not support channel"); + if (lu->lun >= UFS_MAX_LUS) { + error_setg(errp, "lun must be between 0 and %d", UFS_MAX_LUS - 1); return false; } - if (lu->qdev.lun >= UFS_MAX_LUS) { - error_setg(errp, "lun must be between 1 and %d", UFS_MAX_LUS - 1); - return false; + return true; +} + +static void ufs_init_scsi_device(UfsLu *lu, BlockBackend *blk, Error **errp) +{ + DeviceState *scsi_dev; + + scsi_bus_init(&lu->bus, sizeof(lu->bus), DEVICE(lu), &ufs_scsi_info); + + blk_ref(blk); + blk_detach_dev(blk, DEVICE(lu)); + lu->conf.blk = NULL; + + /* + * The ufs-lu is the device that is wrapping the scsi-hd. It owns a virtual + * SCSI bus that serves the scsi-hd. 
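/*
 * Note on the blk_ref()/blk_detach_dev() sequence in ufs_init_scsi_device()
 * above: the BlockBackend arrives attached to the ufs-lu through its "drive"
 * property, but it is the internal scsi-hd that must own it.  The extra
 * reference keeps the backend alive while it is detached from the ufs-lu and
 * handed to the scsi-hd via its drive property just below, after which the
 * temporary reference is dropped with blk_unref().
 */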
+ */ + scsi_dev = qdev_new("scsi-hd"); + object_property_add_child(OBJECT(&lu->bus), "ufs-scsi", OBJECT(scsi_dev)); + + qdev_prop_set_uint32(scsi_dev, "physical_block_size", UFS_BLOCK_SIZE); + qdev_prop_set_uint32(scsi_dev, "logical_block_size", UFS_BLOCK_SIZE); + qdev_prop_set_uint32(scsi_dev, "scsi-id", 0); + qdev_prop_set_uint32(scsi_dev, "lun", lu->lun); + if (!qdev_prop_set_drive_err(scsi_dev, "drive", blk, errp)) { + object_unparent(OBJECT(scsi_dev)); + return; } - return true; + if (!qdev_realize_and_unref(scsi_dev, &lu->bus.qbus, errp)) { + object_unparent(OBJECT(scsi_dev)); + return; + } + + blk_unref(blk); + lu->scsi_dev = SCSI_DEVICE(scsi_dev); } -static void ufs_lu_realize(SCSIDevice *dev, Error **errp) +static void ufs_lu_realize(DeviceState *dev, Error **errp) { UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev); - BusState *s = qdev_get_parent_bus(&dev->qdev); + BusState *s = qdev_get_parent_bus(dev); UfsHc *u = UFS(s->parent); - AioContext *ctx = NULL; - uint64_t nb_sectors, nb_blocks; + BlockBackend *blk = lu->conf.blk; if (!ufs_lu_check_constraints(lu, errp)) { return; } - ctx = blk_get_aio_context(lu->qdev.conf.blk); - aio_context_acquire(ctx); - if (!blkconf_blocksizes(&lu->qdev.conf, errp)) { - goto out; + if (!blk) { + error_setg(errp, "drive property not set"); + return; + } + + if (!blkconf_blocksizes(&lu->conf, errp)) { + return; } - lu->qdev.blocksize = UFS_BLOCK_SIZE; - blk_get_geometry(lu->qdev.conf.blk, &nb_sectors); - nb_blocks = nb_sectors / (lu->qdev.blocksize / BDRV_SECTOR_SIZE); - if (nb_blocks > UINT32_MAX) { - nb_blocks = UINT32_MAX; + if (!blkconf_apply_backend_options(&lu->conf, !blk_supports_write_perm(blk), + true, errp)) { + return; } - lu->qdev.max_lba = nb_blocks; - lu->qdev.type = TYPE_DISK; ufs_init_lu(lu); if (!ufs_add_lu(u, lu, errp)) { - goto out; + return; } - ufs_lu_brdv_init(lu, errp); - -out: - aio_context_release(ctx); + ufs_init_scsi_device(lu, blk, errp); } -static void ufs_lu_unrealize(SCSIDevice *dev) +static void ufs_lu_unrealize(DeviceState *dev) { UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev); - blk_drain(lu->qdev.conf.blk); -} - -static void ufs_wlu_realize(DeviceState *qdev, Error **errp) -{ - UfsWLu *wlu = UFSWLU(qdev); - SCSIDevice *dev = &wlu->qdev; - - if (!is_wlun(dev->lun)) { - error_setg(errp, "not well-known logical unit number"); - return; + if (lu->scsi_dev) { + object_unref(OBJECT(lu->scsi_dev)); + lu->scsi_dev = NULL; } - - QTAILQ_INIT(&dev->requests); } static void ufs_lu_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); - SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(oc); - sc->realize = ufs_lu_realize; - sc->unrealize = ufs_lu_unrealize; - sc->alloc_req = scsi_new_request; + dc->realize = ufs_lu_realize; + dc->unrealize = ufs_lu_unrealize; dc->bus_type = TYPE_UFS_BUS; device_class_set_props(dc, ufs_lu_props); dc->desc = "Virtual UFS logical unit"; } -static void ufs_wlu_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(oc); - - /* - * The realize() function of TYPE_SCSI_DEVICE causes a segmentation fault - * if a block drive does not exist. Define a new realize function for - * well-known LUs that do not have a block drive. 
- */ - dc->realize = ufs_wlu_realize; - sc->alloc_req = scsi_new_request; - dc->bus_type = TYPE_UFS_BUS; - dc->desc = "Virtual UFS well-known logical unit"; -} - static const TypeInfo ufs_lu_info = { .name = TYPE_UFS_LU, - .parent = TYPE_SCSI_DEVICE, + .parent = TYPE_DEVICE, .class_init = ufs_lu_class_init, .instance_size = sizeof(UfsLu), }; -static const TypeInfo ufs_wlu_info = { - .name = TYPE_UFS_WLU, - .parent = TYPE_SCSI_DEVICE, - .class_init = ufs_wlu_class_init, - .instance_size = sizeof(UfsWLu), -}; - static void ufs_lu_register_types(void) { type_register_static(&ufs_lu_info); - type_register_static(&ufs_wlu_info); } type_init(ufs_lu_register_types) diff --git a/hw/ufs/trace-events b/hw/ufs/trace-events index 1e55fb0d08..665e1a942b 100644 --- a/hw/ufs/trace-events +++ b/hw/ufs/trace-events @@ -12,31 +12,6 @@ ufs_exec_scsi_cmd(uint32_t slot, uint8_t lun, uint8_t opcode) "slot %"PRIu32", l ufs_exec_query_cmd(uint32_t slot, uint8_t opcode) "slot %"PRIu32", opcode 0x%"PRIx8"" ufs_process_uiccmd(uint32_t uiccmd, uint32_t ucmdarg1, uint32_t ucmdarg2, uint32_t ucmdarg3) "uiccmd 0x%"PRIx32", ucmdarg1 0x%"PRIx32", ucmdarg2 0x%"PRIx32", ucmdarg3 0x%"PRIx32"" -# lu.c -ufs_scsi_check_condition(uint32_t tag, uint8_t key, uint8_t asc, uint8_t ascq) "Command complete tag=0x%x sense=%d/%d/%d" -ufs_scsi_read_complete(uint32_t tag, size_t size) "Data ready tag=0x%x len=%zd" -ufs_scsi_read_data_count(uint32_t sector_count) "Read sector_count=%d" -ufs_scsi_read_data_invalid(void) "Data transfer direction invalid" -ufs_scsi_write_complete_noio(uint32_t tag, size_t size) "Write complete tag=0x%x more=%zd" -ufs_scsi_write_data_invalid(void) "Data transfer direction invalid" -ufs_scsi_emulate_vpd_page_00(size_t xfer) "Inquiry EVPD[Supported pages] buffer size %zd" -ufs_scsi_emulate_vpd_page_80_not_supported(void) "Inquiry EVPD[Serial number] not supported" -ufs_scsi_emulate_vpd_page_80(size_t xfer) "Inquiry EVPD[Serial number] buffer size %zd" -ufs_scsi_emulate_vpd_page_87(size_t xfer) "Inquiry EVPD[Mode Page Policy] buffer size %zd" -ufs_scsi_emulate_mode_sense(int cmd, int page, size_t xfer, int control) "Mode Sense(%d) (page %d, xfer %zd, page_control %d)" -ufs_scsi_emulate_read_data(int buflen) "Read buf_len=%d" -ufs_scsi_emulate_write_data(int buflen) "Write buf_len=%d" -ufs_scsi_emulate_command_START_STOP(void) "START STOP UNIT" -ufs_scsi_emulate_command_FORMAT_UNIT(void) "FORMAT UNIT" -ufs_scsi_emulate_command_SEND_DIAGNOSTIC(void) "SEND DIAGNOSTIC" -ufs_scsi_emulate_command_SAI_16(void) "SAI READ CAPACITY(16)" -ufs_scsi_emulate_command_SAI_unsupported(void) "Unsupported Service Action In" -ufs_scsi_emulate_command_MODE_SELECT_10(size_t xfer) "Mode Select(10) (len %zd)" -ufs_scsi_emulate_command_VERIFY(int bytchk) "Verify (bytchk %d)" -ufs_scsi_emulate_command_UNKNOWN(int cmd, const char *name) "Unknown SCSI command (0x%2.2x=%s)" -ufs_scsi_dma_command_READ(uint64_t lba, uint32_t len) "Read (block %" PRIu64 ", count %u)" -ufs_scsi_dma_command_WRITE(uint64_t lba, int len) "Write (block %" PRIu64 ", count %u)" - # error condition ufs_err_dma_read_utrd(uint32_t slot, uint64_t addr) "failed to read utrd. UTRLDBR slot %"PRIu32", UTRD dma addr %"PRIu64"" ufs_err_dma_read_req_upiu(uint32_t slot, uint64_t addr) "failed to read req upiu. 
UTRLDBR slot %"PRIu32", request upiu addr %"PRIu64"" diff --git a/hw/ufs/ufs.c b/hw/ufs/ufs.c index 2e6d582cc3..68c5f1f6c9 100644 --- a/hw/ufs/ufs.c +++ b/hw/ufs/ufs.c @@ -24,6 +24,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "migration/vmstate.h" +#include "scsi/constants.h" #include "trace.h" #include "ufs.h" @@ -62,8 +63,6 @@ static MemTxResult ufs_addr_write(UfsHc *u, hwaddr addr, const void *buf, return pci_dma_write(PCI_DEVICE(u), addr, buf, size); } -static void ufs_complete_req(UfsRequest *req, UfsReqResult req_result); - static inline hwaddr ufs_get_utrd_addr(UfsHc *u, uint32_t slot) { hwaddr utrl_base_addr = (((hwaddr)u->reg.utrlbau) << 32) + u->reg.utrlba; @@ -163,11 +162,13 @@ static MemTxResult ufs_dma_read_prdt(UfsRequest *req) req->sg = g_malloc0(sizeof(QEMUSGList)); pci_dma_sglist_init(req->sg, PCI_DEVICE(u), prdt_len); + req->data_len = 0; for (uint16_t i = 0; i < prdt_len; ++i) { hwaddr data_dma_addr = le64_to_cpu(prd_entries[i].addr); uint32_t data_byte_count = le32_to_cpu(prd_entries[i].size) + 1; qemu_sglist_add(req->sg, data_dma_addr, data_byte_count); + req->data_len += data_byte_count; } return MEMTX_OK; } @@ -433,23 +434,10 @@ static const MemoryRegionOps ufs_mmio_ops = { }, }; -static QEMUSGList *ufs_get_sg_list(SCSIRequest *scsi_req) -{ - UfsRequest *req = scsi_req->hba_private; - return req->sg; -} - -static void ufs_build_upiu_sense_data(UfsRequest *req, SCSIRequest *scsi_req) -{ - req->rsp_upiu.sr.sense_data_len = cpu_to_be16(scsi_req->sense_len); - assert(scsi_req->sense_len <= SCSI_SENSE_LEN); - memcpy(req->rsp_upiu.sr.sense_data, scsi_req->sense, scsi_req->sense_len); -} -static void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, - uint8_t flags, uint8_t response, - uint8_t scsi_status, - uint16_t data_segment_length) +void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags, + uint8_t response, uint8_t scsi_status, + uint16_t data_segment_length) { memcpy(&req->rsp_upiu.header, &req->req_upiu.header, sizeof(UtpUpiuHeader)); req->rsp_upiu.header.trans_type = trans_type; @@ -459,96 +447,38 @@ static void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length); } -static void ufs_scsi_command_complete(SCSIRequest *scsi_req, size_t resid) -{ - UfsRequest *req = scsi_req->hba_private; - int16_t status = scsi_req->status; - uint32_t expected_len = be32_to_cpu(req->req_upiu.sc.exp_data_transfer_len); - uint32_t transfered_len = scsi_req->cmd.xfer - resid; - uint8_t flags = 0, response = UFS_COMMAND_RESULT_SUCESS; - uint16_t data_segment_length; - - if (expected_len > transfered_len) { - req->rsp_upiu.sr.residual_transfer_count = - cpu_to_be32(expected_len - transfered_len); - flags |= UFS_UPIU_FLAG_UNDERFLOW; - } else if (expected_len < transfered_len) { - req->rsp_upiu.sr.residual_transfer_count = - cpu_to_be32(transfered_len - expected_len); - flags |= UFS_UPIU_FLAG_OVERFLOW; - } - - if (status != 0) { - ufs_build_upiu_sense_data(req, scsi_req); - response = UFS_COMMAND_RESULT_FAIL; - } - - data_segment_length = cpu_to_be16(scsi_req->sense_len + - sizeof(req->rsp_upiu.sr.sense_data_len)); - ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_RESPONSE, flags, response, - status, data_segment_length); - - ufs_complete_req(req, UFS_REQUEST_SUCCESS); - - scsi_req->hba_private = NULL; - scsi_req_unref(scsi_req); -} - -static const struct SCSIBusInfo ufs_scsi_info = { - .tcq = true, - .max_target = 0, - .max_lun = UFS_MAX_LUS, - .max_channel = 
0, - - .get_sg_list = ufs_get_sg_list, - .complete = ufs_scsi_command_complete, -}; - static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req) { UfsHc *u = req->hc; uint8_t lun = req->req_upiu.header.lun; - uint8_t task_tag = req->req_upiu.header.task_tag; - SCSIDevice *dev = NULL; + + UfsLu *lu = NULL; trace_ufs_exec_scsi_cmd(req->slot, lun, req->req_upiu.sc.cdb[0]); - if (!is_wlun(lun)) { - if (lun >= u->device_desc.number_lu) { - trace_ufs_err_scsi_cmd_invalid_lun(lun); - return UFS_REQUEST_FAIL; - } else if (u->lus[lun] == NULL) { - trace_ufs_err_scsi_cmd_invalid_lun(lun); - return UFS_REQUEST_FAIL; - } + if (!is_wlun(lun) && (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) { + trace_ufs_err_scsi_cmd_invalid_lun(lun); + return UFS_REQUEST_FAIL; } switch (lun) { case UFS_UPIU_REPORT_LUNS_WLUN: - dev = &u->report_wlu->qdev; + lu = &u->report_wlu; break; case UFS_UPIU_UFS_DEVICE_WLUN: - dev = &u->dev_wlu->qdev; + lu = &u->dev_wlu; break; case UFS_UPIU_BOOT_WLUN: - dev = &u->boot_wlu->qdev; + lu = &u->boot_wlu; break; case UFS_UPIU_RPMB_WLUN: - dev = &u->rpmb_wlu->qdev; + lu = &u->rpmb_wlu; break; default: - dev = &u->lus[lun]->qdev; - } - - SCSIRequest *scsi_req = scsi_req_new( - dev, task_tag, lun, req->req_upiu.sc.cdb, UFS_CDB_SIZE, req); - - uint32_t len = scsi_req_enqueue(scsi_req); - if (len) { - scsi_req_continue(scsi_req); + lu = u->lus[lun]; } - return UFS_REQUEST_NO_COMPLETE; + return lu->scsi_op(lu, req); } static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req) @@ -1137,7 +1067,7 @@ static void ufs_process_req(void *opaque) } } -static void ufs_complete_req(UfsRequest *req, UfsReqResult req_result) +void ufs_complete_req(UfsRequest *req, UfsReqResult req_result) { UfsHc *u = req->hc; assert(req->state == UFS_REQUEST_RUNNING); @@ -1159,6 +1089,7 @@ static void ufs_clear_req(UfsRequest *req) qemu_sglist_destroy(req->sg); g_free(req->sg); req->sg = NULL; + req->data_len = 0; } memset(&req->utrd, 0, sizeof(req->utrd)); @@ -1317,28 +1248,6 @@ static void ufs_init_hc(UfsHc *u) u->flags.permanently_disable_fw_update = 1; } -static bool ufs_init_wlu(UfsHc *u, UfsWLu **wlu, uint8_t wlun, Error **errp) -{ - UfsWLu *new_wlu = UFSWLU(qdev_new(TYPE_UFS_WLU)); - - qdev_prop_set_uint32(DEVICE(new_wlu), "lun", wlun); - - /* - * The well-known lu shares the same bus as the normal lu. If the well-known - * lu writes the same channel value as the normal lu, the report will be - * made not only for the normal lu but also for the well-known lu at - * REPORT_LUN time. To prevent this, the channel value of normal lu is fixed - * to 0 and the channel value of well-known lu is fixed to 1. 
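
The rewritten ufs_exec_scsi_cmd() above no longer builds a SCSIRequest itself: it resolves the owning UfsLu (regular or well-known) and defers to a per-LU scsi_op callback. A minimal sketch of that dispatch pattern follows, assuming the UfsScsiOp typedef added later in ufs.h; the handler name is hypothetical and stands in for the real per-LU emulation code not shown in this hunk.

    #include <stdint.h>

    /* Sketch only: mirrors the UfsScsiOp callback shape from ufs.h. */
    typedef enum {
        UFS_REQUEST_SUCCESS,
        UFS_REQUEST_FAIL,
        UFS_REQUEST_NO_COMPLETE,
    } UfsReqResult;

    typedef struct UfsRequest UfsRequest;
    typedef struct UfsLu UfsLu;
    typedef UfsReqResult (*UfsScsiOp)(UfsLu *lu, UfsRequest *req);

    struct UfsLu {
        uint8_t lun;
        UfsScsiOp scsi_op;      /* installed when the LU is realized */
    };

    /* The host controller no longer knows about SCSI buses; it simply
     * forwards the UPIU to whichever logical unit owns the LUN. */
    static UfsReqResult ufs_dispatch_scsi(UfsLu *lu, UfsRequest *req)
    {
        return lu->scsi_op(lu, req);
    }
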
- */ - qdev_prop_set_uint32(DEVICE(new_wlu), "channel", 1); - if (!qdev_realize_and_unref(DEVICE(new_wlu), BUS(&u->bus), errp)) { - return false; - } - - *wlu = new_wlu; - return true; -} - static void ufs_realize(PCIDevice *pci_dev, Error **errp) { UfsHc *u = UFS(pci_dev); @@ -1349,53 +1258,21 @@ static void ufs_realize(PCIDevice *pci_dev, Error **errp) qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev, u->parent_obj.qdev.id); - u->bus.parent_bus.info = &ufs_scsi_info; ufs_init_state(u); ufs_init_hc(u); ufs_init_pci(u, pci_dev); - if (!ufs_init_wlu(u, &u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN, errp)) { - return; - } - - if (!ufs_init_wlu(u, &u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN, errp)) { - return; - } - - if (!ufs_init_wlu(u, &u->boot_wlu, UFS_UPIU_BOOT_WLUN, errp)) { - return; - } - - if (!ufs_init_wlu(u, &u->rpmb_wlu, UFS_UPIU_RPMB_WLUN, errp)) { - return; - } + ufs_init_wlu(&u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN); + ufs_init_wlu(&u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN); + ufs_init_wlu(&u->boot_wlu, UFS_UPIU_BOOT_WLUN); + ufs_init_wlu(&u->rpmb_wlu, UFS_UPIU_RPMB_WLUN); } static void ufs_exit(PCIDevice *pci_dev) { UfsHc *u = UFS(pci_dev); - if (u->dev_wlu) { - object_unref(OBJECT(u->dev_wlu)); - u->dev_wlu = NULL; - } - - if (u->report_wlu) { - object_unref(OBJECT(u->report_wlu)); - u->report_wlu = NULL; - } - - if (u->rpmb_wlu) { - object_unref(OBJECT(u->rpmb_wlu)); - u->rpmb_wlu = NULL; - } - - if (u->boot_wlu) { - object_unref(OBJECT(u->boot_wlu)); - u->boot_wlu = NULL; - } - qemu_bh_delete(u->doorbell_bh); qemu_bh_delete(u->complete_bh); @@ -1437,43 +1314,18 @@ static void ufs_class_init(ObjectClass *oc, void *data) static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev, Error **errp) { - SCSIDevice *dev = SCSI_DEVICE(qdev); - UfsBusClass *ubc = UFS_BUS_GET_CLASS(qbus); - UfsHc *u = UFS(qbus->parent); - - if (strcmp(object_get_typename(OBJECT(dev)), TYPE_UFS_WLU) == 0) { - if (dev->lun != UFS_UPIU_REPORT_LUNS_WLUN && - dev->lun != UFS_UPIU_UFS_DEVICE_WLUN && - dev->lun != UFS_UPIU_BOOT_WLUN && dev->lun != UFS_UPIU_RPMB_WLUN) { - error_setg(errp, "bad well-known lun: %d", dev->lun); - return false; - } - - if ((dev->lun == UFS_UPIU_REPORT_LUNS_WLUN && u->report_wlu != NULL) || - (dev->lun == UFS_UPIU_UFS_DEVICE_WLUN && u->dev_wlu != NULL) || - (dev->lun == UFS_UPIU_BOOT_WLUN && u->boot_wlu != NULL) || - (dev->lun == UFS_UPIU_RPMB_WLUN && u->rpmb_wlu != NULL)) { - error_setg(errp, "well-known lun %d already exists", dev->lun); - return false; - } - - return true; - } - - if (strcmp(object_get_typename(OBJECT(dev)), TYPE_UFS_LU) != 0) { + if (strcmp(object_get_typename(OBJECT(qdev)), TYPE_UFS_LU) != 0) { error_setg(errp, "%s cannot be connected to ufs-bus", - object_get_typename(OBJECT(dev))); + object_get_typename(OBJECT(qdev))); return false; } - return ubc->parent_check_address(qbus, qdev, errp); + return true; } static void ufs_bus_class_init(ObjectClass *class, void *data) { BusClass *bc = BUS_CLASS(class); - UfsBusClass *ubc = UFS_BUS_CLASS(class); - ubc->parent_check_address = bc->check_address; bc->check_address = ufs_bus_check_address; } @@ -1487,7 +1339,7 @@ static const TypeInfo ufs_info = { static const TypeInfo ufs_bus_info = { .name = TYPE_UFS_BUS, - .parent = TYPE_SCSI_BUS, + .parent = TYPE_BUS, .class_init = ufs_bus_class_init, .class_size = sizeof(UfsBusClass), .instance_size = sizeof(UfsBus), diff --git a/hw/ufs/ufs.h b/hw/ufs/ufs.h index f244228617..8fda94f4ef 100644 --- a/hw/ufs/ufs.h +++ b/hw/ufs/ufs.h @@ -16,7 +16,8 @@ #include 
"block/ufs.h" #define UFS_MAX_LUS 32 -#define UFS_BLOCK_SIZE 4096 +#define UFS_BLOCK_SIZE_SHIFT 12 +#define UFS_BLOCK_SIZE (1 << UFS_BLOCK_SIZE_SHIFT) typedef struct UfsBusClass { BusClass parent_class; @@ -24,7 +25,7 @@ typedef struct UfsBusClass { } UfsBusClass; typedef struct UfsBus { - SCSIBus parent_bus; + BusState parent_bus; } UfsBus; #define TYPE_UFS_BUS "ufs-bus" @@ -55,19 +56,22 @@ typedef struct UfsRequest { /* for scsi command */ QEMUSGList *sg; + uint32_t data_len; } UfsRequest; +struct UfsLu; +typedef UfsReqResult (*UfsScsiOp)(struct UfsLu *, UfsRequest *); + typedef struct UfsLu { - SCSIDevice qdev; + DeviceState qdev; uint8_t lun; UnitDescriptor unit_desc; + SCSIBus bus; + SCSIDevice *scsi_dev; + BlockConf conf; + UfsScsiOp scsi_op; } UfsLu; -typedef struct UfsWLu { - SCSIDevice qdev; - uint8_t lun; -} UfsWLu; - typedef struct UfsParams { char *serial; uint8_t nutrs; /* Number of UTP Transfer Request Slots */ @@ -84,10 +88,10 @@ typedef struct UfsHc { UfsRequest *req_list; UfsLu *lus[UFS_MAX_LUS]; - UfsWLu *report_wlu; - UfsWLu *dev_wlu; - UfsWLu *boot_wlu; - UfsWLu *rpmb_wlu; + UfsLu report_wlu; + UfsLu dev_wlu; + UfsLu boot_wlu; + UfsLu rpmb_wlu; DeviceDescriptor device_desc; GeometryDescriptor geometry_desc; Attributes attributes; @@ -104,9 +108,6 @@ typedef struct UfsHc { #define TYPE_UFS_LU "ufs-lu" #define UFSLU(obj) OBJECT_CHECK(UfsLu, (obj), TYPE_UFS_LU) -#define TYPE_UFS_WLU "ufs-wlu" -#define UFSWLU(obj) OBJECT_CHECK(UfsWLu, (obj), TYPE_UFS_WLU) - typedef enum UfsQueryFlagPerm { UFS_QUERY_FLAG_NONE = 0x0, UFS_QUERY_FLAG_READ = 0x1, @@ -128,4 +129,9 @@ static inline bool is_wlun(uint8_t lun) lun == UFS_UPIU_RPMB_WLUN); } +void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags, + uint8_t response, uint8_t scsi_status, + uint16_t data_segment_length); +void ufs_complete_req(UfsRequest *req, UfsReqResult req_result); +void ufs_init_wlu(UfsLu *wlu, uint8_t wlun); #endif /* HW_UFS_UFS_H */ diff --git a/include/block/ufs.h b/include/block/ufs.h index 7631a5af10..0b6ec0814d 100644 --- a/include/block/ufs.h +++ b/include/block/ufs.h @@ -379,7 +379,7 @@ typedef struct Attributes { /* Command response result code */ typedef enum CommandRespCode { - UFS_COMMAND_RESULT_SUCESS = 0x00, + UFS_COMMAND_RESULT_SUCCESS = 0x00, UFS_COMMAND_RESULT_FAIL = 0x01, } CommandRespCode; diff --git a/include/hw/arm/allwinner-a10.h b/include/hw/arm/allwinner-a10.h index cd1465c613..2eb83a17ea 100644 --- a/include/hw/arm/allwinner-a10.h +++ b/include/hw/arm/allwinner-a10.h @@ -1,7 +1,6 @@ #ifndef HW_ARM_ALLWINNER_A10_H #define HW_ARM_ALLWINNER_A10_H -#include "hw/arm/boot.h" #include "hw/timer/allwinner-a10-pit.h" #include "hw/intc/allwinner-a10-pic.h" #include "hw/net/allwinner_emac.h" diff --git a/include/hw/arm/allwinner-h3.h b/include/hw/arm/allwinner-h3.h index f15d6d7cc7..24ba4e1bf4 100644 --- a/include/hw/arm/allwinner-h3.h +++ b/include/hw/arm/allwinner-h3.h @@ -36,7 +36,6 @@ #define HW_ARM_ALLWINNER_H3_H #include "qom/object.h" -#include "hw/arm/boot.h" #include "hw/timer/allwinner-a10-pit.h" #include "hw/intc/arm_gic.h" #include "hw/misc/allwinner-h3-ccu.h" diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h index 72710d3edc..6e1ac9d4c1 100644 --- a/include/hw/arm/allwinner-r40.h +++ b/include/hw/arm/allwinner-r40.h @@ -21,7 +21,6 @@ #define HW_ARM_ALLWINNER_R40_H #include "qom/object.h" -#include "hw/arm/boot.h" #include "hw/timer/allwinner-a10-pit.h" #include "hw/intc/arm_gic.h" #include "hw/sd/allwinner-sdhost.h" diff --git 
a/include/hw/arm/fsl-imx25.h b/include/hw/arm/fsl-imx25.h index 1b1086e945..df2f83980f 100644 --- a/include/hw/arm/fsl-imx25.h +++ b/include/hw/arm/fsl-imx25.h @@ -17,7 +17,6 @@ #ifndef FSL_IMX25_H #define FSL_IMX25_H -#include "hw/arm/boot.h" #include "hw/intc/imx_avic.h" #include "hw/misc/imx25_ccm.h" #include "hw/char/imx_serial.h" diff --git a/include/hw/arm/fsl-imx31.h b/include/hw/arm/fsl-imx31.h index c116a73e0b..40c593a5cf 100644 --- a/include/hw/arm/fsl-imx31.h +++ b/include/hw/arm/fsl-imx31.h @@ -17,7 +17,6 @@ #ifndef FSL_IMX31_H #define FSL_IMX31_H -#include "hw/arm/boot.h" #include "hw/intc/imx_avic.h" #include "hw/misc/imx31_ccm.h" #include "hw/char/imx_serial.h" diff --git a/include/hw/arm/fsl-imx6.h b/include/hw/arm/fsl-imx6.h index 5b4d48da08..519b871014 100644 --- a/include/hw/arm/fsl-imx6.h +++ b/include/hw/arm/fsl-imx6.h @@ -17,7 +17,6 @@ #ifndef FSL_IMX6_H #define FSL_IMX6_H -#include "hw/arm/boot.h" #include "hw/cpu/a9mpcore.h" #include "hw/misc/imx6_ccm.h" #include "hw/misc/imx6_src.h" diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h index 63012628ff..14390f6014 100644 --- a/include/hw/arm/fsl-imx6ul.h +++ b/include/hw/arm/fsl-imx6ul.h @@ -17,7 +17,6 @@ #ifndef FSL_IMX6UL_H #define FSL_IMX6UL_H -#include "hw/arm/boot.h" #include "hw/cpu/a15mpcore.h" #include "hw/misc/imx6ul_ccm.h" #include "hw/misc/imx6_src.h" diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h index 2cbfc6b2b2..411fa1c2e3 100644 --- a/include/hw/arm/fsl-imx7.h +++ b/include/hw/arm/fsl-imx7.h @@ -19,7 +19,6 @@ #ifndef FSL_IMX7_H #define FSL_IMX7_H -#include "hw/arm/boot.h" #include "hw/cpu/a15mpcore.h" #include "hw/intc/imx_gpcv2.h" #include "hw/misc/imx7_ccm.h" diff --git a/include/hw/arm/pxa.h b/include/hw/arm/pxa.h index 54eb895e42..4c6caee113 100644 --- a/include/hw/arm/pxa.h +++ b/include/hw/arm/pxa.h @@ -100,8 +100,6 @@ void pxa2xx_mmci_handlers(PXA2xxMMCIState *s, qemu_irq readonly, #define TYPE_PXA2XX_PCMCIA "pxa2xx-pcmcia" OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxPCMCIAState, PXA2XX_PCMCIA) -PXA2xxPCMCIAState *pxa2xx_pcmcia_init(MemoryRegion *sysmem, - hwaddr base); int pxa2xx_pcmcia_attach(void *opaque, PCMCIACardState *card); int pxa2xx_pcmcia_detach(void *opaque); void pxa2xx_pcmcia_set_irq_cb(void *opaque, qemu_irq irq, qemu_irq cd_irq); diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h index 7b419f88c2..b710d71fb0 100644 --- a/include/hw/arm/xlnx-versal.h +++ b/include/hw/arm/xlnx-versal.h @@ -13,7 +13,6 @@ #define XLNX_VERSAL_H #include "hw/sysbus.h" -#include "hw/arm/boot.h" #include "hw/cpu/cluster.h" #include "hw/or-irq.h" #include "hw/sd/sdhci.h" diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h index 687c75e3b0..96358d51eb 100644 --- a/include/hw/arm/xlnx-zynqmp.h +++ b/include/hw/arm/xlnx-zynqmp.h @@ -18,7 +18,6 @@ #ifndef XLNX_ZYNQMP_H #define XLNX_ZYNQMP_H -#include "hw/arm/boot.h" #include "hw/intc/arm_gic.h" #include "hw/net/cadence_gem.h" #include "hw/char/cadence_uart.h" diff --git a/linux-user/aarch64/Makefile.vdso b/linux-user/aarch64/Makefile.vdso new file mode 100644 index 0000000000..599958116b --- /dev/null +++ b/linux-user/aarch64/Makefile.vdso @@ -0,0 +1,15 @@ +include $(BUILD_DIR)/tests/tcg/aarch64-linux-user/config-target.mak + +SUBDIR = $(SRC_PATH)/linux-user/aarch64 +VPATH += $(SUBDIR) + +all: $(SUBDIR)/vdso-be.so $(SUBDIR)/vdso-le.so + +LDFLAGS = -nostdlib -shared -Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \ + -Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld + +$(SUBDIR)/vdso-be.so: vdso.S 
vdso.ld + $(CC) -o $@ $(LDFLAGS) -mbig-endian $< + +$(SUBDIR)/vdso-le.so: vdso.S vdso.ld + $(CC) -o $@ $(LDFLAGS) -mlittle-endian $< diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c index 2e2f7cf218..8c20dc8a39 100644 --- a/linux-user/aarch64/cpu_loop.c +++ b/linux-user/aarch64/cpu_loop.c @@ -25,6 +25,7 @@ #include "qemu/guest-random.h" #include "semihosting/common-semi.h" #include "target/arm/syndrome.h" +#include "target/arm/cpu-features.h" #define get_user_code_u32(x, gaddr, env) \ ({ abi_long __r = get_user_u32((x), (gaddr)); \ diff --git a/linux-user/aarch64/meson.build b/linux-user/aarch64/meson.build new file mode 100644 index 0000000000..248c578d15 --- /dev/null +++ b/linux-user/aarch64/meson.build @@ -0,0 +1,11 @@ +# TARGET_BIG_ENDIAN is defined to 'n' for little-endian; which means it +# is always true as far as source_set.apply() is concerned. Always build +# both header files and include the right one via #if. + +vdso_be_inc = gen_vdso.process('vdso-be.so', + extra_args: ['-r', '__kernel_rt_sigreturn']) + +vdso_le_inc = gen_vdso.process('vdso-le.so', + extra_args: ['-r', '__kernel_rt_sigreturn']) + +linux_user_ss.add(when: 'TARGET_AARCH64', if_true: [vdso_be_inc, vdso_le_inc]) diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c index b265cfd470..a1e22d526d 100644 --- a/linux-user/aarch64/signal.c +++ b/linux-user/aarch64/signal.c @@ -21,6 +21,7 @@ #include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" +#include "target/arm/cpu-features.h" struct target_sigcontext { uint64_t fault_address; diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h index 907c314146..5067e7d731 100644 --- a/linux-user/aarch64/target_prctl.h +++ b/linux-user/aarch64/target_prctl.h @@ -6,6 +6,8 @@ #ifndef AARCH64_TARGET_PRCTL_H #define AARCH64_TARGET_PRCTL_H +#include "target/arm/cpu-features.h" + static abi_long do_prctl_sve_get_vl(CPUArchState *env) { ARMCPU *cpu = env_archcpu(env); diff --git a/linux-user/aarch64/vdso-be.so b/linux-user/aarch64/vdso-be.so new file mode 100755 index 0000000000..6084f3d1a7 --- /dev/null +++ b/linux-user/aarch64/vdso-be.so Binary files differdiff --git a/linux-user/aarch64/vdso-le.so b/linux-user/aarch64/vdso-le.so new file mode 100755 index 0000000000..947d534ec1 --- /dev/null +++ b/linux-user/aarch64/vdso-le.so Binary files differdiff --git a/linux-user/aarch64/vdso.S b/linux-user/aarch64/vdso.S new file mode 100644 index 0000000000..34d3a9ebd2 --- /dev/null +++ b/linux-user/aarch64/vdso.S @@ -0,0 +1,71 @@ +/* + * aarch64 linux replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include <asm/unistd.h> + +/* ??? These are in include/elf.h, which is not ready for inclusion in asm. */ +#define NT_GNU_PROPERTY_TYPE_0 5 +#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000 +#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0) +#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC (1U << 1) + +#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT \ + (GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC) + + .section .note.gnu.property + .align 3 + .long 2f - 1f + .long 6f - 3f + .long NT_GNU_PROPERTY_TYPE_0 +1: .string "GNU" +2: .align 3 +3: .long GNU_PROPERTY_AARCH64_FEATURE_1_AND + .long 5f - 4f +4: .long GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT +5: .align 3 +6: + + .text + +.macro endf name + .globl \name + .type \name, @function + .size \name, . 
- \name +.endm + +.macro vdso_syscall name, nr +\name: + bti c + mov x8, #\nr + svc #0 + ret +endf \name +.endm + + .cfi_startproc + +vdso_syscall __kernel_gettimeofday, __NR_gettimeofday +vdso_syscall __kernel_clock_gettime, __NR_clock_gettime +vdso_syscall __kernel_clock_getres, __NR_clock_getres + + .cfi_endproc + + +/* + * TODO: The kernel makes a big deal of turning off the .cfi directives, + * because they cause libgcc to crash, but that's because they're wrong. + * + * For now, elide the unwind info for __kernel_rt_sigreturn and rely on + * the libgcc fallback routine as we have always done. This requires + * that the code sequence used be exact. + */ +__kernel_rt_sigreturn: + /* No BTI C insn here -- we arrive via RET. */ + mov x8, #__NR_rt_sigreturn + svc #0 +endf __kernel_rt_sigreturn diff --git a/linux-user/aarch64/vdso.ld b/linux-user/aarch64/vdso.ld new file mode 100644 index 0000000000..4c12f33352 --- /dev/null +++ b/linux-user/aarch64/vdso.ld @@ -0,0 +1,72 @@ +/* + * Linker script for linux aarch64 replacement vdso. + * + * Copyright 2021 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +VERSION { + LINUX_2.6.39 { + global: + __kernel_rt_sigreturn; + __kernel_gettimeofday; + __kernel_clock_gettime; + __kernel_clock_getres; + + local: *; + }; +} + + +PHDRS { + phdr PT_PHDR FLAGS(4) PHDRS; + load PT_LOAD FLAGS(7) FILEHDR PHDRS; + dynamic PT_DYNAMIC FLAGS(4); + eh_frame_hdr PT_GNU_EH_FRAME; + note PT_NOTE FLAGS(4); +} + +SECTIONS { + /* + * We can't prelink to any address without knowing something about + * the virtual memory space of the host, since that leaks over into + * the available memory space of the guest. + */ + . = SIZEOF_HEADERS; + + /* + * The following, including the FILEHDRS and PHDRS, are modified + * when we relocate the binary. We want them to be initially + * writable for the relocation; we'll force them read-only after. + */ + .note : { *(.note*) } :load :note + .dynamic : { *(.dynamic) } :load :dynamic + .dynsym : { *(.dynsym) } :load + /* + * There ought not be any real read-write data. + * But since we manipulated the segment layout, + * we have to put these sections somewhere. + */ + .data : { + *(.data*) + *(.sdata*) + *(.got.plt) *(.got) + *(.gnu.linkonce.d.*) + *(.bss*) + *(.dynbss*) + *(.gnu.linkonce.b.*) + } + + .rodata : { *(.rodata*) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr + .eh_frame : { *(.eh_frame) } :load + + .text : { *(.text*) } :load =0xd503201f +} diff --git a/linux-user/arm/Makefile.vdso b/linux-user/arm/Makefile.vdso new file mode 100644 index 0000000000..2d098a5748 --- /dev/null +++ b/linux-user/arm/Makefile.vdso @@ -0,0 +1,17 @@ +include $(BUILD_DIR)/tests/tcg/arm-linux-user/config-target.mak + +SUBDIR = $(SRC_PATH)/linux-user/arm +VPATH += $(SUBDIR) + +all: $(SUBDIR)/vdso-be.so $(SUBDIR)/vdso-le.so + +# Adding -use-blx disables unneeded interworking without actually using blx. 
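
The aarch64 trampolines above (__kernel_clock_gettime and friends, exported with version LINUX_2.6.39) simply re-enter the syscall path, so from the guest's point of view they behave like the plain system calls. A guest-side sketch of the prototypes a libc would resolve against this vdso, with the equivalent raw-syscall fallback; the declarations are illustrative and assume a Linux build environment.

    #include <time.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Symbols exported by the replacement aarch64 vdso; a libc that
     * finds them uses them in place of the syscall below. */
    int __kernel_clock_gettime(clockid_t clk, struct timespec *ts);
    int __kernel_clock_getres(clockid_t clk, struct timespec *res);

    static int clock_gettime_fallback(clockid_t clk, struct timespec *ts)
    {
        /* What the trampoline does, expressed in C: same syscall
         * number, same arguments, no vdso involved. */
        return syscall(SYS_clock_gettime, clk, ts);
    }
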
+LDFLAGS = -nostdlib -shared -Wl,-use-blx \ + -Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \ + -Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld + +$(SUBDIR)/vdso-be.so: vdso.S vdso.ld vdso-asmoffset.h + $(CC) -o $@ $(LDFLAGS) -mbig-endian $< + +$(SUBDIR)/vdso-le.so: vdso.S vdso.ld vdso-asmoffset.h + $(CC) -o $@ $(LDFLAGS) -mlittle-endian $< diff --git a/linux-user/arm/meson.build b/linux-user/arm/meson.build index 5a93c925cf..c4bb9af5b8 100644 --- a/linux-user/arm/meson.build +++ b/linux-user/arm/meson.build @@ -5,3 +5,15 @@ syscall_nr_generators += { arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], output: '@BASENAME@_nr.h') } + +# TARGET_BIG_ENDIAN is defined to 'n' for little-endian; which means it +# is always true as far as source_set.apply() is concerned. Always build +# both header files and include the right one via #if. + +vdso_be_inc = gen_vdso.process('vdso-be.so', + extra_args: ['-s', 'sigreturn_codes']) + +vdso_le_inc = gen_vdso.process('vdso-le.so', + extra_args: ['-s', 'sigreturn_codes']) + +linux_user_ss.add(when: 'TARGET_ARM', if_true: [vdso_be_inc, vdso_le_inc]) diff --git a/linux-user/arm/signal.c b/linux-user/arm/signal.c index cf99fd7b8a..f77f692c63 100644 --- a/linux-user/arm/signal.c +++ b/linux-user/arm/signal.c @@ -21,6 +21,8 @@ #include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" +#include "target/arm/cpu-features.h" +#include "vdso-asmoffset.h" struct target_sigcontext { abi_ulong trap_no; @@ -102,6 +104,11 @@ struct rt_sigframe struct sigframe sig; }; +QEMU_BUILD_BUG_ON(offsetof(struct sigframe, retcode[3]) + != SIGFRAME_RC3_OFFSET); +QEMU_BUILD_BUG_ON(offsetof(struct rt_sigframe, sig.retcode[3]) + != RT_SIGFRAME_RC3_OFFSET); + static abi_ptr sigreturn_fdpic_tramp; /* @@ -160,6 +167,9 @@ get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize) return (sp - framesize) & ~7; } +static void write_arm_sigreturn(uint32_t *rc, int syscall); +static void write_arm_fdpic_sigreturn(uint32_t *rc, int ofs); + static int setup_return(CPUARMState *env, struct target_sigaction *ka, int usig, struct sigframe *frame, abi_ulong sp_addr) @@ -167,9 +177,9 @@ setup_return(CPUARMState *env, struct target_sigaction *ka, int usig, abi_ulong handler = 0; abi_ulong handler_fdpic_GOT = 0; abi_ulong retcode; - int thumb, retcode_idx; - int is_fdpic = info_is_fdpic(((TaskState *)thread_cpu->opaque)->info); - bool copy_retcode; + bool is_fdpic = info_is_fdpic(((TaskState *)thread_cpu->opaque)->info); + bool is_rt = ka->sa_flags & TARGET_SA_SIGINFO; + bool thumb; if (is_fdpic) { /* In FDPIC mode, ka->_sa_handler points to a function @@ -184,9 +194,7 @@ setup_return(CPUARMState *env, struct target_sigaction *ka, int usig, } else { handler = ka->_sa_handler; } - thumb = handler & 1; - retcode_idx = thumb + (ka->sa_flags & TARGET_SA_SIGINFO ? 2 : 0); uint32_t cpsr = cpsr_read(env); @@ -202,24 +210,32 @@ setup_return(CPUARMState *env, struct target_sigaction *ka, int usig, cpsr &= ~CPSR_E; } + /* Our vdso default_sigreturn label is a table of entry points. */ + retcode = default_sigreturn + (is_fdpic * 2 + is_rt) * 8; + + /* + * Put the sigreturn code on the stack no matter which return + * mechanism we use in order to remain ABI compliant. + * Because this is about ABI, always use the A32 instructions, + * despite the fact that our actual vdso trampoline is T16. + */ + if (is_fdpic) { + write_arm_fdpic_sigreturn(frame->retcode, + is_rt ? 
RT_SIGFRAME_RC3_OFFSET + : SIGFRAME_RC3_OFFSET); + } else { + write_arm_sigreturn(frame->retcode, + is_rt ? TARGET_NR_rt_sigreturn + : TARGET_NR_sigreturn); + } + if (ka->sa_flags & TARGET_SA_RESTORER) { if (is_fdpic) { + /* Place the function descriptor in slot 3. */ __put_user((abi_ulong)ka->sa_restorer, &frame->retcode[3]); - retcode = (sigreturn_fdpic_tramp + - retcode_idx * RETCODE_BYTES + thumb); - copy_retcode = true; } else { retcode = ka->sa_restorer; - copy_retcode = false; } - } else { - retcode = default_sigreturn + retcode_idx * RETCODE_BYTES + thumb; - copy_retcode = true; - } - - /* Copy the code to the stack slot for ABI compatibility. */ - if (copy_retcode) { - memcpy(frame->retcode, g2h_untagged(retcode & ~1), RETCODE_BYTES); } env->regs[0] = usig; diff --git a/linux-user/arm/vdso-asmoffset.h b/linux-user/arm/vdso-asmoffset.h new file mode 100644 index 0000000000..252a95c46e --- /dev/null +++ b/linux-user/arm/vdso-asmoffset.h @@ -0,0 +1,3 @@ +/* offsetof(struct sigframe, retcode[3]) */ +#define SIGFRAME_RC3_OFFSET 756 +#define RT_SIGFRAME_RC3_OFFSET 884 diff --git a/linux-user/arm/vdso-be.so b/linux-user/arm/vdso-be.so new file mode 100755 index 0000000000..69cafbb956 --- /dev/null +++ b/linux-user/arm/vdso-be.so Binary files differdiff --git a/linux-user/arm/vdso-le.so b/linux-user/arm/vdso-le.so new file mode 100755 index 0000000000..ad05a12518 --- /dev/null +++ b/linux-user/arm/vdso-le.so Binary files differdiff --git a/linux-user/arm/vdso.S b/linux-user/arm/vdso.S new file mode 100644 index 0000000000..b3bb6491dc --- /dev/null +++ b/linux-user/arm/vdso.S @@ -0,0 +1,174 @@ +/* + * arm linux replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include <asm/unistd.h> +#include "vdso-asmoffset.h" + +/* + * All supported cpus have T16 instructions: at least arm4t. + * + * We support user-user with m-profile cpus as an extension, because it + * is useful for testing gcc, which requires we avoid A32 instructions. + */ + .thumb + .arch armv4t + .eabi_attribute Tag_FP_arch, 0 + .eabi_attribute Tag_ARM_ISA_use, 0 + + .text + +.macro raw_syscall n + .ifne \n < 0x100 + mov r7, #\n + .elseif \n < 0x1ff + mov r7, #0xff + add r7, #(\n - 0xff) + .else + .err + .endif + swi #0 +.endm + +.macro fdpic_thunk ofs + ldr r3, [sp, #\ofs] + ldmia r2, {r2, r3} + mov r9, r3 + bx r2 +.endm + +.macro endf name + .globl \name + .type \name, %function + .size \name, . - \name +.endm + +/* + * We must save/restore r7 for the EABI syscall number. + * While we're doing that, we might as well save LR to get a free return, + * and a branch that is interworking back to ARMv5. + */ + +.macro SYSCALL name, nr +\name: + .cfi_startproc + push {r7, lr} + .cfi_adjust_cfa_offset 8 + .cfi_offset r7, -8 + .cfi_offset lr, -4 + raw_syscall \nr + pop {r7, pc} + .cfi_endproc +endf \name +.endm + +SYSCALL __vdso_clock_gettime, __NR_clock_gettime +SYSCALL __vdso_clock_gettime64, __NR_clock_gettime64 +SYSCALL __vdso_clock_getres, __NR_clock_getres +SYSCALL __vdso_gettimeofday, __NR_gettimeofday + + +/* + * We, like the real kernel, use a table of sigreturn trampolines. + * Unlike the real kernel, we do not attempt to pack this into as + * few bytes as possible -- simply use 8 bytes per slot. + * + * Within each slot, use the exact same code sequence as the kernel, + * lest we trip up someone doing code inspection. 
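
The index arithmetic in setup_return() pairs with the 8-byte slot table defined just below: slot 0 is the plain sigreturn, slot 1 the rt variant, and the FDPIC flavours occupy slots 2 and 3. A small sketch of that selection, written here for illustration with the same ordering:

    /* Slot selection as done in setup_return(), assuming the layout of
     * sigreturn_codes in vdso.S: 8 bytes per entry, ordered
     * { sigreturn, rt_sigreturn, fdpic sigreturn, fdpic rt_sigreturn }. */
    static unsigned long sigreturn_trampoline(unsigned long default_sigreturn,
                                              int is_fdpic, int is_rt)
    {
        return default_sigreturn + (is_fdpic * 2 + is_rt) * 8;
    }
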
+ */ + +.macro slot n + .balign 8 + .org sigreturn_codes + 8 * \n +.endm + +.macro cfi_fdpic_r9 ofs + /* + * fd = *(r13 + ofs) + * r9 = *(fd + 4) + * + * DW_CFA_expression r9, length (7), + * DW_OP_breg13, ofs, DW_OP_deref, + * DW_OP_plus_uconst, 4, DW_OP_deref + */ + .cfi_escape 0x10, 9, 7, 0x7d, (\ofs & 0x7f) + 0x80, (\ofs >> 7), 0x06, 0x23, 4, 0x06 +.endm + +.macro cfi_fdpic_pc ofs + /* + * fd = *(r13 + ofs) + * pc = *fd + * + * DW_CFA_expression lr (14), length (5), + * DW_OP_breg13, ofs, DW_OP_deref, DW_OP_deref + */ + .cfi_escape 0x10, 14, 5, 0x7d, (\ofs & 0x7f) + 0x80, (\ofs >> 7), 0x06, 0x06 +.endm + +/* + * Start the unwind info at least one instruction before the signal + * trampoline, because the unwinder will assume we are returning + * after a call site. + */ + .cfi_startproc simple + .cfi_signal_frame + .cfi_return_column 15 + + .cfi_def_cfa sp, 32 + 64 + .cfi_offset r0, -16 * 4 + .cfi_offset r1, -15 * 4 + .cfi_offset r2, -14 * 4 + .cfi_offset r3, -13 * 4 + .cfi_offset r4, -12 * 4 + .cfi_offset r5, -11 * 4 + .cfi_offset r6, -10 * 4 + .cfi_offset r7, -9 * 4 + .cfi_offset r8, -8 * 4 + .cfi_offset r9, -7 * 4 + .cfi_offset r10, -6 * 4 + .cfi_offset r11, -5 * 4 + .cfi_offset r12, -4 * 4 + .cfi_offset r13, -3 * 4 + .cfi_offset r14, -2 * 4 + .cfi_offset r15, -1 * 4 + + nop + + .balign 16 +sigreturn_codes: + /* [EO]ABI sigreturn */ + slot 0 + raw_syscall __NR_sigreturn + + .cfi_def_cfa_offset 160 + 64 + + /* [EO]ABI rt_sigreturn */ + slot 1 + raw_syscall __NR_rt_sigreturn + + .cfi_endproc + + /* FDPIC sigreturn */ + .cfi_startproc + cfi_fdpic_pc SIGFRAME_RC3_OFFSET + cfi_fdpic_r9 SIGFRAME_RC3_OFFSET + + slot 2 + fdpic_thunk SIGFRAME_RC3_OFFSET + .cfi_endproc + + /* FDPIC rt_sigreturn */ + .cfi_startproc + cfi_fdpic_pc RT_SIGFRAME_RC3_OFFSET + cfi_fdpic_r9 RT_SIGFRAME_RC3_OFFSET + + slot 3 + fdpic_thunk RT_SIGFRAME_RC3_OFFSET + .cfi_endproc + + .balign 16 +endf sigreturn_codes diff --git a/linux-user/arm/vdso.ld b/linux-user/arm/vdso.ld new file mode 100644 index 0000000000..3b00adf27a --- /dev/null +++ b/linux-user/arm/vdso.ld @@ -0,0 +1,67 @@ +/* + * Linker script for linux arm replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +VERSION { + LINUX_2.6 { + global: + __vdso_clock_gettime; + __vdso_gettimeofday; + __vdso_clock_getres; + __vdso_clock_gettime64; + + local: *; + }; +} + + +PHDRS { + phdr PT_PHDR FLAGS(4) PHDRS; + load PT_LOAD FLAGS(7) FILEHDR PHDRS; /* FLAGS=RWX */ + dynamic PT_DYNAMIC FLAGS(4); + eh_frame_hdr PT_GNU_EH_FRAME; + note PT_NOTE FLAGS(4); +} + +SECTIONS { + . = SIZEOF_HEADERS; + + /* + * The following, including the FILEHDRS and PHDRS, are modified + * when we relocate the binary. We want them to be initially + * writable for the relocation; we'll force them read-only after. + */ + .note : { *(.note*) } :load :note + .dynamic : { *(.dynamic) } :load :dynamic + .dynsym : { *(.dynsym) } :load + /* + * There ought not be any real read-write data. + * But since we manipulated the segment layout, + * we have to put these sections somewhere. 
+ */ + .data : { + *(.data*) + *(.sdata*) + *(.got.plt) *(.got) + *(.gnu.linkonce.d.*) + *(.bss*) + *(.dynbss*) + *(.gnu.linkonce.b.*) + } + + .rodata : { *(.rodata*) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr + .eh_frame : { *(.eh_frame) } :load + + .text : { *(.text*) } :load +} diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 2e3809f03c..8761f9e26b 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -23,6 +23,10 @@ #include "target_signal.h" #include "accel/tcg/debuginfo.h" +#ifdef TARGET_ARM +#include "target/arm/cpu-features.h" +#endif + #ifdef _ARCH_PPC64 #undef ARCH_DLINFO #undef ELF_PLATFORM @@ -33,6 +37,19 @@ #undef ELF_ARCH #endif +#ifndef TARGET_ARCH_HAS_SIGTRAMP_PAGE +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0 +#endif + +typedef struct { + const uint8_t *image; + const uint32_t *relocs; + unsigned image_size; + unsigned reloc_count; + unsigned sigreturn_ofs; + unsigned rt_sigreturn_ofs; +} VdsoImageInfo; + #define ELF_OSABI ELFOSABI_SYSV /* from personality.h */ @@ -292,12 +309,27 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en (*regs)[15] = tswapreg(env->regs[R_ESP]); (*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff); } -#endif + +/* + * i386 is the only target which supplies AT_SYSINFO for the vdso. + * All others only supply AT_SYSINFO_EHDR. + */ +#define DLINFO_ARCH_ITEMS (vdso_info != NULL) +#define ARCH_DLINFO \ + do { \ + if (vdso_info) { \ + NEW_AUX_ENT(AT_SYSINFO, vdso_info->entry); \ + } \ + } while (0) + +#endif /* TARGET_X86_64 */ + +#define VDSO_HEADER "vdso.c.inc" #define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 -#endif +#endif /* TARGET_I386 */ #ifdef TARGET_ARM @@ -917,6 +949,13 @@ const char *elf_hwcap2_str(uint32_t bit) #undef GET_FEATURE_ID #endif /* not TARGET_AARCH64 */ + +#if TARGET_BIG_ENDIAN +# define VDSO_HEADER "vdso-be.c.inc" +#else +# define VDSO_HEADER "vdso-le.c.inc" +#endif + #endif /* TARGET_ARM */ #ifdef TARGET_SPARC @@ -1152,6 +1191,14 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en #define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 +#ifndef TARGET_PPC64 +# define VDSO_HEADER "vdso-32.c.inc" +#elif TARGET_BIG_ENDIAN +# define VDSO_HEADER "vdso-64.c.inc" +#else +# define VDSO_HEADER "vdso-64le.c.inc" +#endif + #endif #ifdef TARGET_LOONGARCH64 @@ -1162,6 +1209,8 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en #define elf_check_arch(x) ((x) == EM_LOONGARCH) +#define VDSO_HEADER "vdso.c.inc" + static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) { @@ -1849,6 +1898,8 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, #define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 +#define VDSO_HEADER "vdso.c.inc" + #endif /* TARGET_S390X */ #ifdef TARGET_RISCV @@ -1857,8 +1908,10 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, #ifdef TARGET_RISCV32 #define ELF_CLASS ELFCLASS32 +#define VDSO_HEADER "vdso-32.c.inc" #else #define ELF_CLASS ELFCLASS64 +#define VDSO_HEADER "vdso-64.c.inc" #endif #define ELF_HWCAP get_elf_hwcap() @@ -1894,6 +1947,8 @@ static inline void init_thread(struct target_pt_regs *regs, #define STACK_GROWS_DOWN 0 #define STACK_ALIGNMENT 64 +#define VDSO_HEADER "vdso.c.inc" + static inline void init_thread(struct 
target_pt_regs *regs, struct image_info *infop) { @@ -2201,7 +2256,8 @@ static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { } #ifdef USE_ELF_CORE_DUMP static int elf_core_dump(int, const CPUArchState *); #endif /* USE_ELF_CORE_DUMP */ -static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias); +static void load_symbols(struct elfhdr *hdr, const ImageSource *src, + abi_ulong load_bias); /* Verify the portions of EHDR within E_IDENT for the target. This can be performed before bswapping the entire header. */ @@ -2470,7 +2526,8 @@ static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong s static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, struct elfhdr *exec, struct image_info *info, - struct image_info *interp_info) + struct image_info *interp_info, + struct image_info *vdso_info) { abi_ulong sp; abi_ulong u_argc, u_argv, u_envp, u_auxv; @@ -2558,10 +2615,15 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, } size = (DLINFO_ITEMS + 1) * 2; - if (k_base_platform) + if (k_base_platform) { size += 2; - if (k_platform) + } + if (k_platform) { size += 2; + } + if (vdso_info) { + size += 2; + } #ifdef DLINFO_ARCH_ITEMS size += DLINFO_ARCH_ITEMS * 2; #endif @@ -2643,6 +2705,9 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, if (u_platform) { NEW_AUX_ENT(AT_PLATFORM, u_platform); } + if (vdso_info) { + NEW_AUX_ENT(AT_SYSINFO_EHDR, vdso_info->load_addr); + } NEW_AUX_ENT (AT_NULL, 0); #undef NEW_AUX_ENT @@ -3102,10 +3167,9 @@ static bool parse_elf_property(const uint32_t *data, int *off, int datasz, } /* Process NT_GNU_PROPERTY_TYPE_0. */ -static bool parse_elf_properties(int image_fd, +static bool parse_elf_properties(const ImageSource *src, struct image_info *info, const struct elf_phdr *phdr, - char bprm_buf[BPRM_BUF_SIZE], Error **errp) { union { @@ -3133,14 +3197,8 @@ static bool parse_elf_properties(int image_fd, return false; } - if (phdr->p_offset + n <= BPRM_BUF_SIZE) { - memcpy(¬e, bprm_buf + phdr->p_offset, n); - } else { - ssize_t len = pread(image_fd, ¬e, n, phdr->p_offset); - if (len != n) { - error_setg_errno(errp, errno, "Error reading file header"); - return false; - } + if (!imgsrc_read(¬e, phdr->p_offset, n, src, errp)) { + return false; } /* @@ -3186,29 +3244,34 @@ static bool parse_elf_properties(int image_fd, } } -/* Load an ELF image into the address space. - - IMAGE_NAME is the filename of the image, to use in error messages. - IMAGE_FD is the open file descriptor for the image. - - BPRM_BUF is a copy of the beginning of the file; this of course - contains the elf file header at offset 0. It is assumed that this - buffer is sufficiently aligned to present no problems to the host - in accessing data at aligned offsets within the buffer. - - On return: INFO values will be filled in, as necessary or available. */ +/** + * load_elf_image: Load an ELF image into the address space. + * @image_name: the filename of the image, to use in error messages. + * @src: the ImageSource from which to read. + * @info: info collected from the loaded image. + * @ehdr: the ELF header, not yet bswapped. + * @pinterp_name: record any PT_INTERP string found. + * + * On return: @info values will be filled in, as necessary or available. 
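
With create_elf_tables() now emitting AT_SYSINFO_EHDR (and AT_SYSINFO on i386), a guest program can observe the mapped vdso through the auxiliary vector. A minimal check, assuming a Linux libc with getauxval():

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <link.h>

    int main(void)
    {
        /* The loader advertises the vdso base in the aux vector. */
        unsigned long base = getauxval(AT_SYSINFO_EHDR);

        if (!base) {
            puts("no vdso advertised");
            return 0;
        }

        const ElfW(Ehdr) *ehdr = (const ElfW(Ehdr) *)base;
        printf("vdso at %#lx: type %d, %d program headers\n",
               base, (int)ehdr->e_type, (int)ehdr->e_phnum);
        return 0;
    }
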
+ */ -static void load_elf_image(const char *image_name, int image_fd, - struct image_info *info, char **pinterp_name, - char bprm_buf[BPRM_BUF_SIZE]) +static void load_elf_image(const char *image_name, const ImageSource *src, + struct image_info *info, struct elfhdr *ehdr, + char **pinterp_name) { - struct elfhdr *ehdr = (struct elfhdr *)bprm_buf; - struct elf_phdr *phdr; + g_autofree struct elf_phdr *phdr = NULL; abi_ulong load_addr, load_bias, loaddr, hiaddr, error; - int i, retval, prot_exec; + int i, prot_exec; Error *err = NULL; - /* First of all, some simple consistency checks */ + /* + * First of all, some simple consistency checks. + * Note that we rely on the bswapped ehdr staying in bprm_buf, + * for later use by load_elf_binary and create_elf_tables. + */ + if (!imgsrc_read(ehdr, 0, sizeof(*ehdr), src, &err)) { + goto exit_errmsg; + } if (!elf_check_ident(ehdr)) { error_setg(&err, "Invalid ELF image for this architecture"); goto exit_errmsg; @@ -3219,15 +3282,11 @@ static void load_elf_image(const char *image_name, int image_fd, goto exit_errmsg; } - i = ehdr->e_phnum * sizeof(struct elf_phdr); - if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) { - phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff); - } else { - phdr = (struct elf_phdr *) alloca(i); - retval = pread(image_fd, phdr, i, ehdr->e_phoff); - if (retval != i) { - goto exit_read; - } + phdr = imgsrc_read_alloc(ehdr->e_phoff, + ehdr->e_phnum * sizeof(struct elf_phdr), + src, &err); + if (phdr == NULL) { + goto exit_errmsg; } bswap_phdr(phdr, ehdr->e_phnum); @@ -3264,17 +3323,10 @@ static void load_elf_image(const char *image_name, int image_fd, goto exit_errmsg; } - interp_name = g_malloc(eppnt->p_filesz); - - if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) { - memcpy(interp_name, bprm_buf + eppnt->p_offset, - eppnt->p_filesz); - } else { - retval = pread(image_fd, interp_name, eppnt->p_filesz, - eppnt->p_offset); - if (retval != eppnt->p_filesz) { - goto exit_read; - } + interp_name = imgsrc_read_alloc(eppnt->p_offset, eppnt->p_filesz, + src, &err); + if (interp_name == NULL) { + goto exit_errmsg; } if (interp_name[eppnt->p_filesz - 1] != 0) { error_setg(&err, "Invalid PT_INTERP entry"); @@ -3282,7 +3334,7 @@ static void load_elf_image(const char *image_name, int image_fd, } *pinterp_name = g_steal_pointer(&interp_name); } else if (eppnt->p_type == PT_GNU_PROPERTY) { - if (!parse_elf_properties(image_fd, info, eppnt, bprm_buf, &err)) { + if (!parse_elf_properties(src, info, eppnt, &err)) { goto exit_errmsg; } } else if (eppnt->p_type == PT_GNU_STACK) { @@ -3435,9 +3487,9 @@ static void load_elf_image(const char *image_name, int image_fd, * but no backing file segment. 
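
The ImageSource indirection used throughout the rewritten loader lets the same code path serve a file descriptor (with the bprm buffer acting as a cache of the leading bytes) and the built-in vdso image, which has no fd at all. The field names below come from this patch; the body is a simplified illustration, not the actual helper.

    #include <stdbool.h>
    #include <string.h>
    #include <sys/types.h>
    #include <unistd.h>

    typedef struct ImageSource {
        int fd;                /* -1 for a purely in-memory image */
        const void *cache;     /* leading bytes already in memory */
        size_t cache_size;
    } ImageSource;

    /* Illustrative imgsrc_read()-style helper: satisfy the read from
     * the cache when possible, otherwise fall back to pread().
     * Error reporting is reduced to a bool here. */
    static bool imgsrc_read_sketch(void *dst, off_t offset, size_t len,
                                   const ImageSource *src)
    {
        if (offset + len <= src->cache_size) {
            memcpy(dst, (const char *)src->cache + offset, len);
            return true;
        }
        if (src->fd < 0) {
            return false;    /* in-memory image: nothing beyond the cache */
        }
        return pread(src->fd, dst, len, offset) == (ssize_t)len;
    }
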
*/ if (eppnt->p_filesz != 0) { - error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po, + error = imgsrc_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po, elf_prot, MAP_PRIVATE | MAP_FIXED, - image_fd, eppnt->p_offset - vaddr_po); + src, eppnt->p_offset - vaddr_po); if (error == -1) { goto exit_mmap; } @@ -3469,20 +3521,11 @@ static void load_elf_image(const char *image_name, int image_fd, #ifdef TARGET_MIPS } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) { Mips_elf_abiflags_v0 abiflags; - if (eppnt->p_filesz < sizeof(Mips_elf_abiflags_v0)) { - error_setg(&err, "Invalid PT_MIPS_ABIFLAGS entry"); + + if (!imgsrc_read(&abiflags, eppnt->p_offset, sizeof(abiflags), + src, &err)) { goto exit_errmsg; } - if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) { - memcpy(&abiflags, bprm_buf + eppnt->p_offset, - sizeof(Mips_elf_abiflags_v0)); - } else { - retval = pread(image_fd, &abiflags, sizeof(Mips_elf_abiflags_v0), - eppnt->p_offset); - if (retval != sizeof(Mips_elf_abiflags_v0)) { - goto exit_read; - } - } bswap_mips_abiflags(&abiflags); info->fp_abi = abiflags.fp_abi; #endif @@ -3495,23 +3538,16 @@ static void load_elf_image(const char *image_name, int image_fd, } if (qemu_log_enabled()) { - load_symbols(ehdr, image_fd, load_bias); + load_symbols(ehdr, src, load_bias); } - debuginfo_report_elf(image_name, image_fd, load_bias); + debuginfo_report_elf(image_name, src->fd, load_bias); mmap_unlock(); - close(image_fd); + close(src->fd); return; - exit_read: - if (retval >= 0) { - error_setg(&err, "Incomplete read of file header"); - } else { - error_setg_errno(&err, errno, "Error reading file header"); - } - goto exit_errmsg; exit_mmap: error_setg_errno(&err, errno, "Error mapping file"); goto exit_errmsg; @@ -3523,6 +3559,8 @@ static void load_elf_image(const char *image_name, int image_fd, static void load_elf_interp(const char *filename, struct image_info *info, char bprm_buf[BPRM_BUF_SIZE]) { + struct elfhdr ehdr; + ImageSource src; int fd, retval; Error *err = NULL; @@ -3540,11 +3578,57 @@ static void load_elf_interp(const char *filename, struct image_info *info, exit(-1); } - if (retval < BPRM_BUF_SIZE) { - memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval); + src.fd = fd; + src.cache = bprm_buf; + src.cache_size = retval; + + load_elf_image(filename, &src, info, &ehdr, NULL); +} + +#ifdef VDSO_HEADER +#include VDSO_HEADER +#define vdso_image_info() &vdso_image_info +#else +#define vdso_image_info() NULL +#endif + +static void load_elf_vdso(struct image_info *info, const VdsoImageInfo *vdso) +{ + ImageSource src; + struct elfhdr ehdr; + abi_ulong load_bias, load_addr; + + src.fd = -1; + src.cache = vdso->image; + src.cache_size = vdso->image_size; + + load_elf_image("<internal-vdso>", &src, info, &ehdr, NULL); + load_addr = info->load_addr; + load_bias = info->load_bias; + + /* + * We need to relocate the VDSO image. The one built into the kernel + * is built for a fixed address. The one built for QEMU is not, since + * that requires close control of the guest address space. + * We pre-processed the image to locate all of the addresses that need + * to be updated. + */ + for (unsigned i = 0, n = vdso->reloc_count; i < n; i++) { + abi_ulong *addr = g2h_untagged(load_addr + vdso->relocs[i]); + *addr = tswapal(tswapal(*addr) + load_bias); } - load_elf_image(filename, fd, info, NULL, bprm_buf); + /* Install signal trampolines, if present. 
*/ + if (vdso->sigreturn_ofs) { + default_sigreturn = load_addr + vdso->sigreturn_ofs; + } + if (vdso->rt_sigreturn_ofs) { + default_rt_sigreturn = load_addr + vdso->rt_sigreturn_ofs; + } + + /* Remove write from VDSO segment. */ + target_mprotect(info->start_data, info->end_data - info->start_data, + PROT_READ | PROT_EXEC); } static int symfind(const void *s0, const void *s1) @@ -3591,19 +3675,20 @@ static int symcmp(const void *s0, const void *s1) } /* Best attempt to load symbols from this ELF object. */ -static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias) +static void load_symbols(struct elfhdr *hdr, const ImageSource *src, + abi_ulong load_bias) { int i, shnum, nsyms, sym_idx = 0, str_idx = 0; - uint64_t segsz; - struct elf_shdr *shdr; + g_autofree struct elf_shdr *shdr = NULL; char *strings = NULL; - struct syminfo *s = NULL; - struct elf_sym *new_syms, *syms = NULL; + struct elf_sym *syms = NULL; + struct elf_sym *new_syms; + uint64_t segsz; shnum = hdr->e_shnum; - i = shnum * sizeof(struct elf_shdr); - shdr = (struct elf_shdr *)alloca(i); - if (pread(fd, shdr, i, hdr->e_shoff) != i) { + shdr = imgsrc_read_alloc(hdr->e_shoff, shnum * sizeof(struct elf_shdr), + src, NULL); + if (shdr == NULL) { return; } @@ -3621,31 +3706,33 @@ static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias) found: /* Now know where the strtab and symtab are. Snarf them. */ - s = g_try_new(struct syminfo, 1); - if (!s) { - goto give_up; - } segsz = shdr[str_idx].sh_size; - s->disas_strtab = strings = g_try_malloc(segsz); - if (!strings || - pread(fd, strings, segsz, shdr[str_idx].sh_offset) != segsz) { + strings = g_try_malloc(segsz); + if (!strings) { goto give_up; } - - segsz = shdr[sym_idx].sh_size; - syms = g_try_malloc(segsz); - if (!syms || pread(fd, syms, segsz, shdr[sym_idx].sh_offset) != segsz) { + if (!imgsrc_read(strings, shdr[str_idx].sh_offset, segsz, src, NULL)) { goto give_up; } + segsz = shdr[sym_idx].sh_size; if (segsz / sizeof(struct elf_sym) > INT_MAX) { - /* Implausibly large symbol table: give up rather than ploughing - * on with the number of symbols calculation overflowing + /* + * Implausibly large symbol table: give up rather than ploughing + * on with the number of symbols calculation overflowing. */ goto give_up; } nsyms = segsz / sizeof(struct elf_sym); + syms = g_try_malloc(segsz); + if (!syms) { + goto give_up; + } + if (!imgsrc_read(syms, shdr[sym_idx].sh_offset, segsz, src, NULL)) { + goto give_up; + } + for (i = 0; i < nsyms; ) { bswap_sym(syms + i); /* Throw away entries which we do not need. */ @@ -3670,10 +3757,12 @@ static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias) goto give_up; } - /* Attempt to free the storage associated with the local symbols - that we threw away. Whether or not this has any effect on the - memory allocation depends on the malloc implementation and how - many symbols we managed to discard. */ + /* + * Attempt to free the storage associated with the local symbols + * that we threw away. Whether or not this has any effect on the + * memory allocation depends on the malloc implementation and how + * many symbols we managed to discard. 
+ */ new_syms = g_try_renew(struct elf_sym, syms, nsyms); if (new_syms == NULL) { goto give_up; @@ -3682,20 +3771,23 @@ static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias) qsort(syms, nsyms, sizeof(*syms), symcmp); - s->disas_num_syms = nsyms; + { + struct syminfo *s = g_new(struct syminfo, 1); + + s->disas_strtab = strings; + s->disas_num_syms = nsyms; #if ELF_CLASS == ELFCLASS32 - s->disas_symtab.elf32 = syms; + s->disas_symtab.elf32 = syms; #else - s->disas_symtab.elf64 = syms; + s->disas_symtab.elf64 = syms; #endif - s->lookup_symbol = lookup_symbolxx; - s->next = syminfos; - syminfos = s; - + s->lookup_symbol = lookup_symbolxx; + s->next = syminfos; + syminfos = s; + } return; -give_up: - g_free(s); + give_up: g_free(strings); g_free(syms); } @@ -3737,8 +3829,14 @@ uint32_t get_elf_eflags(int fd) int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) { - struct image_info interp_info; - struct elfhdr elf_ex; + /* + * We need a copy of the elf header for passing to create_elf_tables. + * We will have overwritten the original when we re-use bprm->buf + * while loading the interpreter. Allocate the storage for this now + * and let elf_load_image do any swapping that may be required. + */ + struct elfhdr ehdr; + struct image_info interp_info, vdso_info; char *elf_interpreter = NULL; char *scratch; @@ -3747,13 +3845,7 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN; #endif - load_elf_image(bprm->filename, bprm->fd, info, - &elf_interpreter, bprm->buf); - - /* ??? We need a copy of the elf header for passing to create_elf_tables. - If we do nothing, we'll have overwritten this when we re-use bprm->buf - when we load the interpreter. */ - elf_ex = *(struct elfhdr *)bprm->buf; + load_elf_image(bprm->filename, &bprm->src, info, &ehdr, &elf_interpreter); /* Do this so that we can load the interpreter, if need be. We will change some of these later */ @@ -3825,10 +3917,14 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) } /* - * TODO: load a vdso, which would also contain the signal trampolines. - * Otherwise, allocate a private page to hold them. + * Load a vdso if available, which will amongst other things contain the + * signal trampolines. Otherwise, allocate a separate page for them. */ - if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) { + const VdsoImageInfo *vdso = vdso_image_info(); + if (vdso) { + load_elf_vdso(&vdso_info, vdso); + info->vdso = vdso_info.load_bias; + } else if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) { abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); @@ -3840,8 +3936,9 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC); } - bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex, - info, (elf_interpreter ? &interp_info : NULL)); + bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &ehdr, info, + elf_interpreter ? &interp_info : NULL, + vdso ? &vdso_info : NULL); info->start_stack = bprm->p; /* If we have an interpreter, set that as the program's entry point. 
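
The VDSO_HEADER pulled in by elfload.c is generated by gen-vdso (introduced further down in this series). Its overall shape is roughly the following; the byte values and offsets are placeholders, and VdsoImageInfo/ARRAY_SIZE come from the including QEMU sources.

    /* Rough shape of a generated vdso*.c.inc, as emitted by gen-vdso.c. */
    static const uint8_t vdso_image[] = {
        0x7f, 0x45, 0x4c, 0x46, /* ELF magic, then the rest of the image */
    };

    static const unsigned vdso_relocs[] = {
        0x0000001c, /* illustrative: file offsets patched with the load bias */
    };

    static const VdsoImageInfo vdso_image_info = {
        .image            = vdso_image,
        .relocs           = vdso_relocs,
        .image_size       = sizeof(vdso_image),
        .reloc_count      = ARRAY_SIZE(vdso_relocs),
        .sigreturn_ofs    = 0x0,    /* 0 when the symbol was not requested */
        .rt_sigreturn_ofs = 0x0,
    };
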
diff --git a/linux-user/flatload.c b/linux-user/flatload.c index fdcc4610fa..5b62aa0a2b 100644 --- a/linux-user/flatload.c +++ b/linux-user/flatload.c @@ -463,7 +463,7 @@ static int load_flat_file(struct linux_binprm * bprm, DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n"); textpos = target_mmap(0, text_len, PROT_READ|PROT_EXEC, - MAP_PRIVATE, bprm->fd, 0); + MAP_PRIVATE, bprm->src.fd, 0); if (textpos == -1) { fprintf(stderr, "Unable to mmap process text\n"); return -1; @@ -490,7 +490,7 @@ static int load_flat_file(struct linux_binprm * bprm, } else #endif { - result = target_pread(bprm->fd, datapos, + result = target_pread(bprm->src.fd, datapos, data_len + (relocs * sizeof(abi_ulong)), fpos); } @@ -540,10 +540,10 @@ static int load_flat_file(struct linux_binprm * bprm, else #endif { - result = target_pread(bprm->fd, textpos, + result = target_pread(bprm->src.fd, textpos, text_len, 0); if (result >= 0) { - result = target_pread(bprm->fd, datapos, + result = target_pread(bprm->src.fd, datapos, data_len + (relocs * sizeof(abi_ulong)), ntohl(hdr->data_start)); } diff --git a/linux-user/gen-vdso-elfn.c.inc b/linux-user/gen-vdso-elfn.c.inc new file mode 100644 index 0000000000..95856eb839 --- /dev/null +++ b/linux-user/gen-vdso-elfn.c.inc @@ -0,0 +1,314 @@ +/* + * Post-process a vdso elf image for inclusion into qemu. + * Elf size specialization. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +static void elfN(bswap_ehdr)(ElfN(Ehdr) *ehdr) +{ + bswaps(&ehdr->e_type); /* Object file type */ + bswaps(&ehdr->e_machine); /* Architecture */ + bswaps(&ehdr->e_version); /* Object file version */ + bswaps(&ehdr->e_entry); /* Entry point virtual address */ + bswaps(&ehdr->e_phoff); /* Program header table file offset */ + bswaps(&ehdr->e_shoff); /* Section header table file offset */ + bswaps(&ehdr->e_flags); /* Processor-specific flags */ + bswaps(&ehdr->e_ehsize); /* ELF header size in bytes */ + bswaps(&ehdr->e_phentsize); /* Program header table entry size */ + bswaps(&ehdr->e_phnum); /* Program header table entry count */ + bswaps(&ehdr->e_shentsize); /* Section header table entry size */ + bswaps(&ehdr->e_shnum); /* Section header table entry count */ + bswaps(&ehdr->e_shstrndx); /* Section header string table index */ +} + +static void elfN(bswap_phdr)(ElfN(Phdr) *phdr) +{ + bswaps(&phdr->p_type); /* Segment type */ + bswaps(&phdr->p_flags); /* Segment flags */ + bswaps(&phdr->p_offset); /* Segment file offset */ + bswaps(&phdr->p_vaddr); /* Segment virtual address */ + bswaps(&phdr->p_paddr); /* Segment physical address */ + bswaps(&phdr->p_filesz); /* Segment size in file */ + bswaps(&phdr->p_memsz); /* Segment size in memory */ + bswaps(&phdr->p_align); /* Segment alignment */ +} + +static void elfN(bswap_shdr)(ElfN(Shdr) *shdr) +{ + bswaps(&shdr->sh_name); + bswaps(&shdr->sh_type); + bswaps(&shdr->sh_flags); + bswaps(&shdr->sh_addr); + bswaps(&shdr->sh_offset); + bswaps(&shdr->sh_size); + bswaps(&shdr->sh_link); + bswaps(&shdr->sh_info); + bswaps(&shdr->sh_addralign); + bswaps(&shdr->sh_entsize); +} + +static void elfN(bswap_sym)(ElfN(Sym) *sym) +{ + bswaps(&sym->st_name); + bswaps(&sym->st_value); + bswaps(&sym->st_size); + bswaps(&sym->st_shndx); +} + +static void elfN(bswap_dyn)(ElfN(Dyn) *dyn) +{ + bswaps(&dyn->d_tag); /* Dynamic type tag */ + bswaps(&dyn->d_un.d_ptr); /* Dynamic ptr or val, in union */ +} + +static void elfN(search_symtab)(ElfN(Shdr) *shdr, unsigned sym_idx, + void *buf, bool need_bswap) +{ + unsigned str_idx = 
shdr[sym_idx].sh_link; + ElfN(Sym) *sym = buf + shdr[sym_idx].sh_offset; + unsigned sym_n = shdr[sym_idx].sh_size / sizeof(*sym); + const char *str = buf + shdr[str_idx].sh_offset; + + for (unsigned i = 0; i < sym_n; ++i) { + const char *name; + + if (need_bswap) { + elfN(bswap_sym)(sym + i); + } + name = str + sym[i].st_name; + + if (sigreturn_sym && strcmp(sigreturn_sym, name) == 0) { + sigreturn_addr = sym[i].st_value; + } + if (rt_sigreturn_sym && strcmp(rt_sigreturn_sym, name) == 0) { + rt_sigreturn_addr = sym[i].st_value; + } + } +} + +static void elfN(process)(FILE *outf, void *buf, bool need_bswap) +{ + ElfN(Ehdr) *ehdr = buf; + ElfN(Phdr) *phdr; + ElfN(Shdr) *shdr; + unsigned phnum, shnum; + unsigned dynamic_ofs = 0; + unsigned dynamic_addr = 0; + unsigned symtab_idx = 0; + unsigned dynsym_idx = 0; + unsigned first_segsz = 0; + int errors = 0; + + if (need_bswap) { + elfN(bswap_ehdr)(ehdr); + } + + phnum = ehdr->e_phnum; + phdr = buf + ehdr->e_phoff; + if (need_bswap) { + for (unsigned i = 0; i < phnum; ++i) { + elfN(bswap_phdr)(phdr + i); + } + } + + shnum = ehdr->e_shnum; + shdr = buf + ehdr->e_shoff; + if (need_bswap) { + for (unsigned i = 0; i < shnum; ++i) { + elfN(bswap_shdr)(shdr + i); + } + } + for (unsigned i = 0; i < shnum; ++i) { + switch (shdr[i].sh_type) { + case SHT_SYMTAB: + symtab_idx = i; + break; + case SHT_DYNSYM: + dynsym_idx = i; + break; + } + } + + /* + * Validate the VDSO is created as we expect: that PT_PHDR, + * PT_DYNAMIC, and PT_NOTE located in a writable data segment. + * PHDR and DYNAMIC require relocation, and NOTE will get the + * linux version number. + */ + for (unsigned i = 0; i < phnum; ++i) { + if (phdr[i].p_type != PT_LOAD) { + continue; + } + if (first_segsz != 0) { + fprintf(stderr, "Multiple LOAD segments\n"); + errors++; + } + if (phdr[i].p_offset != 0) { + fprintf(stderr, "LOAD segment does not cover EHDR\n"); + errors++; + } + if (phdr[i].p_vaddr != 0) { + fprintf(stderr, "LOAD segment not loaded at address 0\n"); + errors++; + } + first_segsz = phdr[i].p_filesz; + if (first_segsz < ehdr->e_phoff + phnum * sizeof(*phdr)) { + fprintf(stderr, "LOAD segment does not cover PHDRs\n"); + errors++; + } + if ((phdr[i].p_flags & (PF_R | PF_W)) != (PF_R | PF_W)) { + fprintf(stderr, "LOAD segment is not read-write\n"); + errors++; + } + } + for (unsigned i = 0; i < phnum; ++i) { + const char *which; + + switch (phdr[i].p_type) { + case PT_PHDR: + which = "PT_PHDR"; + break; + case PT_NOTE: + which = "PT_NOTE"; + break; + case PT_DYNAMIC: + dynamic_ofs = phdr[i].p_offset; + dynamic_addr = phdr[i].p_vaddr; + which = "PT_DYNAMIC"; + break; + default: + continue; + } + if (first_segsz < phdr[i].p_vaddr + phdr[i].p_filesz) { + fprintf(stderr, "LOAD segment does not cover %s\n", which); + errors++; + } + } + if (errors) { + exit(EXIT_FAILURE); + } + + /* Relocate the program headers. */ + for (unsigned i = 0; i < phnum; ++i) { + output_reloc(outf, buf, &phdr[i].p_vaddr); + output_reloc(outf, buf, &phdr[i].p_paddr); + } + + /* Relocate the DYNAMIC entries. */ + if (dynamic_addr) { + ElfN(Dyn) *dyn = buf + dynamic_ofs; + __typeof(dyn->d_tag) tag; + + do { + + if (need_bswap) { + elfN(bswap_dyn)(dyn); + } + tag = dyn->d_tag; + + switch (tag) { + case DT_HASH: + case DT_SYMTAB: + case DT_STRTAB: + case DT_VERDEF: + case DT_VERSYM: + case DT_PLTGOT: + case DT_ADDRRNGLO ... DT_ADDRRNGHI: + /* These entries store an address in the entry. 
*/ + output_reloc(outf, buf, &dyn->d_un.d_val); + break; + + case DT_NULL: + case DT_STRSZ: + case DT_SONAME: + case DT_DEBUG: + case DT_FLAGS: + case DT_FLAGS_1: + case DT_SYMBOLIC: + case DT_BIND_NOW: + case DT_VERDEFNUM: + case DT_VALRNGLO ... DT_VALRNGHI: + /* These entries store an integer in the entry. */ + break; + + case DT_SYMENT: + if (dyn->d_un.d_val != sizeof(ElfN(Sym))) { + fprintf(stderr, "VDSO has incorrect dynamic symbol size\n"); + errors++; + } + break; + + case DT_REL: + case DT_RELSZ: + case DT_RELA: + case DT_RELASZ: + /* + * These entries indicate that the VDSO was built incorrectly. + * It should not have any real relocations. + * ??? The RISC-V toolchain will emit these even when there + * are no relocations. Validate zeros. + */ + if (dyn->d_un.d_val != 0) { + fprintf(stderr, "VDSO has dynamic relocations\n"); + errors++; + } + break; + case DT_RELENT: + case DT_RELAENT: + case DT_TEXTREL: + /* These entries store an integer in the entry. */ + /* Should not be required; see above. */ + break; + + case DT_NEEDED: + case DT_VERNEED: + case DT_PLTREL: + case DT_JMPREL: + case DT_RPATH: + case DT_RUNPATH: + fprintf(stderr, "VDSO has external dependencies\n"); + errors++; + break; + + case PT_LOPROC + 3: + if (ehdr->e_machine == EM_PPC64) { + break; /* DT_PPC64_OPT: integer bitmask */ + } + goto do_default; + + default: + do_default: + /* This is probably something target specific. */ + fprintf(stderr, "VDSO has unknown DYNAMIC entry (%lx)\n", + (unsigned long)tag); + errors++; + break; + } + dyn++; + } while (tag != DT_NULL); + if (errors) { + exit(EXIT_FAILURE); + } + } + + /* Relocate the dynamic symbol table. */ + if (dynsym_idx) { + ElfN(Sym) *sym = buf + shdr[dynsym_idx].sh_offset; + unsigned sym_n = shdr[dynsym_idx].sh_size / sizeof(*sym); + + for (unsigned i = 0; i < sym_n; ++i) { + output_reloc(outf, buf, &sym[i].st_value); + } + } + + /* Search both dynsym and symtab for the signal return symbols. */ + if (dynsym_idx) { + elfN(search_symtab)(shdr, dynsym_idx, buf, need_bswap); + } + if (symtab_idx) { + elfN(search_symtab)(shdr, symtab_idx, buf, need_bswap); + } +} diff --git a/linux-user/gen-vdso.c b/linux-user/gen-vdso.c new file mode 100644 index 0000000000..31e333be80 --- /dev/null +++ b/linux-user/gen-vdso.c @@ -0,0 +1,223 @@ +/* + * Post-process a vdso elf image for inclusion into qemu. + * + * Copyright 2023 Linaro, Ltd. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include <stdlib.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <string.h> +#include <errno.h> +#include <endian.h> +#include <unistd.h> +#include "elf.h" + + +#define bswap_(p) _Generic(*(p), \ + uint16_t: __builtin_bswap16, \ + uint32_t: __builtin_bswap32, \ + uint64_t: __builtin_bswap64, \ + int16_t: __builtin_bswap16, \ + int32_t: __builtin_bswap32, \ + int64_t: __builtin_bswap64) +#define bswaps(p) (*(p) = bswap_(p)(*(p))) + +static void output_reloc(FILE *outf, void *buf, void *loc) +{ + fprintf(outf, " 0x%08tx,\n", loc - buf); +} + +static const char *sigreturn_sym; +static const char *rt_sigreturn_sym; + +static unsigned sigreturn_addr; +static unsigned rt_sigreturn_addr; + +#define N 32 +#define elfN(x) elf32_##x +#define ElfN(x) Elf32_##x +#include "gen-vdso-elfn.c.inc" +#undef N +#undef elfN +#undef ElfN + +#define N 64 +#define elfN(x) elf64_##x +#define ElfN(x) Elf64_##x +#include "gen-vdso-elfn.c.inc" +#undef N +#undef elfN +#undef ElfN + + +int main(int argc, char **argv) +{ + FILE *inf, *outf; + long total_len; + const char *prefix = "vdso"; + const char *inf_name; + const char *outf_name = NULL; + unsigned char *buf; + bool need_bswap; + + while (1) { + int opt = getopt(argc, argv, "o:p:r:s:"); + if (opt < 0) { + break; + } + switch (opt) { + case 'o': + outf_name = optarg; + break; + case 'p': + prefix = optarg; + break; + case 'r': + rt_sigreturn_sym = optarg; + break; + case 's': + sigreturn_sym = optarg; + break; + default: + usage: + fprintf(stderr, "usage: [-p prefix] [-r rt-sigreturn-name] " + "[-s sigreturn-name] -o output-file input-file\n"); + return EXIT_FAILURE; + } + } + + if (optind >= argc || outf_name == NULL) { + goto usage; + } + inf_name = argv[optind]; + + /* + * Open the input and output files. + */ + inf = fopen(inf_name, "rb"); + if (inf == NULL) { + goto perror_inf; + } + outf = fopen(outf_name, "w"); + if (outf == NULL) { + goto perror_outf; + } + + /* + * Read the input file into a buffer. + * We expect the vdso to be small, on the order of one page, + * therefore we do not expect a partial read. + */ + fseek(inf, 0, SEEK_END); + total_len = ftell(inf); + fseek(inf, 0, SEEK_SET); + + buf = malloc(total_len); + if (buf == NULL) { + goto perror_inf; + } + + errno = 0; + if (fread(buf, 1, total_len, inf) != total_len) { + if (errno) { + goto perror_inf; + } + fprintf(stderr, "%s: incomplete read\n", inf_name); + return EXIT_FAILURE; + } + fclose(inf); + + /* + * Write out the vdso image now, before we make local changes. + */ + + fprintf(outf, + "/* Automatically generated from linux-user/gen-vdso.c. */\n" + "\n" + "static const uint8_t %s_image[] = {", + prefix); + for (long i = 0; i < total_len; ++i) { + if (i % 12 == 0) { + fputs("\n ", outf); + } + fprintf(outf, " 0x%02x,", buf[i]); + } + fprintf(outf, "\n};\n\n"); + + /* + * Identify which elf flavor we're processing. + * The first 16 bytes of the file are e_ident. + */ + + if (buf[EI_MAG0] != ELFMAG0 || buf[EI_MAG1] != ELFMAG1 || + buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3) { + fprintf(stderr, "%s: not an elf file\n", inf_name); + return EXIT_FAILURE; + } + switch (buf[EI_DATA]) { + case ELFDATA2LSB: + need_bswap = BYTE_ORDER != LITTLE_ENDIAN; + break; + case ELFDATA2MSB: + need_bswap = BYTE_ORDER != BIG_ENDIAN; + break; + default: + fprintf(stderr, "%s: invalid elf EI_DATA (%u)\n", + inf_name, buf[EI_DATA]); + return EXIT_FAILURE; + } + + /* + * We need to relocate the VDSO image. 
The one built into the kernel + * is built for a fixed address. The one we built for QEMU is not, + * since that requires close control of the guest address space. + * + * Output relocation addresses as we go. + */ + + fprintf(outf, "static const unsigned %s_relocs[] = {\n", prefix); + + switch (buf[EI_CLASS]) { + case ELFCLASS32: + elf32_process(outf, buf, need_bswap); + break; + case ELFCLASS64: + elf64_process(outf, buf, need_bswap); + break; + default: + fprintf(stderr, "%s: invalid elf EI_CLASS (%u)\n", + inf_name, buf[EI_CLASS]); + return EXIT_FAILURE; + } + + fprintf(outf, "};\n\n"); /* end vdso_relocs. */ + + fprintf(outf, "static const VdsoImageInfo %s_image_info = {\n", prefix); + fprintf(outf, " .image = %s_image,\n", prefix); + fprintf(outf, " .relocs = %s_relocs,\n", prefix); + fprintf(outf, " .image_size = sizeof(%s_image),\n", prefix); + fprintf(outf, " .reloc_count = ARRAY_SIZE(%s_relocs),\n", prefix); + fprintf(outf, " .sigreturn_ofs = 0x%x,\n", sigreturn_addr); + fprintf(outf, " .rt_sigreturn_ofs = 0x%x,\n", rt_sigreturn_addr); + fprintf(outf, "};\n"); + + /* + * Everything should have gone well. + */ + if (fclose(outf)) { + goto perror_outf; + } + return EXIT_SUCCESS; + + perror_inf: + perror(inf_name); + return EXIT_FAILURE; + + perror_outf: + perror(outf_name); + return EXIT_FAILURE; +} diff --git a/linux-user/hppa/Makefile.vdso b/linux-user/hppa/Makefile.vdso new file mode 100644 index 0000000000..f4537ae716 --- /dev/null +++ b/linux-user/hppa/Makefile.vdso @@ -0,0 +1,11 @@ +include $(BUILD_DIR)/tests/tcg/hppa-linux-user/config-target.mak + +SUBDIR = $(SRC_PATH)/linux-user/hppa +VPATH += $(SUBDIR) + +all: $(SUBDIR)/vdso.so + +$(SUBDIR)/vdso.so: vdso.S vdso.ld vdso-asmoffset.h + $(CC) -o $@ -nostdlib -shared -Wl,-h,linux-vdso32.so.1 \ + -Wl,--build-id=sha1 -Wl,--hash-style=both \ + -Wl,-T,$(SUBDIR)/vdso.ld $< diff --git a/linux-user/hppa/meson.build b/linux-user/hppa/meson.build index 4709508a09..aa2d9a87a6 100644 --- a/linux-user/hppa/meson.build +++ b/linux-user/hppa/meson.build @@ -3,3 +3,8 @@ syscall_nr_generators += { arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], output: '@BASENAME@_nr.h') } + +vdso_inc = gen_vdso.process('vdso.so', + extra_args: [ '-r', '__kernel_sigtramp_rt' ]) + +linux_user_ss.add(when: 'TARGET_HPPA', if_true: vdso_inc) diff --git a/linux-user/hppa/signal.c b/linux-user/hppa/signal.c index ec5f5412d1..17920e9ceb 100644 --- a/linux-user/hppa/signal.c +++ b/linux-user/hppa/signal.c @@ -21,6 +21,7 @@ #include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" +#include "vdso-asmoffset.h" struct target_sigcontext { abi_ulong sc_flags; @@ -47,6 +48,19 @@ struct target_rt_sigframe { /* hidden location of upper halves of pa2.0 64-bit gregs */ }; +QEMU_BUILD_BUG_ON(sizeof(struct target_rt_sigframe) != sizeof_rt_sigframe); +QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, uc.tuc_mcontext) + != offsetof_sigcontext); +QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_gr) + != offsetof_sigcontext_gr); +QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_fr) + != offsetof_sigcontext_fr); +QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_iaoq) + != offsetof_sigcontext_iaoq); +QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_sar) + != offsetof_sigcontext_sar); + + static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env) { int i; @@ -91,16 +105,6 @@ static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc) 
__get_user(env->cr[CR_SAR], &sc->sc_sar); } -#if TARGET_ABI_BITS == 32 -#define SIGFRAME 64 -#define FUNCTIONCALLFRAME 48 -#else -#define SIGFRAME 128 -#define FUNCTIONCALLFRAME 96 -#endif -#define PARISC_RT_SIGFRAME_SIZE32 \ - ((sizeof(struct target_rt_sigframe) + FUNCTIONCALLFRAME + SIGFRAME) & -SIGFRAME) - void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUArchState *env) diff --git a/linux-user/hppa/vdso-asmoffset.h b/linux-user/hppa/vdso-asmoffset.h new file mode 100644 index 0000000000..c8b40c0332 --- /dev/null +++ b/linux-user/hppa/vdso-asmoffset.h @@ -0,0 +1,12 @@ +#define sizeof_rt_sigframe 584 +#define offsetof_sigcontext 160 +#define offsetof_sigcontext_gr 0x4 +#define offsetof_sigcontext_fr 0x88 +#define offsetof_sigcontext_iaoq 0x190 +#define offsetof_sigcontext_sar 0x198 + +/* arch/parisc/include/asm/rt_sigframe.h */ +#define SIGFRAME 64 +#define FUNCTIONCALLFRAME 48 +#define PARISC_RT_SIGFRAME_SIZE32 \ + (((sizeof_rt_sigframe) + FUNCTIONCALLFRAME + SIGFRAME) & -SIGFRAME) diff --git a/linux-user/hppa/vdso.S b/linux-user/hppa/vdso.S new file mode 100644 index 0000000000..5be14d2f70 --- /dev/null +++ b/linux-user/hppa/vdso.S @@ -0,0 +1,165 @@ +/* + * hppa linux kernel vdso replacement. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include <asm/unistd.h> +#include "vdso-asmoffset.h" + + .text + + +/* + * arch/parisc/kernel/vdso32/sigtramp.S: + * Gdb expects the trampoline is on the stack and the pc is offset from + * a 64-byte boundary by 0, 4 or 5 instructions. Since the vdso trampoline + * is not on the stack, we need a new variant with different offsets and + * data to tell gdb where to find the signal context on the stack. + * + * Here we put the offset to the context data at the start of the trampoline + * region and offset the first trampoline by 2 instructions. Please do + * not change the trampoline as the code in gdb depends on the following + * instruction sequence exactly. + */ + +/* arch/parisc/kernel/asm-offsets.c */ +#define SIGFRAME_CONTEXT_REGS32 \ + (offsetof_sigcontext - PARISC_RT_SIGFRAME_SIZE32) + + .align 64 + .word SIGFRAME_CONTEXT_REGS32 + +/* + * All that said, we can provide a proper unwind record, which means that + * GDB should not actually need the offset magic. + * + * The return address that arrived here, from the inner frame, is + * not marked as a signal frame and so the unwinder still tries to + * subtract 1 to examine the presumed call insn. Thus we must + * extend the unwind info to a nop before the start. + */ + + .cfi_startproc simple + .cfi_signal_frame + + /* Compare pa32_fallback_frame_state from libgcc. */ + + /* + * Place the CFA at the start of sigcontext for convenience. + * The previous CFA will be restored from the saved stack pointer. + */ + .cfi_def_cfa 30, -PARISC_RT_SIGFRAME_SIZE32 + offsetof_sigcontext + + /* Record save offset of general registers. 
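+ * (pa32 general registers are 4 bytes wide, so the rule encoded below is
+ * simply: DWARF register N is saved at CFA + offsetof_sigcontext_gr + N * 4.)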
*/ + .cfi_offset 1, offsetof_sigcontext_gr + 1 * 4 + .cfi_offset 2, offsetof_sigcontext_gr + 2 * 4 + .cfi_offset 3, offsetof_sigcontext_gr + 3 * 4 + .cfi_offset 4, offsetof_sigcontext_gr + 4 * 4 + .cfi_offset 5, offsetof_sigcontext_gr + 5 * 4 + .cfi_offset 6, offsetof_sigcontext_gr + 6 * 4 + .cfi_offset 7, offsetof_sigcontext_gr + 7 * 4 + .cfi_offset 8, offsetof_sigcontext_gr + 8 * 4 + .cfi_offset 9, offsetof_sigcontext_gr + 9 * 4 + .cfi_offset 10, offsetof_sigcontext_gr + 10 * 4 + .cfi_offset 11, offsetof_sigcontext_gr + 11 * 4 + .cfi_offset 12, offsetof_sigcontext_gr + 12 * 4 + .cfi_offset 13, offsetof_sigcontext_gr + 13 * 4 + .cfi_offset 14, offsetof_sigcontext_gr + 14 * 4 + .cfi_offset 15, offsetof_sigcontext_gr + 15 * 4 + .cfi_offset 16, offsetof_sigcontext_gr + 16 * 4 + .cfi_offset 17, offsetof_sigcontext_gr + 17 * 4 + .cfi_offset 18, offsetof_sigcontext_gr + 18 * 4 + .cfi_offset 19, offsetof_sigcontext_gr + 19 * 4 + .cfi_offset 20, offsetof_sigcontext_gr + 20 * 4 + .cfi_offset 21, offsetof_sigcontext_gr + 21 * 4 + .cfi_offset 22, offsetof_sigcontext_gr + 22 * 4 + .cfi_offset 23, offsetof_sigcontext_gr + 23 * 4 + .cfi_offset 24, offsetof_sigcontext_gr + 24 * 4 + .cfi_offset 25, offsetof_sigcontext_gr + 25 * 4 + .cfi_offset 26, offsetof_sigcontext_gr + 26 * 4 + .cfi_offset 27, offsetof_sigcontext_gr + 27 * 4 + .cfi_offset 28, offsetof_sigcontext_gr + 28 * 4 + .cfi_offset 29, offsetof_sigcontext_gr + 29 * 4 + .cfi_offset 30, offsetof_sigcontext_gr + 30 * 4 + .cfi_offset 31, offsetof_sigcontext_gr + 31 * 4 + + /* Record save offset of fp registers, left and right halves. */ + .cfi_offset 32, offsetof_sigcontext_fr + 4 * 8 + .cfi_offset 33, offsetof_sigcontext_fr + 4 * 8 + 4 + .cfi_offset 34, offsetof_sigcontext_fr + 5 * 8 + .cfi_offset 35, offsetof_sigcontext_fr + 5 * 8 + 4 + .cfi_offset 36, offsetof_sigcontext_fr + 6 * 8 + .cfi_offset 37, offsetof_sigcontext_fr + 6 * 8 + 4 + .cfi_offset 38, offsetof_sigcontext_fr + 7 * 8 + .cfi_offset 39, offsetof_sigcontext_fr + 7 * 8 + 4 + .cfi_offset 40, offsetof_sigcontext_fr + 8 * 8 + .cfi_offset 41, offsetof_sigcontext_fr + 8 * 8 + 4 + .cfi_offset 42, offsetof_sigcontext_fr + 9 * 8 + .cfi_offset 43, offsetof_sigcontext_fr + 9 * 8 + 4 + .cfi_offset 44, offsetof_sigcontext_fr + 10 * 8 + .cfi_offset 45, offsetof_sigcontext_fr + 10 * 8 + 4 + .cfi_offset 46, offsetof_sigcontext_fr + 11 * 8 + .cfi_offset 47, offsetof_sigcontext_fr + 11 * 8 + 4 + .cfi_offset 48, offsetof_sigcontext_fr + 12 * 8 + .cfi_offset 49, offsetof_sigcontext_fr + 12 * 8 + 4 + .cfi_offset 50, offsetof_sigcontext_fr + 13 * 8 + .cfi_offset 51, offsetof_sigcontext_fr + 13 * 8 + 4 + .cfi_offset 52, offsetof_sigcontext_fr + 14 * 8 + .cfi_offset 53, offsetof_sigcontext_fr + 14 * 8 + 4 + .cfi_offset 54, offsetof_sigcontext_fr + 15 * 8 + .cfi_offset 55, offsetof_sigcontext_fr + 15 * 8 + 4 + .cfi_offset 56, offsetof_sigcontext_fr + 16 * 8 + .cfi_offset 57, offsetof_sigcontext_fr + 16 * 8 + 4 + .cfi_offset 58, offsetof_sigcontext_fr + 17 * 8 + .cfi_offset 59, offsetof_sigcontext_fr + 17 * 8 + 4 + .cfi_offset 60, offsetof_sigcontext_fr + 18 * 8 + .cfi_offset 61, offsetof_sigcontext_fr + 18 * 8 + 4 + .cfi_offset 62, offsetof_sigcontext_fr + 19 * 8 + .cfi_offset 63, offsetof_sigcontext_fr + 19 * 8 + 4 + .cfi_offset 64, offsetof_sigcontext_fr + 20 * 8 + .cfi_offset 65, offsetof_sigcontext_fr + 20 * 8 + 4 + .cfi_offset 66, offsetof_sigcontext_fr + 21 * 8 + .cfi_offset 67, offsetof_sigcontext_fr + 21 * 8 + 4 + .cfi_offset 68, offsetof_sigcontext_fr + 22 * 8 + .cfi_offset 69, offsetof_sigcontext_fr 
+ 22 * 8 + 4 + .cfi_offset 70, offsetof_sigcontext_fr + 23 * 8 + .cfi_offset 71, offsetof_sigcontext_fr + 23 * 8 + 4 + .cfi_offset 72, offsetof_sigcontext_fr + 24 * 8 + .cfi_offset 73, offsetof_sigcontext_fr + 24 * 8 + 4 + .cfi_offset 74, offsetof_sigcontext_fr + 25 * 8 + .cfi_offset 75, offsetof_sigcontext_fr + 25 * 8 + 4 + .cfi_offset 76, offsetof_sigcontext_fr + 26 * 8 + .cfi_offset 77, offsetof_sigcontext_fr + 26 * 8 + 4 + .cfi_offset 78, offsetof_sigcontext_fr + 27 * 8 + .cfi_offset 79, offsetof_sigcontext_fr + 27 * 8 + 4 + .cfi_offset 80, offsetof_sigcontext_fr + 28 * 8 + .cfi_offset 81, offsetof_sigcontext_fr + 28 * 8 + 4 + .cfi_offset 82, offsetof_sigcontext_fr + 29 * 8 + .cfi_offset 83, offsetof_sigcontext_fr + 29 * 8 + 4 + .cfi_offset 84, offsetof_sigcontext_fr + 30 * 8 + .cfi_offset 85, offsetof_sigcontext_fr + 30 * 8 + 4 + .cfi_offset 86, offsetof_sigcontext_fr + 31 * 8 + .cfi_offset 87, offsetof_sigcontext_fr + 31 * 8 + 4 + + /* Record save offset of %sar */ + .cfi_offset 88, offsetof_sigcontext_sar + + /* Record save offset of return address, iaoq[0]. */ + .cfi_return_column 89 + .cfi_offset 89, offsetof_sigcontext_iaoq + + nop + +__kernel_sigtramp_rt: + ldi 0, %r25 + ldi __NR_rt_sigreturn, %r20 + be,l 0x100(%sr2, %r0), %sr0, %r31 + nop + + .cfi_endproc + .size __kernel_sigtramp_rt, . - __kernel_sigtramp_rt + .type __kernel_sigtramp_rt, @function + .globl __kernel_sigtramp_rt diff --git a/linux-user/hppa/vdso.ld b/linux-user/hppa/vdso.ld new file mode 100644 index 0000000000..b17ad974f3 --- /dev/null +++ b/linux-user/hppa/vdso.ld @@ -0,0 +1,77 @@ +/* + * Linker script for linux hppa vdso. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +VERSION { + /* + * The kernel's vdso32.lds.S attempts to export + * __kernel_sigtramp_rt32 + * __kernel_restart_syscall32 + * except that those symbols don't exist. The actual symbols are + * __kernel_sigtramp_rt + * __kernel_restart_syscall + * which means that nothing is exported at all. + * QEMU handles syscall restart internally, so we don't + * need to implement __kernel_restart_syscall at all. + */ + LINUX_5.18 { + local: *; + }; +} + + +PHDRS { + phdr PT_PHDR FLAGS(4) PHDRS; + load PT_LOAD FLAGS(7) FILEHDR PHDRS; + dynamic PT_DYNAMIC FLAGS(4); + note PT_NOTE FLAGS(4); + eh_frame_hdr PT_GNU_EH_FRAME; +} + +SECTIONS { + . = SIZEOF_HEADERS; + + /* The following, including the FILEHDRS and PHDRS, are modified + when we relocate the binary. We want them to be initially + writable for the relocation; we'll force them read-only after. */ + .note : { *(.note*) } :load :note + .dynamic : { *(.dynamic) } :load :dynamic + .dynsym : { *(.dynsym) } :load + .data : { + /* There ought not be any real read-write data. + But since we manipulated the segment layout, + we have to put these sections somewhere. 
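+       (gen-vdso.c later rejects the image if any real dynamic relocations
+       remain, so these output sections are expected to stay empty.)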
*/ + *(.data*) + *(.sdata*) + *(.got.plt) *(.got) + *(.gnu.linkonce.d.*) + *(.bss*) + *(.dynbss*) + *(.gnu.linkonce.b.*) + } + + .rodata : { *(.rodata) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr + .eh_frame : { *(.eh_frame) } :load + + .text : { *(.text*) } :load +} diff --git a/linux-user/hppa/vdso.so b/linux-user/hppa/vdso.so new file mode 100755 index 0000000000..e1ddd70c37 --- /dev/null +++ b/linux-user/hppa/vdso.so Binary files differdiff --git a/linux-user/i386/Makefile.vdso b/linux-user/i386/Makefile.vdso new file mode 100644 index 0000000000..95bc616f6d --- /dev/null +++ b/linux-user/i386/Makefile.vdso @@ -0,0 +1,11 @@ +include $(BUILD_DIR)/tests/tcg/i386-linux-user/config-target.mak + +SUBDIR = $(SRC_PATH)/linux-user/i386 +VPATH += $(SUBDIR) + +all: $(SUBDIR)/vdso.so + +$(SUBDIR)/vdso.so: vdso.S vdso.ld vdso-asmoffset.h + $(CC) -o $@ -m32 -nostdlib -shared -Wl,-h,linux-gate.so.1 \ + -Wl,--build-id=sha1 -Wl,--hash-style=both \ + -Wl,-T,$(SUBDIR)/vdso.ld $< diff --git a/linux-user/i386/meson.build b/linux-user/i386/meson.build index ee523019a5..d42fc6cbc9 100644 --- a/linux-user/i386/meson.build +++ b/linux-user/i386/meson.build @@ -3,3 +3,10 @@ syscall_nr_generators += { arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], output: '@BASENAME@_nr.h') } + +vdso_inc = gen_vdso.process('vdso.so', extra_args: [ + '-s', '__kernel_sigreturn', + '-r', '__kernel_rt_sigreturn' + ]) + +linux_user_ss.add(when: 'TARGET_I386', if_true: vdso_inc) diff --git a/linux-user/i386/signal.c b/linux-user/i386/signal.c index 60fa07d6f9..bc5d45302e 100644 --- a/linux-user/i386/signal.c +++ b/linux-user/i386/signal.c @@ -214,6 +214,17 @@ struct rt_sigframe { }; #define TARGET_RT_SIGFRAME_FXSAVE_OFFSET ( \ offsetof(struct rt_sigframe, fpstate) + TARGET_FPSTATE_FXSAVE_OFFSET) + +/* + * Verify that vdso-asmoffset.h constants match. + */ +#include "i386/vdso-asmoffset.h" + +QEMU_BUILD_BUG_ON(offsetof(struct sigframe, sc.eip) + != SIGFRAME_SIGCONTEXT_eip); +QEMU_BUILD_BUG_ON(offsetof(struct rt_sigframe, uc.tuc_mcontext.eip) + != RT_SIGFRAME_SIGCONTEXT_eip); + #else struct rt_sigframe { diff --git a/linux-user/i386/vdso-asmoffset.h b/linux-user/i386/vdso-asmoffset.h new file mode 100644 index 0000000000..4e5ee0dd49 --- /dev/null +++ b/linux-user/i386/vdso-asmoffset.h @@ -0,0 +1,6 @@ +/* + * offsetof(struct sigframe, sc.eip) + * offsetof(struct rt_sigframe, uc.tuc_mcontext.eip) + */ +#define SIGFRAME_SIGCONTEXT_eip 64 +#define RT_SIGFRAME_SIGCONTEXT_eip 220 diff --git a/linux-user/i386/vdso.S b/linux-user/i386/vdso.S new file mode 100644 index 0000000000..e7a1f333a1 --- /dev/null +++ b/linux-user/i386/vdso.S @@ -0,0 +1,143 @@ +/* + * i386 linux replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include <asm/unistd.h> +#include "vdso-asmoffset.h" + +.macro endf name + .globl \name + .type \name, @function + .size \name, . 
- \name +.endm + +.macro vdso_syscall1 name, nr +\name: + .cfi_startproc + mov %ebx, %edx + .cfi_register %ebx, %edx + mov 4(%esp), %ebx + mov $\nr, %eax + int $0x80 + mov %edx, %ebx + ret + .cfi_endproc +endf \name +.endm + +.macro vdso_syscall2 name, nr +\name: + .cfi_startproc + mov %ebx, %edx + .cfi_register %ebx, %edx + mov 4(%esp), %ebx + mov 8(%esp), %ecx + mov $\nr, %eax + int $0x80 + mov %edx, %ebx + ret + .cfi_endproc +endf \name +.endm + +.macro vdso_syscall3 name, nr +\name: + .cfi_startproc + push %ebx + .cfi_adjust_cfa_offset 4 + .cfi_rel_offset %ebx, 0 + mov 8(%esp), %ebx + mov 12(%esp), %ecx + mov 16(%esp), %edx + mov $\nr, %eax + int $0x80 + pop %ebx + .cfi_adjust_cfa_offset -4 + .cfi_restore %ebx + ret + .cfi_endproc +endf \name +.endm + +__kernel_vsyscall: + .cfi_startproc + int $0x80 + ret + .cfi_endproc +endf __kernel_vsyscall + +vdso_syscall2 __vdso_clock_gettime, __NR_clock_gettime +vdso_syscall2 __vdso_clock_gettime64, __NR_clock_gettime64 +vdso_syscall2 __vdso_clock_getres, __NR_clock_getres +vdso_syscall2 __vdso_gettimeofday, __NR_gettimeofday +vdso_syscall1 __vdso_time, __NR_time +vdso_syscall3 __vdso_getcpu, __NR_gettimeofday + +/* + * Signal return handlers. + */ + + .cfi_startproc simple + .cfi_signal_frame + +/* + * For convenience, put the cfa just above eip in sigcontext, and count + * offsets backward from there. Re-compute the cfa in the two contexts + * we have for signal unwinding. This is far simpler than the + * DW_CFA_expression form that the kernel uses, and is equally correct. + */ + + .cfi_def_cfa %esp, SIGFRAME_SIGCONTEXT_eip + 4 + + .cfi_offset %eip, -4 + /* err, -8 */ + /* trapno, -12 */ + .cfi_offset %eax, -16 + .cfi_offset %ecx, -20 + .cfi_offset %edx, -24 + .cfi_offset %ebx, -28 + .cfi_offset %esp, -32 + .cfi_offset %ebp, -36 + .cfi_offset %esi, -40 + .cfi_offset %edi, -44 + +/* + * While this frame is marked as a signal frame, that only applies to how + * the return address is handled for the outer frame. The return address + * that arrived here, from the inner frame, is not marked as a signal frame + * and so the unwinder still tries to subtract 1 to examine the presumed + * call insn. Thus we must extend the unwind info to a nop before the start. + */ + nop + +__kernel_sigreturn: + popl %eax /* pop sig */ + .cfi_adjust_cfa_offset -4 + movl $__NR_sigreturn, %eax + int $0x80 +endf __kernel_sigreturn + + .cfi_def_cfa_offset RT_SIGFRAME_SIGCONTEXT_eip + 4 + nop + +__kernel_rt_sigreturn: + movl $__NR_rt_sigreturn, %eax + int $0x80 +endf __kernel_rt_sigreturn + + .cfi_endproc + +/* + * TODO: Add elf notes. E.g. + * + * #include <linux/elfnote.h> + * ELFNOTE_START(Linux, 0, "a") + * .long LINUX_VERSION_CODE + * ELFNOTE_END + * + * but what version number would we set for QEMU? + */ diff --git a/linux-user/i386/vdso.ld b/linux-user/i386/vdso.ld new file mode 100644 index 0000000000..326b7a8f98 --- /dev/null +++ b/linux-user/i386/vdso.ld @@ -0,0 +1,76 @@ +/* + * Linker script for linux i386 replacement vdso. + * + * Copyright 2023 Linaro, Ltd. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +ENTRY(__kernel_vsyscall) + +VERSION { + LINUX_2.6 { + global: + __vdso_clock_gettime; + __vdso_gettimeofday; + __vdso_time; + __vdso_clock_getres; + __vdso_clock_gettime64; + __vdso_getcpu; + }; + + LINUX_2.5 { + global: + __kernel_vsyscall; + __kernel_sigreturn; + __kernel_rt_sigreturn; + local: *; + }; +} + +PHDRS { + phdr PT_PHDR FLAGS(4) PHDRS; + load PT_LOAD FLAGS(7) FILEHDR PHDRS; /* FLAGS=RWX */ + dynamic PT_DYNAMIC FLAGS(4); + eh_frame_hdr PT_GNU_EH_FRAME; + note PT_NOTE FLAGS(4); +} + +SECTIONS { + . = SIZEOF_HEADERS; + + /* + * The following, including the FILEHDRS and PHDRS, are modified + * when we relocate the binary. We want them to be initially + * writable for the relocation; we'll force them read-only after. + */ + .note : { *(.note*) } :load :note + .dynamic : { *(.dynamic) } :load :dynamic + .dynsym : { *(.dynsym) } :load + .data : { + /* + * There ought not be any real read-write data. + * But since we manipulated the segment layout, + * we have to put these sections somewhere. + */ + *(.data*) + *(.sdata*) + *(.got.plt) *(.got) + *(.gnu.linkonce.d.*) + *(.bss*) + *(.dynbss*) + *(.gnu.linkonce.b.*) + } + + .rodata : { *(.rodata*) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr + .eh_frame : { *(.eh_frame) } :load + + .text : { *(.text*) } :load =0x90909090 +} diff --git a/linux-user/i386/vdso.so b/linux-user/i386/vdso.so new file mode 100755 index 0000000000..bdece5dfcf --- /dev/null +++ b/linux-user/i386/vdso.so Binary files differdiff --git a/linux-user/linuxload.c b/linux-user/linuxload.c index 745cce70ab..4a794f8cea 100644 --- a/linux-user/linuxload.c +++ b/linux-user/linuxload.c @@ -3,7 +3,9 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" +#include "user-mmap.h" #include "loader.h" +#include "qapi/error.h" #define NGROUPS 32 @@ -37,7 +39,7 @@ static int prepare_binprm(struct linux_binprm *bprm) int mode; int retval; - if (fstat(bprm->fd, &st) < 0) { + if (fstat(bprm->src.fd, &st) < 0) { return -errno; } @@ -67,7 +69,7 @@ static int prepare_binprm(struct linux_binprm *bprm) bprm->e_gid = st.st_gid; } - retval = read(bprm->fd, bprm->buf, BPRM_BUF_SIZE); + retval = read(bprm->src.fd, bprm->buf, BPRM_BUF_SIZE); if (retval < 0) { perror("prepare_binprm"); exit(-1); @@ -76,6 +78,10 @@ static int prepare_binprm(struct linux_binprm *bprm) /* Make sure the rest of the loader won't read garbage. 
*/ memset(bprm->buf + retval, 0, BPRM_BUF_SIZE - retval); } + + bprm->src.cache = bprm->buf; + bprm->src.cache_size = retval; + return retval; } @@ -138,7 +144,7 @@ int loader_exec(int fdexec, const char *filename, char **argv, char **envp, { int retval; - bprm->fd = fdexec; + bprm->src.fd = fdexec; bprm->filename = (char *)filename; bprm->argc = count(argv); bprm->argv = argv; @@ -147,29 +153,112 @@ int loader_exec(int fdexec, const char *filename, char **argv, char **envp, retval = prepare_binprm(bprm); - if (retval >= 0) { - if (bprm->buf[0] == 0x7f - && bprm->buf[1] == 'E' - && bprm->buf[2] == 'L' - && bprm->buf[3] == 'F') { - retval = load_elf_binary(bprm, infop); + if (retval < 4) { + return -ENOEXEC; + } + if (bprm->buf[0] == 0x7f + && bprm->buf[1] == 'E' + && bprm->buf[2] == 'L' + && bprm->buf[3] == 'F') { + retval = load_elf_binary(bprm, infop); #if defined(TARGET_HAS_BFLT) - } else if (bprm->buf[0] == 'b' - && bprm->buf[1] == 'F' - && bprm->buf[2] == 'L' - && bprm->buf[3] == 'T') { - retval = load_flt_binary(bprm, infop); + } else if (bprm->buf[0] == 'b' + && bprm->buf[1] == 'F' + && bprm->buf[2] == 'L' + && bprm->buf[3] == 'T') { + retval = load_flt_binary(bprm, infop); #endif - } else { - return -ENOEXEC; - } + } else { + return -ENOEXEC; } - - if (retval >= 0) { - /* success. Initialize important registers */ - do_init_thread(regs, infop); + if (retval < 0) { return retval; } - return retval; + /* Success. Initialize important registers. */ + do_init_thread(regs, infop); + return 0; +} + +bool imgsrc_read(void *dst, off_t offset, size_t len, + const ImageSource *img, Error **errp) +{ + ssize_t ret; + + if (offset + len <= img->cache_size) { + memcpy(dst, img->cache + offset, len); + return true; + } + + if (img->fd < 0) { + error_setg(errp, "read past end of buffer"); + return false; + } + + ret = pread(img->fd, dst, len, offset); + if (ret == len) { + return true; + } + if (ret < 0) { + error_setg_errno(errp, errno, "Error reading file header"); + } else { + error_setg(errp, "Incomplete read of file header"); + } + return false; +} + +void *imgsrc_read_alloc(off_t offset, size_t len, + const ImageSource *img, Error **errp) +{ + void *alloc = g_malloc(len); + bool ok = imgsrc_read(alloc, offset, len, img, errp); + + if (!ok) { + g_free(alloc); + alloc = NULL; + } + return alloc; +} + +abi_long imgsrc_mmap(abi_ulong start, abi_ulong len, int prot, + int flags, const ImageSource *src, abi_ulong offset) +{ + const int prot_write = PROT_READ | PROT_WRITE; + abi_long ret; + void *haddr; + + assert(flags == (MAP_PRIVATE | MAP_FIXED)); + + if (src->fd >= 0) { + return target_mmap(start, len, prot, flags, src->fd, offset); + } + + /* + * This case is for the vdso; we don't expect bad images. + * The mmap may extend beyond the end of the image, especially + * to the end of the page. Zero fill. 
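+     * The code below therefore maps anonymous writable pages, copies
+     * whatever the cache provides (zero-filling any tail past cache_size),
+     * and only then drops to the requested protection via target_mprotect()
+     * when prot is not already PROT_READ | PROT_WRITE.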
+ */ + assert(offset < src->cache_size); + + ret = target_mmap(start, len, prot_write, flags | MAP_ANON, -1, 0); + if (ret == -1) { + return ret; + } + + haddr = lock_user(VERIFY_WRITE, start, len, 0); + assert(haddr != NULL); + if (offset + len <= src->cache_size) { + memcpy(haddr, src->cache + offset, len); + } else { + size_t rest = src->cache_size - offset; + memcpy(haddr, src->cache + offset, rest); + memset(haddr + rest, 0, len - rest); + } + unlock_user(haddr, start, len); + + if (prot != prot_write) { + target_mprotect(start, len, prot); + } + + return ret; } diff --git a/linux-user/loader.h b/linux-user/loader.h index 324e5c872a..a0834290e7 100644 --- a/linux-user/loader.h +++ b/linux-user/loader.h @@ -18,6 +18,48 @@ #ifndef LINUX_USER_LOADER_H #define LINUX_USER_LOADER_H +typedef struct { + const void *cache; + unsigned int cache_size; + int fd; +} ImageSource; + +/** + * imgsrc_read: Read from ImageSource + * @dst: destination for read + * @offset: offset within file for read + * @len: size of the read + * @img: ImageSource to read from + * @errp: Error details. + * + * Read into @dst, using the cache when possible. + */ +bool imgsrc_read(void *dst, off_t offset, size_t len, + const ImageSource *img, Error **errp); + +/** + * imgsrc_read_alloc: Read from ImageSource + * @offset: offset within file for read + * @size: size of the read + * @img: ImageSource to read from + * @errp: Error details. + * + * Read into newly allocated memory, using the cache when possible. + */ +void *imgsrc_read_alloc(off_t offset, size_t len, + const ImageSource *img, Error **errp); + +/** + * imgsrc_mmap: Map from ImageSource + * + * If @src has a file descriptor, pass on to target_mmap. Otherwise, + * this is "mapping" from a host buffer, which resolves to memcpy. + * Therefore, flags must be MAP_PRIVATE | MAP_FIXED; the argument is + * retained for clarity. + */ +abi_long imgsrc_mmap(abi_ulong start, abi_ulong len, int prot, + int flags, const ImageSource *src, abi_ulong offset); + /* * Read a good amount of data initially, to hopefully get all the * program headers loaded. @@ -29,15 +71,15 @@ * used when loading binaries. 
*/ struct linux_binprm { - char buf[BPRM_BUF_SIZE] __attribute__((aligned)); - abi_ulong p; - int fd; - int e_uid, e_gid; - int argc, envc; - char **argv; - char **envp; - char *filename; /* Name of binary */ - int (*core_dump)(int, const CPUArchState *); /* coredump routine */ + char buf[BPRM_BUF_SIZE] __attribute__((aligned)); + ImageSource src; + abi_ulong p; + int e_uid, e_gid; + int argc, envc; + char **argv; + char **envp; + char *filename; /* Name of binary */ + int (*core_dump)(int, const CPUArchState *); /* coredump routine */ }; void do_init_thread(struct target_pt_regs *regs, struct image_info *infop); diff --git a/linux-user/loongarch64/Makefile.vdso b/linux-user/loongarch64/Makefile.vdso new file mode 100644 index 0000000000..369de13344 --- /dev/null +++ b/linux-user/loongarch64/Makefile.vdso @@ -0,0 +1,11 @@ +include $(BUILD_DIR)/tests/tcg/loongarch64-linux-user/config-target.mak + +SUBDIR = $(SRC_PATH)/linux-user/loongarch64 +VPATH += $(SUBDIR) + +all: $(SUBDIR)/vdso.so + +$(SUBDIR)/vdso.so: vdso.S vdso.ld vdso-asmoffset.h + $(CC) -o $@ -nostdlib -shared -fpic -Wl,-h,linux-vdso.so.1 \ + -Wl,--build-id=sha1 -Wl,--hash-style=both \ + -Wl,--no-warn-rwx-segments -Wl,-T,$(SUBDIR)/vdso.ld $< diff --git a/linux-user/loongarch64/meson.build b/linux-user/loongarch64/meson.build new file mode 100644 index 0000000000..17896535f0 --- /dev/null +++ b/linux-user/loongarch64/meson.build @@ -0,0 +1,4 @@ +vdso_inc = gen_vdso.process('vdso.so', + extra_args: ['-r', '__vdso_rt_sigreturn']) + +linux_user_ss.add(when: 'TARGET_LOONGARCH64', if_true: vdso_inc) diff --git a/linux-user/loongarch64/signal.c b/linux-user/loongarch64/signal.c index 39572c1190..afcee641a6 100644 --- a/linux-user/loongarch64/signal.c +++ b/linux-user/loongarch64/signal.c @@ -10,9 +10,9 @@ #include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" - #include "target/loongarch/internals.h" #include "target/loongarch/vec.h" +#include "vdso-asmoffset.h" /* FP context was used */ #define SC_USED_FP (1 << 0) @@ -24,6 +24,11 @@ struct target_sigcontext { uint64_t sc_extcontext[0] QEMU_ALIGNED(16); }; +QEMU_BUILD_BUG_ON(sizeof(struct target_sigcontext) != sizeof_sigcontext); +QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_pc) + != offsetof_sigcontext_pc); +QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_regs) + != offsetof_sigcontext_gr); #define FPU_CTX_MAGIC 0x46505501 #define FPU_CTX_ALIGN 8 @@ -33,6 +38,9 @@ struct target_fpu_context { uint32_t fcsr; } QEMU_ALIGNED(FPU_CTX_ALIGN); +QEMU_BUILD_BUG_ON(offsetof(struct target_fpu_context, regs) + != offsetof_fpucontext_fr); + #define CONTEXT_INFO_ALIGN 16 struct target_sctx_info { uint32_t magic; @@ -40,6 +48,8 @@ struct target_sctx_info { uint64_t padding; } QEMU_ALIGNED(CONTEXT_INFO_ALIGN); +QEMU_BUILD_BUG_ON(sizeof(struct target_sctx_info) != sizeof_sctx_info); + struct target_ucontext { abi_ulong tuc_flags; abi_ptr tuc_link; @@ -54,6 +64,11 @@ struct target_rt_sigframe { struct target_ucontext rs_uc; }; +QEMU_BUILD_BUG_ON(sizeof(struct target_rt_sigframe) + != sizeof_rt_sigframe); +QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, rs_uc.tuc_mcontext) + != offsetof_sigcontext); + /* * These two structures are not present in guest memory, are private * to the signal implementation, but are largely copied from the diff --git a/linux-user/loongarch64/vdso-asmoffset.h b/linux-user/loongarch64/vdso-asmoffset.h new file mode 100644 index 0000000000..60d113822f --- /dev/null +++ b/linux-user/loongarch64/vdso-asmoffset.h @@ -0,0 +1,8 @@ 
+#define sizeof_rt_sigframe 0x240 +#define sizeof_sigcontext 0x110 +#define sizeof_sctx_info 0x10 + +#define offsetof_sigcontext 0x130 +#define offsetof_sigcontext_pc 0 +#define offsetof_sigcontext_gr 8 +#define offsetof_fpucontext_fr 0 diff --git a/linux-user/loongarch64/vdso.S b/linux-user/loongarch64/vdso.S new file mode 100644 index 0000000000..780a5fda12 --- /dev/null +++ b/linux-user/loongarch64/vdso.S @@ -0,0 +1,130 @@ +/* + * Loongarch64 linux replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include <asm/unistd.h> +#include <asm/errno.h> +#include "vdso-asmoffset.h" + + + .text + +.macro endf name + .globl \name + .type \name, @function + .size \name, . - \name +.endm + +.macro vdso_syscall name, nr +\name: + li.w $a7, \nr + syscall 0 + jr $ra +endf \name +.endm + + .cfi_startproc + +vdso_syscall __vdso_gettimeofday, __NR_gettimeofday +vdso_syscall __vdso_clock_gettime, __NR_clock_gettime +vdso_syscall __vdso_clock_getres, __NR_clock_getres +vdso_syscall __vdso_getcpu, __NR_getcpu + + .cfi_endproc + +/* + * Start the unwind info at least one instruction before the signal + * trampoline, because the unwinder will assume we are returning + * after a call site. + */ + + .cfi_startproc simple + .cfi_signal_frame + +#define B_GR offsetof_sigcontext_gr +#define B_FR sizeof_sigcontext + sizeof_sctx_info + offsetof_fpucontext_fr + + .cfi_def_cfa 2, offsetof_sigcontext + + /* Return address */ + .cfi_return_column 64 + .cfi_offset 64, offsetof_sigcontext_pc /* pc */ + + /* Integer registers */ + .cfi_offset 1, B_GR + 1 * 8 + .cfi_offset 2, B_GR + 2 * 8 + .cfi_offset 3, B_GR + 3 * 8 + .cfi_offset 4, B_GR + 4 * 8 + .cfi_offset 5, B_GR + 5 * 8 + .cfi_offset 6, B_GR + 6 * 8 + .cfi_offset 7, B_GR + 7 * 8 + .cfi_offset 8, B_GR + 8 * 8 + .cfi_offset 9, B_GR + 9 * 8 + .cfi_offset 10, B_GR + 10 * 8 + .cfi_offset 11, B_GR + 11 * 8 + .cfi_offset 12, B_GR + 12 * 8 + .cfi_offset 13, B_GR + 13 * 8 + .cfi_offset 14, B_GR + 14 * 8 + .cfi_offset 15, B_GR + 15 * 8 + .cfi_offset 16, B_GR + 16 * 8 + .cfi_offset 17, B_GR + 17 * 8 + .cfi_offset 18, B_GR + 18 * 8 + .cfi_offset 19, B_GR + 19 * 8 + .cfi_offset 20, B_GR + 20 * 8 + .cfi_offset 21, B_GR + 21 * 8 + .cfi_offset 22, B_GR + 22 * 8 + .cfi_offset 23, B_GR + 23 * 8 + .cfi_offset 24, B_GR + 24 * 8 + .cfi_offset 25, B_GR + 25 * 8 + .cfi_offset 26, B_GR + 26 * 8 + .cfi_offset 27, B_GR + 27 * 8 + .cfi_offset 28, B_GR + 28 * 8 + .cfi_offset 29, B_GR + 29 * 8 + .cfi_offset 30, B_GR + 30 * 8 + .cfi_offset 31, B_GR + 31 * 8 + + /* Floating point registers */ + .cfi_offset 32, B_FR + 0 + .cfi_offset 33, B_FR + 1 * 8 + .cfi_offset 34, B_FR + 2 * 8 + .cfi_offset 35, B_FR + 3 * 8 + .cfi_offset 36, B_FR + 4 * 8 + .cfi_offset 37, B_FR + 5 * 8 + .cfi_offset 38, B_FR + 6 * 8 + .cfi_offset 39, B_FR + 7 * 8 + .cfi_offset 40, B_FR + 8 * 8 + .cfi_offset 41, B_FR + 9 * 8 + .cfi_offset 42, B_FR + 10 * 8 + .cfi_offset 43, B_FR + 11 * 8 + .cfi_offset 44, B_FR + 12 * 8 + .cfi_offset 45, B_FR + 13 * 8 + .cfi_offset 46, B_FR + 14 * 8 + .cfi_offset 47, B_FR + 15 * 8 + .cfi_offset 48, B_FR + 16 * 8 + .cfi_offset 49, B_FR + 17 * 8 + .cfi_offset 50, B_FR + 18 * 8 + .cfi_offset 51, B_FR + 19 * 8 + .cfi_offset 52, B_FR + 20 * 8 + .cfi_offset 53, B_FR + 21 * 8 + .cfi_offset 54, B_FR + 22 * 8 + .cfi_offset 55, B_FR + 23 * 8 + .cfi_offset 56, B_FR + 24 * 8 + .cfi_offset 57, B_FR + 25 * 8 + .cfi_offset 58, B_FR + 26 * 8 + .cfi_offset 59, B_FR + 27 * 8 + .cfi_offset 60, B_FR + 28 * 8 + .cfi_offset 61, B_FR + 29 * 8 + .cfi_offset 62, 
B_FR + 30 * 8 + .cfi_offset 63, B_FR + 31 * 8 + + nop + +__vdso_rt_sigreturn: + li.w $a7, __NR_rt_sigreturn + syscall 0 + .cfi_endproc +endf __vdso_rt_sigreturn diff --git a/linux-user/loongarch64/vdso.ld b/linux-user/loongarch64/vdso.ld new file mode 100644 index 0000000000..682446ed0c --- /dev/null +++ b/linux-user/loongarch64/vdso.ld @@ -0,0 +1,73 @@ +/* + * Linker script for linux loongarch64 replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +VERSION { + LINUX_5.10 { + global: + __vdso_getcpu; + __vdso_clock_getres; + __vdso_clock_gettime; + __vdso_gettimeofday; + __vdso_rt_sigreturn; + + local: *; + }; +} + + +PHDRS { + phdr PT_PHDR FLAGS(4) PHDRS; + load PT_LOAD FLAGS(7) FILEHDR PHDRS; + dynamic PT_DYNAMIC FLAGS(4); + eh_frame_hdr PT_GNU_EH_FRAME; + note PT_NOTE FLAGS(4); +} + +SECTIONS { + /* + * We can't prelink to any address without knowing something about + * the virtual memory space of the host, since that leaks over into + * the available memory space of the guest. + */ + . = SIZEOF_HEADERS; + + /* + * The following, including the FILEHDRS and PHDRS, are modified + * when we relocate the binary. We want them to be initially + * writable for the relocation; we'll force them read-only after. + */ + .note : { *(.note*) } :load :note + .dynamic : { *(.dynamic) } :load :dynamic + .dynsym : { *(.dynsym) } :load + /* + * There ought not be any real read-write data. + * But since we manipulated the segment layout, + * we have to put these sections somewhere. + */ + .data : { + *(.data*) + *(.sdata*) + *(.got.plt) *(.got) + *(.gnu.linkonce.d.*) + *(.bss*) + *(.dynbss*) + *(.gnu.linkonce.b.*) + } + + .rodata : { *(.rodata*) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr + .eh_frame : { *(.eh_frame) } :load + + .text : { *(.text*) } :load =0xd503201f +} diff --git a/linux-user/loongarch64/vdso.so b/linux-user/loongarch64/vdso.so new file mode 100755 index 0000000000..bfaa26f2bf --- /dev/null +++ b/linux-user/loongarch64/vdso.so Binary files differdiff --git a/linux-user/meson.build b/linux-user/meson.build index 7171dc60be..bc41e8c3bc 100644 --- a/linux-user/meson.build +++ b/linux-user/meson.build @@ -28,18 +28,25 @@ linux_user_ss.add(when: 'TARGET_HAS_BFLT', if_true: files('flatload.c')) linux_user_ss.add(when: 'TARGET_I386', if_true: files('vm86.c')) linux_user_ss.add(when: 'CONFIG_ARM_COMPATIBLE_SEMIHOSTING', if_true: files('semihost.c')) - syscall_nr_generators = {} +gen_vdso_exe = executable('gen-vdso', 'gen-vdso.c', + native: true, build_by_default: false) +gen_vdso = generator(gen_vdso_exe, output: '@BASENAME@.c.inc', + arguments: ['-o', '@OUTPUT@', '@EXTRA_ARGS@', '@INPUT@']) + +subdir('aarch64') subdir('alpha') subdir('arm') subdir('hppa') subdir('i386') +subdir('loongarch64') subdir('m68k') subdir('microblaze') subdir('mips64') subdir('mips') subdir('ppc') +subdir('riscv') subdir('s390x') subdir('sh4') subdir('sparc') diff --git a/linux-user/mmap.c b/linux-user/mmap.c index 7b44b9ff49..96c9433e27 100644 --- a/linux-user/mmap.c +++ b/linux-user/mmap.c @@ -26,6 +26,10 @@ #include "target_mman.h" #include "qemu/interval-tree.h" +#ifdef TARGET_ARM +#include "target/arm/cpu-features.h" +#endif + static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER; static __thread int mmap_lock_count; diff --git 
a/linux-user/ppc/Makefile.vdso b/linux-user/ppc/Makefile.vdso new file mode 100644 index 0000000000..3ca3c6b83e --- /dev/null +++ b/linux-user/ppc/Makefile.vdso @@ -0,0 +1,20 @@ +include $(BUILD_DIR)/tests/tcg/ppc64-linux-user/config-target.mak + +SUBDIR = $(SRC_PATH)/linux-user/ppc +VPATH += $(SUBDIR) + +all: $(SUBDIR)/vdso-32.so $(SUBDIR)/vdso-64.so $(SUBDIR)/vdso-64le.so + +LDFLAGS32 = -nostdlib -shared -Wl,-T,$(SUBDIR)/vdso-32.ld \ + -Wl,-h,linux-vdso32.so.1 -Wl,--hash-style=both -Wl,--build-id=sha1 +LDFLAGS64 = -nostdlib -shared -Wl,-T,$(SUBDIR)/vdso-64.ld \ + -Wl,-h,linux-vdso64.so.1 -Wl,--hash-style=both -Wl,--build-id=sha1 + +$(SUBDIR)/vdso-32.so: vdso.S vdso-32.ld vdso-asmoffset.h + $(CC) -o $@ $(LDFLAGS32) -m32 $< + +$(SUBDIR)/vdso-64.so: vdso.S vdso-64.ld vdso-asmoffset.h + $(CC) -o $@ $(LDFLAGS64) -mbig-endian $< + +$(SUBDIR)/vdso-64le.so: vdso.S vdso-64.ld vdso-asmoffset.h + $(CC) -o $@ $(LDFLAGS64) -mlittle-endian $< diff --git a/linux-user/ppc/meson.build b/linux-user/ppc/meson.build index 19fead7bc8..80cacae396 100644 --- a/linux-user/ppc/meson.build +++ b/linux-user/ppc/meson.build @@ -3,3 +3,15 @@ syscall_nr_generators += { arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], output: '@BASENAME@_nr.h') } + +vdso_32_inc = gen_vdso.process('vdso-32.so', extra_args: [ + '-s', '__kernel_sigtramp32', + '-r', '__kernel_sigtramp_rt32' + ]) +linux_user_ss.add(when: 'TARGET_PPC', if_true: vdso_32_inc) + +vdso_64_inc = gen_vdso.process('vdso-64.so', + extra_args: ['-r', '__kernel_sigtramp_rt64']) +vdso_64le_inc = gen_vdso.process('vdso-64le.so', + extra_args: ['-r', '__kernel_sigtramp_rt64']) +linux_user_ss.add(when: 'TARGET_PPC64', if_true: [vdso_64_inc, vdso_64le_inc]) diff --git a/linux-user/ppc/signal.c b/linux-user/ppc/signal.c index a616f20efb..7e7302823b 100644 --- a/linux-user/ppc/signal.c +++ b/linux-user/ppc/signal.c @@ -21,14 +21,7 @@ #include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" - -/* Size of dummy stack frame allocated when calling signal handler. - See arch/powerpc/include/asm/ptrace.h. */ -#if defined(TARGET_PPC64) -#define SIGNAL_FRAMESIZE 128 -#else -#define SIGNAL_FRAMESIZE 64 -#endif +#include "vdso-asmoffset.h" /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC; on 64-bit PPC, sigcontext and mcontext are one and the same. */ @@ -73,6 +66,16 @@ struct target_mcontext { #endif }; +QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, mc_fregs) + != offsetof_mcontext_fregs); +#if defined(TARGET_PPC64) +QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, v_regs) + != offsetof_mcontext_vregs_ptr); +#else +QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, mc_vregs) + != offsetof_mcontext_vregs); +#endif + /* See arch/powerpc/include/asm/sigcontext.h. */ struct target_sigcontext { target_ulong _unused[4]; @@ -161,6 +164,7 @@ struct target_ucontext { #endif }; +#if !defined(TARGET_PPC64) /* See arch/powerpc/kernel/signal_32.c. 
*/ struct target_sigframe { struct target_sigcontext sctx; @@ -168,6 +172,10 @@ struct target_sigframe { int32_t abigap[56]; }; +QEMU_BUILD_BUG_ON(offsetof(struct target_sigframe, mctx) + != offsetof_sigframe_mcontext); +#endif + #if defined(TARGET_PPC64) #define TARGET_TRAMP_SIZE 6 @@ -184,6 +192,10 @@ struct target_rt_sigframe { char abigap[288]; } __attribute__((aligned(16))); +QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, + uc.tuc_sigcontext.mcontext) + != offsetof_rt_sigframe_mcontext); + #else struct target_rt_sigframe { @@ -192,6 +204,9 @@ struct target_rt_sigframe { int32_t abigap[56]; }; +QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, uc.tuc_mcontext) + != offsetof_rt_sigframe_mcontext); + #endif #if defined(TARGET_PPC64) diff --git a/linux-user/ppc/vdso-32.ld b/linux-user/ppc/vdso-32.ld new file mode 100644 index 0000000000..6962696540 --- /dev/null +++ b/linux-user/ppc/vdso-32.ld @@ -0,0 +1,70 @@ +/* + * Linker script for linux powerpc64 replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +VERSION { + LINUX_2.6.15 { + global: + __kernel_gettimeofday; + __kernel_clock_gettime; + __kernel_clock_gettime64; + __kernel_clock_getres; + __kernel_time; + __kernel_sync_dicache; + __kernel_sigtramp32; + __kernel_sigtramp_rt32; + __kernel_getcpu; + local: *; + }; +} + +PHDRS { + phdr PT_PHDR FLAGS(4) PHDRS; + load PT_LOAD FLAGS(7) FILEHDR PHDRS; /* FLAGS=RWX */ + dynamic PT_DYNAMIC FLAGS(4); + eh_frame_hdr PT_GNU_EH_FRAME; + note PT_NOTE FLAGS(4); +} + +SECTIONS { + . = SIZEOF_HEADERS; + + /* + * The following, including the FILEHDRS and PHDRS, are modified + * when we relocate the binary. We want them to be initially + * writable for the relocation; we'll force them read-only after. + */ + .note : { *(.note*) } :load :note + .dynamic : { *(.dynamic) } :load :dynamic + .dynsym : { *(.dynsym) } :load + .data : { + /* + * There ought not be any real read-write data. + * But since we manipulated the segment layout, + * we have to put these sections somewhere. + */ + *(.data*) + *(.sdata*) + *(.got.plt) *(.got) + *(.gnu.linkonce.d.*) + *(.bss*) + *(.dynbss*) + *(.gnu.linkonce.b.*) + } + + .rodata : { *(.rodata*) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr + .eh_frame : { *(.eh_frame) } :load + + .text : { *(.text*) } :load +} diff --git a/linux-user/ppc/vdso-32.so b/linux-user/ppc/vdso-32.so new file mode 100755 index 0000000000..b19baafb0d --- /dev/null +++ b/linux-user/ppc/vdso-32.so Binary files differdiff --git a/linux-user/ppc/vdso-64.ld b/linux-user/ppc/vdso-64.ld new file mode 100644 index 0000000000..a55c65ed54 --- /dev/null +++ b/linux-user/ppc/vdso-64.ld @@ -0,0 +1,68 @@ +/* + * Linker script for linux powerpc64 replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +VERSION { + LINUX_2.6.15 { + global: + __kernel_gettimeofday; + __kernel_clock_gettime; + __kernel_clock_getres; + __kernel_sync_dicache; + __kernel_sigtramp_rt64; + __kernel_getcpu; + __kernel_time; + local: *; + }; +} + +PHDRS { + phdr PT_PHDR FLAGS(4) PHDRS; + load PT_LOAD FLAGS(7) FILEHDR PHDRS; /* FLAGS=RWX */ + dynamic PT_DYNAMIC FLAGS(4); + eh_frame_hdr PT_GNU_EH_FRAME; + note PT_NOTE FLAGS(4); +} + +SECTIONS { + . 
= SIZEOF_HEADERS; + + /* + * The following, including the FILEHDRS and PHDRS, are modified + * when we relocate the binary. We want them to be initially + * writable for the relocation; we'll force them read-only after. + */ + .note : { *(.note*) } :load :note + .dynamic : { *(.dynamic) } :load :dynamic + .dynsym : { *(.dynsym) } :load + .data : { + /* + * There ought not be any real read-write data. + * But since we manipulated the segment layout, + * we have to put these sections somewhere. + */ + *(.data*) + *(.sdata*) + *(.got.plt) *(.got) + *(.gnu.linkonce.d.*) + *(.bss*) + *(.dynbss*) + *(.gnu.linkonce.b.*) + } + + .rodata : { *(.rodata*) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr + .eh_frame : { *(.eh_frame) } :load + + .text : { *(.text*) } :load +} diff --git a/linux-user/ppc/vdso-64.so b/linux-user/ppc/vdso-64.so new file mode 100755 index 0000000000..913c831b38 --- /dev/null +++ b/linux-user/ppc/vdso-64.so Binary files differdiff --git a/linux-user/ppc/vdso-64le.so b/linux-user/ppc/vdso-64le.so new file mode 100755 index 0000000000..258a03b807 --- /dev/null +++ b/linux-user/ppc/vdso-64le.so Binary files differdiff --git a/linux-user/ppc/vdso-asmoffset.h b/linux-user/ppc/vdso-asmoffset.h new file mode 100644 index 0000000000..6844c8c81c --- /dev/null +++ b/linux-user/ppc/vdso-asmoffset.h @@ -0,0 +1,20 @@ +/* + * Size of dummy stack frame allocated when calling signal handler. + * See arch/powerpc/include/asm/ptrace.h. + */ +#ifdef TARGET_ABI32 +# define SIGNAL_FRAMESIZE 64 +#else +# define SIGNAL_FRAMESIZE 128 +#endif + +#ifdef TARGET_ABI32 +# define offsetof_sigframe_mcontext 0x20 +# define offsetof_rt_sigframe_mcontext 0x140 +# define offsetof_mcontext_fregs 0xc0 +# define offsetof_mcontext_vregs 0x1d0 +#else +# define offsetof_rt_sigframe_mcontext 0xe8 +# define offsetof_mcontext_fregs 0x180 +# define offsetof_mcontext_vregs_ptr 0x288 +#endif diff --git a/linux-user/ppc/vdso.S b/linux-user/ppc/vdso.S new file mode 100644 index 0000000000..689010db13 --- /dev/null +++ b/linux-user/ppc/vdso.S @@ -0,0 +1,239 @@ +/* + * PowerPC linux replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include <asm/unistd.h> +#include <asm/errno.h> + +#ifndef _ARCH_PPC64 +# define TARGET_ABI32 +#endif +#include "vdso-asmoffset.h" + + + .text + +.macro endf name + .globl \name + .size \name, .-\name + /* For PPC64, functions have special linkage; we export pointers. */ +#ifndef _ARCH_PPC64 + .type \name, @function +#endif +.endm + +.macro raw_syscall nr + addi 0, 0, \nr + sc +.endm + +.macro vdso_syscall name, nr +\name: + raw_syscall \nr + blr +endf \name +.endm + + .cfi_startproc + +vdso_syscall __kernel_gettimeofday, __NR_gettimeofday +vdso_syscall __kernel_clock_gettime, __NR_clock_gettime +vdso_syscall __kernel_clock_getres, __NR_clock_getres +vdso_syscall __kernel_getcpu, __NR_getcpu +vdso_syscall __kernel_time, __NR_time + +#ifdef __NR_clock_gettime64 +vdso_syscall __kernel_clock_gettime64, __NR_clock_gettime64 +#endif + +__kernel_sync_dicache: + /* qemu does not need to flush caches */ + blr +endf __kernel_sync_dicache + + .cfi_endproc + +/* + * TODO: __kernel_get_tbfreq + * This is probably a constant for QEMU. 
+ */ + +/* + * Start the unwind info at least one instruction before the signal + * trampoline, because the unwinder will assume we are returning + * after a call site. + */ + + .cfi_startproc simple + .cfi_signal_frame + +#ifdef _ARCH_PPC64 +# define __kernel_sigtramp_rt __kernel_sigtramp_rt64 +# define sizeof_reg 8 +#else +# define __kernel_sigtramp_rt __kernel_sigtramp_rt32 +# define sizeof_reg 4 +#endif +#define sizeof_freg 8 +#define sizeof_vreg 16 + + .cfi_def_cfa 1, SIGNAL_FRAMESIZE + offsetof_rt_sigframe_mcontext + + /* Return address */ + .cfi_return_column 67 + .cfi_offset 67, 32 * sizeof_reg /* nip */ + + /* Integer registers */ + .cfi_offset 0, 0 * sizeof_reg + .cfi_offset 1, 1 * sizeof_reg + .cfi_offset 2, 2 * sizeof_reg + .cfi_offset 3, 3 * sizeof_reg + .cfi_offset 4, 4 * sizeof_reg + .cfi_offset 5, 5 * sizeof_reg + .cfi_offset 6, 6 * sizeof_reg + .cfi_offset 7, 7 * sizeof_reg + .cfi_offset 8, 8 * sizeof_reg + .cfi_offset 9, 9 * sizeof_reg + .cfi_offset 10, 10 * sizeof_reg + .cfi_offset 11, 11 * sizeof_reg + .cfi_offset 12, 12 * sizeof_reg + .cfi_offset 13, 13 * sizeof_reg + .cfi_offset 14, 14 * sizeof_reg + .cfi_offset 15, 15 * sizeof_reg + .cfi_offset 16, 16 * sizeof_reg + .cfi_offset 17, 17 * sizeof_reg + .cfi_offset 18, 18 * sizeof_reg + .cfi_offset 19, 19 * sizeof_reg + .cfi_offset 20, 20 * sizeof_reg + .cfi_offset 21, 21 * sizeof_reg + .cfi_offset 22, 22 * sizeof_reg + .cfi_offset 23, 23 * sizeof_reg + .cfi_offset 24, 24 * sizeof_reg + .cfi_offset 25, 25 * sizeof_reg + .cfi_offset 26, 26 * sizeof_reg + .cfi_offset 27, 27 * sizeof_reg + .cfi_offset 28, 28 * sizeof_reg + .cfi_offset 29, 29 * sizeof_reg + .cfi_offset 30, 30 * sizeof_reg + .cfi_offset 31, 31 * sizeof_reg + .cfi_offset 65, 36 * sizeof_reg /* lr */ + .cfi_offset 70, 38 * sizeof_reg /* ccr */ + + /* Floating point registers */ + .cfi_offset 32, offsetof_mcontext_fregs + .cfi_offset 33, offsetof_mcontext_fregs + 1 * sizeof_freg + .cfi_offset 34, offsetof_mcontext_fregs + 2 * sizeof_freg + .cfi_offset 35, offsetof_mcontext_fregs + 3 * sizeof_freg + .cfi_offset 36, offsetof_mcontext_fregs + 4 * sizeof_freg + .cfi_offset 37, offsetof_mcontext_fregs + 5 * sizeof_freg + .cfi_offset 38, offsetof_mcontext_fregs + 6 * sizeof_freg + .cfi_offset 39, offsetof_mcontext_fregs + 7 * sizeof_freg + .cfi_offset 40, offsetof_mcontext_fregs + 8 * sizeof_freg + .cfi_offset 41, offsetof_mcontext_fregs + 9 * sizeof_freg + .cfi_offset 42, offsetof_mcontext_fregs + 10 * sizeof_freg + .cfi_offset 43, offsetof_mcontext_fregs + 11 * sizeof_freg + .cfi_offset 44, offsetof_mcontext_fregs + 12 * sizeof_freg + .cfi_offset 45, offsetof_mcontext_fregs + 13 * sizeof_freg + .cfi_offset 46, offsetof_mcontext_fregs + 14 * sizeof_freg + .cfi_offset 47, offsetof_mcontext_fregs + 15 * sizeof_freg + .cfi_offset 48, offsetof_mcontext_fregs + 16 * sizeof_freg + .cfi_offset 49, offsetof_mcontext_fregs + 17 * sizeof_freg + .cfi_offset 50, offsetof_mcontext_fregs + 18 * sizeof_freg + .cfi_offset 51, offsetof_mcontext_fregs + 19 * sizeof_freg + .cfi_offset 52, offsetof_mcontext_fregs + 20 * sizeof_freg + .cfi_offset 53, offsetof_mcontext_fregs + 21 * sizeof_freg + .cfi_offset 54, offsetof_mcontext_fregs + 22 * sizeof_freg + .cfi_offset 55, offsetof_mcontext_fregs + 23 * sizeof_freg + .cfi_offset 56, offsetof_mcontext_fregs + 24 * sizeof_freg + .cfi_offset 57, offsetof_mcontext_fregs + 25 * sizeof_freg + .cfi_offset 58, offsetof_mcontext_fregs + 26 * sizeof_freg + .cfi_offset 59, offsetof_mcontext_fregs + 27 * sizeof_freg + .cfi_offset 60, 
offsetof_mcontext_fregs + 28 * sizeof_freg + .cfi_offset 61, offsetof_mcontext_fregs + 29 * sizeof_freg + .cfi_offset 62, offsetof_mcontext_fregs + 30 * sizeof_freg + .cfi_offset 63, offsetof_mcontext_fregs + 31 * sizeof_freg + + /* + * Unlike the kernel, unconditionally represent the Altivec/VSX regs. + * The space within the stack frame is always available, and most of + * our supported processors have them enabled. The only complication + * for PPC64 is the misalignment, so that we have to use indirection. + */ +.macro save_vreg_ofs reg, ofs +#ifdef _ARCH_PPC64 + /* + * vreg = *(cfa + offsetof(v_regs)) + ofs + * + * The CFA is input to the expression on the stack, so: + * DW_CFA_expression reg, length (7), + * DW_OP_plus_uconst (0x23), vreg_ptr, DW_OP_deref (0x06), + * DW_OP_plus_uconst (0x23), ofs + */ + .cfi_escape 0x10, 77 + \reg, 7, 0x23, (offsetof_mcontext_vregs_ptr & 0x7f) + 0x80, offsetof_mcontext_vregs_ptr >> 7, 0x06, 0x23, (\ofs & 0x7f) | 0x80, \ofs >> 7 +#else + .cfi_offset 77 + \reg, offsetof_mcontext_vregs + \ofs +#endif +.endm + +.macro save_vreg reg + save_vreg_ofs \reg, (\reg * sizeof_vreg) +.endm + + save_vreg 0 + save_vreg 1 + save_vreg 2 + save_vreg 3 + save_vreg 4 + save_vreg 5 + save_vreg 6 + save_vreg 7 + save_vreg 8 + save_vreg 9 + save_vreg 10 + save_vreg 11 + save_vreg 12 + save_vreg 13 + save_vreg 14 + save_vreg 15 + save_vreg 16 + save_vreg 17 + save_vreg 18 + save_vreg 19 + save_vreg 20 + save_vreg 21 + save_vreg 22 + save_vreg 23 + save_vreg 24 + save_vreg 25 + save_vreg 26 + save_vreg 27 + save_vreg 28 + save_vreg 29 + save_vreg 30 + save_vreg 31 + save_vreg 32 + save_vreg_ofs 33, (32 * sizeof_vreg + 12) + + nop + +__kernel_sigtramp_rt: + raw_syscall __NR_rt_sigreturn +endf __kernel_sigtramp_rt + +#ifndef _ARCH_PPC64 + /* + * The non-rt sigreturn has the same layout at a different offset. + * Move the CFA and leave all othe other descriptions the same. 
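+ * (Both frame layouts place the mcontext at a fixed offset from the stack
+ * pointer; only that offset differs, offsetof_sigframe_mcontext here versus
+ * offsetof_rt_sigframe_mcontext above, so redefining the CFA is sufficient.)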
+ */ + .cfi_def_cfa 1, SIGNAL_FRAMESIZE + offsetof_sigframe_mcontext + nop +__kernel_sigtramp32: + raw_syscall __NR_sigreturn +endf __kernel_sigtramp32 +#endif + + .cfi_endproc diff --git a/linux-user/qemu.h b/linux-user/qemu.h index 12f638336a..4de9ec783f 100644 --- a/linux-user/qemu.h +++ b/linux-user/qemu.h @@ -32,6 +32,7 @@ struct image_info { abi_ulong brk; abi_ulong start_stack; abi_ulong stack_limit; + abi_ulong vdso; abi_ulong entry; abi_ulong code_offset; abi_ulong data_offset; diff --git a/linux-user/riscv/Makefile.vdso b/linux-user/riscv/Makefile.vdso new file mode 100644 index 0000000000..2c257dbfda --- /dev/null +++ b/linux-user/riscv/Makefile.vdso @@ -0,0 +1,15 @@ +include $(BUILD_DIR)/tests/tcg/riscv64-linux-user/config-target.mak + +SUBDIR = $(SRC_PATH)/linux-user/riscv +VPATH += $(SUBDIR) + +all: $(SUBDIR)/vdso-32.so $(SUBDIR)/vdso-64.so + +LDFLAGS = -nostdlib -shared -fpic -Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \ + -Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld + +$(SUBDIR)/vdso-32.so: vdso.S vdso.ld vdso-asmoffset.h + $(CC) -o $@ $(LDFLAGS) -mabi=ilp32d -march=rv32g $< + +$(SUBDIR)/vdso-64.so: vdso.S vdso.ld vdso-asmoffset.h + $(CC) -o $@ $(LDFLAGS) -mabi=lp64d -march=rv64g $< diff --git a/linux-user/riscv/meson.build b/linux-user/riscv/meson.build new file mode 100644 index 0000000000..beb989a7ca --- /dev/null +++ b/linux-user/riscv/meson.build @@ -0,0 +1,7 @@ +vdso_32_inc = gen_vdso.process('vdso-32.so', + extra_args: ['-r', '__vdso_rt_sigreturn']) +vdso_64_inc = gen_vdso.process('vdso-64.so', + extra_args: ['-r', '__vdso_rt_sigreturn']) + +linux_user_ss.add(when: 'TARGET_RISCV32', if_true: vdso_32_inc) +linux_user_ss.add(when: 'TARGET_RISCV64', if_true: vdso_64_inc) diff --git a/linux-user/riscv/signal.c b/linux-user/riscv/signal.c index f989f7f51f..941eadce87 100644 --- a/linux-user/riscv/signal.c +++ b/linux-user/riscv/signal.c @@ -21,6 +21,7 @@ #include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" +#include "vdso-asmoffset.h" /* Signal handler invocation must be transparent for the code being interrupted. Complete CPU (hart) state is saved on entry and restored @@ -37,6 +38,8 @@ struct target_sigcontext { uint32_t fcsr; }; /* cf. 
riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */ +QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, fpr) != offsetof_freg0); + struct target_ucontext { abi_ulong uc_flags; abi_ptr uc_link; @@ -51,6 +54,11 @@ struct target_rt_sigframe { struct target_ucontext uc; }; +QEMU_BUILD_BUG_ON(sizeof(struct target_rt_sigframe) + != sizeof_rt_sigframe); +QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, uc.uc_mcontext) + != offsetof_uc_mcontext); + static abi_ulong get_sigframe(struct target_sigaction *ka, CPURISCVState *regs, size_t framesize) { diff --git a/linux-user/riscv/vdso-32.so b/linux-user/riscv/vdso-32.so new file mode 100755 index 0000000000..1ad1e5cbbb --- /dev/null +++ b/linux-user/riscv/vdso-32.so Binary files differdiff --git a/linux-user/riscv/vdso-64.so b/linux-user/riscv/vdso-64.so new file mode 100755 index 0000000000..83992bebe6 --- /dev/null +++ b/linux-user/riscv/vdso-64.so Binary files differdiff --git a/linux-user/riscv/vdso-asmoffset.h b/linux-user/riscv/vdso-asmoffset.h new file mode 100644 index 0000000000..123902ef61 --- /dev/null +++ b/linux-user/riscv/vdso-asmoffset.h @@ -0,0 +1,9 @@ +#ifdef TARGET_ABI32 +# define sizeof_rt_sigframe 0x2b0 +# define offsetof_uc_mcontext 0x120 +# define offsetof_freg0 0x80 +#else +# define sizeof_rt_sigframe 0x340 +# define offsetof_uc_mcontext 0x130 +# define offsetof_freg0 0x100 +#endif diff --git a/linux-user/riscv/vdso.S b/linux-user/riscv/vdso.S new file mode 100644 index 0000000000..a86d8fc488 --- /dev/null +++ b/linux-user/riscv/vdso.S @@ -0,0 +1,187 @@ +/* + * RISC-V linux replacement vdso. + * + * Copyright 2021 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include <asm/unistd.h> +#include <asm/errno.h> + +#if __riscv_xlen == 32 +# define TARGET_ABI32 +#endif +#include "vdso-asmoffset.h" + + .text + +.macro endf name + .globl \name + .type \name, @function + .size \name, . - \name +.endm + +.macro raw_syscall nr + li a7, \nr + ecall +.endm + +.macro vdso_syscall name, nr +\name: + raw_syscall \nr + ret +endf \name +.endm + +__vdso_gettimeofday: + .cfi_startproc +#ifdef __NR_gettimeofday + raw_syscall __NR_gettimeofday + ret +#else + /* No gettimeofday, fall back to clock_gettime64. */ + beq a1, zero, 1f + sw zero, 0(a1) /* tz->tz_minuteswest = 0 */ + sw zero, 4(a1) /* tz->tz_dsttime = 0 */ +1: addi sp, sp, -32 + .cfi_adjust_cfa_offset 32 + sw a0, 16(sp) /* save tv */ + mv a0, sp + raw_syscall __NR_clock_gettime64 + lw t0, 0(sp) /* timespec.tv_sec.low */ + lw t1, 4(sp) /* timespec.tv_sec.high */ + lw t2, 8(sp) /* timespec.tv_nsec.low */ + lw a1, 16(sp) /* restore tv */ + addi sp, sp, 32 + .cfi_adjust_cfa_offset -32 + bne a0, zero, 9f /* syscall error? */ + li a0, -EOVERFLOW + bne t1, zero, 9f /* y2038? 
*/ + li a0, 0 + li t3, 1000 + divu t2, t2, t3 /* nsec -> usec */ + sw t0, 0(a1) /* tz->tv_sec */ + sw t2, 4(a1) /* tz->tv_usec */ +9: ret +#endif + .cfi_endproc +endf __vdso_gettimeofday + + .cfi_startproc + +#ifdef __NR_clock_gettime +vdso_syscall __vdso_clock_gettime, __NR_clock_gettime +#else +vdso_syscall __vdso_clock_gettime, __NR_clock_gettime64 +#endif + +#ifdef __NR_clock_getres +vdso_syscall __vdso_clock_getres, __NR_clock_getres +#else +vdso_syscall __vdso_clock_getres, __NR_clock_getres_time64 +#endif + +vdso_syscall __vdso_getcpu, __NR_getcpu + +__vdso_flush_icache: + /* qemu does not need to flush the icache */ + li a0, 0 + ret +endf __vdso_flush_icache + + .cfi_endproc + +/* + * Start the unwind info at least one instruction before the signal + * trampoline, because the unwinder will assume we are returning + * after a call site. + */ + + .cfi_startproc simple + .cfi_signal_frame + +#define sizeof_reg (__riscv_xlen / 4) +#define sizeof_freg 8 +#define B_GR (offsetof_uc_mcontext - sizeof_rt_sigframe) +#define B_FR (offsetof_uc_mcontext - sizeof_rt_sigframe + offsetof_freg0) + + .cfi_def_cfa 2, sizeof_rt_sigframe + + /* Return address */ + .cfi_return_column 64 + .cfi_offset 64, B_GR + 0 /* pc */ + + /* Integer registers */ + .cfi_offset 1, B_GR + 1 * sizeof_reg /* r1 (ra) */ + .cfi_offset 2, B_GR + 2 * sizeof_reg /* r2 (sp) */ + .cfi_offset 3, B_GR + 3 * sizeof_reg + .cfi_offset 4, B_GR + 4 * sizeof_reg + .cfi_offset 5, B_GR + 5 * sizeof_reg + .cfi_offset 6, B_GR + 6 * sizeof_reg + .cfi_offset 7, B_GR + 7 * sizeof_reg + .cfi_offset 8, B_GR + 8 * sizeof_reg + .cfi_offset 9, B_GR + 9 * sizeof_reg + .cfi_offset 10, B_GR + 10 * sizeof_reg + .cfi_offset 11, B_GR + 11 * sizeof_reg + .cfi_offset 12, B_GR + 12 * sizeof_reg + .cfi_offset 13, B_GR + 13 * sizeof_reg + .cfi_offset 14, B_GR + 14 * sizeof_reg + .cfi_offset 15, B_GR + 15 * sizeof_reg + .cfi_offset 16, B_GR + 16 * sizeof_reg + .cfi_offset 17, B_GR + 17 * sizeof_reg + .cfi_offset 18, B_GR + 18 * sizeof_reg + .cfi_offset 19, B_GR + 19 * sizeof_reg + .cfi_offset 20, B_GR + 20 * sizeof_reg + .cfi_offset 21, B_GR + 21 * sizeof_reg + .cfi_offset 22, B_GR + 22 * sizeof_reg + .cfi_offset 23, B_GR + 23 * sizeof_reg + .cfi_offset 24, B_GR + 24 * sizeof_reg + .cfi_offset 25, B_GR + 25 * sizeof_reg + .cfi_offset 26, B_GR + 26 * sizeof_reg + .cfi_offset 27, B_GR + 27 * sizeof_reg + .cfi_offset 28, B_GR + 28 * sizeof_reg + .cfi_offset 29, B_GR + 29 * sizeof_reg + .cfi_offset 30, B_GR + 30 * sizeof_reg + .cfi_offset 31, B_GR + 31 * sizeof_reg /* r31 */ + + .cfi_offset 32, B_FR + 0 /* f0 */ + .cfi_offset 33, B_FR + 1 * sizeof_freg /* f1 */ + .cfi_offset 34, B_FR + 2 * sizeof_freg + .cfi_offset 35, B_FR + 3 * sizeof_freg + .cfi_offset 36, B_FR + 4 * sizeof_freg + .cfi_offset 37, B_FR + 5 * sizeof_freg + .cfi_offset 38, B_FR + 6 * sizeof_freg + .cfi_offset 39, B_FR + 7 * sizeof_freg + .cfi_offset 40, B_FR + 8 * sizeof_freg + .cfi_offset 41, B_FR + 9 * sizeof_freg + .cfi_offset 42, B_FR + 10 * sizeof_freg + .cfi_offset 43, B_FR + 11 * sizeof_freg + .cfi_offset 44, B_FR + 12 * sizeof_freg + .cfi_offset 45, B_FR + 13 * sizeof_freg + .cfi_offset 46, B_FR + 14 * sizeof_freg + .cfi_offset 47, B_FR + 15 * sizeof_freg + .cfi_offset 48, B_FR + 16 * sizeof_freg + .cfi_offset 49, B_FR + 17 * sizeof_freg + .cfi_offset 50, B_FR + 18 * sizeof_freg + .cfi_offset 51, B_FR + 19 * sizeof_freg + .cfi_offset 52, B_FR + 20 * sizeof_freg + .cfi_offset 53, B_FR + 21 * sizeof_freg + .cfi_offset 54, B_FR + 22 * sizeof_freg + .cfi_offset 55, B_FR + 23 * sizeof_freg + 
.cfi_offset 56, B_FR + 24 * sizeof_freg + .cfi_offset 57, B_FR + 25 * sizeof_freg + .cfi_offset 58, B_FR + 26 * sizeof_freg + .cfi_offset 59, B_FR + 27 * sizeof_freg + .cfi_offset 60, B_FR + 28 * sizeof_freg + .cfi_offset 61, B_FR + 29 * sizeof_freg + .cfi_offset 62, B_FR + 30 * sizeof_freg + .cfi_offset 63, B_FR + 31 * sizeof_freg /* f31 */ + + nop + +__vdso_rt_sigreturn: + raw_syscall __NR_rt_sigreturn +endf __vdso_rt_sigreturn + + .cfi_endproc diff --git a/linux-user/riscv/vdso.ld b/linux-user/riscv/vdso.ld new file mode 100644 index 0000000000..aabe2b0ab3 --- /dev/null +++ b/linux-user/riscv/vdso.ld @@ -0,0 +1,74 @@ +/* + * Linker script for linux riscv replacement vdso. + * + * Copyright 2021 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +VERSION { + LINUX_4.15 { + global: + __vdso_rt_sigreturn; + __vdso_gettimeofday; + __vdso_clock_gettime; + __vdso_clock_getres; + __vdso_getcpu; + __vdso_flush_icache; + + local: *; + }; +} + + +PHDRS { + phdr PT_PHDR FLAGS(4) PHDRS; + load PT_LOAD FLAGS(7) FILEHDR PHDRS; + dynamic PT_DYNAMIC FLAGS(4); + eh_frame_hdr PT_GNU_EH_FRAME; + note PT_NOTE FLAGS(4); +} + +SECTIONS { + /* + * We can't prelink to any address without knowing something about + * the virtual memory space of the host, since that leaks over into + * the available memory space of the guest. + */ + . = SIZEOF_HEADERS; + + /* + * The following, including the FILEHDRS and PHDRS, are modified + * when we relocate the binary. We want them to be initially + * writable for the relocation; we'll force them read-only after. + */ + .note : { *(.note*) } :load :note + .dynamic : { *(.dynamic) } :load :dynamic + .dynsym : { *(.dynsym) } :load + /* + * There ought not be any real read-write data. + * But since we manipulated the segment layout, + * we have to put these sections somewhere. 
+ */ + .data : { + *(.data*) + *(.sdata*) + *(.got.plt) *(.got) + *(.gnu.linkonce.d.*) + *(.bss*) + *(.dynbss*) + *(.gnu.linkonce.b.*) + } + + .rodata : { *(.rodata*) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr + .eh_frame : { *(.eh_frame) } :load + + .text : { *(.text*) } :load =0xd503201f +} diff --git a/linux-user/s390x/Makefile.vdso b/linux-user/s390x/Makefile.vdso new file mode 100644 index 0000000000..e82bf9e29f --- /dev/null +++ b/linux-user/s390x/Makefile.vdso @@ -0,0 +1,11 @@ +include $(BUILD_DIR)/tests/tcg/s390x-linux-user/config-target.mak + +SUBDIR = $(SRC_PATH)/linux-user/s390x +VPATH += $(SUBDIR) + +all: $(SUBDIR)/vdso.so + +$(SUBDIR)/vdso.so: vdso.S vdso.ld vdso-asmoffset.h + $(CC) -o $@ -nostdlib -shared -Wl,-h,linux-vdso64.so.1 \ + -Wl,--build-id=sha1 -Wl,--hash-style=both \ + -Wl,-T,$(SUBDIR)/vdso.ld $< diff --git a/linux-user/s390x/meson.build b/linux-user/s390x/meson.build index 0781ccea1d..a7a25ed9ce 100644 --- a/linux-user/s390x/meson.build +++ b/linux-user/s390x/meson.build @@ -3,3 +3,9 @@ syscall_nr_generators += { arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], output: '@BASENAME@_nr.h') } + +vdso_inc = gen_vdso.process('vdso.so', extra_args: [ + '-s', '__kernel_sigreturn', + '-r', '__kernel_rt_sigreturn' + ]) +linux_user_ss.add(when: 'TARGET_S390X', if_true: vdso_inc) diff --git a/linux-user/s390x/signal.c b/linux-user/s390x/signal.c index f72165576f..b40f738a70 100644 --- a/linux-user/s390x/signal.c +++ b/linux-user/s390x/signal.c @@ -21,13 +21,12 @@ #include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" +#include "vdso-asmoffset.h" #define __NUM_GPRS 16 #define __NUM_FPRS 16 #define __NUM_ACRS 16 -#define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ - #define _SIGCONTEXT_NSIG 64 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) @@ -63,7 +62,7 @@ typedef struct { } target_sigcontext; typedef struct { - uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; + uint8_t callee_used_stack[STACK_FRAME_OVERHEAD]; target_sigcontext sc; target_sigregs sregs; int signo; @@ -83,7 +82,7 @@ struct target_ucontext { }; typedef struct { - uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; + uint8_t callee_used_stack[STACK_FRAME_OVERHEAD]; /* * This field is no longer initialized by the kernel, but it's still a part * of the ABI. diff --git a/linux-user/s390x/vdso-asmoffset.h b/linux-user/s390x/vdso-asmoffset.h new file mode 100644 index 0000000000..27a062d6c1 --- /dev/null +++ b/linux-user/s390x/vdso-asmoffset.h @@ -0,0 +1,2 @@ +/* Minimum stack frame size */ +#define STACK_FRAME_OVERHEAD 160 diff --git a/linux-user/s390x/vdso.S b/linux-user/s390x/vdso.S new file mode 100644 index 0000000000..3332492477 --- /dev/null +++ b/linux-user/s390x/vdso.S @@ -0,0 +1,61 @@ +/* + * s390x linux replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include <asm/unistd.h> +#include "vdso-asmoffset.h" + +.macro endf name + .globl \name + .type \name, @function + .size \name, . 
- \name +.endm + +.macro raw_syscall n + .ifne \n < 0x100 + svc \n + .else + lghi %r1, \n + svc 0 + .endif +.endm + +.macro vdso_syscall name, nr +\name: + .cfi_startproc + aghi %r15, -(STACK_FRAME_OVERHEAD + 16) + .cfi_adjust_cfa_offset STACK_FRAME_OVERHEAD + 16 + stg %r14, STACK_FRAME_OVERHEAD(%r15) + .cfi_rel_offset %r14, STACK_FRAME_OVERHEAD + raw_syscall \nr + lg %r14, STACK_FRAME_OVERHEAD(%r15) + aghi %r15, STACK_FRAME_OVERHEAD + 16 + .cfi_restore %r14 + .cfi_adjust_cfa_offset -(STACK_FRAME_OVERHEAD + 16) + br %r14 + .cfi_endproc +endf \name +.endm + +vdso_syscall __kernel_gettimeofday, __NR_gettimeofday +vdso_syscall __kernel_clock_gettime, __NR_clock_gettime +vdso_syscall __kernel_clock_getres, __NR_clock_getres +vdso_syscall __kernel_getcpu, __NR_getcpu + +/* + * TODO unwind info, though we're ok without it. + * The kernel supplies bogus empty unwind info, and it is likely ignored + * by all users. Without it we get the fallback signal frame handling. + */ + +__kernel_sigreturn: + raw_syscall __NR_sigreturn +endf __kernel_sigreturn + +__kernel_rt_sigreturn: + raw_syscall __NR_rt_sigreturn +endf __kernel_rt_sigreturn diff --git a/linux-user/s390x/vdso.ld b/linux-user/s390x/vdso.ld new file mode 100644 index 0000000000..d3f1d1b164 --- /dev/null +++ b/linux-user/s390x/vdso.ld @@ -0,0 +1,72 @@ +/* + * Linker script for linux s390x replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +VERSION { + LINUX_2.6.29 { + global: + __kernel_gettimeofday; + __kernel_clock_gettime; + __kernel_clock_getres; + __kernel_getcpu; + __kernel_rt_sigreturn; + __kernel_sigreturn; + /* + * QEMU handles syscall restart internally, so we don't + * need the __kernel_restart_syscall entry point. + */ + local: *; + }; +} + + +PHDRS { + phdr PT_PHDR FLAGS(4) PHDRS; + load PT_LOAD FLAGS(7) FILEHDR PHDRS; /* FLAGS=RWX */ + dynamic PT_DYNAMIC FLAGS(4); + eh_frame_hdr PT_GNU_EH_FRAME; + note PT_NOTE FLAGS(4); +} + +SECTIONS { + . = SIZEOF_HEADERS; + + /* + * The following, including the FILEHDRS and PHDRS, are modified + * when we relocate the binary. We want them to be initially + * writable for the relocation; we'll force them read-only after. + */ + .note : { *(.note*) } :load :note + .dynamic : { *(.dynamic) } :load :dynamic + .dynsym : { *(.dynsym) } :load + /* + * There ought not be any real read-write data. + * But since we manipulated the segment layout, + * we have to put these sections somewhere. 
+ */ + .data : { + *(.data*) + *(.sdata*) + *(.got.plt) *(.got) + *(.gnu.linkonce.d.*) + *(.bss*) + *(.dynbss*) + *(.gnu.linkonce.b.*) + } + + .rodata : { *(.rodata*) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr + .eh_frame : { *(.eh_frame) } :load + + .text : { *(.text*) } :load +} diff --git a/linux-user/s390x/vdso.so b/linux-user/s390x/vdso.so new file mode 100755 index 0000000000..64130f6f33 --- /dev/null +++ b/linux-user/s390x/vdso.so Binary files differdiff --git a/linux-user/signal.c b/linux-user/signal.c index 3b8efec89f..b35d1e512f 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -536,11 +536,10 @@ static void signal_table_init(void) host_to_target_signal_table[SIGABRT] = 0; host_to_target_signal_table[hsig++] = TARGET_SIGABRT; - for (; hsig <= SIGRTMAX; hsig++) { - tsig = hsig - SIGRTMIN + TARGET_SIGRTMIN; - if (tsig <= TARGET_NSIG) { - host_to_target_signal_table[hsig] = tsig; - } + for (tsig = TARGET_SIGRTMIN; + hsig <= SIGRTMAX && tsig <= TARGET_NSIG; + hsig++, tsig++) { + host_to_target_signal_table[hsig] = tsig; } /* Invert the mapping that has already been assigned. */ diff --git a/linux-user/syscall.c b/linux-user/syscall.c index d49cd314a2..65ac3ac796 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -7992,6 +7992,8 @@ static void open_self_maps_4(const struct open_self_maps_data *d, path = "[stack]"; } else if (start == info->brk) { path = "[heap]"; + } else if (start == info->vdso) { + path = "[vdso]"; } /* Except null device (MAP_ANON), adjust offset for this fragment. */ diff --git a/linux-user/x86_64/Makefile.vdso b/linux-user/x86_64/Makefile.vdso new file mode 100644 index 0000000000..26552b66db --- /dev/null +++ b/linux-user/x86_64/Makefile.vdso @@ -0,0 +1,11 @@ +include $(BUILD_DIR)/tests/tcg/x86_64-linux-user/config-target.mak + +SUBDIR = $(SRC_PATH)/linux-user/x86_64 +VPATH += $(SUBDIR) + +all: $(SUBDIR)/vdso.so + +$(SUBDIR)/vdso.so: vdso.S vdso.ld + $(CC) -o $@ -nostdlib -shared -Wl,-h,linux-vdso.so.1 \ + -Wl,--build-id=sha1 -Wl,--hash-style=both \ + -Wl,-T,$(SUBDIR)/vdso.ld $< diff --git a/linux-user/x86_64/meson.build b/linux-user/x86_64/meson.build index 203af9a60c..8c60da7a60 100644 --- a/linux-user/x86_64/meson.build +++ b/linux-user/x86_64/meson.build @@ -3,3 +3,7 @@ syscall_nr_generators += { arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], output: '@BASENAME@_nr.h') } + +vdso_inc = gen_vdso.process('vdso.so') + +linux_user_ss.add(when: 'TARGET_X86_64', if_true: vdso_inc) diff --git a/linux-user/x86_64/vdso.S b/linux-user/x86_64/vdso.S new file mode 100644 index 0000000000..47d16c00ab --- /dev/null +++ b/linux-user/x86_64/vdso.S @@ -0,0 +1,78 @@ +/* + * x86-64 linux replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include <asm/unistd.h> + +.macro endf name + .globl \name + .type \name, @function + .size \name, . 
- \name +.endm + +.macro weakalias name +\name = __vdso_\name + .weak \name +.endm + +.macro vdso_syscall name, nr +__vdso_\name: + mov $\nr, %eax + syscall + ret +endf __vdso_\name +weakalias \name +.endm + + .cfi_startproc + +vdso_syscall clock_gettime, __NR_clock_gettime +vdso_syscall clock_getres, __NR_clock_getres +vdso_syscall gettimeofday, __NR_gettimeofday +vdso_syscall time, __NR_time + +__vdso_getcpu: + /* + * There is no syscall number for this allocated on x64. + * We can handle this several ways: + * + * (1) Invent a syscall number for use within qemu. + * It should be easy enough to pick a number that + * is well out of the way of the kernel numbers. + * + * (2) Force the emulated cpu to support the rdtscp insn, + * and initialize the TSC_AUX value the appropriate value. + * + * (3) Pretend that we're always running on cpu 0. + * + * This last is the one that's implemented here, with the + * tiny bit of extra code to support rdtscp in place. + */ + xor %ecx, %ecx /* rdtscp w/ tsc_aux = 0 */ + + /* if (cpu != NULL) *cpu = (ecx & 0xfff); */ + test %rdi, %rdi + jz 1f + mov %ecx, %eax + and $0xfff, %eax + mov %eax, (%rdi) + + /* if (node != NULL) *node = (ecx >> 12); */ +1: test %rsi, %rsi + jz 2f + shr $12, %ecx + mov %ecx, (%rsi) + +2: xor %eax, %eax + ret +endf __vdso_getcpu + +weakalias getcpu + + .cfi_endproc + +/* TODO: Add elf note for LINUX_VERSION_CODE */ diff --git a/linux-user/x86_64/vdso.ld b/linux-user/x86_64/vdso.ld new file mode 100644 index 0000000000..ca6001cc3c --- /dev/null +++ b/linux-user/x86_64/vdso.ld @@ -0,0 +1,73 @@ +/* + * Linker script for linux x86-64 replacement vdso. + * + * Copyright 2023 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +VERSION { + LINUX_2.6 { + global: + clock_gettime; + __vdso_clock_gettime; + gettimeofday; + __vdso_gettimeofday; + getcpu; + __vdso_getcpu; + time; + __vdso_time; + clock_getres; + __vdso_clock_getres; + + local: *; + }; +} + + +PHDRS { + phdr PT_PHDR FLAGS(4) PHDRS; + load PT_LOAD FLAGS(7) FILEHDR PHDRS; /* FLAGS=RWX */ + dynamic PT_DYNAMIC FLAGS(4); + eh_frame_hdr PT_GNU_EH_FRAME; + note PT_NOTE FLAGS(4); +} + +SECTIONS { + . = SIZEOF_HEADERS; + + /* + * The following, including the FILEHDRS and PHDRS, are modified + * when we relocate the binary. We want them to be initially + * writable for the relocation; we'll force them read-only after. + */ + .note : { *(.note*) } :load :note + .dynamic : { *(.dynamic) } :load :dynamic + .dynsym : { *(.dynsym) } :load + .data : { + /* + * There ought not be any real read-write data. + * But since we manipulated the segment layout, + * we have to put these sections somewhere. 
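Stepping back to __vdso_getcpu in the x86-64 vdso.S above: option (3) works because the vgetcpu convention packs the cpu number into the low 12 bits of TSC_AUX and the node number into the bits above it, and this replacement always supplies zero. A rough C equivalent of the unpacking the assembly performs (function name is illustrative only):

#include <stdint.h>

/*
 * Split a TSC_AUX-style value into cpu/node, mirroring the asm.
 * With the packed value fixed at 0, callers always observe cpu 0
 * on node 0.
 */
static void vgetcpu_unpack(uint32_t aux, unsigned *cpu, unsigned *node)
{
    if (cpu) {
        *cpu = aux & 0xfff;   /* low 12 bits: cpu number */
    }
    if (node) {
        *node = aux >> 12;    /* remaining bits: NUMA node */
    }
}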
+ */ + *(.data*) + *(.sdata*) + *(.got.plt) *(.got) + *(.gnu.linkonce.d.*) + *(.bss*) + *(.dynbss*) + *(.gnu.linkonce.b.*) + } + + .rodata : { *(.rodata*) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr + .eh_frame : { *(.eh_frame) } :load + + .text : { *(.text*) } :load =0x90909090 +} diff --git a/linux-user/x86_64/vdso.so b/linux-user/x86_64/vdso.so new file mode 100755 index 0000000000..c873d6ea58 --- /dev/null +++ b/linux-user/x86_64/vdso.so Binary files differdiff --git a/target/arm/arch_dump.c b/target/arm/arch_dump.c index 2d8e41ab8a..06cdf4ba28 100644 --- a/target/arm/arch_dump.c +++ b/target/arm/arch_dump.c @@ -22,6 +22,7 @@ #include "cpu.h" #include "elf.h" #include "sysemu/dump.h" +#include "cpu-features.h" /* struct user_pt_regs from arch/arm64/include/uapi/asm/ptrace.h */ struct aarch64_user_regs { diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h new file mode 100644 index 0000000000..66212cd7ec --- /dev/null +++ b/target/arm/cpu-features.h @@ -0,0 +1,994 @@ +/* + * QEMU Arm CPU -- feature test functions + * + * Copyright (c) 2023 Linaro Ltd + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef TARGET_ARM_FEATURES_H +#define TARGET_ARM_FEATURES_H + +/* + * Naming convention for isar_feature functions: + * Functions which test 32-bit ID registers should have _aa32_ in + * their name. Functions which test 64-bit ID registers should have + * _aa64_ in their name. These must only be used in code where we + * know for certain that the CPU has AArch32 or AArch64 respectively + * or where the correct answer for a CPU which doesn't implement that + * CPU state is "false" (eg when generating A32 or A64 code, if adding + * system registers that are specific to that CPU state, for "should + * we let this system register bit be set" tests where the 32-bit + * flavour of the register doesn't have the bit, and so on). + * Functions which simply ask "does this feature exist at all" have + * _any_ in their name, and always return the logical OR of the _aa64_ + * and the _aa32_ function. + */ + +/* + * 32-bit feature tests via id registers. 
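The 32-bit tests that follow all share one shape: extract a 4-bit field from a cached ID register value and compare it against the minimum value that architecturally implies the feature. As a rough sketch of what a check such as isar_feature_aa32_arm_div() reduces to, with a hand-rolled extractor standing in for QEMU's FIELD_EX32() macro (helper names here are illustrative; ID_ISAR0.Divide occupies bits [27:24]):

#include <stdbool.h>
#include <stdint.h>

/* Extract a "len"-bit field starting at bit "shift". */
static inline uint32_t extract_field(uint32_t reg, unsigned shift, unsigned len)
{
    return (reg >> shift) & ((1u << len) - 1);
}

/*
 * ID_ISAR0.Divide >= 2 means SDIV/UDIV exist in the A32 instruction
 * set as well as T32; a value of 1 would mean Thumb-only divide.
 */
static inline bool has_a32_sdiv_udiv(uint32_t id_isar0)
{
    return extract_field(id_isar0, 24, 4) >= 2;
}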
+ */ +static inline bool isar_feature_aa32_thumb_div(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0; +} + +static inline bool isar_feature_aa32_arm_div(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1; +} + +static inline bool isar_feature_aa32_lob(const ARMISARegisters *id) +{ + /* (M-profile) low-overhead loops and branch future */ + return FIELD_EX32(id->id_isar0, ID_ISAR0, CMPBRANCH) >= 3; +} + +static inline bool isar_feature_aa32_jazelle(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0; +} + +static inline bool isar_feature_aa32_aes(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0; +} + +static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1; +} + +static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0; +} + +static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0; +} + +static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0; +} + +static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0; +} + +static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0; +} + +static inline bool isar_feature_aa32_jscvt(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, JSCVT) != 0; +} + +static inline bool isar_feature_aa32_dp(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0; +} + +static inline bool isar_feature_aa32_fhm(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, FHM) != 0; +} + +static inline bool isar_feature_aa32_sb(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, SB) != 0; +} + +static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0; +} + +static inline bool isar_feature_aa32_bf16(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, BF16) != 0; +} + +static inline bool isar_feature_aa32_i8mm(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, I8MM) != 0; +} + +static inline bool isar_feature_aa32_ras(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_pfr0, ID_PFR0, RAS) != 0; +} + +static inline bool isar_feature_aa32_mprofile(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_pfr1, ID_PFR1, MPROGMOD) != 0; +} + +static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id) +{ + /* + * Return true if M-profile state handling insns + * (VSCCLRM, CLRM, FPCTX access insns) are implemented + */ + return FIELD_EX32(id->id_pfr1, ID_PFR1, SECURITY) >= 3; +} + +static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id) +{ + /* Sadly this is encoded differently for A-profile and M-profile */ + if (isar_feature_aa32_mprofile(id)) { + return FIELD_EX32(id->mvfr1, MVFR1, FP16) > 0; + } else { + return FIELD_EX32(id->mvfr1, MVFR1, FPHP) >= 3; + } +} + +static inline bool isar_feature_aa32_mve(const ARMISARegisters *id) +{ + /* + * Return true if MVE is supported (either integer or floating point). 
+ * We must check for M-profile as the MVFR1 field means something + * else for A-profile. + */ + return isar_feature_aa32_mprofile(id) && + FIELD_EX32(id->mvfr1, MVFR1, MVE) > 0; +} + +static inline bool isar_feature_aa32_mve_fp(const ARMISARegisters *id) +{ + /* + * Return true if MVE is supported (either integer or floating point). + * We must check for M-profile as the MVFR1 field means something + * else for A-profile. + */ + return isar_feature_aa32_mprofile(id) && + FIELD_EX32(id->mvfr1, MVFR1, MVE) >= 2; +} + +static inline bool isar_feature_aa32_vfp_simd(const ARMISARegisters *id) +{ + /* + * Return true if either VFP or SIMD is implemented. + * In this case, a minimum of VFP w/ D0-D15. + */ + return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) > 0; +} + +static inline bool isar_feature_aa32_simd_r32(const ARMISARegisters *id) +{ + /* Return true if D16-D31 are implemented */ + return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) >= 2; +} + +static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr0, MVFR0, FPSHVEC) > 0; +} + +static inline bool isar_feature_aa32_fpsp_v2(const ARMISARegisters *id) +{ + /* Return true if CPU supports single precision floating point, VFPv2 */ + return FIELD_EX32(id->mvfr0, MVFR0, FPSP) > 0; +} + +static inline bool isar_feature_aa32_fpsp_v3(const ARMISARegisters *id) +{ + /* Return true if CPU supports single precision floating point, VFPv3 */ + return FIELD_EX32(id->mvfr0, MVFR0, FPSP) >= 2; +} + +static inline bool isar_feature_aa32_fpdp_v2(const ARMISARegisters *id) +{ + /* Return true if CPU supports double precision floating point, VFPv2 */ + return FIELD_EX32(id->mvfr0, MVFR0, FPDP) > 0; +} + +static inline bool isar_feature_aa32_fpdp_v3(const ARMISARegisters *id) +{ + /* Return true if CPU supports double precision floating point, VFPv3 */ + return FIELD_EX32(id->mvfr0, MVFR0, FPDP) >= 2; +} + +static inline bool isar_feature_aa32_vfp(const ARMISARegisters *id) +{ + return isar_feature_aa32_fpsp_v2(id) || isar_feature_aa32_fpdp_v2(id); +} + +/* + * We always set the FP and SIMD FP16 fields to indicate identical + * levels of support (assuming SIMD is implemented at all), so + * we only need one set of accessors. + */ +static inline bool isar_feature_aa32_fp16_spconv(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 0; +} + +static inline bool isar_feature_aa32_fp16_dpconv(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 1; +} + +/* + * Note that this ID register field covers both VFP and Neon FMAC, + * so should usually be tested in combination with some other + * check that confirms the presence of whichever of VFP or Neon is + * relevant, to avoid accidentally enabling a Neon feature on + * a VFP-no-Neon core or vice-versa. 
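In practice that means a caller pairs the FMAC test defined just below with a check for whichever unit it actually relies on. A minimal sketch of such a caller elsewhere in QEMU, using the cpu_isar_feature() wrapper defined at the end of this header (the function name is illustrative, not part of the patch):

/*
 * Fused multiply-accumulate is only usable here if VFP itself is
 * present, per the caveat above; a Neon user would pair the same
 * FMAC test with a Neon/Advanced SIMD check instead.
 */
static bool can_use_vfp_fma(ARMCPU *cpu)
{
    return cpu_isar_feature(aa32_simdfmac, cpu) &&
           cpu_isar_feature(aa32_vfp, cpu);
}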
+ */ +static inline bool isar_feature_aa32_simdfmac(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr1, MVFR1, SIMDFMAC) != 0; +} + +static inline bool isar_feature_aa32_vsel(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 1; +} + +static inline bool isar_feature_aa32_vcvt_dr(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 2; +} + +static inline bool isar_feature_aa32_vrint(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 3; +} + +static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 4; +} + +static inline bool isar_feature_aa32_pxn(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr0, ID_MMFR0, VMSA) >= 4; +} + +static inline bool isar_feature_aa32_pan(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) != 0; +} + +static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2; +} + +static inline bool isar_feature_aa32_pmuv3p1(const ARMISARegisters *id) +{ + /* 0xf means "non-standard IMPDEF PMU" */ + return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 && + FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; +} + +static inline bool isar_feature_aa32_pmuv3p4(const ARMISARegisters *id) +{ + /* 0xf means "non-standard IMPDEF PMU" */ + return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 && + FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; +} + +static inline bool isar_feature_aa32_pmuv3p5(const ARMISARegisters *id) +{ + /* 0xf means "non-standard IMPDEF PMU" */ + return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 6 && + FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; +} + +static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0; +} + +static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0; +} + +static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0; +} + +static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0; +} + +static inline bool isar_feature_aa32_half_evt(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, EVT) >= 1; +} + +static inline bool isar_feature_aa32_evt(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, EVT) >= 2; +} + +static inline bool isar_feature_aa32_dit(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_pfr0, ID_PFR0, DIT) != 0; +} + +static inline bool isar_feature_aa32_ssbs(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_pfr2, ID_PFR2, SSBS) != 0; +} + +static inline bool isar_feature_aa32_debugv7p1(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 5; +} + +static inline bool isar_feature_aa32_debugv8p2(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 8; +} + +static inline bool isar_feature_aa32_doublelock(const ARMISARegisters *id) +{ + return FIELD_EX32(id->dbgdevid, DBGDEVID, DOUBLELOCK) > 0; +} + +/* + * 64-bit feature tests via id registers. 
+ */ +static inline bool isar_feature_aa64_aes(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0; +} + +static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1; +} + +static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0; +} + +static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0; +} + +static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1; +} + +static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0; +} + +static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0; +} + +static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0; +} + +static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0; +} + +static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0; +} + +static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0; +} + +static inline bool isar_feature_aa64_dp(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0; +} + +static inline bool isar_feature_aa64_fhm(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, FHM) != 0; +} + +static inline bool isar_feature_aa64_condm_4(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0; +} + +static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2; +} + +static inline bool isar_feature_aa64_rndr(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RNDR) != 0; +} + +static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2; +} + +static inline bool isar_feature_aa64_tlbios(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) != 0; +} + +static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0; +} + +static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0; +} + +/* + * These are the values from APA/API/APA3. + * In general these must be compared '>=', per the normal Arm ARM + * treatment of fields in ID registers. + */ +typedef enum { + PauthFeat_None = 0, + PauthFeat_1 = 1, + PauthFeat_EPAC = 2, + PauthFeat_2 = 3, + PauthFeat_FPAC = 4, + PauthFeat_FPACCOMBINED = 5, +} ARMPauthFeature; + +static inline ARMPauthFeature +isar_feature_pauth_feature(const ARMISARegisters *id) +{ + /* + * Architecturally, only one of {APA,API,APA3} may be active (non-zero) + * and the other two must be zero. Thus we may avoid conditionals. 
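Because the three fields share one value space and at most one of them may be nonzero, the OR in the body that follows collapses to whichever algorithm is implemented, and consumers then compare the result against the ARMPauthFeature levels with >=. A minimal sketch of such a consumer (name illustrative, not part of the patch):

/*
 * Is FPAC, or anything stronger, implemented?  Relies only on the
 * enum and the helper declared above.
 */
static inline bool pauth_has_fpac(const ARMISARegisters *id)
{
    return isar_feature_pauth_feature(id) >= PauthFeat_FPAC;
}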
+ */ + return (FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) | + FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, API) | + FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3)); +} + +static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id) +{ + /* + * Return true if any form of pauth is enabled, as this + * predicate controls migration of the 128-bit keys. + */ + return isar_feature_pauth_feature(id) != PauthFeat_None; +} + +static inline bool isar_feature_aa64_pauth_qarma5(const ARMISARegisters *id) +{ + /* + * Return true if pauth is enabled with the architected QARMA5 algorithm. + * QEMU will always enable or disable both APA and GPA. + */ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0; +} + +static inline bool isar_feature_aa64_pauth_qarma3(const ARMISARegisters *id) +{ + /* + * Return true if pauth is enabled with the architected QARMA3 algorithm. + * QEMU will always enable or disable both APA3 and GPA3. + */ + return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3) != 0; +} + +static inline bool isar_feature_aa64_sb(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0; +} + +static inline bool isar_feature_aa64_predinv(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SPECRES) != 0; +} + +static inline bool isar_feature_aa64_frint(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0; +} + +static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0; +} + +static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2; +} + +static inline bool isar_feature_aa64_bf16(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) != 0; +} + +static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0; +} + +static inline bool isar_feature_aa64_rcpc_8_4(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) >= 2; +} + +static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0; +} + +static inline bool isar_feature_aa64_hbc(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, BC) != 0; +} + +static inline bool isar_feature_aa64_mops(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, MOPS); +} + +static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id) +{ + /* We always set the AdvSIMD and FP fields identically. */ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) != 0xf; +} + +static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id) +{ + /* We always set the AdvSIMD and FP fields identically wrt FP16. 
*/ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; +} + +static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2; +} + +static inline bool isar_feature_aa64_aa32_el1(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL1) >= 2; +} + +static inline bool isar_feature_aa64_aa32_el2(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL2) >= 2; +} + +static inline bool isar_feature_aa64_ras(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) != 0; +} + +static inline bool isar_feature_aa64_doublefault(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) >= 2; +} + +static inline bool isar_feature_aa64_sve(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0; +} + +static inline bool isar_feature_aa64_sel2(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SEL2) != 0; +} + +static inline bool isar_feature_aa64_rme(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RME) != 0; +} + +static inline bool isar_feature_aa64_dit(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, DIT) != 0; +} + +static inline bool isar_feature_aa64_scxtnum(const ARMISARegisters *id) +{ + int key = FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, CSV2); + if (key >= 2) { + return true; /* FEAT_CSV2_2 */ + } + if (key == 1) { + key = FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, CSV2_FRAC); + return key >= 2; /* FEAT_CSV2_1p2 */ + } + return false; +} + +static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0; +} + +static inline bool isar_feature_aa64_bti(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0; +} + +static inline bool isar_feature_aa64_mte_insn_reg(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) != 0; +} + +static inline bool isar_feature_aa64_mte(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) >= 2; +} + +static inline bool isar_feature_aa64_sme(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SME) != 0; +} + +static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id) +{ + return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 1; +} + +static inline bool isar_feature_aa64_tgran4_2_lpa2(const ARMISARegisters *id) +{ + unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2); + return t >= 3 || (t == 0 && isar_feature_aa64_tgran4_lpa2(id)); +} + +static inline bool isar_feature_aa64_tgran16_lpa2(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 2; +} + +static inline bool isar_feature_aa64_tgran16_2_lpa2(const ARMISARegisters *id) +{ + unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2); + return t >= 3 || (t == 0 && isar_feature_aa64_tgran16_lpa2(id)); +} + +static inline bool isar_feature_aa64_tgran4(const ARMISARegisters *id) +{ + return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 0; +} + +static inline bool isar_feature_aa64_tgran16(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 1; +} + +static inline bool isar_feature_aa64_tgran64(const ARMISARegisters *id) +{ + return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64) >= 
0; +} + +static inline bool isar_feature_aa64_tgran4_2(const ARMISARegisters *id) +{ + unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2); + return t >= 2 || (t == 0 && isar_feature_aa64_tgran4(id)); +} + +static inline bool isar_feature_aa64_tgran16_2(const ARMISARegisters *id) +{ + unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2); + return t >= 2 || (t == 0 && isar_feature_aa64_tgran16(id)); +} + +static inline bool isar_feature_aa64_tgran64_2(const ARMISARegisters *id) +{ + unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64_2); + return t >= 2 || (t == 0 && isar_feature_aa64_tgran64(id)); +} + +static inline bool isar_feature_aa64_fgt(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, FGT) != 0; +} + +static inline bool isar_feature_aa64_vh(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0; +} + +static inline bool isar_feature_aa64_lor(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0; +} + +static inline bool isar_feature_aa64_pan(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) != 0; +} + +static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2; +} + +static inline bool isar_feature_aa64_pan3(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 3; +} + +static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0; +} + +static inline bool isar_feature_aa64_tidcp1(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, TIDCP1) != 0; +} + +static inline bool isar_feature_aa64_hafs(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) != 0; +} + +static inline bool isar_feature_aa64_hdbs(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) >= 2; +} + +static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0; +} + +static inline bool isar_feature_aa64_uao(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0; +} + +static inline bool isar_feature_aa64_st(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, ST) != 0; +} + +static inline bool isar_feature_aa64_lse2(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, AT) != 0; +} + +static inline bool isar_feature_aa64_fwb(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, FWB) != 0; +} + +static inline bool isar_feature_aa64_ids(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, IDS) != 0; +} + +static inline bool isar_feature_aa64_half_evt(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, EVT) >= 1; +} + +static inline bool isar_feature_aa64_evt(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, EVT) >= 2; +} + +static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0; +} + +static inline bool isar_feature_aa64_lva(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, VARANGE) != 0; +} + +static inline bool isar_feature_aa64_e0pd(const ARMISARegisters *id) +{ +
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, E0PD) != 0; +} + +static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 && + FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; +} + +static inline bool isar_feature_aa64_pmuv3p4(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 && + FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; +} + +static inline bool isar_feature_aa64_pmuv3p5(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 6 && + FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; +} + +static inline bool isar_feature_aa64_debugv8p2(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, DEBUGVER) >= 8; +} + +static inline bool isar_feature_aa64_doublelock(const ARMISARegisters *id) +{ + return FIELD_SEX64(id->id_aa64dfr0, ID_AA64DFR0, DOUBLELOCK) >= 0; +} + +static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0; +} + +static inline bool isar_feature_aa64_sve2_aes(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) != 0; +} + +static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2; +} + +static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0; +} + +static inline bool isar_feature_aa64_sve_bf16(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BFLOAT16) != 0; +} + +static inline bool isar_feature_aa64_sve2_sha3(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SHA3) != 0; +} + +static inline bool isar_feature_aa64_sve2_sm4(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SM4) != 0; +} + +static inline bool isar_feature_aa64_sve_i8mm(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0; +} + +static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0; +} + +static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0; +} + +static inline bool isar_feature_aa64_sme_f64f64(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, F64F64); +} + +static inline bool isar_feature_aa64_sme_i16i64(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, I16I64) == 0xf; +} + +static inline bool isar_feature_aa64_sme_fa64(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, FA64); +} + +/* + * Feature tests for "does this exist in either 32-bit or 64-bit?" 
+ */ +static inline bool isar_feature_any_fp16(const ARMISARegisters *id) +{ + return isar_feature_aa64_fp16(id) || isar_feature_aa32_fp16_arith(id); +} + +static inline bool isar_feature_any_predinv(const ARMISARegisters *id) +{ + return isar_feature_aa64_predinv(id) || isar_feature_aa32_predinv(id); +} + +static inline bool isar_feature_any_pmuv3p1(const ARMISARegisters *id) +{ + return isar_feature_aa64_pmuv3p1(id) || isar_feature_aa32_pmuv3p1(id); +} + +static inline bool isar_feature_any_pmuv3p4(const ARMISARegisters *id) +{ + return isar_feature_aa64_pmuv3p4(id) || isar_feature_aa32_pmuv3p4(id); +} + +static inline bool isar_feature_any_pmuv3p5(const ARMISARegisters *id) +{ + return isar_feature_aa64_pmuv3p5(id) || isar_feature_aa32_pmuv3p5(id); +} + +static inline bool isar_feature_any_ccidx(const ARMISARegisters *id) +{ + return isar_feature_aa64_ccidx(id) || isar_feature_aa32_ccidx(id); +} + +static inline bool isar_feature_any_tts2uxn(const ARMISARegisters *id) +{ + return isar_feature_aa64_tts2uxn(id) || isar_feature_aa32_tts2uxn(id); +} + +static inline bool isar_feature_any_debugv8p2(const ARMISARegisters *id) +{ + return isar_feature_aa64_debugv8p2(id) || isar_feature_aa32_debugv8p2(id); +} + +static inline bool isar_feature_any_ras(const ARMISARegisters *id) +{ + return isar_feature_aa64_ras(id) || isar_feature_aa32_ras(id); +} + +static inline bool isar_feature_any_half_evt(const ARMISARegisters *id) +{ + return isar_feature_aa64_half_evt(id) || isar_feature_aa32_half_evt(id); +} + +static inline bool isar_feature_any_evt(const ARMISARegisters *id) +{ + return isar_feature_aa64_evt(id) || isar_feature_aa32_evt(id); +} + +/* + * Forward to the above feature tests given an ARMCPU pointer. + */ +#define cpu_isar_feature(name, cpu) \ + ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); }) + +#endif diff --git a/target/arm/cpu.c b/target/arm/cpu.c index aa4e006f21..954328d72a 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -31,6 +31,7 @@ #include "hw/core/tcg-cpu-ops.h" #endif /* CONFIG_TCG */ #include "internals.h" +#include "cpu-features.h" #include "exec/exec-all.h" #include "hw/qdev-properties.h" #if !defined(CONFIG_USER_ONLY) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 76d4cef9e3..d51dfe48db 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -3402,975 +3402,4 @@ static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x) } #endif -/* - * Naming convention for isar_feature functions: - * Functions which test 32-bit ID registers should have _aa32_ in - * their name. Functions which test 64-bit ID registers should have - * _aa64_ in their name. These must only be used in code where we - * know for certain that the CPU has AArch32 or AArch64 respectively - * or where the correct answer for a CPU which doesn't implement that - * CPU state is "false" (eg when generating A32 or A64 code, if adding - * system registers that are specific to that CPU state, for "should - * we let this system register bit be set" tests where the 32-bit - * flavour of the register doesn't have the bit, and so on). - * Functions which simply ask "does this feature exist at all" have - * _any_ in their name, and always return the logical OR of the _aa64_ - * and the _aa32_ function. - */ - -/* - * 32-bit feature tests via id registers. 
- */ -static inline bool isar_feature_aa32_thumb_div(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0; -} - -static inline bool isar_feature_aa32_arm_div(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1; -} - -static inline bool isar_feature_aa32_lob(const ARMISARegisters *id) -{ - /* (M-profile) low-overhead loops and branch future */ - return FIELD_EX32(id->id_isar0, ID_ISAR0, CMPBRANCH) >= 3; -} - -static inline bool isar_feature_aa32_jazelle(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0; -} - -static inline bool isar_feature_aa32_aes(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0; -} - -static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1; -} - -static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0; -} - -static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0; -} - -static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0; -} - -static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0; -} - -static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0; -} - -static inline bool isar_feature_aa32_jscvt(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar6, ID_ISAR6, JSCVT) != 0; -} - -static inline bool isar_feature_aa32_dp(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0; -} - -static inline bool isar_feature_aa32_fhm(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar6, ID_ISAR6, FHM) != 0; -} - -static inline bool isar_feature_aa32_sb(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar6, ID_ISAR6, SB) != 0; -} - -static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0; -} - -static inline bool isar_feature_aa32_bf16(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar6, ID_ISAR6, BF16) != 0; -} - -static inline bool isar_feature_aa32_i8mm(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_isar6, ID_ISAR6, I8MM) != 0; -} - -static inline bool isar_feature_aa32_ras(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_pfr0, ID_PFR0, RAS) != 0; -} - -static inline bool isar_feature_aa32_mprofile(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_pfr1, ID_PFR1, MPROGMOD) != 0; -} - -static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id) -{ - /* - * Return true if M-profile state handling insns - * (VSCCLRM, CLRM, FPCTX access insns) are implemented - */ - return FIELD_EX32(id->id_pfr1, ID_PFR1, SECURITY) >= 3; -} - -static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id) -{ - /* Sadly this is encoded differently for A-profile and M-profile */ - if (isar_feature_aa32_mprofile(id)) { - return FIELD_EX32(id->mvfr1, MVFR1, FP16) > 0; - } else { - return FIELD_EX32(id->mvfr1, MVFR1, FPHP) >= 3; - } -} - -static inline bool isar_feature_aa32_mve(const ARMISARegisters *id) -{ - /* - * Return true if MVE is supported (either integer or floating point). 
- * We must check for M-profile as the MVFR1 field means something - * else for A-profile. - */ - return isar_feature_aa32_mprofile(id) && - FIELD_EX32(id->mvfr1, MVFR1, MVE) > 0; -} - -static inline bool isar_feature_aa32_mve_fp(const ARMISARegisters *id) -{ - /* - * Return true if MVE is supported (either integer or floating point). - * We must check for M-profile as the MVFR1 field means something - * else for A-profile. - */ - return isar_feature_aa32_mprofile(id) && - FIELD_EX32(id->mvfr1, MVFR1, MVE) >= 2; -} - -static inline bool isar_feature_aa32_vfp_simd(const ARMISARegisters *id) -{ - /* - * Return true if either VFP or SIMD is implemented. - * In this case, a minimum of VFP w/ D0-D15. - */ - return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) > 0; -} - -static inline bool isar_feature_aa32_simd_r32(const ARMISARegisters *id) -{ - /* Return true if D16-D31 are implemented */ - return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) >= 2; -} - -static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id) -{ - return FIELD_EX32(id->mvfr0, MVFR0, FPSHVEC) > 0; -} - -static inline bool isar_feature_aa32_fpsp_v2(const ARMISARegisters *id) -{ - /* Return true if CPU supports single precision floating point, VFPv2 */ - return FIELD_EX32(id->mvfr0, MVFR0, FPSP) > 0; -} - -static inline bool isar_feature_aa32_fpsp_v3(const ARMISARegisters *id) -{ - /* Return true if CPU supports single precision floating point, VFPv3 */ - return FIELD_EX32(id->mvfr0, MVFR0, FPSP) >= 2; -} - -static inline bool isar_feature_aa32_fpdp_v2(const ARMISARegisters *id) -{ - /* Return true if CPU supports double precision floating point, VFPv2 */ - return FIELD_EX32(id->mvfr0, MVFR0, FPDP) > 0; -} - -static inline bool isar_feature_aa32_fpdp_v3(const ARMISARegisters *id) -{ - /* Return true if CPU supports double precision floating point, VFPv3 */ - return FIELD_EX32(id->mvfr0, MVFR0, FPDP) >= 2; -} - -static inline bool isar_feature_aa32_vfp(const ARMISARegisters *id) -{ - return isar_feature_aa32_fpsp_v2(id) || isar_feature_aa32_fpdp_v2(id); -} - -/* - * We always set the FP and SIMD FP16 fields to indicate identical - * levels of support (assuming SIMD is implemented at all), so - * we only need one set of accessors. - */ -static inline bool isar_feature_aa32_fp16_spconv(const ARMISARegisters *id) -{ - return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 0; -} - -static inline bool isar_feature_aa32_fp16_dpconv(const ARMISARegisters *id) -{ - return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 1; -} - -/* - * Note that this ID register field covers both VFP and Neon FMAC, - * so should usually be tested in combination with some other - * check that confirms the presence of whichever of VFP or Neon is - * relevant, to avoid accidentally enabling a Neon feature on - * a VFP-no-Neon core or vice-versa. 
- */ -static inline bool isar_feature_aa32_simdfmac(const ARMISARegisters *id) -{ - return FIELD_EX32(id->mvfr1, MVFR1, SIMDFMAC) != 0; -} - -static inline bool isar_feature_aa32_vsel(const ARMISARegisters *id) -{ - return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 1; -} - -static inline bool isar_feature_aa32_vcvt_dr(const ARMISARegisters *id) -{ - return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 2; -} - -static inline bool isar_feature_aa32_vrint(const ARMISARegisters *id) -{ - return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 3; -} - -static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id) -{ - return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 4; -} - -static inline bool isar_feature_aa32_pxn(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_mmfr0, ID_MMFR0, VMSA) >= 4; -} - -static inline bool isar_feature_aa32_pan(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) != 0; -} - -static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2; -} - -static inline bool isar_feature_aa32_pmuv3p1(const ARMISARegisters *id) -{ - /* 0xf means "non-standard IMPDEF PMU" */ - return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 && - FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; -} - -static inline bool isar_feature_aa32_pmuv3p4(const ARMISARegisters *id) -{ - /* 0xf means "non-standard IMPDEF PMU" */ - return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 && - FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; -} - -static inline bool isar_feature_aa32_pmuv3p5(const ARMISARegisters *id) -{ - /* 0xf means "non-standard IMPDEF PMU" */ - return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 6 && - FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; -} - -static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0; -} - -static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0; -} - -static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0; -} - -static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0; -} - -static inline bool isar_feature_aa32_half_evt(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, EVT) >= 1; -} - -static inline bool isar_feature_aa32_evt(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, EVT) >= 2; -} - -static inline bool isar_feature_aa32_dit(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_pfr0, ID_PFR0, DIT) != 0; -} - -static inline bool isar_feature_aa32_ssbs(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_pfr2, ID_PFR2, SSBS) != 0; -} - -static inline bool isar_feature_aa32_debugv7p1(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 5; -} - -static inline bool isar_feature_aa32_debugv8p2(const ARMISARegisters *id) -{ - return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 8; -} - -static inline bool isar_feature_aa32_doublelock(const ARMISARegisters *id) -{ - return FIELD_EX32(id->dbgdevid, DBGDEVID, DOUBLELOCK) > 0; -} - -/* - * 64-bit feature tests via id registers. 
- */ -static inline bool isar_feature_aa64_aes(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0; -} - -static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1; -} - -static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0; -} - -static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0; -} - -static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1; -} - -static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0; -} - -static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0; -} - -static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0; -} - -static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0; -} - -static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0; -} - -static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0; -} - -static inline bool isar_feature_aa64_dp(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0; -} - -static inline bool isar_feature_aa64_fhm(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, FHM) != 0; -} - -static inline bool isar_feature_aa64_condm_4(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0; -} - -static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2; -} - -static inline bool isar_feature_aa64_rndr(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RNDR) != 0; -} - -static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0; -} - -static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0; -} - -/* - * These are the values from APA/API/APA3. - * In general these must be compared '>=', per the normal Arm ARM - * treatment of fields in ID registers. - */ -typedef enum { - PauthFeat_None = 0, - PauthFeat_1 = 1, - PauthFeat_EPAC = 2, - PauthFeat_2 = 3, - PauthFeat_FPAC = 4, - PauthFeat_FPACCOMBINED = 5, -} ARMPauthFeature; - -static inline ARMPauthFeature -isar_feature_pauth_feature(const ARMISARegisters *id) -{ - /* - * Architecturally, only one of {APA,API,APA3} may be active (non-zero) - * and the other two must be zero. Thus we may avoid conditionals. - */ - return (FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) | - FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, API) | - FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3)); -} - -static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id) -{ - /* - * Return true if any form of pauth is enabled, as this - * predicate controls migration of the 128-bit keys. 
- */ - return isar_feature_pauth_feature(id) != PauthFeat_None; -} - -static inline bool isar_feature_aa64_pauth_qarma5(const ARMISARegisters *id) -{ - /* - * Return true if pauth is enabled with the architected QARMA5 algorithm. - * QEMU will always enable or disable both APA and GPA. - */ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0; -} - -static inline bool isar_feature_aa64_pauth_qarma3(const ARMISARegisters *id) -{ - /* - * Return true if pauth is enabled with the architected QARMA3 algorithm. - * QEMU will always enable or disable both APA3 and GPA3. - */ - return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3) != 0; -} - -static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2; -} - -static inline bool isar_feature_aa64_tlbios(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) != 0; -} - -static inline bool isar_feature_aa64_sb(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0; -} - -static inline bool isar_feature_aa64_predinv(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SPECRES) != 0; -} - -static inline bool isar_feature_aa64_frint(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0; -} - -static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0; -} - -static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2; -} - -static inline bool isar_feature_aa64_bf16(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) != 0; -} - -static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id) -{ - /* We always set the AdvSIMD and FP fields identically. */ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) != 0xf; -} - -static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id) -{ - /* We always set the AdvSIMD and FP fields identically wrt FP16. 
*/ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; -} - -static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2; -} - -static inline bool isar_feature_aa64_aa32_el1(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL1) >= 2; -} - -static inline bool isar_feature_aa64_aa32_el2(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL2) >= 2; -} - -static inline bool isar_feature_aa64_ras(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) != 0; -} - -static inline bool isar_feature_aa64_doublefault(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) >= 2; -} - -static inline bool isar_feature_aa64_sve(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0; -} - -static inline bool isar_feature_aa64_sel2(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SEL2) != 0; -} - -static inline bool isar_feature_aa64_rme(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RME) != 0; -} - -static inline bool isar_feature_aa64_vh(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0; -} - -static inline bool isar_feature_aa64_lor(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0; -} - -static inline bool isar_feature_aa64_pan(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) != 0; -} - -static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2; -} - -static inline bool isar_feature_aa64_pan3(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 3; -} - -static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0; -} - -static inline bool isar_feature_aa64_tidcp1(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR1, TIDCP1) != 0; -} - -static inline bool isar_feature_aa64_uao(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0; -} - -static inline bool isar_feature_aa64_st(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, ST) != 0; -} - -static inline bool isar_feature_aa64_lse2(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, AT) != 0; -} - -static inline bool isar_feature_aa64_fwb(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, FWB) != 0; -} - -static inline bool isar_feature_aa64_ids(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, IDS) != 0; -} - -static inline bool isar_feature_aa64_half_evt(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, EVT) >= 1; -} - -static inline bool isar_feature_aa64_evt(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, EVT) >= 2; -} - -static inline bool isar_feature_aa64_bti(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0; -} - -static inline bool isar_feature_aa64_mte_insn_reg(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) != 0; -} - -static inline bool isar_feature_aa64_mte(const ARMISARegisters *id) -{ - return 
FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) >= 2; -} - -static inline bool isar_feature_aa64_sme(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SME) != 0; -} - -static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 && - FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; -} - -static inline bool isar_feature_aa64_pmuv3p4(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 && - FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; -} - -static inline bool isar_feature_aa64_pmuv3p5(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 6 && - FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; -} - -static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0; -} - -static inline bool isar_feature_aa64_rcpc_8_4(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) >= 2; -} - -static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0; -} - -static inline bool isar_feature_aa64_hbc(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, BC) != 0; -} - -static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id) -{ - return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 1; -} - -static inline bool isar_feature_aa64_tgran4_2_lpa2(const ARMISARegisters *id) -{ - unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2); - return t >= 3 || (t == 0 && isar_feature_aa64_tgran4_lpa2(id)); -} - -static inline bool isar_feature_aa64_tgran16_lpa2(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 2; -} - -static inline bool isar_feature_aa64_tgran16_2_lpa2(const ARMISARegisters *id) -{ - unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2); - return t >= 3 || (t == 0 && isar_feature_aa64_tgran16_lpa2(id)); -} - -static inline bool isar_feature_aa64_tgran4(const ARMISARegisters *id) -{ - return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 0; -} - -static inline bool isar_feature_aa64_tgran16(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 1; -} - -static inline bool isar_feature_aa64_tgran64(const ARMISARegisters *id) -{ - return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64) >= 0; -} - -static inline bool isar_feature_aa64_tgran4_2(const ARMISARegisters *id) -{ - unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2); - return t >= 2 || (t == 0 && isar_feature_aa64_tgran4(id)); -} - -static inline bool isar_feature_aa64_tgran16_2(const ARMISARegisters *id) -{ - unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2); - return t >= 2 || (t == 0 && isar_feature_aa64_tgran16(id)); -} - -static inline bool isar_feature_aa64_tgran64_2(const ARMISARegisters *id) -{ - unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64_2); - return t >= 2 || (t == 0 && isar_feature_aa64_tgran64(id)); -} - -static inline bool isar_feature_aa64_fgt(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, FGT) != 0; -} - -static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0; -} - -static inline bool 
isar_feature_aa64_lva(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, VARANGE) != 0; -} - -static inline bool isar_feature_aa64_e0pd(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, E0PD) != 0; -} - -static inline bool isar_feature_aa64_hafs(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) != 0; -} - -static inline bool isar_feature_aa64_hdbs(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) >= 2; -} - -static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0; -} - -static inline bool isar_feature_aa64_dit(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, DIT) != 0; -} - -static inline bool isar_feature_aa64_scxtnum(const ARMISARegisters *id) -{ - int key = FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, CSV2); - if (key >= 2) { - return true; /* FEAT_CSV2_2 */ - } - if (key == 1) { - key = FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, CSV2_FRAC); - return key >= 2; /* FEAT_CSV2_1p2 */ - } - return false; -} - -static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0; -} - -static inline bool isar_feature_aa64_debugv8p2(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, DEBUGVER) >= 8; -} - -static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0; -} - -static inline bool isar_feature_aa64_sve2_aes(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) != 0; -} - -static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2; -} - -static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0; -} - -static inline bool isar_feature_aa64_sve_bf16(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BFLOAT16) != 0; -} - -static inline bool isar_feature_aa64_sve2_sha3(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SHA3) != 0; -} - -static inline bool isar_feature_aa64_sve2_sm4(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SM4) != 0; -} - -static inline bool isar_feature_aa64_sve_i8mm(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0; -} - -static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0; -} - -static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0; -} - -static inline bool isar_feature_aa64_sme_f64f64(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, F64F64); -} - -static inline bool isar_feature_aa64_sme_i16i64(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, I16I64) == 0xf; -} - -static inline bool isar_feature_aa64_sme_fa64(const ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, FA64); -} - -static inline bool isar_feature_aa64_doublelock(const ARMISARegisters *id) -{ - return FIELD_SEX64(id->id_aa64dfr0, ID_AA64DFR0, DOUBLELOCK) >= 0; -} - -static inline bool isar_feature_aa64_mops(const 
ARMISARegisters *id) -{ - return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, MOPS); -} - -/* - * Feature tests for "does this exist in either 32-bit or 64-bit?" - */ -static inline bool isar_feature_any_fp16(const ARMISARegisters *id) -{ - return isar_feature_aa64_fp16(id) || isar_feature_aa32_fp16_arith(id); -} - -static inline bool isar_feature_any_predinv(const ARMISARegisters *id) -{ - return isar_feature_aa64_predinv(id) || isar_feature_aa32_predinv(id); -} - -static inline bool isar_feature_any_pmuv3p1(const ARMISARegisters *id) -{ - return isar_feature_aa64_pmuv3p1(id) || isar_feature_aa32_pmuv3p1(id); -} - -static inline bool isar_feature_any_pmuv3p4(const ARMISARegisters *id) -{ - return isar_feature_aa64_pmuv3p4(id) || isar_feature_aa32_pmuv3p4(id); -} - -static inline bool isar_feature_any_pmuv3p5(const ARMISARegisters *id) -{ - return isar_feature_aa64_pmuv3p5(id) || isar_feature_aa32_pmuv3p5(id); -} - -static inline bool isar_feature_any_ccidx(const ARMISARegisters *id) -{ - return isar_feature_aa64_ccidx(id) || isar_feature_aa32_ccidx(id); -} - -static inline bool isar_feature_any_tts2uxn(const ARMISARegisters *id) -{ - return isar_feature_aa64_tts2uxn(id) || isar_feature_aa32_tts2uxn(id); -} - -static inline bool isar_feature_any_debugv8p2(const ARMISARegisters *id) -{ - return isar_feature_aa64_debugv8p2(id) || isar_feature_aa32_debugv8p2(id); -} - -static inline bool isar_feature_any_ras(const ARMISARegisters *id) -{ - return isar_feature_aa64_ras(id) || isar_feature_aa32_ras(id); -} - -static inline bool isar_feature_any_half_evt(const ARMISARegisters *id) -{ - return isar_feature_aa64_half_evt(id) || isar_feature_aa32_half_evt(id); -} - -static inline bool isar_feature_any_evt(const ARMISARegisters *id) -{ - return isar_feature_aa64_evt(id) || isar_feature_aa32_evt(id); -} - -/* - * Forward to the above feature tests given an ARMCPU pointer. 
- */ -#define cpu_isar_feature(name, cpu) \ - ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); }) - #endif diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 1cb9d5b81a..1e9c6c85ae 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -32,6 +32,7 @@ #include "qapi/visitor.h" #include "hw/qdev-properties.h" #include "internals.h" +#include "cpu-features.h" #include "cpregs.h" void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index abe72e35ae..79a3659c0c 100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -9,6 +9,7 @@ #include "qemu/log.h" #include "cpu.h" #include "internals.h" +#include "cpu-features.h" #include "cpregs.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c index b7ace24bfc..28f546a5ff 100644 --- a/target/arm/gdbstub.c +++ b/target/arm/gdbstub.c @@ -23,6 +23,7 @@ #include "gdbstub/helpers.h" #include "sysemu/tcg.h" #include "internals.h" +#include "cpu-features.h" #include "cpregs.h" typedef struct RegisterSysregXmlParam { diff --git a/target/arm/helper.c b/target/arm/helper.c index b29edb26af..5dc0d20a84 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -11,6 +11,7 @@ #include "trace.h" #include "cpu.h" #include "internals.h" +#include "cpu-features.h" #include "exec/helper-proto.h" #include "qemu/main-loop.h" #include "qemu/timer.h" diff --git a/target/arm/internals.h b/target/arm/internals.h index 1dd9182a54..f7224e6f4d 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -28,6 +28,7 @@ #include "hw/registerfields.h" #include "tcg/tcg-gvec-desc.h" #include "syndrome.h" +#include "cpu-features.h" /* register banks for CPU modes */ #define BANK_USRSYS 0 diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index 4bb68646e4..3c175c93a7 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -28,6 +28,7 @@ #include "sysemu/kvm_int.h" #include "kvm_arm.h" #include "internals.h" +#include "cpu-features.h" #include "hw/acpi/acpi.h" #include "hw/acpi/ghes.h" diff --git a/target/arm/machine.c b/target/arm/machine.c index fc4a4a4064..9e20b41189 100644 --- a/target/arm/machine.c +++ b/target/arm/machine.c @@ -5,6 +5,7 @@ #include "sysemu/tcg.h" #include "kvm_arm.h" #include "internals.h" +#include "cpu-features.h" #include "migration/cpu.h" static bool vfp_needed(void *opaque) diff --git a/target/arm/ptw.c b/target/arm/ptw.c index 95db9ec4c3..53713e0300 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -13,6 +13,7 @@ #include "exec/exec-all.h" #include "cpu.h" #include "internals.h" +#include "cpu-features.h" #include "idau.h" #ifdef CONFIG_TCG # include "tcg/oversized-guest.h" diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c index d978aa5f7a..08db1dbcc7 100644 --- a/target/arm/tcg/cpu64.c +++ b/target/arm/tcg/cpu64.c @@ -26,6 +26,7 @@ #include "hw/qdev-properties.h" #include "qemu/units.h" #include "internals.h" +#include "cpu-features.h" #include "cpregs.h" static uint64_t make_ccsidr64(unsigned assoc, unsigned linesize, @@ -840,6 +841,13 @@ static const ARMCPRegInfo cortex_a710_cp_reginfo[] = { { .name = "CPUPFR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 6, .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* + * Report CPUCFR_EL1.SCU as 1, as we do not implement the DSU + * (and in particular its system registers). 
+ */ + { .name = "CPUCFR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 0, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 4 }, /* * Stub RAMINDEX, as we don't actually implement caches, BTB, @@ -909,12 +917,12 @@ static void aarch64_a710_initfn(Object *obj) cpu->isar.id_aa64pfr0 = 0x1201111120111112ull; /* GIC filled in later */ cpu->isar.id_aa64pfr1 = 0x0000000000000221ull; cpu->isar.id_aa64zfr0 = 0x0000110100110021ull; /* with Crypto */ - cpu->isar.id_aa64dfr0 = 0x000011f010305611ull; + cpu->isar.id_aa64dfr0 = 0x000011f010305619ull; cpu->isar.id_aa64dfr1 = 0; cpu->id_aa64afr0 = 0; cpu->id_aa64afr1 = 0; cpu->isar.id_aa64isar0 = 0x0221111110212120ull; /* with Crypto */ - cpu->isar.id_aa64isar1 = 0x0010111101211032ull; + cpu->isar.id_aa64isar1 = 0x0010111101211052ull; cpu->isar.id_aa64mmfr0 = 0x0000022200101122ull; cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; cpu->isar.id_aa64mmfr2 = 0x1221011110101011ull; @@ -956,6 +964,108 @@ static void aarch64_a710_initfn(Object *obj) aarch64_add_sve_properties(obj); } +/* Extra IMPDEF regs in the N2 beyond those in the A710 */ +static const ARMCPRegInfo neoverse_n2_cp_reginfo[] = { + { .name = "CPURNDBR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 3, .opc2 = 0, + .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPURNDPEID_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 3, .opc2 = 1, + .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, +}; + +static void aarch64_neoverse_n2_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,neoverse-n2"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + + /* Ordered by Section B.5: AArch64 ID registers */ + cpu->midr = 0x410FD493; /* r0p3 */ + cpu->revidr = 0; + cpu->isar.id_pfr0 = 0x21110131; + cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */ + cpu->isar.id_dfr0 = 0x16011099; + cpu->id_afr0 = 0; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02122211; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00010142; + cpu->isar.id_isar5 = 0x11011121; /* with Crypto */ + cpu->isar.id_mmfr4 = 0x01021110; + cpu->isar.id_isar6 = 0x01111111; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x13211111; + cpu->isar.mvfr2 = 0x00000043; + cpu->isar.id_pfr2 = 0x00000011; + cpu->isar.id_aa64pfr0 = 0x1201111120111112ull; /* GIC filled in later */ + cpu->isar.id_aa64pfr1 = 0x0000000000000221ull; + cpu->isar.id_aa64zfr0 = 0x0000110100110021ull; /* with Crypto */ + cpu->isar.id_aa64dfr0 = 0x000011f210305619ull; + cpu->isar.id_aa64dfr1 = 0; + cpu->id_aa64afr0 = 0; + cpu->id_aa64afr1 = 0; + cpu->isar.id_aa64isar0 = 0x0221111110212120ull; /* with Crypto */ + cpu->isar.id_aa64isar1 = 0x0011111101211052ull; + cpu->isar.id_aa64mmfr0 = 0x0000022200101125ull; + cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; + cpu->isar.id_aa64mmfr2 = 0x1221011112101011ull; + cpu->clidr = 0x0000001482000023ull; + cpu->gm_blocksize = 4; + cpu->ctr = 0x00000004b444c004ull; + 
cpu->dcz_blocksize = 4; + /* TODO FEAT_MPAM: mpamidr_el1 = 0x0000_0001_001e_01ff */ + + /* Section B.7.2: PMCR_EL0 */ + cpu->isar.reset_pmcr_el0 = 0x3000; /* with 6 counters */ + + /* Section B.8.9: ICH_VTR_EL2 */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + + /* Section 14: Scalable Vector Extensions support */ + cpu->sve_vq.supported = 1 << 0; /* 128bit */ + + /* + * The Neoverse N2 TRM does not list CCSIDR values. The layout of + * the caches are in text in Table 7-1, Table 8-1, and Table 9-1. + * + * L1: 4-way set associative 64-byte line size, total 64K. + * L2: 8-way set associative 64 byte line size, total either 512K or 1024K. + */ + cpu->ccsidr[0] = make_ccsidr64(4, 64, 64 * KiB); /* L1 dcache */ + cpu->ccsidr[1] = cpu->ccsidr[0]; /* L1 icache */ + cpu->ccsidr[2] = make_ccsidr64(8, 64, 512 * KiB); /* L2 cache */ + + /* FIXME: Not documented -- copied from neoverse-v1 */ + cpu->reset_sctlr = 0x30c50838; + + /* + * The Neoverse N2 has all of the Cortex-A710 IMPDEF registers, + * and a few more RNG related ones. + */ + define_arm_cp_regs(cpu, cortex_a710_cp_reginfo); + define_arm_cp_regs(cpu, neoverse_n2_cp_reginfo); + + aarch64_add_pauth_properties(obj); + aarch64_add_sve_properties(obj); +} + /* * -cpu max: a CPU with as many features enabled as our emulation supports. * The version of '-cpu max' for qemu-system-arm is defined in cpu32.c; @@ -1158,6 +1268,7 @@ static const ARMCPUInfo aarch64_cpus[] = { { .name = "a64fx", .initfn = aarch64_a64fx_initfn }, { .name = "neoverse-n1", .initfn = aarch64_neoverse_n1_initfn }, { .name = "neoverse-v1", .initfn = aarch64_neoverse_v1_initfn }, + { .name = "neoverse-n2", .initfn = aarch64_neoverse_n2_initfn }, }; static void aarch64_cpu_register_types(void) diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c index cea1adb7b6..3d7fdce5c3 100644 --- a/target/arm/tcg/hflags.c +++ b/target/arm/tcg/hflags.c @@ -8,6 +8,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internals.h" +#include "cpu-features.h" #include "exec/helper-proto.h" #include "cpregs.h" diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c index 0045c18f80..a26adb75aa 100644 --- a/target/arm/tcg/m_helper.c +++ b/target/arm/tcg/m_helper.c @@ -9,6 +9,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internals.h" +#include "cpu-features.h" #include "gdbstub/helpers.h" #include "exec/helper-proto.h" #include "qemu/main-loop.h" diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c index 403f8b09d3..ea08936a85 100644 --- a/target/arm/tcg/op_helper.c +++ b/target/arm/tcg/op_helper.c @@ -21,6 +21,7 @@ #include "cpu.h" #include "exec/helper-proto.h" #include "internals.h" +#include "cpu-features.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "cpregs.h" diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c index 4da2962ad5..c4b143024f 100644 --- a/target/arm/tcg/pauth_helper.c +++ b/target/arm/tcg/pauth_helper.c @@ -20,6 +20,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internals.h" +#include "cpu-features.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "exec/helper-proto.h" diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c index 59bff8b452..4fdd85359e 100644 --- a/target/arm/tcg/tlb_helper.c +++ b/target/arm/tcg/tlb_helper.c @@ -8,6 +8,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internals.h" +#include "cpu-features.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" diff --git 
a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index ad78b8b120..41484d8ae5 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -1606,7 +1606,7 @@ static bool trans_ERET(DisasContext *s, arg_ERET *a)
         return false;
     }
     if (s->fgt_eret) {
-        gen_exception_insn_el(s, 0, EXCP_UDEF, 0, 2);
+        gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(0), 2);
         return true;
     }
     dst = tcg_temp_new_i64();
@@ -1633,7 +1633,7 @@ static bool trans_ERETA(DisasContext *s, arg_reta *a)
     }
     /* The FGT trap takes precedence over an auth trap. */
     if (s->fgt_eret) {
-        gen_exception_insn_el(s, 0, EXCP_UDEF, a->m ? 3 : 2, 2);
+        gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(a->m ? 3 : 2), 2);
         return true;
     }
     dst = tcg_temp_new_i64();
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index b4046611f5..9efe00cf6c 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -8,7 +8,7 @@
 #include "exec/translator.h"
 #include "exec/helper-gen.h"
 #include "internals.h"
-
+#include "cpu-features.h"
 
 /* internal defines */
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index 789bba36cc..3e5e37abbe 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -21,6 +21,7 @@
 #include "cpu.h"
 #include "exec/helper-proto.h"
 #include "internals.h"
+#include "cpu-features.h"
 #ifdef CONFIG_TCG
 #include "qemu/log.h"
 #include "fpu/softfloat.h"
diff --git a/tests/qtest/ufs-test.c b/tests/qtest/ufs-test.c
index 15d467630c..5daf8c9c49 100644
--- a/tests/qtest/ufs-test.c
+++ b/tests/qtest/ufs-test.c
@@ -425,6 +425,9 @@ static void ufstest_init(void *obj, void *data, QGuestAllocator *alloc)
     const uint8_t test_unit_ready_cdb[UFS_CDB_SIZE] = {
         TEST_UNIT_READY,
     };
+    const uint8_t request_sense_cdb[UFS_CDB_SIZE] = {
+        REQUEST_SENSE,
+    };
     UtpTransferReqDesc utrd;
     UtpUpiuRsp rsp_upiu;
 
@@ -440,6 +443,12 @@ static void ufstest_init(void *obj, void *data, QGuestAllocator *alloc)
     /* There is one logical unit whose lun is 0 */
     g_assert_cmpuint(buf[9], ==, 0);
 
+    /* Clear Unit Attention */
+    ufs_send_scsi_command(ufs, 0, 0, request_sense_cdb, NULL, 0, buf,
+                          sizeof(buf), &utrd, &rsp_upiu);
+    g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+    g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, CHECK_CONDITION);
+
     /* Check TEST_UNIT_READY */
     ufs_send_scsi_command(ufs, 0, 0, test_unit_ready_cdb, NULL, 0, NULL, 0,
                           &utrd, &rsp_upiu);
@@ -473,6 +482,9 @@ static void ufstest_read_write(void *obj, void *data, QGuestAllocator *alloc)
         0x00, 0x00
     };
+    const uint8_t request_sense_cdb[UFS_CDB_SIZE] = {
+        REQUEST_SENSE,
+    };
     const uint8_t read_cdb[UFS_CDB_SIZE] = {
         /* READ(10) to LBA 0, transfer length 1 */
         READ_10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00
     };
@@ -484,32 +496,39 @@ static void ufstest_read_write(void *obj, void *data, QGuestAllocator *alloc)
     uint32_t block_size;
     UtpTransferReqDesc utrd;
     UtpUpiuRsp rsp_upiu;
+    const int test_lun = 1;
 
     ufs_init(ufs, alloc);
 
+    /* Clear Unit Attention */
+    ufs_send_scsi_command(ufs, 0, test_lun, request_sense_cdb, NULL, 0,
+                          read_buf, sizeof(read_buf), &utrd, &rsp_upiu);
+    g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+    g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, CHECK_CONDITION);
+
     /* Read capacity */
-    ufs_send_scsi_command(ufs, 0, 1, read_capacity_cdb, NULL, 0, read_buf,
-                          sizeof(read_buf), &utrd, &rsp_upiu);
+    ufs_send_scsi_command(ufs, 0, test_lun, read_capacity_cdb, NULL, 0,
+                          read_buf, sizeof(read_buf), &utrd, &rsp_upiu);
     g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
     g_assert_cmpuint(rsp_upiu.header.scsi_status, ==,
-                     UFS_COMMAND_RESULT_SUCESS);
+                     UFS_COMMAND_RESULT_SUCCESS);
     block_size = ldl_be_p(&read_buf[8]);
     g_assert_cmpuint(block_size, ==, 4096);
 
     /* Write data */
     memset(write_buf, 0xab, block_size);
-    ufs_send_scsi_command(ufs, 0, 1, write_cdb, write_buf, block_size, NULL, 0,
-                          &utrd, &rsp_upiu);
+    ufs_send_scsi_command(ufs, 0, test_lun, write_cdb, write_buf, block_size,
+                          NULL, 0, &utrd, &rsp_upiu);
     g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
     g_assert_cmpuint(rsp_upiu.header.scsi_status, ==,
-                     UFS_COMMAND_RESULT_SUCESS);
+                     UFS_COMMAND_RESULT_SUCCESS);
 
     /* Read data and verify */
-    ufs_send_scsi_command(ufs, 0, 1, read_cdb, NULL, 0, read_buf, block_size,
-                          &utrd, &rsp_upiu);
+    ufs_send_scsi_command(ufs, 0, test_lun, read_cdb, NULL, 0, read_buf,
+                          block_size, &utrd, &rsp_upiu);
     g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
     g_assert_cmpuint(rsp_upiu.header.scsi_status, ==,
-                     UFS_COMMAND_RESULT_SUCESS);
+                     UFS_COMMAND_RESULT_SUCCESS);
     g_assert_cmpint(memcmp(read_buf, write_buf, block_size), ==, 0);
 
     ufs_exit(ufs, alloc);
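For context on how the new cpu-features.h helpers are consumed: call sites do not read cpu->isar directly, they go through the cpu_isar_feature() macro shown in the cpu.h/cpu-features.h hunks above, which is why every file that performs feature tests now includes "cpu-features.h". A minimal usage sketch, assuming QEMU's internal headers; report_pmu_level() is a hypothetical illustration, not a function added by this patch:

#include "qemu/osdep.h"
#include "cpu.h"            /* ARMCPU, ARMISARegisters */
#include "cpu-features.h"   /* isar_feature_* tests and cpu_isar_feature() */

/*
 * Hypothetical helper: report which PMUv3 level a CPU implements.
 * The _any_ variants answer "does the feature exist at all", i.e. the
 * logical OR of the _aa64_ and _aa32_ ID-register tests.
 */
static int report_pmu_level(ARMCPU *cpu)
{
    if (cpu_isar_feature(any_pmuv3p5, cpu)) {
        return 5;
    }
    if (cpu_isar_feature(any_pmuv3p4, cpu)) {
        return 4;
    }
    if (cpu_isar_feature(any_pmuv3p1, cpu)) {
        return 1;
    }
    return 0;   /* baseline PMUv3, or no standard PMU at all */
}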
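On the translate-a64.c hunk: the change makes the fine-grained-trap path (HFGITR_EL2.ERET, reflected in s->fgt_eret) report the architected ERET-trap syndrome instead of passing a bare ISS value (0, 2 or 3) as the whole syndrome word. syn_erettrap() itself lives in target/arm/syndrome.h and is not shown in this patch; the sketch below is an assumption about its shape based on QEMU's usual syndrome encoding, and EC_ERETTRAP, ARM_EL_EC_SHIFT and ARM_EL_IL are likewise assumed names, not taken from the hunks above:

/*
 * Assumed shape of the helper used above (illustration only).
 * op identifies the trapped instruction in the ISS:
 *   0 = ERET, 2 = ERETAA, 3 = ERETAB,
 * which is why trans_ERETA passes a->m ? 3 : 2.
 */
static inline uint32_t syn_erettrap(int op)
{
    return (EC_ERETTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL | op;
}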
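On the ufs-test.c hunks: a freshly reset SCSI logical unit carries a Unit Attention condition (power-on/reset), so its first command completes with CHECK CONDITION rather than GOOD status. The test therefore issues a REQUEST SENSE first; the Unit Attention is delivered on that command (hence the asserted CHECK_CONDITION) and cleared, which makes the later TEST UNIT READY / READ CAPACITY / READ / WRITE assertions deterministic. The pattern, restated with only the helpers already used in the hunks above (ufs and lun stand for the enclosing test's device handle and logical unit number, and sense_buf is an illustrative name, not one from the patch):

    const uint8_t request_sense_cdb[UFS_CDB_SIZE] = {
        REQUEST_SENSE,
    };
    uint8_t sense_buf[4096] = { 0 };    /* illustrative scratch buffer */
    UtpTransferReqDesc utrd;
    UtpUpiuRsp rsp_upiu;

    /* First command after reset: expect CHECK CONDITION (Unit Attention) */
    ufs_send_scsi_command(ufs, 0, lun, request_sense_cdb, NULL, 0,
                          sense_buf, sizeof(sense_buf), &utrd, &rsp_upiu);
    g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
    g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, CHECK_CONDITION);

    /* Subsequent commands to this logical unit should now return GOOD */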