-rw-r--r--  docs/system/arm/max78000.rst  |  37
-rw-r--r--  docs/system/arm/virt.rst  |  9
-rw-r--r--  docs/system/devices/cxl.rst  |  11
-rw-r--r--  docs/system/target-arm.rst  |  1
-rw-r--r--  hw/acpi/cxl.c  |  76
-rw-r--r--  hw/arm/Kconfig  |  15
-rw-r--r--  hw/arm/fsl-imx8mp.c  |  4
-rw-r--r--  hw/arm/max78000_soc.c  |  232
-rw-r--r--  hw/arm/max78000fthr.c  |  50
-rw-r--r--  hw/arm/meson.build  |  2
-rw-r--r--  hw/arm/virt-acpi-build.c  |  40
-rw-r--r--  hw/arm/virt.c  |  52
-rw-r--r--  hw/char/Kconfig  |  3
-rw-r--r--  hw/char/max78000_uart.c  |  285
-rw-r--r--  hw/char/meson.build  |  1
-rw-r--r--  hw/cxl/cxl-host-stubs.c  |  7
-rw-r--r--  hw/cxl/cxl-host.c  |  174
-rw-r--r--  hw/i386/pc.c  |  50
-rw-r--r--  hw/intc/arm_gicv3_common.c  |  1
-rw-r--r--  hw/intc/arm_gicv3_kvm.c  |  29
-rw-r--r--  hw/intc/armv7m_nvic.c  |  4
-rw-r--r--  hw/misc/Kconfig  |  12
-rw-r--r--  hw/misc/max78000_aes.c  |  223
-rw-r--r--  hw/misc/max78000_gcr.c  |  351
-rw-r--r--  hw/misc/max78000_icc.c  |  120
-rw-r--r--  hw/misc/max78000_trng.c  |  139
-rw-r--r--  hw/misc/meson.build  |  4
-rw-r--r--  include/hw/arm/max78000_soc.h  |  50
-rw-r--r--  include/hw/arm/virt.h  |  4
-rw-r--r--  include/hw/char/max78000_uart.h  |  78
-rw-r--r--  include/hw/cxl/cxl.h  |  5
-rw-r--r--  include/hw/cxl/cxl_host.h  |  5
-rw-r--r--  include/hw/intc/arm_gicv3_common.h  |  1
-rw-r--r--  include/hw/misc/max78000_aes.h  |  68
-rw-r--r--  include/hw/misc/max78000_gcr.h  |  131
-rw-r--r--  include/hw/misc/max78000_icc.h  |  33
-rw-r--r--  include/hw/misc/max78000_trng.h  |  35
-rw-r--r--  system/qdev-monitor.c  |  9
-rw-r--r--  target/arm/cpregs-pmu.c  |  1309
-rw-r--r--  target/arm/cpregs.h  |  3
-rw-r--r--  target/arm/cpu-sysregs.h.inc  |  4
-rw-r--r--  target/arm/cpu.h  |  6
-rw-r--r--  target/arm/cpu64.c  |  8
-rw-r--r--  target/arm/helper.c  |  1813
-rw-r--r--  target/arm/internals.h  |  5
-rw-r--r--  target/arm/kvm-stub.c  |  5
-rw-r--r--  target/arm/kvm.c  |  24
-rw-r--r--  target/arm/kvm_arm.h  |  7
-rw-r--r--  target/arm/meson.build  |  2
-rw-r--r--  target/arm/tcg-stubs.c  |  5
-rw-r--r--  target/arm/tcg/cpregs-at.c  |  519
-rw-r--r--  target/arm/tcg/cpu-v7m.c  |  16
-rw-r--r--  target/arm/tcg/cpu32.c  |  34
-rw-r--r--  target/arm/tcg/cpu64.c  |  68
-rw-r--r--  target/arm/tcg/helper-a64.c  |  20
-rw-r--r--  target/arm/tcg/helper.h  |  1
-rw-r--r--  target/arm/tcg/meson.build  |  1
-rw-r--r--  target/arm/tcg/vec_helper.c  |  1
-rw-r--r--  tests/functional/meson.build  |  1
-rwxr-xr-x  tests/functional/test_arm_max78000fthr.py  |  48
-rw-r--r--  tests/qtest/cxl-test.c  |  58
-rw-r--r--  tests/qtest/meson.build  |  1
62 files changed, 4300 insertions, 2010 deletions
diff --git a/docs/system/arm/max78000.rst b/docs/system/arm/max78000.rst
new file mode 100644
index 0000000000..3d95011fef
--- /dev/null
+++ b/docs/system/arm/max78000.rst
@@ -0,0 +1,37 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Analog Devices max78000 board (``max78000fthr``)
+================================================
+
+The max78000 is a Cortex-M4 based SoC with a RISC-V coprocessor. The RISC-V coprocessor is not supported.
+
+Supported devices
+-----------------
+
+ * Instruction Cache Controller
+ * UART
+ * Global Control Register
+ * True Random Number Generator
+ * AES
+
+Notable unsupported devices
+---------------------------
+
+ * I2C
+ * CNN
+ * CRC
+ * SPI
+
+Boot options
+------------
+
+The max78000 can be started using the ``-kernel`` option to load a
+firmware image at address 0 in place of the ROM. As the ROM normally jumps
+to software loaded from the internal flash at address 0x10000000, it is
+generally advisable to also load your program there. If you don't have a
+copy of the ROM, the interrupt vector table from the user firmware will do.
+Example:
+
+.. code-block:: bash
+
+  $ qemu-system-arm -machine max78000fthr -kernel max78000.bin -device loader,file=max78000.bin,addr=0x10000000
diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst
index 6a719b9586..10cbffc8a7 100644
--- a/docs/system/arm/virt.rst
+++ b/docs/system/arm/virt.rst
@@ -31,6 +31,7 @@ Supported devices
 The virt board supports:
 
 - PCI/PCIe devices
+- CXL fixed memory windows, root bridges and devices
 - Flash memory
 - Either one or two PL011 UARTs for the NonSecure World
 - An RTC
@@ -189,6 +190,14 @@ ras
 acpi
   Set ``on``/``off``/``auto`` to enable/disable ACPI.
 
+cxl
+  Set ``on``/``off`` to enable/disable CXL. More details in
+  :doc:`../devices/cxl`. The default is ``off``.
+
+cxl-fmw
+  Array of CXL fixed memory windows describing fixed address routing to
+  target CXL host bridges. See :doc:`../devices/cxl`.
+
 dtb-randomness
   Set ``on``/``off`` to pass random seeds via the guest DTB
   rng-seed and kaslr-seed nodes (in both "/chosen" and
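
The ``cxl`` and ``cxl-fmw`` options added above are used together with a
pxb-cxl host bridge on the command line. A minimal sketch (the bridge id
``cxl.1`` and the 4G window size are illustrative; the complete topology is
shown in the cxl.rst example added below)::

  qemu-system-aarch64 -M virt,gic-version=3,cxl=on -cpu max -m 4g,maxmem=8g,slots=4 \
    -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
    -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G
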
diff --git a/docs/system/devices/cxl.rst b/docs/system/devices/cxl.rst
index e307caf3f8..ca15a0da1c 100644
--- a/docs/system/devices/cxl.rst
+++ b/docs/system/devices/cxl.rst
@@ -384,6 +384,17 @@ An example of 4 devices below a switch suitable for 1, 2 or 4 way interleave::
   -device cxl-type3,bus=swport3,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3,sn=0x4 \
   -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=4k
 
+A simple arm/virt example featuring a single directly connected CXL Type 3
+Volatile Memory device::
+
+  qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8g,slots=4 -cpu max -smp 4 \
+  ...
+  -object memory-backend-ram,id=vmem0,share=on,size=256M \
+  -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
+  -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \
+  -device cxl-type3,bus=root_port13,volatile-memdev=vmem0,id=cxl-vmem0 \
+  -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G
+
 Deprecations
 ------------
 
diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst
index b96a05a920..a96d1867df 100644
--- a/docs/system/target-arm.rst
+++ b/docs/system/target-arm.rst
@@ -71,6 +71,7 @@ Board-specific documentation
 .. toctree::
    :maxdepth: 1
 
+   arm/max78000
    arm/integratorcp
    arm/mps2
    arm/musca
diff --git a/hw/acpi/cxl.c b/hw/acpi/cxl.c
index 9cd7905ea2..75d5b30bb8 100644
--- a/hw/acpi/cxl.c
+++ b/hw/acpi/cxl.c
@@ -22,6 +22,7 @@
 #include "hw/pci/pci_bridge.h"
 #include "hw/pci/pci_host.h"
 #include "hw/cxl/cxl.h"
+#include "hw/cxl/cxl_host.h"
 #include "hw/mem/memory-device.h"
 #include "hw/acpi/acpi.h"
 #include "hw/acpi/aml-build.h"
@@ -135,55 +136,52 @@ static void cedt_build_chbs(GArray *table_data, PXBCXLDev *cxl)
  * Interleave ways encoding in CXL 2.0 ECN: 3, 6, 12 and 16-way memory
  * interleaving.
  */
-static void cedt_build_cfmws(GArray *table_data, CXLState *cxls)
+static void cedt_build_cfmws(CXLFixedWindow *fw, Aml *cedt)
 {
-    GList *it;
+    GArray *table_data = cedt->buf;
+    int i;
 
-    for (it = cxls->fixed_windows; it; it = it->next) {
-        CXLFixedWindow *fw = it->data;
-        int i;
-
-        /* Type */
-        build_append_int_noprefix(table_data, 1, 1);
+    /* Type */
+    build_append_int_noprefix(table_data, 1, 1);
 
-        /* Reserved */
-        build_append_int_noprefix(table_data, 0, 1);
+    /* Reserved */
+    build_append_int_noprefix(table_data, 0, 1);
 
-        /* Record Length */
-        build_append_int_noprefix(table_data, 36 + 4 * fw->num_targets, 2);
+    /* Record Length */
+    build_append_int_noprefix(table_data, 36 + 4 * fw->num_targets, 2);
 
-        /* Reserved */
-        build_append_int_noprefix(table_data, 0, 4);
+    /* Reserved */
+    build_append_int_noprefix(table_data, 0, 4);
 
-        /* Base HPA */
-        build_append_int_noprefix(table_data, fw->mr.addr, 8);
+    /* Base HPA */
+    build_append_int_noprefix(table_data, fw->mr.addr, 8);
 
-        /* Window Size */
-        build_append_int_noprefix(table_data, fw->size, 8);
+    /* Window Size */
+    build_append_int_noprefix(table_data, fw->size, 8);
 
-        /* Host Bridge Interleave Ways */
-        build_append_int_noprefix(table_data, fw->enc_int_ways, 1);
+    /* Host Bridge Interleave Ways */
+    build_append_int_noprefix(table_data, fw->enc_int_ways, 1);
 
-        /* Host Bridge Interleave Arithmetic */
-        build_append_int_noprefix(table_data, 0, 1);
+    /* Host Bridge Interleave Arithmetic */
+    build_append_int_noprefix(table_data, 0, 1);
 
-        /* Reserved */
-        build_append_int_noprefix(table_data, 0, 2);
+    /* Reserved */
+    build_append_int_noprefix(table_data, 0, 2);
 
-        /* Host Bridge Interleave Granularity */
-        build_append_int_noprefix(table_data, fw->enc_int_gran, 4);
+    /* Host Bridge Interleave Granularity */
+    build_append_int_noprefix(table_data, fw->enc_int_gran, 4);
 
-        /* Window Restrictions */
-        build_append_int_noprefix(table_data, 0x0f, 2); /* No restrictions */
+    /* Window Restrictions */
+    build_append_int_noprefix(table_data, 0x0f, 2);
 
-        /* QTG ID */
-        build_append_int_noprefix(table_data, 0, 2);
+    /* QTG ID */
+    build_append_int_noprefix(table_data, 0, 2);
 
-        /* Host Bridge List (list of UIDs - currently bus_nr) */
-        for (i = 0; i < fw->num_targets; i++) {
-            g_assert(fw->target_hbs[i]);
-            build_append_int_noprefix(table_data, PXB_DEV(fw->target_hbs[i])->bus_nr, 4);
-        }
+    /* Host Bridge List (list of UIDs - currently bus_nr) */
+    for (i = 0; i < fw->num_targets; i++) {
+        g_assert(fw->target_hbs[i]);
+        build_append_int_noprefix(table_data,
+                                  PXB_DEV(fw->target_hbs[i])->bus_nr, 4);
     }
 }
 
@@ -202,6 +200,7 @@ void cxl_build_cedt(GArray *table_offsets, GArray *table_data,
                     BIOSLinker *linker, const char *oem_id,
                     const char *oem_table_id, CXLState *cxl_state)
 {
+    GSList *cfmws_list, *iter;
     Aml *cedt;
     AcpiTable table = { .sig = "CEDT", .rev = 1, .oem_id = oem_id,
                         .oem_table_id = oem_table_id };
@@ -213,7 +212,12 @@ void cxl_build_cedt(GArray *table_offsets, GArray *table_data,
     /* reserve space for CEDT header */
 
     object_child_foreach_recursive(object_get_root(), cxl_foreach_pxb_hb, cedt);
-    cedt_build_cfmws(cedt->buf, cxl_state);
+
+    cfmws_list = cxl_fmws_get_all_sorted();
+    for (iter = cfmws_list; iter; iter = iter->next) {
+        cedt_build_cfmws(CXL_FMW(iter->data), cedt);
+    }
+    g_slist_free(cfmws_list);
 
     /* copy AML table into ACPI tables blob and patch header there */
     g_array_append_vals(table_data, cedt->buf->data, cedt->buf->len);
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
index 6ea86534d5..1634e26fcc 100644
--- a/hw/arm/Kconfig
+++ b/hw/arm/Kconfig
@@ -95,6 +95,12 @@ config INTEGRATOR
     select PL181 # display
     select SMC91C111
 
+config MAX78000FTHR
+    bool
+    default y
+    depends on TCG && ARM
+    select MAX78000_SOC
+
 config MPS3R
     bool
     default y
@@ -357,6 +363,15 @@ config ALLWINNER_R40
     select USB_EHCI_SYSBUS
     select SD
 
+config MAX78000_SOC
+    bool
+    select ARM_V7M
+    select MAX78000_ICC
+    select MAX78000_UART
+    select MAX78000_GCR
+    select MAX78000_TRNG
+    select MAX78000_AES
+
 config RASPI
     bool
     default y
diff --git a/hw/arm/fsl-imx8mp.c b/hw/arm/fsl-imx8mp.c
index 23e662c16c..866f4d1d74 100644
--- a/hw/arm/fsl-imx8mp.c
+++ b/hw/arm/fsl-imx8mp.c
@@ -356,6 +356,10 @@ static void fsl_imx8mp_realize(DeviceState *dev, Error **errp)
                                qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
             sysbus_connect_irq(gicsbd, i + ms->smp.cpus,
                                qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
+            sysbus_connect_irq(gicsbd, i + 2 * ms->smp.cpus,
+                               qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ));
+            sysbus_connect_irq(gicsbd, i + 3 * ms->smp.cpus,
+                               qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ));
         }
     }
 
diff --git a/hw/arm/max78000_soc.c b/hw/arm/max78000_soc.c
new file mode 100644
index 0000000000..7f1856f5ba
--- /dev/null
+++ b/hw/arm/max78000_soc.c
@@ -0,0 +1,232 @@
+/*
+ * MAX78000 SOC
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Implementation based on stm32f205 and Max78000 user guide at
+ * https://www.analog.com/media/en/technical-documentation/user-guides/max78000-user-guide.pdf
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "system/address-spaces.h"
+#include "system/system.h"
+#include "hw/arm/max78000_soc.h"
+#include "hw/qdev-clock.h"
+#include "hw/misc/unimp.h"
+
+static const uint32_t max78000_icc_addr[] = {0x4002a000, 0x4002a800};
+static const uint32_t max78000_uart_addr[] = {0x40042000, 0x40043000,
+                                              0x40044000};
+
+static const int max78000_uart_irq[] = {14, 15, 34};
+
+static void max78000_soc_initfn(Object *obj)
+{
+    MAX78000State *s = MAX78000_SOC(obj);
+    int i;
+
+    object_initialize_child(obj, "armv7m", &s->armv7m, TYPE_ARMV7M);
+
+    object_initialize_child(obj, "gcr", &s->gcr, TYPE_MAX78000_GCR);
+
+    for (i = 0; i < MAX78000_NUM_ICC; i++) {
+        g_autofree char *name = g_strdup_printf("icc%d", i);
+        object_initialize_child(obj, name, &s->icc[i], TYPE_MAX78000_ICC);
+    }
+
+    for (i = 0; i < MAX78000_NUM_UART; i++) {
+        g_autofree char *name = g_strdup_printf("uart%d", i);
+        object_initialize_child(obj, name, &s->uart[i],
+                                TYPE_MAX78000_UART);
+    }
+
+    object_initialize_child(obj, "trng", &s->trng, TYPE_MAX78000_TRNG);
+
+    object_initialize_child(obj, "aes", &s->aes, TYPE_MAX78000_AES);
+
+    s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0);
+}
+
+static void max78000_soc_realize(DeviceState *dev_soc, Error **errp)
+{
+    MAX78000State *s = MAX78000_SOC(dev_soc);
+    MemoryRegion *system_memory = get_system_memory();
+    DeviceState *dev, *gcrdev, *armv7m;
+    SysBusDevice *busdev;
+    Error *err = NULL;
+    int i;
+
+    if (!clock_has_source(s->sysclk)) {
+        error_setg(errp, "sysclk clock must be wired up by the board code");
+        return;
+    }
+
+    memory_region_init_rom(&s->flash, OBJECT(dev_soc), "MAX78000.flash",
+                           FLASH_SIZE, &err);
+    if (err != NULL) {
+        error_propagate(errp, err);
+        return;
+    }
+
+    memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, &s->flash);
+
+    memory_region_init_ram(&s->sram, NULL, "MAX78000.sram", SRAM_SIZE,
+                           &err);
+
+    gcrdev = DEVICE(&s->gcr);
+    object_property_set_link(OBJECT(gcrdev), "sram", OBJECT(&s->sram),
+                                 &err);
+
+    if (err != NULL) {
+        error_propagate(errp, err);
+        return;
+    }
+    memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, &s->sram);
+
+    armv7m = DEVICE(&s->armv7m);
+
+    /*
+     * The MAX78000 user guide's Interrupt Vector Table section
+     * suggests that there are 120 IRQs in the text, while only listing
+     * 104 in table 5-1. Implement the more generous of the two.
+     * This has not been tested in hardware.
+     */
+    qdev_prop_set_uint32(armv7m, "num-irq", 120);
+    qdev_prop_set_uint8(armv7m, "num-prio-bits", 3);
+    qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m4"));
+    qdev_prop_set_bit(armv7m, "enable-bitband", true);
+    qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk);
+    object_property_set_link(OBJECT(&s->armv7m), "memory",
+                             OBJECT(system_memory), &error_abort);
+    if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) {
+        return;
+    }
+
+    for (i = 0; i < MAX78000_NUM_ICC; i++) {
+        dev = DEVICE(&(s->icc[i]));
+        sysbus_realize(SYS_BUS_DEVICE(dev), errp);
+        sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, max78000_icc_addr[i]);
+    }
+
+    for (i = 0; i < MAX78000_NUM_UART; i++) {
+        g_autofree char *link = g_strdup_printf("uart%d", i);
+        dev = DEVICE(&(s->uart[i]));
+        qdev_prop_set_chr(dev, "chardev", serial_hd(i));
+        if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart[i]), errp)) {
+            return;
+        }
+
+        object_property_set_link(OBJECT(gcrdev), link, OBJECT(dev),
+                                 &err);
+
+        busdev = SYS_BUS_DEVICE(dev);
+        sysbus_mmio_map(busdev, 0, max78000_uart_addr[i]);
+        sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m,
+                                                       max78000_uart_irq[i]));
+    }
+
+    dev = DEVICE(&s->trng);
+    sysbus_realize(SYS_BUS_DEVICE(dev), errp);
+    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x4004d000);
+    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(armv7m, 4));
+
+    object_property_set_link(OBJECT(gcrdev), "trng", OBJECT(dev), &err);
+
+    dev = DEVICE(&s->aes);
+    sysbus_realize(SYS_BUS_DEVICE(dev), errp);
+    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x40007400);
+    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(armv7m, 5));
+
+    object_property_set_link(OBJECT(gcrdev), "aes", OBJECT(dev), &err);
+
+    dev = DEVICE(&s->gcr);
+    sysbus_realize(SYS_BUS_DEVICE(dev), errp);
+    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x40000000);
+
+    create_unimplemented_device("systemInterface",      0x40000400, 0x400);
+    create_unimplemented_device("functionControl",      0x40000800, 0x400);
+    create_unimplemented_device("watchdogTimer0",       0x40003000, 0x400);
+    create_unimplemented_device("dynamicVoltScale",     0x40003c00, 0x40);
+    create_unimplemented_device("SIMO",                 0x40004400, 0x400);
+    create_unimplemented_device("trimSystemInit",       0x40005400, 0x400);
+    create_unimplemented_device("generalCtrlFunc",      0x40005800, 0x400);
+    create_unimplemented_device("wakeupTimer",          0x40006400, 0x400);
+    create_unimplemented_device("powerSequencer",       0x40006800, 0x400);
+    create_unimplemented_device("miscControl",          0x40006c00, 0x400);
+
+    create_unimplemented_device("gpio0",                0x40008000, 0x1000);
+    create_unimplemented_device("gpio1",                0x40009000, 0x1000);
+
+    create_unimplemented_device("parallelCamInterface", 0x4000e000, 0x1000);
+    create_unimplemented_device("CRC",                  0x4000f000, 0x1000);
+
+    create_unimplemented_device("timer0",               0x40010000, 0x1000);
+    create_unimplemented_device("timer1",               0x40011000, 0x1000);
+    create_unimplemented_device("timer2",               0x40012000, 0x1000);
+    create_unimplemented_device("timer3",               0x40013000, 0x1000);
+
+    create_unimplemented_device("i2c0",                 0x4001d000, 0x1000);
+    create_unimplemented_device("i2c1",                 0x4001e000, 0x1000);
+    create_unimplemented_device("i2c2",                 0x4001f000, 0x1000);
+
+    create_unimplemented_device("standardDMA",          0x40028000, 0x1000);
+    create_unimplemented_device("flashController0",     0x40029000, 0x400);
+
+    create_unimplemented_device("adc",                  0x40034000, 0x1000);
+    create_unimplemented_device("pulseTrainEngine",     0x4003c000, 0xa0);
+    create_unimplemented_device("oneWireMaster",        0x4003d000, 0x1000);
+    create_unimplemented_device("semaphore",            0x4003e000, 0x1000);
+
+    create_unimplemented_device("spi1",                 0x40046000, 0x2000);
+    create_unimplemented_device("i2s",                  0x40060000, 0x1000);
+    create_unimplemented_device("lowPowerControl",      0x40080000, 0x400);
+    create_unimplemented_device("gpio2",                0x40080400, 0x200);
+    create_unimplemented_device("lowPowerWatchdogTimer",    0x40080800, 0x400);
+    create_unimplemented_device("lowPowerTimer4",       0x40080c00, 0x400);
+
+    create_unimplemented_device("lowPowerTimer5",       0x40081000, 0x400);
+    create_unimplemented_device("lowPowerUART0",        0x40081400, 0x400);
+    create_unimplemented_device("lowPowerComparator",   0x40088000, 0x400);
+
+    create_unimplemented_device("spi0",                 0x400be000, 0x400);
+
+    /*
+     * The MAX78000 user guide's base address map lists the CNN TX FIFO as
+     * beginning at 0x400c0400 and ending at 0x400c0400. Given that CNN_FIFO
+     * is listed as having data accessible up to offset 0x1000, the user
+     * guide is likely incorrect.
+     */
+    create_unimplemented_device("cnnTxFIFO",            0x400c0400, 0x2000);
+
+    create_unimplemented_device("cnnGlobalControl",     0x50000000, 0x10000);
+    create_unimplemented_device("cnnx16quad0",          0x50100000, 0x40000);
+    create_unimplemented_device("cnnx16quad1",          0x50500000, 0x40000);
+    create_unimplemented_device("cnnx16quad2",          0x50900000, 0x40000);
+    create_unimplemented_device("cnnx16quad3",          0x50d00000, 0x40000);
+
+}
+
+static void max78000_soc_class_init(ObjectClass *klass, const void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = max78000_soc_realize;
+}
+
+static const TypeInfo max78000_soc_info = {
+    .name          = TYPE_MAX78000_SOC,
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(MAX78000State),
+    .instance_init = max78000_soc_initfn,
+    .class_init    = max78000_soc_class_init,
+};
+
+static void max78000_soc_types(void)
+{
+    type_register_static(&max78000_soc_info);
+}
+
+type_init(max78000_soc_types)
diff --git a/hw/arm/max78000fthr.c b/hw/arm/max78000fthr.c
new file mode 100644
index 0000000000..c4f6b5b1b0
--- /dev/null
+++ b/hw/arm/max78000fthr.c
@@ -0,0 +1,50 @@
+/*
+ * MAX78000FTHR Evaluation Board
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/boards.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-clock.h"
+#include "qemu/error-report.h"
+#include "hw/arm/max78000_soc.h"
+#include "hw/arm/boot.h"
+
+/* 60MHz is the default, but other clocks can be selected. */
+#define SYSCLK_FRQ 60000000ULL
+static void max78000_init(MachineState *machine)
+{
+    DeviceState *dev;
+    Clock *sysclk;
+
+    sysclk = clock_new(OBJECT(machine), "SYSCLK");
+    clock_set_hz(sysclk, SYSCLK_FRQ);
+
+    dev = qdev_new(TYPE_MAX78000_SOC);
+    object_property_add_child(OBJECT(machine), "soc", OBJECT(dev));
+    qdev_connect_clock_in(dev, "sysclk", sysclk);
+    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+    armv7m_load_kernel(ARM_CPU(first_cpu),
+                       machine->kernel_filename,
+                       0x00000000, FLASH_SIZE);
+}
+
+static void max78000_machine_init(MachineClass *mc)
+{
+    static const char * const valid_cpu_types[] = {
+        ARM_CPU_TYPE_NAME("cortex-m4"),
+        NULL
+    };
+
+    mc->desc = "MAX78000FTHR Board (Cortex-M4 / (Unimplemented) RISC-V)";
+    mc->init = max78000_init;
+    mc->valid_cpu_types = valid_cpu_types;
+}
+
+DEFINE_MACHINE("max78000fthr", max78000_machine_init)
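
A usage sketch for this board, mirroring the invocation documented in
docs/system/arm/max78000.rst above. The ``-serial stdio`` part is an added
convenience, not taken from the patch; it attaches the first host chardev,
which the SoC code maps to UART0 via ``serial_hd(0)``::

  qemu-system-arm -M max78000fthr \
    -kernel max78000.bin \
    -device loader,file=max78000.bin,addr=0x10000000 \
    -serial stdio
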
diff --git a/hw/arm/meson.build b/hw/arm/meson.build
index d90be8f4c9..dc68391305 100644
--- a/hw/arm/meson.build
+++ b/hw/arm/meson.build
@@ -27,6 +27,7 @@ arm_common_ss.add(when: 'CONFIG_OMAP', if_true: files('omap1.c'))
 arm_common_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('allwinner-a10.c', 'cubieboard.c'))
 arm_common_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3.c', 'orangepi.c'))
 arm_common_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40.c', 'bananapi_m2u.c'))
+arm_common_ss.add(when: 'CONFIG_MAX78000_SOC', if_true: files('max78000_soc.c'))
 arm_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2836.c', 'raspi.c'))
 arm_common_ss.add(when: ['CONFIG_RASPI', 'TARGET_AARCH64'], if_true: files('bcm2838.c', 'raspi4b.c'))
 arm_common_ss.add(when: 'CONFIG_STM32F100_SOC', if_true: files('stm32f100_soc.c'))
@@ -71,6 +72,7 @@ arm_ss.add(when: 'CONFIG_XEN', if_true: files(
 arm_common_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmu-common.c'))
 arm_common_ss.add(when: 'CONFIG_COLLIE', if_true: files('collie.c'))
 arm_common_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4_boards.c'))
+arm_common_ss.add(when: 'CONFIG_MAX78000FTHR', if_true: files('max78000fthr.c'))
 arm_common_ss.add(when: 'CONFIG_NETDUINO2', if_true: files('netduino2.c'))
 arm_common_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_peripherals.c'))
 arm_common_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2838_peripherals.c'))
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index cd90c47976..0dfb8ec2c3 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -39,10 +39,12 @@
 #include "hw/acpi/aml-build.h"
 #include "hw/acpi/utils.h"
 #include "hw/acpi/pci.h"
+#include "hw/acpi/cxl.h"
 #include "hw/acpi/memory_hotplug.h"
 #include "hw/acpi/generic_event_device.h"
 #include "hw/acpi/tpm.h"
 #include "hw/acpi/hmat.h"
+#include "hw/cxl/cxl.h"
 #include "hw/pci/pcie_host.h"
 #include "hw/pci/pci.h"
 #include "hw/pci/pci_bus.h"
@@ -119,10 +121,29 @@ static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
     aml_append(scope, dev);
 }
 
+static void build_acpi0017(Aml *table)
+{
+    Aml *dev, *scope, *method;
+
+    scope =  aml_scope("_SB");
+    dev = aml_device("CXLM");
+    aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0017")));
+
+    method = aml_method("_STA", 0, AML_NOTSERIALIZED);
+    aml_append(method, aml_return(aml_int(0x0B)));
+    aml_append(dev, method);
+    build_cxl_dsm_method(dev);
+
+    aml_append(scope, dev);
+    aml_append(table, scope);
+}
+
 static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
                               uint32_t irq, VirtMachineState *vms)
 {
     int ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
+    bool cxl_present = false;
+    PCIBus *bus = vms->bus;
     struct GPEXConfig cfg = {
         .mmio32 = memmap[VIRT_PCIE_MMIO],
         .pio    = memmap[VIRT_PCIE_PIO],
@@ -136,6 +157,14 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
     }
 
     acpi_dsdt_add_gpex(scope, &cfg);
+    QLIST_FOREACH(bus, &vms->bus->child, sibling) {
+        if (pci_bus_is_cxl(bus)) {
+            cxl_present = true;
+        }
+    }
+    if (cxl_present) {
+        build_acpi0017(scope);
+    }
 }
 
 static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap,
@@ -329,12 +358,6 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
         /* Sort the smmu idmap by input_base */
         g_array_sort(rc_smmu_idmaps, iort_idmap_compare);
 
-        /*
-         * Knowing the ID ranges from the RC to the SMMU, it's possible to
-         * determine the ID ranges from RC that are directed to the ITS.
-         */
-        create_rc_its_idmaps(rc_its_idmaps, rc_smmu_idmaps);
-
         nb_nodes = 2; /* RC and SMMUv3 */
         rc_mapping_count = rc_smmu_idmaps->len;
 
@@ -1027,6 +1050,11 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
         }
     }
 
+    if (vms->cxl_devices_state.is_enabled) {
+        cxl_build_cedt(table_offsets, tables_blob, tables->linker,
+                       vms->oem_id, vms->oem_table_id, &vms->cxl_devices_state);
+    }
+
     if (ms->nvdimms_state->is_enabled) {
         nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
                           ms->nvdimms_state, ms->ram_slots, vms->oem_id,
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 3bcdf92e2f..8070ff7b11 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -57,6 +57,7 @@
 #include "qemu/error-report.h"
 #include "qemu/module.h"
 #include "hw/pci-host/gpex.h"
+#include "hw/pci-bridge/pci_expander_bridge.h"
 #include "hw/virtio/virtio-pci.h"
 #include "hw/core/sysbus-fdt.h"
 #include "hw/platform-bus.h"
@@ -86,6 +87,8 @@
 #include "hw/virtio/virtio-md-pci.h"
 #include "hw/virtio/virtio-iommu.h"
 #include "hw/char/pl011.h"
+#include "hw/cxl/cxl.h"
+#include "hw/cxl/cxl_host.h"
 #include "qemu/guest-random.h"
 
 static GlobalProperty arm_virt_compat[] = {
@@ -220,9 +223,11 @@ static const MemMapEntry base_memmap[] = {
 static MemMapEntry extended_memmap[] = {
     /* Additional 64 MB redist region (can contain up to 512 redistributors) */
     [VIRT_HIGH_GIC_REDIST2] =   { 0x0, 64 * MiB },
+    [VIRT_CXL_HOST] =           { 0x0, 64 * KiB * 16 }, /* 16 UID */
     [VIRT_HIGH_PCIE_ECAM] =     { 0x0, 256 * MiB },
     /* Second PCIe window */
     [VIRT_HIGH_PCIE_MMIO] =     { 0x0, DEFAULT_HIGH_PCIE_MMIO_SIZE },
+    /* Any CXL Fixed memory windows come here */
 };
 
 static const int a15irqmap[] = {
@@ -792,6 +797,13 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
     default:
         g_assert_not_reached();
     }
+
+    if (kvm_enabled() && vms->virt &&
+        (revision != 3 || !kvm_irqchip_in_kernel())) {
+        error_report("KVM EL2 is only supported with in-kernel GICv3");
+        exit(1);
+    }
+
     vms->gic = qdev_new(gictype);
     qdev_prop_set_uint32(vms->gic, "revision", revision);
     qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus);
@@ -828,6 +840,9 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
                                          OBJECT(mem), &error_fatal);
                 qdev_prop_set_bit(vms->gic, "has-lpi", true);
             }
+        } else if (vms->virt) {
+            qdev_prop_set_uint32(vms->gic, "maintenance-interrupt-id",
+                                 ARCH_GIC_MAINT_IRQ);
         }
     } else {
         if (!kvm_irqchip_in_kernel()) {
@@ -1623,6 +1638,17 @@ static void create_pcie(VirtMachineState *vms)
     }
 }
 
+static void create_cxl_host_reg_region(VirtMachineState *vms)
+{
+    MemoryRegion *sysmem = get_system_memory();
+    MemoryRegion *mr = &vms->cxl_devices_state.host_mr;
+
+    memory_region_init(mr, OBJECT(vms), "cxl_host_reg",
+                       vms->memmap[VIRT_CXL_HOST].size);
+    memory_region_add_subregion(sysmem, vms->memmap[VIRT_CXL_HOST].base, mr);
+    vms->highmem_cxl = true;
+}
+
 static void create_platform_bus(VirtMachineState *vms)
 {
     DeviceState *dev;
@@ -1739,6 +1765,12 @@ void virt_machine_done(Notifier *notifier, void *data)
     struct arm_boot_info *info = &vms->bootinfo;
     AddressSpace *as = arm_boot_address_space(cpu, info);
 
+    cxl_hook_up_pxb_registers(vms->bus, &vms->cxl_devices_state,
+                              &error_fatal);
+
+    if (vms->cxl_devices_state.is_enabled) {
+        cxl_fmws_link_targets(&error_fatal);
+    }
     /*
      * If the user provided a dtb, we assume the dynamic sysbus nodes
      * already are integrated there. This corresponds to a use case where
@@ -1785,6 +1817,7 @@ static inline bool *virt_get_high_memmap_enabled(VirtMachineState *vms,
 {
     bool *enabled_array[] = {
         &vms->highmem_redists,
+        &vms->highmem_cxl,
         &vms->highmem_ecam,
         &vms->highmem_mmio,
     };
@@ -1892,6 +1925,9 @@ static void virt_set_memmap(VirtMachineState *vms, int pa_bits)
     if (device_memory_size > 0) {
         machine_memory_devices_init(ms, device_memory_base, device_memory_size);
     }
+    vms->highest_gpa = cxl_fmws_set_memmap(ROUND_UP(vms->highest_gpa + 1,
+                                                    256 * MiB),
+                                           BIT_ULL(pa_bits)) - 1;
 }
 
 static VirtGICType finalize_gic_version_do(const char *accel_name,
@@ -2063,6 +2099,10 @@ static void virt_post_cpus_gic_realized(VirtMachineState *vms,
             memory_region_init_ram(pvtime, NULL, "pvtime", pvtime_size, NULL);
             memory_region_add_subregion(sysmem, pvtime_reg_base, pvtime);
         }
+        if (!aarch64 && vms->virt) {
+            error_report("KVM does not support EL2 on an AArch32 vCPU");
+            exit(1);
+        }
 
         CPU_FOREACH(cpu) {
             if (pmu) {
@@ -2208,7 +2248,13 @@ static void machvirt_init(MachineState *machine)
         exit(1);
     }
 
-    if (vms->virt && !tcg_enabled() && !qtest_enabled()) {
+    if (vms->virt && kvm_enabled() && !kvm_arm_el2_supported()) {
+        error_report("mach-virt: host kernel KVM does not support providing "
+                     "Virtualization extensions to the guest CPU");
+        exit(1);
+    }
+
+    if (vms->virt && !kvm_enabled() && !tcg_enabled() && !qtest_enabled()) {
         error_report("mach-virt: %s does not support providing "
                      "Virtualization extensions to the guest CPU",
                      current_accel_name());
@@ -2343,6 +2389,8 @@ static void machvirt_init(MachineState *machine)
     memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base,
                                 machine->ram);
 
+    cxl_fmws_update_mmio();
+
     virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem);
 
     create_gic(vms, sysmem);
@@ -2398,6 +2446,7 @@ static void machvirt_init(MachineState *machine)
     create_rtc(vms);
 
     create_pcie(vms);
+    create_cxl_host_reg_region(vms);
 
     if (has_ged && aarch64 && firmware_loaded && virt_is_acpi_enabled(vms)) {
         vms->acpi_dev = create_acpi_ged(vms);
@@ -3364,6 +3413,7 @@ static void virt_instance_init(Object *obj)
 
     vms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
     vms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
+    cxl_machine_init(obj, &vms->cxl_devices_state);
 }
 
 static const TypeInfo virt_machine_info = {
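
The EL2/GIC checks added to virt.c above gate KVM nested virtualization. A
sketch of an invocation they are intended to permit, assuming a host kernel
whose KVM advertises EL2 support and the in-kernel GICv3 the code requires
(``Image`` is a placeholder guest kernel)::

  qemu-system-aarch64 -M virt,virtualization=on,gic-version=3 \
    -accel kvm -cpu host -m 2g -smp 4 \
    -kernel Image -append "console=ttyAMA0" -nographic
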
diff --git a/hw/char/Kconfig b/hw/char/Kconfig
index 9d517f3e28..020c0a84bb 100644
--- a/hw/char/Kconfig
+++ b/hw/char/Kconfig
@@ -48,6 +48,9 @@ config VIRTIO_SERIAL
     default y
     depends on VIRTIO
 
+config MAX78000_UART
+    bool
+
 config STM32F2XX_USART
     bool
 
diff --git a/hw/char/max78000_uart.c b/hw/char/max78000_uart.c
new file mode 100644
index 0000000000..19506d52ef
--- /dev/null
+++ b/hw/char/max78000_uart.c
@@ -0,0 +1,285 @@
+/*
+ * MAX78000 UART
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/char/max78000_uart.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+
+
+static int max78000_uart_can_receive(void *opaque)
+{
+    Max78000UartState *s = opaque;
+    if (!(s->ctrl & UART_BCLKEN)) {
+        return 0;
+    }
+    return fifo8_num_free(&s->rx_fifo);
+}
+
+static void max78000_update_irq(Max78000UartState *s)
+{
+    int interrupt_level;
+
+    interrupt_level = s->int_fl & s->int_en;
+    qemu_set_irq(s->irq, interrupt_level);
+}
+
+static void max78000_uart_receive(void *opaque, const uint8_t *buf, int size)
+{
+    Max78000UartState *s = opaque;
+
+    assert(size <= fifo8_num_free(&s->rx_fifo));
+
+    fifo8_push_all(&s->rx_fifo, buf, size);
+
+    uint32_t rx_threshold = s->ctrl & 0xf;
+
+    if (fifo8_num_used(&s->rx_fifo) >= rx_threshold) {
+        s->int_fl |= UART_RX_THD;
+    }
+
+    max78000_update_irq(s);
+}
+
+static void max78000_uart_reset_hold(Object *obj, ResetType type)
+{
+    Max78000UartState *s = MAX78000_UART(obj);
+
+    s->ctrl = 0;
+    s->status = UART_TX_EM | UART_RX_EM;
+    s->int_en = 0;
+    s->int_fl = 0;
+    s->osr = 0;
+    s->txpeek = 0;
+    s->pnr = UART_RTS;
+    s->fifo = 0;
+    s->dma = 0;
+    s->wken = 0;
+    s->wkfl = 0;
+    fifo8_reset(&s->rx_fifo);
+}
+
+static uint64_t max78000_uart_read(void *opaque, hwaddr addr,
+                                       unsigned int size)
+{
+    Max78000UartState *s = opaque;
+    uint64_t retvalue = 0;
+    switch (addr) {
+    case UART_CTRL:
+        retvalue = s->ctrl;
+        break;
+    case UART_STATUS:
+        retvalue = (fifo8_num_used(&s->rx_fifo) << UART_RX_LVL) |
+                    UART_TX_EM |
+                    (fifo8_is_empty(&s->rx_fifo) ? UART_RX_EM : 0);
+        break;
+    case UART_INT_EN:
+        retvalue = s->int_en;
+        break;
+    case UART_INT_FL:
+        retvalue = s->int_fl;
+        break;
+    case UART_CLKDIV:
+        retvalue = s->clkdiv;
+        break;
+    case UART_OSR:
+        retvalue = s->osr;
+        break;
+    case UART_TXPEEK:
+        if (!fifo8_is_empty(&s->rx_fifo)) {
+            retvalue = fifo8_peek(&s->rx_fifo);
+        }
+        break;
+    case UART_PNR:
+        retvalue = s->pnr;
+        break;
+    case UART_FIFO:
+        if (!fifo8_is_empty(&s->rx_fifo)) {
+            retvalue = fifo8_pop(&s->rx_fifo);
+            max78000_update_irq(s);
+        }
+        break;
+    case UART_DMA:
+        /* DMA not implemented */
+        retvalue = s->dma;
+        break;
+    case UART_WKEN:
+        retvalue = s->wken;
+        break;
+    case UART_WKFL:
+        retvalue = s->wkfl;
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+            "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr);
+        break;
+    }
+
+    return retvalue;
+}
+
+static void max78000_uart_write(void *opaque, hwaddr addr,
+                                  uint64_t val64, unsigned int size)
+{
+    Max78000UartState *s = opaque;
+
+    uint32_t value = val64;
+    uint8_t data;
+
+    switch (addr) {
+    case UART_CTRL:
+        if (value & UART_FLUSH_RX) {
+            fifo8_reset(&s->rx_fifo);
+        }
+        if (value & UART_BCLKEN) {
+            value = value | UART_BCLKRDY;
+        }
+        s->ctrl = value & ~(UART_FLUSH_RX | UART_FLUSH_TX);
+
+        /*
+         * Software can manage UART flow control manually by setting hfc_en
+         * in UART_CTRL. This would require emulating uart at a lower level,
+         * and is currently unimplemented.
+         */
+
+        return;
+    case UART_STATUS:
+        /* UART_STATUS is read only */
+        return;
+    case UART_INT_EN:
+        s->int_en = value;
+        return;
+    case UART_INT_FL:
+        s->int_fl = s->int_fl & ~(value);
+        max78000_update_irq(s);
+        return;
+    case UART_CLKDIV:
+        s->clkdiv = value;
+        return;
+    case UART_OSR:
+        s->osr = value;
+        return;
+    case UART_PNR:
+        s->pnr = value;
+        return;
+    case UART_FIFO:
+        data = value & 0xff;
+        /*
+         * XXX this blocks entire thread. Rewrite to use
+         * qemu_chr_fe_write and background I/O callbacks
+         */
+        qemu_chr_fe_write_all(&s->chr, &data, 1);
+
+        /* TX is always empty */
+        s->int_fl |= UART_TX_HE;
+        max78000_update_irq(s);
+
+        return;
+    case UART_DMA:
+        /* DMA not implemented */
+        s->dma = value;
+        return;
+    case UART_WKEN:
+        s->wken = value;
+        return;
+    case UART_WKFL:
+        s->wkfl = value;
+        return;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"
+            HWADDR_PRIx "\n", __func__, addr);
+    }
+}
+
+static const MemoryRegionOps max78000_uart_ops = {
+    .read = max78000_uart_read,
+    .write = max78000_uart_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid.min_access_size = 4,
+    .valid.max_access_size = 4,
+};
+
+static const Property max78000_uart_properties[] = {
+    DEFINE_PROP_CHR("chardev", Max78000UartState, chr),
+};
+
+static const VMStateDescription max78000_uart_vmstate = {
+    .name = TYPE_MAX78000_UART,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(ctrl, Max78000UartState),
+        VMSTATE_UINT32(status, Max78000UartState),
+        VMSTATE_UINT32(int_en, Max78000UartState),
+        VMSTATE_UINT32(int_fl, Max78000UartState),
+        VMSTATE_UINT32(clkdiv, Max78000UartState),
+        VMSTATE_UINT32(osr, Max78000UartState),
+        VMSTATE_UINT32(txpeek, Max78000UartState),
+        VMSTATE_UINT32(pnr, Max78000UartState),
+        VMSTATE_UINT32(fifo, Max78000UartState),
+        VMSTATE_UINT32(dma, Max78000UartState),
+        VMSTATE_UINT32(wken, Max78000UartState),
+        VMSTATE_UINT32(wkfl, Max78000UartState),
+        VMSTATE_FIFO8(rx_fifo, Max78000UartState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void max78000_uart_init(Object *obj)
+{
+    Max78000UartState *s = MAX78000_UART(obj);
+    fifo8_create(&s->rx_fifo, 8);
+
+    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
+
+    memory_region_init_io(&s->mmio, obj, &max78000_uart_ops, s,
+                          TYPE_MAX78000_UART, 0x400);
+    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+}
+
+static void max78000_uart_realize(DeviceState *dev, Error **errp)
+{
+    Max78000UartState *s = MAX78000_UART(dev);
+
+    qemu_chr_fe_set_handlers(&s->chr, max78000_uart_can_receive,
+                             max78000_uart_receive, NULL, NULL,
+                             s, NULL, true);
+}
+
+static void max78000_uart_class_init(ObjectClass *klass, const void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+    rc->phases.hold = max78000_uart_reset_hold;
+
+    device_class_set_props(dc, max78000_uart_properties);
+    dc->realize = max78000_uart_realize;
+
+    dc->vmsd = &max78000_uart_vmstate;
+}
+
+static const TypeInfo max78000_uart_info = {
+    .name          = TYPE_MAX78000_UART,
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(Max78000UartState),
+    .instance_init = max78000_uart_init,
+    .class_init    = max78000_uart_class_init,
+};
+
+static void max78000_uart_register_types(void)
+{
+    type_register_static(&max78000_uart_info);
+}
+
+type_init(max78000_uart_register_types)
diff --git a/hw/char/meson.build b/hw/char/meson.build
index 4e439da8b9..a9e1dc26c0 100644
--- a/hw/char/meson.build
+++ b/hw/char/meson.build
@@ -26,6 +26,7 @@ system_ss.add(when: 'CONFIG_AVR_USART', if_true: files('avr_usart.c'))
 system_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_uart.c'))
 system_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic-uart.c'))
 system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_uart.c'))
+system_ss.add(when: 'CONFIG_MAX78000_UART', if_true: files('max78000_uart.c'))
 system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_uart.c'))
 system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_aux.c'))
 system_ss.add(when: 'CONFIG_RENESAS_SCI', if_true: files('renesas_sci.c'))
diff --git a/hw/cxl/cxl-host-stubs.c b/hw/cxl/cxl-host-stubs.c
index cae4afcdde..c015baac81 100644
--- a/hw/cxl/cxl-host-stubs.c
+++ b/hw/cxl/cxl-host-stubs.c
@@ -8,8 +8,13 @@
 #include "hw/cxl/cxl.h"
 #include "hw/cxl/cxl_host.h"
 
-void cxl_fmws_link_targets(CXLState *stat, Error **errp) {};
+void cxl_fmws_link_targets(Error **errp) {};
 void cxl_machine_init(Object *obj, CXLState *state) {};
 void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp) {};
+hwaddr cxl_fmws_set_memmap(hwaddr base, hwaddr max_addr)
+{
+    return base;
+};
+void cxl_fmws_update_mmio(void) {};
 
 const MemoryRegionOps cfmws_ops;
diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c
index e010163174..5c2ce25a19 100644
--- a/hw/cxl/cxl-host.c
+++ b/hw/cxl/cxl-host.c
@@ -22,15 +22,17 @@
 #include "hw/pci/pcie_port.h"
 #include "hw/pci-bridge/pci_expander_bridge.h"
 
-static void cxl_fixed_memory_window_config(CXLState *cxl_state,
-                                           CXLFixedMemoryWindowOptions *object,
-                                           Error **errp)
+static void cxl_fixed_memory_window_config(CXLFixedMemoryWindowOptions *object,
+                                           int index, Error **errp)
 {
     ERRP_GUARD();
-    g_autofree CXLFixedWindow *fw = g_malloc0(sizeof(*fw));
+    DeviceState *dev = qdev_new(TYPE_CXL_FMW);
+    CXLFixedWindow *fw = CXL_FMW(dev);
     strList *target;
     int i;
 
+    fw->index = index;
+
     for (target = object->targets; target; target = target->next) {
         fw->num_targets++;
     }
@@ -65,35 +67,39 @@ static void cxl_fixed_memory_window_config(CXLState *cxl_state,
         fw->targets[i] = g_strdup(target->value);
     }
 
-    cxl_state->fixed_windows = g_list_append(cxl_state->fixed_windows,
-                                             g_steal_pointer(&fw));
+    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), errp);
 }
 
-void cxl_fmws_link_targets(CXLState *cxl_state, Error **errp)
+static int cxl_fmws_link(Object *obj, void *opaque)
 {
-    if (cxl_state && cxl_state->fixed_windows) {
-        GList *it;
-
-        for (it = cxl_state->fixed_windows; it; it = it->next) {
-            CXLFixedWindow *fw = it->data;
-            int i;
-
-            for (i = 0; i < fw->num_targets; i++) {
-                Object *o;
-                bool ambig;
-
-                o = object_resolve_path_type(fw->targets[i],
-                                             TYPE_PXB_CXL_DEV,
-                                             &ambig);
-                if (!o) {
-                    error_setg(errp, "Could not resolve CXLFM target %s",
-                               fw->targets[i]);
-                    return;
-                }
-                fw->target_hbs[i] = PXB_CXL_DEV(o);
-            }
+    struct CXLFixedWindow *fw;
+    int i;
+
+    if (!object_dynamic_cast(obj, TYPE_CXL_FMW)) {
+        return 0;
+    }
+    fw = CXL_FMW(obj);
+
+    for (i = 0; i < fw->num_targets; i++) {
+        Object *o;
+        bool ambig;
+
+        o = object_resolve_path_type(fw->targets[i], TYPE_PXB_CXL_DEV,
+                                     &ambig);
+        if (!o) {
+            error_setg(&error_fatal, "Could not resolve CXLFM target %s",
+                       fw->targets[i]);
+            return 1;
         }
+        fw->target_hbs[i] = PXB_CXL_DEV(o);
     }
+    return 0;
+}
+
+void cxl_fmws_link_targets(Error **errp)
+{
+    /* Order doesn't matter for this, so no need to build list */
+    object_child_foreach_recursive(object_get_root(), cxl_fmws_link, NULL);
 }
 
 static bool cxl_hdm_find_target(uint32_t *cache_mem, hwaddr addr,
@@ -325,14 +331,15 @@ static void machine_set_cfmw(Object *obj, Visitor *v, const char *name,
     CXLState *state = opaque;
     CXLFixedMemoryWindowOptionsList *cfmw_list = NULL;
     CXLFixedMemoryWindowOptionsList *it;
+    int index;
 
     visit_type_CXLFixedMemoryWindowOptionsList(v, name, &cfmw_list, errp);
     if (!cfmw_list) {
         return;
     }
 
-    for (it = cfmw_list; it; it = it->next) {
-        cxl_fixed_memory_window_config(state, it->value, errp);
+    for (it = cfmw_list, index = 0; it; it = it->next, index++) {
+        cxl_fixed_memory_window_config(it->value, index, errp);
     }
     state->cfmw_list = cfmw_list;
 }
@@ -370,3 +377,110 @@ void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp)
         }
     }
 }
+
+static int cxl_fmws_find(Object *obj, void *opaque)
+{
+    GSList **list = opaque;
+
+    if (!object_dynamic_cast(obj, TYPE_CXL_FMW)) {
+        return 0;
+    }
+    *list = g_slist_prepend(*list, obj);
+
+    return 0;
+}
+
+static GSList *cxl_fmws_get_all(void)
+{
+    GSList *list = NULL;
+
+    object_child_foreach_recursive(object_get_root(), cxl_fmws_find, &list);
+
+    return list;
+}
+
+static gint cfmws_cmp(gconstpointer a, gconstpointer b, gpointer d)
+{
+    const struct CXLFixedWindow *ap = a;
+    const struct CXLFixedWindow *bp = b;
+
+    return ap->index > bp->index;
+}
+
+GSList *cxl_fmws_get_all_sorted(void)
+{
+    return g_slist_sort_with_data(cxl_fmws_get_all(), cfmws_cmp, NULL);
+}
+
+static int cxl_fmws_mmio_map(Object *obj, void *opaque)
+{
+    struct CXLFixedWindow *fw;
+
+    if (!object_dynamic_cast(obj, TYPE_CXL_FMW)) {
+        return 0;
+    }
+    fw = CXL_FMW(obj);
+    sysbus_mmio_map(SYS_BUS_DEVICE(fw), 0, fw->base);
+
+    return 0;
+}
+
+void cxl_fmws_update_mmio(void)
+{
+    /* Ordering is not required for this */
+    object_child_foreach_recursive(object_get_root(), cxl_fmws_mmio_map, NULL);
+}
+
+hwaddr cxl_fmws_set_memmap(hwaddr base, hwaddr max_addr)
+{
+    GSList *cfmws_list, *iter;
+    CXLFixedWindow *fw;
+
+    cfmws_list = cxl_fmws_get_all_sorted();
+    for (iter = cfmws_list; iter; iter = iter->next) {
+        fw = CXL_FMW(iter->data);
+        if (base + fw->size <= max_addr) {
+            fw->base = base;
+            base += fw->size;
+        }
+    }
+    g_slist_free(cfmws_list);
+
+    return base;
+}
+
+static void cxl_fmw_realize(DeviceState *dev, Error **errp)
+{
+    CXLFixedWindow *fw = CXL_FMW(dev);
+
+    memory_region_init_io(&fw->mr, OBJECT(dev), &cfmws_ops, fw,
+                          "cxl-fixed-memory-region", fw->size);
+    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &fw->mr);
+}
+
+/*
+ * Note: Fixed memory windows represent fixed address decoders on the host and
+ * as such have no dynamic state to reset or migrate
+ */
+static void cxl_fmw_class_init(ObjectClass *klass, const void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->desc = "CXL Fixed Memory Window";
+    dc->realize = cxl_fmw_realize;
+    /* Reason - created by machines as tightly coupled to machine memory map */
+    dc->user_creatable = false;
+}
+
+static const TypeInfo cxl_fmw_info = {
+    .name = TYPE_CXL_FMW,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(CXLFixedWindow),
+    .class_init = cxl_fmw_class_init,
+};
+
+static void cxl_host_register_types(void)
+{
+    type_register_static(&cxl_fmw_info);
+}
+type_init(cxl_host_register_types)
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index b211633575..860346d6b7 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -609,7 +609,7 @@ void pc_machine_done(Notifier *notifier, void *data)
                               &error_fatal);
 
     if (pcms->cxl_devices_state.is_enabled) {
-        cxl_fmws_link_targets(&pcms->cxl_devices_state, &error_fatal);
+        cxl_fmws_link_targets(&error_fatal);
     }
 
     /* set the number of CPUs */
@@ -718,20 +718,28 @@ static uint64_t pc_get_cxl_range_start(PCMachineState *pcms)
     return cxl_base;
 }
 
-static uint64_t pc_get_cxl_range_end(PCMachineState *pcms)
+static int cxl_get_fmw_end(Object *obj, void *opaque)
 {
-    uint64_t start = pc_get_cxl_range_start(pcms) + MiB;
+    struct CXLFixedWindow *fw;
+    uint64_t *start = opaque;
 
-    if (pcms->cxl_devices_state.fixed_windows) {
-        GList *it;
-
-        start = ROUND_UP(start, 256 * MiB);
-        for (it = pcms->cxl_devices_state.fixed_windows; it; it = it->next) {
-            CXLFixedWindow *fw = it->data;
-            start += fw->size;
-        }
+    if (!object_dynamic_cast(obj, TYPE_CXL_FMW)) {
+        return 0;
     }
+    fw = CXL_FMW(obj);
+
+    *start += fw->size;
 
+    return 0;
+}
+
+static uint64_t pc_get_cxl_range_end(PCMachineState *pcms)
+{
+    uint64_t start = pc_get_cxl_range_start(pcms) + MiB;
+
+    /* Ordering doesn't matter so no need to build a sorted list */
+    object_child_foreach_recursive(object_get_root(), cxl_get_fmw_end,
+                                   &start);
     return start;
 }
 
@@ -933,23 +941,9 @@ void pc_memory_init(PCMachineState *pcms,
         cxl_base = pc_get_cxl_range_start(pcms);
         memory_region_init(mr, OBJECT(machine), "cxl_host_reg", cxl_size);
         memory_region_add_subregion(system_memory, cxl_base, mr);
-        cxl_resv_end = cxl_base + cxl_size;
-        if (pcms->cxl_devices_state.fixed_windows) {
-            hwaddr cxl_fmw_base;
-            GList *it;
-
-            cxl_fmw_base = ROUND_UP(cxl_base + cxl_size, 256 * MiB);
-            for (it = pcms->cxl_devices_state.fixed_windows; it; it = it->next) {
-                CXLFixedWindow *fw = it->data;
-
-                fw->base = cxl_fmw_base;
-                memory_region_init_io(&fw->mr, OBJECT(machine), &cfmws_ops, fw,
-                                      "cxl-fixed-memory-region", fw->size);
-                memory_region_add_subregion(system_memory, fw->base, &fw->mr);
-                cxl_fmw_base += fw->size;
-                cxl_resv_end = cxl_fmw_base;
-            }
-        }
+        cxl_base = ROUND_UP(cxl_base + cxl_size, 256 * MiB);
+        cxl_resv_end = cxl_fmws_set_memmap(cxl_base, maxphysaddr);
+        cxl_fmws_update_mmio();
     }
 
     /* Initialize PC system firmware */
diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c
index 1cee68193c..e438d8c042 100644
--- a/hw/intc/arm_gicv3_common.c
+++ b/hw/intc/arm_gicv3_common.c
@@ -612,6 +612,7 @@ static const Property arm_gicv3_common_properties[] = {
     DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
     DEFINE_PROP_BOOL("has-nmi", GICv3State, nmi_support, 0),
     DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
+    DEFINE_PROP_UINT32("maintenance-interrupt-id", GICv3State, maint_irq, 0),
     /*
      * Compatibility property: force 8 bits of physical priority, even
      * if the CPU being emulated should have fewer.
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index 3be3bf6c28..8ed88e7429 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -22,6 +22,7 @@
 #include "qemu/osdep.h"
 #include "qapi/error.h"
 #include "hw/intc/arm_gicv3_common.h"
+#include "hw/arm/virt.h"
 #include "qemu/error-report.h"
 #include "qemu/module.h"
 #include "system/kvm.h"
@@ -825,6 +826,34 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
         return;
     }
 
+    if (s->maint_irq) {
+        Error *kvm_nv_migration_blocker = NULL;
+        int ret;
+
+        error_setg(&kvm_nv_migration_blocker,
+                   "Live migration disabled because KVM nested virt is enabled");
+        if (migrate_add_blocker(&kvm_nv_migration_blocker, errp)) {
+            error_free(kvm_nv_migration_blocker);
+            return;
+        }
+
+        ret = kvm_device_check_attr(s->dev_fd,
+                                    KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ, 0);
+        if (!ret) {
+            error_setg_errno(errp, errno,
+                             "VGICv3 setting maintenance IRQ is not "
+                             "supported by this host kernel");
+            return;
+        }
+
+        ret = kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ, 0,
+                                &s->maint_irq, true, errp);
+        if (ret) {
+            error_setg_errno(errp, errno, "Failed to set VGIC maintenance IRQ");
+            return;
+        }
+    }
+
     multiple_redist_region_allowed =
         kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
                               KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION);
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 6d85720f1b..7c78961040 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -1279,7 +1279,7 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
             goto bad_offset;
         }
-        return cpu->id_afr0;
+        return GET_IDREG(isar, ID_AFR0);
     case 0xd50: /* MMFR0.  */
         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
             goto bad_offset;
@@ -1331,7 +1331,7 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
         }
         return GET_IDREG(&cpu->isar, ID_ISAR5);
     case 0xd78: /* CLIDR */
-        return cpu->clidr;
+        return GET_IDREG(&cpu->isar, CLIDR);
     case 0xd7c: /* CTR */
         return cpu->ctr;
     case 0xd80: /* CSSIDR */
diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig
index ec0fa5aa9f..c27285b47a 100644
--- a/hw/misc/Kconfig
+++ b/hw/misc/Kconfig
@@ -47,6 +47,18 @@ config A9SCU
 config ARM11SCU
     bool
 
+config MAX78000_AES
+    bool
+
+config MAX78000_GCR
+    bool
+
+config MAX78000_ICC
+    bool
+
+config MAX78000_TRNG
+    bool
+
 config MOS6522
     bool
 
diff --git a/hw/misc/max78000_aes.c b/hw/misc/max78000_aes.c
new file mode 100644
index 0000000000..0bfb2f02b5
--- /dev/null
+++ b/hw/misc/max78000_aes.c
@@ -0,0 +1,223 @@
+/*
+ * MAX78000 AES
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "hw/misc/max78000_aes.h"
+#include "crypto/aes.h"
+
+static void max78000_aes_set_status(Max78000AesState *s)
+{
+    s->status = 0;
+    if (s->result_index >= 16) {
+        s->status |= OUTPUT_FULL;
+    }
+    if (s->result_index == 0) {
+        s->status |= OUTPUT_EMPTY;
+    }
+    if (s->data_index >= 16) {
+        s->status |= INPUT_FULL;
+    }
+    if (s->data_index == 0) {
+        s->status |= INPUT_EMPTY;
+    }
+}
+
+static uint64_t max78000_aes_read(void *opaque, hwaddr addr,
+                                    unsigned int size)
+{
+    Max78000AesState *s = opaque;
+    switch (addr) {
+    case CTRL:
+        return s->ctrl;
+
+    case STATUS:
+        return s->status;
+
+    case INTFL:
+        return s->intfl;
+
+    case INTEN:
+        return s->inten;
+
+    case FIFO:
+        if (s->result_index >= 4) {
+            s->intfl &= ~DONE;
+            s->result_index -= 4;
+            max78000_aes_set_status(s);
+            return ldl_be_p(&s->result[s->result_index]);
+        } else {
+            return 0;
+        }
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"
+            HWADDR_PRIx "\n", __func__, addr);
+        break;
+
+    }
+    return 0;
+}
+
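+/*
+ * Run one 16-byte AES operation on s->data, leaving the result in
+ * s->result. CTRL.KEY_SIZE selects a 256-, 192- or 128-bit key (shorter
+ * keys use the tail of the 32-byte key buffer); CTRL.TYPE selects
+ * encryption, decryption with an externally supplied key, or decryption
+ * with the inverse key schedule saved from the previous operation.
+ * INTFL.DONE is set once the result is available.
+ */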
+static void max78000_aes_do_crypto(Max78000AesState *s)
+{
+    int keylen = 256;
+    uint8_t *keydata = s->key;
+    if ((s->ctrl & KEY_SIZE) == 0) {
+        keylen = 128;
+        keydata += 16;
+    } else if ((s->ctrl & KEY_SIZE) == 1 << 6) {
+        keylen = 192;
+        keydata += 8;
+    }
+
+    AES_KEY key;
+    if ((s->ctrl & TYPE) == 0) {
+        AES_set_encrypt_key(keydata, keylen, &key);
+        AES_set_decrypt_key(keydata, keylen, &s->internal_key);
+        AES_encrypt(s->data, s->result, &key);
+        s->result_index = 16;
+    } else if ((s->ctrl & TYPE) == 1 << 8) {
+        AES_set_decrypt_key(keydata, keylen, &key);
+        AES_set_decrypt_key(keydata, keylen, &s->internal_key);
+        AES_decrypt(s->data, s->result, &key);
+        s->result_index = 16;
+    } else {
+        AES_decrypt(s->data, s->result, &s->internal_key);
+        s->result_index = 16;
+    }
+    s->intfl |= DONE;
+}
+
+static void max78000_aes_write(void *opaque, hwaddr addr,
+                    uint64_t val64, unsigned int size)
+{
+    Max78000AesState *s = opaque;
+    uint32_t val = val64;
+    switch (addr) {
+    case CTRL:
+        if (val & OUTPUT_FLUSH) {
+            s->result_index = 0;
+            val &= ~OUTPUT_FLUSH;
+        }
+        if (val & INPUT_FLUSH) {
+            s->data_index = 0;
+            val &= ~INPUT_FLUSH;
+        }
+        if (val & START) {
+            max78000_aes_do_crypto(s);
+        }
+
+        /* Hardware appears to stay enabled even if 0 written */
+        s->ctrl = val | (s->ctrl & AES_EN);
+        break;
+
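+    /*
+     * Input words are packed big-endian from the end of the 16-byte data
+     * buffer; once four words have been written the operation runs
+     * automatically and the result can be read back through FIFO.
+     */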
+    case FIFO:
+        assert(s->data_index <= 12);
+        stl_be_p(&s->data[12 - s->data_index], val);
+        s->data_index += 4;
+        if (s->data_index >= 16) {
+            s->data_index = 0;
+            max78000_aes_do_crypto(s);
+        }
+        break;
+
+    case KEY_BASE ... KEY_END - 4:
+        stl_be_p(&s->key[(KEY_END - KEY_BASE - 4) - (addr - KEY_BASE)], val);
+        break;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"
+            HWADDR_PRIx "\n", __func__, addr);
+        break;
+
+    }
+    max78000_aes_set_status(s);
+}
+
+static void max78000_aes_reset_hold(Object *obj, ResetType type)
+{
+    Max78000AesState *s = MAX78000_AES(obj);
+    s->ctrl = 0;
+    s->status = 0;
+    s->intfl = 0;
+    s->inten = 0;
+
+    s->data_index = 0;
+    s->result_index = 0;
+
+    memset(s->data, 0, sizeof(s->data));
+    memset(s->key, 0, sizeof(s->key));
+    memset(s->result, 0, sizeof(s->result));
+    memset(&s->internal_key, 0, sizeof(s->internal_key));
+}
+
+static const MemoryRegionOps max78000_aes_ops = {
+    .read = max78000_aes_read,
+    .write = max78000_aes_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid.min_access_size = 4,
+    .valid.max_access_size = 4,
+};
+
+static const VMStateDescription vmstate_max78000_aes = {
+    .name = TYPE_MAX78000_AES,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (const VMStateField[]) {
+        VMSTATE_UINT32(ctrl, Max78000AesState),
+        VMSTATE_UINT32(status, Max78000AesState),
+        VMSTATE_UINT32(intfl, Max78000AesState),
+        VMSTATE_UINT32(inten, Max78000AesState),
+        VMSTATE_UINT8_ARRAY(data, Max78000AesState, 16),
+        VMSTATE_UINT8_ARRAY(key, Max78000AesState, 32),
+        VMSTATE_UINT8_ARRAY(result, Max78000AesState, 16),
+        VMSTATE_UINT32_ARRAY(internal_key.rd_key, Max78000AesState, 60),
+        VMSTATE_INT32(internal_key.rounds, Max78000AesState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void max78000_aes_init(Object *obj)
+{
+    Max78000AesState *s = MAX78000_AES(obj);
+    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
+
+    memory_region_init_io(&s->mmio, obj, &max78000_aes_ops, s,
+                        TYPE_MAX78000_AES, 0xc00);
+    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+}
+
+static void max78000_aes_class_init(ObjectClass *klass, const void *data)
+{
+    ResettableClass *rc = RESETTABLE_CLASS(klass);
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    rc->phases.hold = max78000_aes_reset_hold;
+    dc->vmsd = &vmstate_max78000_aes;
+}
+
+static const TypeInfo max78000_aes_info = {
+    .name          = TYPE_MAX78000_AES,
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(Max78000AesState),
+    .instance_init = max78000_aes_init,
+    .class_init    = max78000_aes_class_init,
+};
+
+static void max78000_aes_register_types(void)
+{
+    type_register_static(&max78000_aes_info);
+}
+
+type_init(max78000_aes_register_types)
diff --git a/hw/misc/max78000_gcr.c b/hw/misc/max78000_gcr.c
new file mode 100644
index 0000000000..fbbc92cca3
--- /dev/null
+++ b/hw/misc/max78000_gcr.c
@@ -0,0 +1,351 @@
+/*
+ * MAX78000 Global Control Registers
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "hw/irq.h"
+#include "system/runstate.h"
+#include "migration/vmstate.h"
+#include "hw/qdev-properties.h"
+#include "hw/char/max78000_uart.h"
+#include "hw/misc/max78000_trng.h"
+#include "hw/misc/max78000_aes.h"
+#include "hw/misc/max78000_gcr.h"
+
+
+static void max78000_gcr_reset_hold(Object *obj, ResetType type)
+{
+    DeviceState *dev = DEVICE(obj);
+    Max78000GcrState *s = MAX78000_GCR(dev);
+    s->sysctrl = 0x21002;
+    s->rst0 = 0;
+    /* All clocks are always ready */
+    s->clkctrl = 0x3e140008;
+    s->pm = 0x3f000;
+    s->pclkdiv = 0;
+    s->pclkdis0 = 0xffffffff;
+    s->memctrl = 0x5;
+    s->memz = 0;
+    s->sysst = 0;
+    s->rst1 = 0;
+    s->pckdis1 = 0xffffffff;
+    s->eventen = 0;
+    s->revision = 0xa1;
+    s->sysie = 0;
+    s->eccerr = 0;
+    s->ecced = 0;
+    s->eccie = 0;
+    s->eccaddr = 0;
+}
+
+static uint64_t max78000_gcr_read(void *opaque, hwaddr addr,
+                                     unsigned int size)
+{
+    Max78000GcrState *s = opaque;
+
+    switch (addr) {
+    case SYSCTRL:
+        return s->sysctrl;
+
+    case RST0:
+        return s->rst0;
+
+    case CLKCTRL:
+        return s->clkctrl;
+
+    case PM:
+        return s->pm;
+
+    case PCLKDIV:
+        return s->pclkdiv;
+
+    case PCLKDIS0:
+        return s->pclkdis0;
+
+    case MEMCTRL:
+        return s->memctrl;
+
+    case MEMZ:
+        return s->memz;
+
+    case SYSST:
+        return s->sysst;
+
+    case RST1:
+        return s->rst1;
+
+    case PCKDIS1:
+        return s->pckdis1;
+
+    case EVENTEN:
+        return s->eventen;
+
+    case REVISION:
+        return s->revision;
+
+    case SYSIE:
+        return s->sysie;
+
+    case ECCERR:
+        return s->eccerr;
+
+    case ECCED:
+        return s->ecced;
+
+    case ECCIE:
+        return s->eccie;
+
+    case ECCADDR:
+        return s->eccaddr;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"
+            HWADDR_PRIx "\n", __func__, addr);
+        return 0;
+
+    }
+}
+
+static void max78000_gcr_write(void *opaque, hwaddr addr,
+                       uint64_t val64, unsigned int size)
+{
+    Max78000GcrState *s = opaque;
+    uint32_t val = val64;
+    uint8_t zero[0xc000] = {0};
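+    /* Sized for the largest SRAM bank (sysram2) zeroed below */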
+    switch (addr) {
+    case SYSCTRL:
+        /* Checksum calculations always pass immediately */
+        s->sysctrl = (val & 0x30000) | 0x1002;
+        break;
+
+    case RST0:
+        if (val & SYSTEM_RESET) {
+            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+        }
+        if (val & PERIPHERAL_RESET) {
+            /*
+             * Peripheral reset resets all peripherals. The CPU
+             * retains its state. The GPIO, watchdog timers, AoD,
+             * RAM retention, and general control registers (GCR),
+             * including the clock configuration, are unaffected.
+             */
+            val = UART2_RESET | UART1_RESET | UART0_RESET |
+                    ADC_RESET | CNN_RESET | TRNG_RESET |
+                    RTC_RESET | I2C0_RESET | SPI1_RESET |
+                    TMR3_RESET | TMR2_RESET | TMR1_RESET |
+                    TMR0_RESET | WDT0_RESET | DMA_RESET;
+        }
+        if (val & SOFT_RESET) {
+            /* Soft reset also resets GPIO */
+            val = UART2_RESET | UART1_RESET | UART0_RESET |
+                    ADC_RESET | CNN_RESET | TRNG_RESET |
+                    RTC_RESET | I2C0_RESET | SPI1_RESET |
+                    TMR3_RESET | TMR2_RESET | TMR1_RESET |
+                    TMR0_RESET | GPIO1_RESET | GPIO0_RESET |
+                    DMA_RESET;
+        }
+        if (val & UART2_RESET) {
+            device_cold_reset(s->uart2);
+        }
+        if (val & UART1_RESET) {
+            device_cold_reset(s->uart1);
+        }
+        if (val & UART0_RESET) {
+            device_cold_reset(s->uart0);
+        }
+        if (val & TRNG_RESET) {
+            device_cold_reset(s->trng);
+        }
+        if (val & AES_RESET) {
+            device_cold_reset(s->aes);
+        }
+        /* TODO: As other devices are implemented, add them here */
+        break;
+
+    case CLKCTRL:
+        s->clkctrl = val | SYSCLK_RDY;
+        break;
+
+    case PM:
+        s->pm = val;
+        break;
+
+    case PCLKDIV:
+        s->pclkdiv = val;
+        break;
+
+    case PCLKDIS0:
+        s->pclkdis0 = val;
+        break;
+
+    case MEMCTRL:
+        s->memctrl = val;
+        break;
+
+    case MEMZ:
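+        /* Each MEMZ bit zeroes the corresponding SRAM bank */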
+        if (val & ram0) {
+            address_space_write(&s->sram_as, SYSRAM0_START,
+                                MEMTXATTRS_UNSPECIFIED, zero, 0x8000);
+        }
+        if (val & ram1) {
+            address_space_write(&s->sram_as, SYSRAM1_START,
+                                MEMTXATTRS_UNSPECIFIED, zero, 0x8000);
+        }
+        if (val & ram2) {
+            address_space_write(&s->sram_as, SYSRAM2_START,
+                                MEMTXATTRS_UNSPECIFIED, zero, 0xC000);
+        }
+        if (val & ram3) {
+            address_space_write(&s->sram_as, SYSRAM3_START,
+                                MEMTXATTRS_UNSPECIFIED, zero, 0x4000);
+        }
+        break;
+
+    case SYSST:
+        s->sysst = val;
+        break;
+
+    case RST1:
+        /* TODO: As other devices are implemented, add them here */
+        s->rst1 = val;
+        break;
+
+    case PCKDIS1:
+        s->pckdis1 = val;
+        break;
+
+    case EVENTEN:
+        s->eventen = val;
+        break;
+
+    case REVISION:
+        s->revision = val;
+        break;
+
+    case SYSIE:
+        s->sysie = val;
+        break;
+
+    case ECCERR:
+        s->eccerr = val;
+        break;
+
+    case ECCED:
+        s->ecced = val;
+        break;
+
+    case ECCIE:
+        s->eccie = val;
+        break;
+
+    case ECCADDR:
+        s->eccaddr = val;
+        break;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
+                      __func__, addr);
+        break;
+
+    }
+}
+
+static const Property max78000_gcr_properties[] = {
+    DEFINE_PROP_LINK("sram", Max78000GcrState, sram,
+                     TYPE_MEMORY_REGION, MemoryRegion*),
+    DEFINE_PROP_LINK("uart0", Max78000GcrState, uart0,
+                     TYPE_MAX78000_UART, DeviceState*),
+    DEFINE_PROP_LINK("uart1", Max78000GcrState, uart1,
+                     TYPE_MAX78000_UART, DeviceState*),
+    DEFINE_PROP_LINK("uart2", Max78000GcrState, uart2,
+                     TYPE_MAX78000_UART, DeviceState*),
+    DEFINE_PROP_LINK("trng", Max78000GcrState, trng,
+                     TYPE_MAX78000_TRNG, DeviceState*),
+    DEFINE_PROP_LINK("aes", Max78000GcrState, aes,
+                     TYPE_MAX78000_AES, DeviceState*),
+};
+
+static const MemoryRegionOps max78000_gcr_ops = {
+    .read = max78000_gcr_read,
+    .write = max78000_gcr_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid.min_access_size = 4,
+    .valid.max_access_size = 4,
+};
+
+static const VMStateDescription vmstate_max78000_gcr = {
+    .name = TYPE_MAX78000_GCR,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (const VMStateField[]) {
+        VMSTATE_UINT32(sysctrl, Max78000GcrState),
+        VMSTATE_UINT32(rst0, Max78000GcrState),
+        VMSTATE_UINT32(clkctrl, Max78000GcrState),
+        VMSTATE_UINT32(pm, Max78000GcrState),
+        VMSTATE_UINT32(pclkdiv, Max78000GcrState),
+        VMSTATE_UINT32(pclkdis0, Max78000GcrState),
+        VMSTATE_UINT32(memctrl, Max78000GcrState),
+        VMSTATE_UINT32(memz, Max78000GcrState),
+        VMSTATE_UINT32(sysst, Max78000GcrState),
+        VMSTATE_UINT32(rst1, Max78000GcrState),
+        VMSTATE_UINT32(pckdis1, Max78000GcrState),
+        VMSTATE_UINT32(eventen, Max78000GcrState),
+        VMSTATE_UINT32(revision, Max78000GcrState),
+        VMSTATE_UINT32(sysie, Max78000GcrState),
+        VMSTATE_UINT32(eccerr, Max78000GcrState),
+        VMSTATE_UINT32(ecced, Max78000GcrState),
+        VMSTATE_UINT32(eccie, Max78000GcrState),
+        VMSTATE_UINT32(eccaddr, Max78000GcrState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void max78000_gcr_init(Object *obj)
+{
+    Max78000GcrState *s = MAX78000_GCR(obj);
+
+    memory_region_init_io(&s->mmio, obj, &max78000_gcr_ops, s,
+                          TYPE_MAX78000_GCR, 0x400);
+    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+}
+
+static void max78000_gcr_realize(DeviceState *dev, Error **errp)
+{
+    Max78000GcrState *s = MAX78000_GCR(dev);
+
+    address_space_init(&s->sram_as, s->sram, "sram");
+}
+
+static void max78000_gcr_class_init(ObjectClass *klass, const void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+    device_class_set_props(dc, max78000_gcr_properties);
+
+    dc->realize = max78000_gcr_realize;
+    dc->vmsd = &vmstate_max78000_gcr;
+    rc->phases.hold = max78000_gcr_reset_hold;
+}
+
+static const TypeInfo max78000_gcr_info = {
+    .name          = TYPE_MAX78000_GCR,
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(Max78000GcrState),
+    .instance_init = max78000_gcr_init,
+    .class_init    = max78000_gcr_class_init,
+};
+
+static void max78000_gcr_register_types(void)
+{
+    type_register_static(&max78000_gcr_info);
+}
+
+type_init(max78000_gcr_register_types)
diff --git a/hw/misc/max78000_icc.c b/hw/misc/max78000_icc.c
new file mode 100644
index 0000000000..6f7d2b20bf
--- /dev/null
+++ b/hw/misc/max78000_icc.c
@@ -0,0 +1,120 @@
+/*
+ * MAX78000 Instruction Cache
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "hw/misc/max78000_icc.h"
+
+
+static uint64_t max78000_icc_read(void *opaque, hwaddr addr,
+                                    unsigned int size)
+{
+    Max78000IccState *s = opaque;
+    switch (addr) {
+    case ICC_INFO:
+        return s->info;
+
+    case ICC_SZ:
+        return s->sz;
+
+    case ICC_CTRL:
+        return s->ctrl;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Bad offset 0x%" HWADDR_PRIx "\n",
+                      __func__, addr);
+        return 0;
+
+    }
+}
+
+static void max78000_icc_write(void *opaque, hwaddr addr,
+                    uint64_t val64, unsigned int size)
+{
+    Max78000IccState *s = opaque;
+
+    switch (addr) {
+    case ICC_CTRL:
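+        /* Only the enable bit is writable; the ready bit (bit 16) stays set */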
+        s->ctrl = 0x10000 | (val64 & 1);
+        break;
+
+    case ICC_INVALIDATE:
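+        /* Cache invalidation completes immediately; nothing to model */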
+        break;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Bad offset 0x%" HWADDR_PRIx "\n",
+                      __func__, addr);
+        break;
+    }
+}
+
+static const MemoryRegionOps max78000_icc_ops = {
+    .read = max78000_icc_read,
+    .write = max78000_icc_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid.min_access_size = 4,
+    .valid.max_access_size = 4,
+};
+
+static const VMStateDescription max78000_icc_vmstate = {
+    .name = TYPE_MAX78000_ICC,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (const VMStateField[]) {
+        VMSTATE_UINT32(info, Max78000IccState),
+        VMSTATE_UINT32(sz, Max78000IccState),
+        VMSTATE_UINT32(ctrl, Max78000IccState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void max78000_icc_reset_hold(Object *obj, ResetType type)
+{
+    Max78000IccState *s = MAX78000_ICC(obj);
+    s->info = 0;
+    s->sz = 0x10000010;
+    s->ctrl = 0x10000;
+}
+
+static void max78000_icc_init(Object *obj)
+{
+    Max78000IccState *s = MAX78000_ICC(obj);
+
+    memory_region_init_io(&s->mmio, obj, &max78000_icc_ops, s,
+                        TYPE_MAX78000_ICC, 0x800);
+    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+}
+
+static void max78000_icc_class_init(ObjectClass *klass, const void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+    rc->phases.hold = max78000_icc_reset_hold;
+    dc->vmsd = &max78000_icc_vmstate;
+}
+
+static const TypeInfo max78000_icc_info = {
+    .name          = TYPE_MAX78000_ICC,
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(Max78000IccState),
+    .instance_init = max78000_icc_init,
+    .class_init    = max78000_icc_class_init,
+};
+
+static void max78000_icc_register_types(void)
+{
+    type_register_static(&max78000_icc_info);
+}
+
+type_init(max78000_icc_register_types)
diff --git a/hw/misc/max78000_trng.c b/hw/misc/max78000_trng.c
new file mode 100644
index 0000000000..ecdaef53b6
--- /dev/null
+++ b/hw/misc/max78000_trng.c
@@ -0,0 +1,139 @@
+/*
+ * MAX78000 True Random Number Generator
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "hw/misc/max78000_trng.h"
+#include "qemu/guest-random.h"
+
+static uint64_t max78000_trng_read(void *opaque, hwaddr addr,
+                                    unsigned int size)
+{
+    uint32_t data;
+
+    Max78000TrngState *s = opaque;
+    switch (addr) {
+    case CTRL:
+        return s->ctrl;
+
+    case STATUS:
+        return 1;
+
+    case DATA:
+        /*
+         * When interrupts are enabled, reading random data should raise a
+         * new interrupt; since a random number is always available, that
+         * would be qemu_set_irq(s->irq, s->ctrl & RND_IE). Because of how
+         * max78000_trng_write() drives the IRQ line, this is always a
+         * no-op, so we don't bother.
+         */
+        qemu_guest_getrandom_nofail(&data, sizeof(data));
+        return data;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"
+            HWADDR_PRIx "\n", __func__, addr);
+        break;
+    }
+    return 0;
+}
+
+static void max78000_trng_write(void *opaque, hwaddr addr,
+                    uint64_t val64, unsigned int size)
+{
+    Max78000TrngState *s = opaque;
+    uint32_t val = val64;
+    switch (addr) {
+    case CTRL:
+        /* TODO: implement AES keygen */
+        s->ctrl = val;
+
+        /*
+         * This device models random number generation as taking 0 time.
+         * A new random number is always available, so the condition for the
+         * RND interrupt is always fulfilled; we can just set irq to 1.
+         */
+        if (val & RND_IE) {
+            qemu_set_irq(s->irq, 1);
+        } else {
+            qemu_set_irq(s->irq, 0);
+        }
+        break;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"
+            HWADDR_PRIx "\n", __func__, addr);
+        break;
+    }
+}
+
+static void max78000_trng_reset_hold(Object *obj, ResetType type)
+{
+    Max78000TrngState *s = MAX78000_TRNG(obj);
+    s->ctrl = 0;
+    s->status = 0;
+    s->data = 0;
+}
+
+static const MemoryRegionOps max78000_trng_ops = {
+    .read = max78000_trng_read,
+    .write = max78000_trng_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid.min_access_size = 4,
+    .valid.max_access_size = 4,
+};
+
+static const VMStateDescription max78000_trng_vmstate = {
+    .name = TYPE_MAX78000_TRNG,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (const VMStateField[]) {
+        VMSTATE_UINT32(ctrl, Max78000TrngState),
+        VMSTATE_UINT32(status, Max78000TrngState),
+        VMSTATE_UINT32(data, Max78000TrngState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void max78000_trng_init(Object *obj)
+{
+    Max78000TrngState *s = MAX78000_TRNG(obj);
+    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
+
+    memory_region_init_io(&s->mmio, obj, &max78000_trng_ops, s,
+                        TYPE_MAX78000_TRNG, 0x1000);
+    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+}
+
+static void max78000_trng_class_init(ObjectClass *klass, const void *data)
+{
+    ResettableClass *rc = RESETTABLE_CLASS(klass);
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    rc->phases.hold = max78000_trng_reset_hold;
+    dc->vmsd = &max78000_trng_vmstate;
+}
+
+static const TypeInfo max78000_trng_info = {
+    .name          = TYPE_MAX78000_TRNG,
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(Max78000TrngState),
+    .instance_init = max78000_trng_init,
+    .class_init    = max78000_trng_class_init,
+};
+
+static void max78000_trng_register_types(void)
+{
+    type_register_static(&max78000_trng_info);
+}
+
+type_init(max78000_trng_register_types)
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
index 6d47de482c..b1d8d8e5d2 100644
--- a/hw/misc/meson.build
+++ b/hw/misc/meson.build
@@ -70,6 +70,10 @@ system_ss.add(when: 'CONFIG_IMX', if_true: files(
   'imx_ccm.c',
   'imx_rngc.c',
 ))
+system_ss.add(when: 'CONFIG_MAX78000_AES', if_true: files('max78000_aes.c'))
+system_ss.add(when: 'CONFIG_MAX78000_GCR', if_true: files('max78000_gcr.c'))
+system_ss.add(when: 'CONFIG_MAX78000_ICC', if_true: files('max78000_icc.c'))
+system_ss.add(when: 'CONFIG_MAX78000_TRNG', if_true: files('max78000_trng.c'))
 system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files(
   'npcm_clk.c',
   'npcm_gcr.c',
diff --git a/include/hw/arm/max78000_soc.h b/include/hw/arm/max78000_soc.h
new file mode 100644
index 0000000000..a203079ee9
--- /dev/null
+++ b/include/hw/arm/max78000_soc.h
@@ -0,0 +1,50 @@
+/*
+ * MAX78000 SOC
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_ARM_MAX78000_SOC_H
+#define HW_ARM_MAX78000_SOC_H
+
+#include "hw/or-irq.h"
+#include "hw/arm/armv7m.h"
+#include "hw/misc/max78000_aes.h"
+#include "hw/misc/max78000_gcr.h"
+#include "hw/misc/max78000_icc.h"
+#include "hw/char/max78000_uart.h"
+#include "hw/misc/max78000_trng.h"
+#include "qom/object.h"
+
+#define TYPE_MAX78000_SOC "max78000-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(MAX78000State, MAX78000_SOC)
+
+#define FLASH_BASE_ADDRESS 0x10000000
+#define FLASH_SIZE (512 * 1024)
+#define SRAM_BASE_ADDRESS 0x20000000
+#define SRAM_SIZE (128 * 1024)
+
+/*
+ * The MAX78000 has 2 instruction caches; only icc0 matters, as icc1
+ * serves the unsupported RISC-V coprocessor.
+ */
+#define MAX78000_NUM_ICC 2
+#define MAX78000_NUM_UART 3
+
+struct MAX78000State {
+    SysBusDevice parent_obj;
+
+    ARMv7MState armv7m;
+
+    MemoryRegion sram;
+    MemoryRegion flash;
+
+    Max78000GcrState gcr;
+    Max78000IccState icc[MAX78000_NUM_ICC];
+    Max78000UartState uart[MAX78000_NUM_UART];
+    Max78000TrngState trng;
+    Max78000AesState aes;
+
+    Clock *sysclk;
+};
+
+#endif
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 9a1b0f53d2..4375819ea0 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -36,6 +36,7 @@
 #include "hw/arm/boot.h"
 #include "hw/arm/bsa.h"
 #include "hw/block/flash.h"
+#include "hw/cxl/cxl.h"
 #include "system/kvm.h"
 #include "hw/intc/arm_gicv3_common.h"
 #include "qom/object.h"
@@ -85,6 +86,7 @@ enum {
 /* indices of IO regions located after the RAM */
 enum {
     VIRT_HIGH_GIC_REDIST2 =  VIRT_LOWMEMMAP_LAST,
+    VIRT_CXL_HOST,
     VIRT_HIGH_PCIE_ECAM,
     VIRT_HIGH_PCIE_MMIO,
 };
@@ -140,6 +142,7 @@ struct VirtMachineState {
     bool secure;
     bool highmem;
     bool highmem_compact;
+    bool highmem_cxl;
     bool highmem_ecam;
     bool highmem_mmio;
     bool highmem_redists;
@@ -174,6 +177,7 @@ struct VirtMachineState {
     char *oem_id;
     char *oem_table_id;
     bool ns_el2_virt_timer_irq;
+    CXLState cxl_devices_state;
 };
 
 #define VIRT_ECAM_ID(high) (high ? VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM)
diff --git a/include/hw/char/max78000_uart.h b/include/hw/char/max78000_uart.h
new file mode 100644
index 0000000000..cf90d51dbf
--- /dev/null
+++ b/include/hw/char/max78000_uart.h
@@ -0,0 +1,78 @@
+/*
+ * MAX78000 UART
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MAX78000_UART_H
+#define HW_MAX78000_UART_H
+
+#include "hw/sysbus.h"
+#include "chardev/char-fe.h"
+#include "qemu/fifo8.h"
+#include "qom/object.h"
+
+#define UART_CTRL       0x0
+#define UART_STATUS     0x4
+#define UART_INT_EN     0x8
+#define UART_INT_FL     0xc
+#define UART_CLKDIV     0x10
+#define UART_OSR        0x14
+#define UART_TXPEEK     0x18
+#define UART_PNR        0x1c
+#define UART_FIFO       0x20
+#define UART_DMA        0x30
+#define UART_WKEN       0x34
+#define UART_WKFL       0x38
+
+/* CTRL */
+#define UART_CTF_DIS    (1 << 7)
+#define UART_FLUSH_TX   (1 << 8)
+#define UART_FLUSH_RX   (1 << 9)
+#define UART_BCLKEN     (1 << 15)
+#define UART_BCLKRDY    (1 << 19)
+
+/* STATUS */
+#define UART_RX_LVL     8
+#define UART_TX_EM      (1 << 6)
+#define UART_RX_FULL    (1 << 5)
+#define UART_RX_EM      (1 << 4)
+
+/* PNR (Pin Control Register) */
+#define UART_CTS        1
+#define UART_RTS        (1 << 1)
+
+/* INT_EN / INT_FL */
+#define UART_RX_THD     (1 << 4)
+#define UART_TX_HE      (1 << 6)
+
+#define UART_RXBUFLEN   0x100
+#define TYPE_MAX78000_UART "max78000-uart"
+OBJECT_DECLARE_SIMPLE_TYPE(Max78000UartState, MAX78000_UART)
+
+struct Max78000UartState {
+    SysBusDevice parent_obj;
+
+    MemoryRegion mmio;
+
+    uint32_t ctrl;
+    uint32_t status;
+    uint32_t int_en;
+    uint32_t int_fl;
+    uint32_t clkdiv;
+    uint32_t osr;
+    uint32_t txpeek;
+    uint32_t pnr;
+    uint32_t fifo;
+    uint32_t dma;
+    uint32_t wken;
+    uint32_t wkfl;
+
+    Fifo8 rx_fifo;
+
+    CharBackend chr;
+    qemu_irq irq;
+};
+#endif /* HW_MAX78000_UART_H */
diff --git a/include/hw/cxl/cxl.h b/include/hw/cxl/cxl.h
index 75e47b6864..de66ab8c35 100644
--- a/include/hw/cxl/cxl.h
+++ b/include/hw/cxl/cxl.h
@@ -27,6 +27,8 @@
 typedef struct PXBCXLDev PXBCXLDev;
 
 typedef struct CXLFixedWindow {
+    SysBusDevice parent_obj;
+    int index;
     uint64_t size;
     char **targets;
     PXBCXLDev *target_hbs[16];
@@ -37,12 +39,13 @@ typedef struct CXLFixedWindow {
     MemoryRegion mr;
     hwaddr base;
 } CXLFixedWindow;
+#define TYPE_CXL_FMW "cxl-fmw"
+OBJECT_DECLARE_SIMPLE_TYPE(CXLFixedWindow, CXL_FMW)
 
 typedef struct CXLState {
     bool is_enabled;
     MemoryRegion host_mr;
     unsigned int next_mr_idx;
-    GList *fixed_windows;
     CXLFixedMemoryWindowOptionsList *cfmw_list;
 } CXLState;
 
diff --git a/include/hw/cxl/cxl_host.h b/include/hw/cxl/cxl_host.h
index c9bc9c7c50..cd3c368c86 100644
--- a/include/hw/cxl/cxl_host.h
+++ b/include/hw/cxl/cxl_host.h
@@ -14,8 +14,11 @@
 #define CXL_HOST_H
 
 void cxl_machine_init(Object *obj, CXLState *state);
-void cxl_fmws_link_targets(CXLState *stat, Error **errp);
+void cxl_fmws_link_targets(Error **errp);
 void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp);
+hwaddr cxl_fmws_set_memmap(hwaddr base, hwaddr max_addr);
+void cxl_fmws_update_mmio(void);
+GSList *cxl_fmws_get_all_sorted(void);
 
 extern const MemoryRegionOps cfmws_ops;
 
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
index a3d6a0e507..c18503869f 100644
--- a/include/hw/intc/arm_gicv3_common.h
+++ b/include/hw/intc/arm_gicv3_common.h
@@ -231,6 +231,7 @@ struct GICv3State {
     uint32_t num_cpu;
     uint32_t num_irq;
     uint32_t revision;
+    uint32_t maint_irq;
     bool lpi_enable;
     bool nmi_support;
     bool security_extn;
diff --git a/include/hw/misc/max78000_aes.h b/include/hw/misc/max78000_aes.h
new file mode 100644
index 0000000000..407c45ef61
--- /dev/null
+++ b/include/hw/misc/max78000_aes.h
@@ -0,0 +1,68 @@
+/*
+ * MAX78000 AES
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_MAX78000_AES_H
+#define HW_MAX78000_AES_H
+
+#include "hw/sysbus.h"
+#include "crypto/aes.h"
+#include "qom/object.h"
+
+#define TYPE_MAX78000_AES "max78000-aes"
+OBJECT_DECLARE_SIMPLE_TYPE(Max78000AesState, MAX78000_AES)
+
+#define CTRL 0
+#define STATUS 4
+#define INTFL 8
+#define INTEN 0xc
+#define FIFO 0x10
+
+#define KEY_BASE 0x400
+#define KEY_END 0x420
+
+/* CTRL */
+#define TYPE (1 << 9 | 1 << 8)
+#define KEY_SIZE (1 << 7 | 1 << 6)
+#define OUTPUT_FLUSH (1 << 5)
+#define INPUT_FLUSH (1 << 4)
+#define START (1 << 3)
+
+#define AES_EN (1 << 0)
+
+/* STATUS */
+#define OUTPUT_FULL (1 << 4)
+#define OUTPUT_EMPTY (1 << 3)
+#define INPUT_FULL (1 << 2)
+#define INPUT_EMPTY (1 << 1)
+#define BUSY (1 << 0)
+
+/* INTFL*/
+#define DONE (1 << 0)
+
+struct Max78000AesState {
+    SysBusDevice parent_obj;
+
+    MemoryRegion mmio;
+
+    uint32_t ctrl;
+    uint32_t status;
+    uint32_t intfl;
+    uint32_t inten;
+    uint32_t data_index;
+    uint8_t data[16];
+
+    uint8_t key[32];
+    AES_KEY internal_key;
+
+    uint32_t result_index;
+    uint8_t result[16];
+
+    qemu_irq irq;
+};
+
+#endif
diff --git a/include/hw/misc/max78000_gcr.h b/include/hw/misc/max78000_gcr.h
new file mode 100644
index 0000000000..d5858a40f3
--- /dev/null
+++ b/include/hw/misc/max78000_gcr.h
@@ -0,0 +1,131 @@
+/*
+ * MAX78000 Global Control Register
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_MAX78000_GCR_H
+#define HW_MAX78000_GCR_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_MAX78000_GCR "max78000-gcr"
+OBJECT_DECLARE_SIMPLE_TYPE(Max78000GcrState, MAX78000_GCR)
+
+#define SYSCTRL     0x0
+#define RST0        0x4
+#define CLKCTRL     0x8
+#define PM          0xc
+#define PCLKDIV     0x18
+#define PCLKDIS0    0x24
+#define MEMCTRL     0x28
+#define MEMZ        0x2c
+#define SYSST       0x40
+#define RST1        0x44
+#define PCKDIS1     0x48
+#define EVENTEN     0x4c
+#define REVISION    0x50
+#define SYSIE       0x54
+#define ECCERR      0x64
+#define ECCED       0x68
+#define ECCIE       0x6c
+#define ECCADDR     0x70
+
+/* RST0 */
+#define SYSTEM_RESET (1 << 31)
+#define PERIPHERAL_RESET (1 << 30)
+#define SOFT_RESET (1 << 29)
+#define UART2_RESET (1 << 28)
+
+#define ADC_RESET (1 << 26)
+#define CNN_RESET (1 << 25)
+#define TRNG_RESET (1 << 24)
+
+#define RTC_RESET (1 << 17)
+#define I2C0_RESET (1 << 16)
+
+#define SPI1_RESET (1 << 13)
+#define UART1_RESET (1 << 12)
+#define UART0_RESET (1 << 11)
+
+#define TMR3_RESET (1 << 8)
+#define TMR2_RESET (1 << 7)
+#define TMR1_RESET (1 << 6)
+#define TMR0_RESET (1 << 5)
+
+#define GPIO1_RESET (1 << 3)
+#define GPIO0_RESET (1 << 2)
+#define WDT0_RESET (1 << 1)
+#define DMA_RESET (1 << 0)
+
+/* CLKCTRL */
+#define SYSCLK_RDY (1 << 13)
+
+/* MEMZ */
+#define ram0 (1 << 0)
+#define ram1 (1 << 1)
+#define ram2 (1 << 2)
+#define ram3 (1 << 3)
+
+/* RST1 */
+#define CPU1_RESET (1 << 31)
+
+#define SIMO_RESET (1 << 25)
+#define DVS_RESET (1 << 24)
+
+#define I2C2_RESET (1 << 20)
+#define I2S_RESET (1 << 19)
+
+#define SMPHR_RESET (1 << 16)
+
+#define SPI0_RESET (1 << 11)
+#define AES_RESET (1 << 10)
+#define CRC_RESET (1 << 9)
+
+#define PT_RESET (1 << 1)
+#define I2C1_RESET (1 << 0)
+
+
+#define SYSRAM0_START 0x20000000
+#define SYSRAM1_START 0x20008000
+#define SYSRAM2_START 0x20010000
+#define SYSRAM3_START 0x2001C000
+
+struct Max78000GcrState {
+    SysBusDevice parent_obj;
+
+    MemoryRegion mmio;
+
+    uint32_t sysctrl;
+    uint32_t rst0;
+    uint32_t clkctrl;
+    uint32_t pm;
+    uint32_t pclkdiv;
+    uint32_t pclkdis0;
+    uint32_t memctrl;
+    uint32_t memz;
+    uint32_t sysst;
+    uint32_t rst1;
+    uint32_t pckdis1;
+    uint32_t eventen;
+    uint32_t revision;
+    uint32_t sysie;
+    uint32_t eccerr;
+    uint32_t ecced;
+    uint32_t eccie;
+    uint32_t eccaddr;
+
+    MemoryRegion *sram;
+    AddressSpace sram_as;
+
+    DeviceState *uart0;
+    DeviceState *uart1;
+    DeviceState *uart2;
+    DeviceState *trng;
+    DeviceState *aes;
+
+};
+
+#endif
diff --git a/include/hw/misc/max78000_icc.h b/include/hw/misc/max78000_icc.h
new file mode 100644
index 0000000000..6fe2bb7a15
--- /dev/null
+++ b/include/hw/misc/max78000_icc.h
@@ -0,0 +1,33 @@
+/*
+ * MAX78000 Instruction Cache
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MAX78000_ICC_H
+#define HW_MAX78000_ICC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_MAX78000_ICC "max78000-icc"
+OBJECT_DECLARE_SIMPLE_TYPE(Max78000IccState, MAX78000_ICC)
+
+#define ICC_INFO       0x0
+#define ICC_SZ         0x4
+#define ICC_CTRL       0x100
+#define ICC_INVALIDATE 0x700
+
+struct Max78000IccState {
+    SysBusDevice parent_obj;
+
+    MemoryRegion mmio;
+
+    uint32_t info;
+    uint32_t sz;
+    uint32_t ctrl;
+};
+
+#endif
diff --git a/include/hw/misc/max78000_trng.h b/include/hw/misc/max78000_trng.h
new file mode 100644
index 0000000000..c5a8129b6a
--- /dev/null
+++ b/include/hw/misc/max78000_trng.h
@@ -0,0 +1,35 @@
+/*
+ * MAX78000 True Random Number Generator
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_MAX78000_TRNG_H
+#define HW_MAX78000_TRNG_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_MAX78000_TRNG "max78000-trng"
+OBJECT_DECLARE_SIMPLE_TYPE(Max78000TrngState, MAX78000_TRNG)
+
+#define CTRL 0
+#define STATUS 4
+#define DATA 8
+
+#define RND_IE (1 << 1)
+
+struct Max78000TrngState {
+    SysBusDevice parent_obj;
+
+    MemoryRegion mmio;
+
+    uint32_t ctrl;
+    uint32_t status;
+    uint32_t data;
+
+    qemu_irq irq;
+};
+
+#endif
diff --git a/system/qdev-monitor.c b/system/qdev-monitor.c
index 5588ed2047..2ac92d0a07 100644
--- a/system/qdev-monitor.c
+++ b/system/qdev-monitor.c
@@ -628,7 +628,7 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts,
     DeviceClass *dc;
     const char *driver, *path;
     char *id;
-    DeviceState *dev = NULL;
+    DeviceState *dev;
     BusState *bus = NULL;
     QDict *properties;
 
@@ -717,10 +717,9 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts,
     return dev;
 
 err_del_dev:
-    if (dev) {
-        object_unparent(OBJECT(dev));
-        object_unref(OBJECT(dev));
-    }
+    object_unparent(OBJECT(dev));
+    object_unref(OBJECT(dev));
+
     return NULL;
 }
 
diff --git a/target/arm/cpregs-pmu.c b/target/arm/cpregs-pmu.c
new file mode 100644
index 0000000000..0f295b1376
--- /dev/null
+++ b/target/arm/cpregs-pmu.c
@@ -0,0 +1,1309 @@
+/*
+ * QEMU ARM CP Register PMU insns
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "exec/icount.h"
+#include "hw/irq.h"
+#include "cpu.h"
+#include "cpu-features.h"
+#include "cpregs.h"
+#include "internals.h"
+
+
+#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
+
+/*
+ * Check for traps to performance monitor registers, which are controlled
+ * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
+ */
+static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
+                                 bool isread)
+{
+    int el = arm_current_el(env);
+    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
+
+    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
+        return CP_ACCESS_TRAP_EL3;
+    }
+    return CP_ACCESS_OK;
+}
+
+typedef struct pm_event {
+    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
+    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
+    bool (*supported)(CPUARMState *);
+    /*
+     * Retrieve the current count of the underlying event. The programmed
+     * counters hold a difference from the return value from this function
+     */
+    uint64_t (*get_count)(CPUARMState *);
+    /*
+     * Return how many nanoseconds it will take (at a minimum) for count events
+     * to occur. A negative value indicates the counter will never overflow, or
+     * that the counter has otherwise arranged for the overflow bit to be set
+     * and the PMU interrupt to be raised on overflow.
+     */
+    int64_t (*ns_per_count)(uint64_t);
+} pm_event;
+
+static bool event_always_supported(CPUARMState *env)
+{
+    return true;
+}
+
+static uint64_t swinc_get_count(CPUARMState *env)
+{
+    /*
+     * SW_INCR events are written directly to the pmevcntr's by writes to
+     * PMSWINC, so there is no underlying count maintained by the PMU itself
+     */
+    return 0;
+}
+
+static int64_t swinc_ns_per(uint64_t ignored)
+{
+    return -1;
+}
+
+/*
+ * Return the underlying cycle count for the PMU cycle counters. In
+ * user-mode emulation there is no virtual clock, so fall back to the
+ * host tick counter.
+ */
+static uint64_t cycles_get_count(CPUARMState *env)
+{
+#ifndef CONFIG_USER_ONLY
+    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+                   ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
+#else
+    return cpu_get_host_ticks();
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
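+/*
+ * With the fixed 1 GHz ARM_CPU_FREQ above this is a 1:1 mapping from
+ * cycles to nanoseconds; the integer division would truncate to zero
+ * for frequencies below 1 GHz.
+ */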
+static int64_t cycles_ns_per(uint64_t cycles)
+{
+    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
+}
+
+static bool instructions_supported(CPUARMState *env)
+{
+    /* Precise instruction counting */
+    return icount_enabled() == ICOUNT_PRECISE;
+}
+
+static uint64_t instructions_get_count(CPUARMState *env)
+{
+    assert(icount_enabled() == ICOUNT_PRECISE);
+    return (uint64_t)icount_get_raw();
+}
+
+static int64_t instructions_ns_per(uint64_t icount)
+{
+    assert(icount_enabled() == ICOUNT_PRECISE);
+    return icount_to_ns((int64_t)icount);
+}
+#endif
+
+static bool pmuv3p1_events_supported(CPUARMState *env)
+{
+    /* For events which are supported in any v8.1 PMU */
+    return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
+}
+
+static bool pmuv3p4_events_supported(CPUARMState *env)
+{
+    /* For events which are supported in any v8.4 PMU */
+    return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
+}
+
+static uint64_t zero_event_get_count(CPUARMState *env)
+{
+    /* For events which on QEMU never fire, so their count is always zero */
+    return 0;
+}
+
+static int64_t zero_event_ns_per(uint64_t cycles)
+{
+    /* An event which never fires can never overflow */
+    return -1;
+}
+
+static const pm_event pm_events[] = {
+    { .number = 0x000, /* SW_INCR */
+      .supported = event_always_supported,
+      .get_count = swinc_get_count,
+      .ns_per_count = swinc_ns_per,
+    },
+#ifndef CONFIG_USER_ONLY
+    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
+      .supported = instructions_supported,
+      .get_count = instructions_get_count,
+      .ns_per_count = instructions_ns_per,
+    },
+    { .number = 0x011, /* CPU_CYCLES, Cycle */
+      .supported = event_always_supported,
+      .get_count = cycles_get_count,
+      .ns_per_count = cycles_ns_per,
+    },
+#endif
+    { .number = 0x023, /* STALL_FRONTEND */
+      .supported = pmuv3p1_events_supported,
+      .get_count = zero_event_get_count,
+      .ns_per_count = zero_event_ns_per,
+    },
+    { .number = 0x024, /* STALL_BACKEND */
+      .supported = pmuv3p1_events_supported,
+      .get_count = zero_event_get_count,
+      .ns_per_count = zero_event_ns_per,
+    },
+    { .number = 0x03c, /* STALL */
+      .supported = pmuv3p4_events_supported,
+      .get_count = zero_event_get_count,
+      .ns_per_count = zero_event_ns_per,
+    },
+};
+
+/*
+ * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
+ * events (i.e. the statistical profiling extension), this implementation
+ * should first be updated to something sparse instead of the current
+ * supported_event_map[] array.
+ */
+#define MAX_EVENT_ID 0x3c
+#define UNSUPPORTED_EVENT UINT16_MAX
+static uint16_t supported_event_map[MAX_EVENT_ID + 1];
+
+/*
+ * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
+ * of ARM event numbers to indices in our pm_events array.
+ *
+ * Note: Events in the 0x40XX range are not currently supported.
+ */
+void pmu_init(ARMCPU *cpu)
+{
+    unsigned int i;
+
+    /*
+     * Empty supported_event_map and cpu->pmceid[01] before adding supported
+     * events to them
+     */
+    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
+        supported_event_map[i] = UNSUPPORTED_EVENT;
+    }
+    cpu->pmceid0 = 0;
+    cpu->pmceid1 = 0;
+
+    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
+        const pm_event *cnt = &pm_events[i];
+        assert(cnt->number <= MAX_EVENT_ID);
+        /* We do not currently support events in the 0x40xx range */
+        assert(cnt->number <= 0x3f);
+
+        if (cnt->supported(&cpu->env)) {
+            supported_event_map[cnt->number] = i;
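+            /* Events 0x00..0x1f set bits in PMCEID0, 0x20..0x3f in PMCEID1 */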
+            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
+            if (cnt->number & 0x20) {
+                cpu->pmceid1 |= event_mask;
+            } else {
+                cpu->pmceid0 |= event_mask;
+            }
+        }
+    }
+}
+
+/*
+ * Check at runtime whether a PMU event is supported for the current machine
+ */
+static bool event_supported(uint16_t number)
+{
+    if (number > MAX_EVENT_ID) {
+        return false;
+    }
+    return supported_event_map[number] != UNSUPPORTED_EVENT;
+}
+
+static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
+                                   bool isread)
+{
+    /*
+     * Performance monitor registers user accessibility is controlled
+     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
+     * trapping to EL2 or EL3 for other accesses.
+     */
+    int el = arm_current_el(env);
+    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
+
+    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
+        return CP_ACCESS_TRAP_EL1;
+    }
+    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
+        return CP_ACCESS_TRAP_EL3;
+    }
+
+    return CP_ACCESS_OK;
+}
+
+static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
+                                           const ARMCPRegInfo *ri,
+                                           bool isread)
+{
+    /* ER: event counter read trap control */
+    if (arm_feature(env, ARM_FEATURE_V8)
+        && arm_current_el(env) == 0
+        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
+        && isread) {
+        return CP_ACCESS_OK;
+    }
+
+    return pmreg_access(env, ri, isread);
+}
+
+static CPAccessResult pmreg_access_swinc(CPUARMState *env,
+                                         const ARMCPRegInfo *ri,
+                                         bool isread)
+{
+    /* SW: software increment write trap control */
+    if (arm_feature(env, ARM_FEATURE_V8)
+        && arm_current_el(env) == 0
+        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
+        && !isread) {
+        return CP_ACCESS_OK;
+    }
+
+    return pmreg_access(env, ri, isread);
+}
+
+static CPAccessResult pmreg_access_selr(CPUARMState *env,
+                                        const ARMCPRegInfo *ri,
+                                        bool isread)
+{
+    /* ER: event counter read trap control */
+    if (arm_feature(env, ARM_FEATURE_V8)
+        && arm_current_el(env) == 0
+        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
+        return CP_ACCESS_OK;
+    }
+
+    return pmreg_access(env, ri, isread);
+}
+
+static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
+                                         const ARMCPRegInfo *ri,
+                                         bool isread)
+{
+    /* CR: cycle counter read trap control */
+    if (arm_feature(env, ARM_FEATURE_V8)
+        && arm_current_el(env) == 0
+        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
+        && isread) {
+        return CP_ACCESS_OK;
+    }
+
+    return pmreg_access(env, ri, isread);
+}
+
+/*
+ * Returns true if the counter (pass 31 for PMCCNTR) should count events using
+ * the current EL, security state, and register configuration.
+ */
+static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
+{
+    uint64_t filter;
+    bool e, p, u, nsk, nsu, nsh, m;
+    bool enabled, prohibited = false, filtered;
+    bool secure = arm_is_secure(env);
+    int el = arm_current_el(env);
+    uint64_t mdcr_el2;
+    uint8_t hpmn;
+
+    /*
+     * We might be called for M-profile cores where MDCR_EL2 doesn't
+     * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
+     * must be before we read that value.
+     */
+    if (!arm_feature(env, ARM_FEATURE_PMU)) {
+        return false;
+    }
+
+    mdcr_el2 = arm_mdcr_el2_eff(env);
+    hpmn = mdcr_el2 & MDCR_HPMN;
+
+    if (!arm_feature(env, ARM_FEATURE_EL2) ||
+            (counter < hpmn || counter == 31)) {
+        e = env->cp15.c9_pmcr & PMCRE;
+    } else {
+        e = mdcr_el2 & MDCR_HPME;
+    }
+    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
+
+    /* Is event counting prohibited? */
+    if (el == 2 && (counter < hpmn || counter == 31)) {
+        prohibited = mdcr_el2 & MDCR_HPMD;
+    }
+    if (secure) {
+        prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
+    }
+
+    if (counter == 31) {
+        /*
+         * The cycle counter defaults to running. PMCR.DP says "disable
+         * the cycle counter when event counting is prohibited".
+         * Some MDCR bits disable the cycle counter specifically.
+         */
+        prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
+        if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+            if (secure) {
+                prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
+            }
+            if (el == 2) {
+                prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
+            }
+        }
+    }
+
+    if (counter == 31) {
+        filter = env->cp15.pmccfiltr_el0;
+    } else {
+        filter = env->cp15.c14_pmevtyper[counter];
+    }
+
+    p   = filter & PMXEVTYPER_P;
+    u   = filter & PMXEVTYPER_U;
+    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
+    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
+    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
+    m   = arm_el_is_aa64(env, 1) &&
+              arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
+
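+    /*
+     * Apply the filter bits for the current EL and security state:
+     * P/U (modified by NSK/NSU in Non-secure state) control EL1/EL0,
+     * NSH enables counting at EL2, and M together with P controls EL3.
+     */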
+    if (el == 0) {
+        filtered = secure ? u : u != nsu;
+    } else if (el == 1) {
+        filtered = secure ? p : p != nsk;
+    } else if (el == 2) {
+        filtered = !nsh;
+    } else { /* EL3 */
+        filtered = m != p;
+    }
+
+    if (counter != 31) {
+        /*
+         * If not checking PMCCNTR, ensure the counter is setup to an event we
+         * support
+         */
+        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
+        if (!event_supported(event)) {
+            return false;
+        }
+    }
+
+    return enabled && !prohibited && !filtered;
+}
+
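+/*
+ * The PMU interrupt line is asserted while the PMU is enabled (PMCR.E)
+ * and a counter whose overflow interrupt is enabled has its overflow
+ * flag set (PMINTENSET & PMOVSR).
+ */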
+static void pmu_update_irq(CPUARMState *env)
+{
+    ARMCPU *cpu = env_archcpu(env);
+    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
+            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
+}
+
+static bool pmccntr_clockdiv_enabled(CPUARMState *env)
+{
+    /*
+     * Return true if the clock divider is enabled and the cycle counter
+     * is supposed to tick only once every 64 clock cycles. This is
+     * controlled by PMCR.D, but if PMCR.LC is set to enable the long
+     * (64-bit) cycle counter PMCR.D has no effect.
+     */
+    return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
+}
+
+static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
+{
+    /* Return true if the specified event counter is configured to be 64 bit */
+
+    /* This isn't intended to be used with the cycle counter */
+    assert(counter < 31);
+
+    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+        return false;
+    }
+
+    if (arm_feature(env, ARM_FEATURE_EL2)) {
+        /*
+         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
+         * current security state, so we don't use arm_mdcr_el2_eff() here.
+         */
+        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
+        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
+
+        if (counter >= hpmn) {
+            return hlp;
+        }
+    }
+    return env->cp15.c9_pmcr & PMCRLP;
+}
+
+/*
+ * Ensure c15_ccnt is the guest-visible count so that operations such as
+ * enabling/disabling the counter or filtering, modifying the count itself,
+ * etc. can be done logically. This is essentially a no-op if the counter is
+ * not enabled at the time of the call.
+ */
+static void pmccntr_op_start(CPUARMState *env)
+{
+    uint64_t cycles = cycles_get_count(env);
+
+    if (pmu_counter_enabled(env, 31)) {
+        uint64_t eff_cycles = cycles;
+        if (pmccntr_clockdiv_enabled(env)) {
+            eff_cycles /= 64;
+        }
+
+        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
+
+        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
+                                 1ull << 63 : 1ull << 31;
+        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
+            env->cp15.c9_pmovsr |= (1ULL << 31);
+            pmu_update_irq(env);
+        }
+
+        env->cp15.c15_ccnt = new_pmccntr;
+    }
+    env->cp15.c15_ccnt_delta = cycles;
+}
+
+/*
+ * If PMCCNTR is enabled, recalculate the delta between the clock and the
+ * guest-visible count. A call to pmccntr_op_finish should follow every call to
+ * pmccntr_op_start.
+ */
+static void pmccntr_op_finish(CPUARMState *env)
+{
+    if (pmu_counter_enabled(env, 31)) {
+#ifndef CONFIG_USER_ONLY
+        /* Calculate when the counter will next overflow */
+        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
+        if (!(env->cp15.c9_pmcr & PMCRLC)) {
+            remaining_cycles = (uint32_t)remaining_cycles;
+        }
+        int64_t overflow_in = cycles_ns_per(remaining_cycles);
+
+        if (overflow_in > 0) {
+            int64_t overflow_at;
+
+            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+                                 overflow_in, &overflow_at)) {
+                ARMCPU *cpu = env_archcpu(env);
+                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+            }
+        }
+#endif
+
+        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
+        if (pmccntr_clockdiv_enabled(env)) {
+            prev_cycles /= 64;
+        }
+        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
+    }
+}
+
+static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
+{
+    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
+    uint64_t count = 0;
+    if (event_supported(event)) {
+        uint16_t event_idx = supported_event_map[event];
+        count = pm_events[event_idx].get_count(env);
+    }
+
+    if (pmu_counter_enabled(env, counter)) {
+        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
+        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
+            1ULL << 63 : 1ULL << 31;
+
+        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
+            env->cp15.c9_pmovsr |= (1 << counter);
+            pmu_update_irq(env);
+        }
+        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
+    }
+    env->cp15.c14_pmevcntr_delta[counter] = count;
+}
+
+static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
+{
+    if (pmu_counter_enabled(env, counter)) {
+#ifndef CONFIG_USER_ONLY
+        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
+        uint16_t event_idx = supported_event_map[event];
+        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
+        int64_t overflow_in;
+
+        if (!pmevcntr_is_64_bit(env, counter)) {
+            delta = (uint32_t)delta;
+        }
+        overflow_in = pm_events[event_idx].ns_per_count(delta);
+
+        if (overflow_in > 0) {
+            int64_t overflow_at;
+
+            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+                                 overflow_in, &overflow_at)) {
+                ARMCPU *cpu = env_archcpu(env);
+                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+            }
+        }
+#endif
+
+        env->cp15.c14_pmevcntr_delta[counter] -=
+            env->cp15.c14_pmevcntr[counter];
+    }
+}
+
+void pmu_op_start(CPUARMState *env)
+{
+    unsigned int i;
+    pmccntr_op_start(env);
+    for (i = 0; i < pmu_num_counters(env); i++) {
+        pmevcntr_op_start(env, i);
+    }
+}
+
+void pmu_op_finish(CPUARMState *env)
+{
+    unsigned int i;
+    pmccntr_op_finish(env);
+    for (i = 0; i < pmu_num_counters(env); i++) {
+        pmevcntr_op_finish(env, i);
+    }
+}
+
+void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
+{
+    pmu_op_start(&cpu->env);
+}
+
+void pmu_post_el_change(ARMCPU *cpu, void *ignored)
+{
+    pmu_op_finish(&cpu->env);
+}
+
+void arm_pmu_timer_cb(void *opaque)
+{
+    ARMCPU *cpu = opaque;
+
+    /*
+     * Update all the counter values based on the current underlying counts,
+     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
+     * has the effect of setting the cpu->pmu_timer to the next earliest time a
+     * counter may expire.
+     */
+    pmu_op_start(&cpu->env);
+    pmu_op_finish(&cpu->env);
+}
+
+static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                       uint64_t value)
+{
+    pmu_op_start(env);
+
+    if (value & PMCRC) {
+        /* The counter has been reset */
+        env->cp15.c15_ccnt = 0;
+    }
+
+    if (value & PMCRP) {
+        unsigned int i;
+        for (i = 0; i < pmu_num_counters(env); i++) {
+            env->cp15.c14_pmevcntr[i] = 0;
+        }
+    }
+
+    env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
+    env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);
+
+    pmu_op_finish(env);
+}
+
+static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    uint64_t pmcr = env->cp15.c9_pmcr;
+
+    /*
+     * If EL2 is implemented and enabled for the current security state, reads
+     * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
+     */
+    if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
+        pmcr &= ~PMCRN_MASK;
+        pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
+    }
+
+    return pmcr;
+}
+
+static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                          uint64_t value)
+{
+    unsigned int i;
+    uint64_t overflow_mask, new_pmswinc;
+
+    for (i = 0; i < pmu_num_counters(env); i++) {
+        /* Increment a counter's count iff: */
+        if ((value & (1 << i)) && /* counter's bit is set */
+                /* counter is enabled and not filtered */
+                pmu_counter_enabled(env, i) &&
+                /* counter is SW_INCR */
+                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
+            pmevcntr_op_start(env, i);
+
+            /*
+             * Detect if this write causes an overflow since we can't predict
+             * PMSWINC overflows like we can for other events
+             */
+            new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
+
+            overflow_mask = pmevcntr_is_64_bit(env, i) ?
+                1ULL << 63 : 1ULL << 31;
+
+            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
+                env->cp15.c9_pmovsr |= (1 << i);
+                pmu_update_irq(env);
+            }
+
+            env->cp15.c14_pmevcntr[i] = new_pmswinc;
+
+            pmevcntr_op_finish(env, i);
+        }
+    }
+}
+
+static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    uint64_t ret;
+    pmccntr_op_start(env);
+    ret = env->cp15.c15_ccnt;
+    pmccntr_op_finish(env);
+    return ret;
+}
+
+static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                         uint64_t value)
+{
+    /*
+     * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
+     * PMXEVCNTR. We allow any value in [0..31] to be written to PMSELR
+     * here; PMSELR.SEL is then checked when PMXEVTYPER and PMXEVCNTR
+     * are accessed.
+     */
+    env->cp15.c9_pmselr = value & 0x1f;
+}
+
+static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                        uint64_t value)
+{
+    pmccntr_op_start(env);
+    env->cp15.c15_ccnt = value;
+    pmccntr_op_finish(env);
+}
+
+static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
+                            uint64_t value)
+{
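+    /* AArch32 PMCCNTR is a 32-bit view: only the low word is written */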
+    uint64_t cur_val = pmccntr_read(env, NULL);
+
+    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
+}
+
+static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                            uint64_t value)
+{
+    pmccntr_op_start(env);
+    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
+    pmccntr_op_finish(env);
+}
+
+static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
+                            uint64_t value)
+{
+    pmccntr_op_start(env);
+    /* M is not accessible from AArch32 */
+    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
+        (value & PMCCFILTR);
+    pmccntr_op_finish(env);
+}
+
+static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    /* M is not visible in AArch32 */
+    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
+}
+
+static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                            uint64_t value)
+{
+    pmu_op_start(env);
+    value &= pmu_counter_mask(env);
+    env->cp15.c9_pmcnten |= value;
+    pmu_op_finish(env);
+}
+
+static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value)
+{
+    pmu_op_start(env);
+    value &= pmu_counter_mask(env);
+    env->cp15.c9_pmcnten &= ~value;
+    pmu_op_finish(env);
+}
+
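+/* A write of 1 to a PMOVSR/PMOVSCLR bit clears that overflow flag */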
+static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                         uint64_t value)
+{
+    value &= pmu_counter_mask(env);
+    env->cp15.c9_pmovsr &= ~value;
+    pmu_update_irq(env);
+}
+
+static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                         uint64_t value)
+{
+    value &= pmu_counter_mask(env);
+    env->cp15.c9_pmovsr |= value;
+    pmu_update_irq(env);
+}
+
+static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value, const uint8_t counter)
+{
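+    /* Counter 31 selects the cycle counter's filter register, PMCCFILTR */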
+    if (counter == 31) {
+        pmccfiltr_write(env, ri, value);
+    } else if (counter < pmu_num_counters(env)) {
+        pmevcntr_op_start(env, counter);
+
+        /*
+         * If this counter's event type is changing, store the current
+         * underlying count for the new type in c14_pmevcntr_delta[counter] so
+         * pmevcntr_op_finish has the correct baseline when it converts back to
+         * a delta.
+         */
+        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
+            PMXEVTYPER_EVTCOUNT;
+        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
+        if (old_event != new_event) {
+            uint64_t count = 0;
+            if (event_supported(new_event)) {
+                uint16_t event_idx = supported_event_map[new_event];
+                count = pm_events[event_idx].get_count(env);
+            }
+            env->cp15.c14_pmevcntr_delta[counter] = count;
+        }
+
+        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
+        pmevcntr_op_finish(env, counter);
+    }
+    /*
+     * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when the
+     * PMSELR value is equal to or greater than the number of implemented
+     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
+     */
+}
+
+static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
+                               const uint8_t counter)
+{
+    if (counter == 31) {
+        return env->cp15.pmccfiltr_el0;
+    } else if (counter < pmu_num_counters(env)) {
+        return env->cp15.c14_pmevtyper[counter];
+    } else {
+        /*
+         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
+         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
+         */
+        return 0;
+    }
+}
+
+static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
+                              uint64_t value)
+{
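+    /* PMEVTYPER<n> encodes the counter index n in CRm[1:0]:opc2[2:0] */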
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    pmevtyper_write(env, ri, value, counter);
+}
+
+static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
+                               uint64_t value)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    env->cp15.c14_pmevtyper[counter] = value;
+
+    /*
+     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
+     * pmu_op_finish calls when loading saved state for a migration. Because
+     * we're potentially updating the type of event here, the value written to
+     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
+     * different counter type. Therefore, we need to set this value to the
+     * current count for the counter type we're writing so that pmu_op_finish
+     * has the correct count for its calculation.
+     */
+    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
+    if (event_supported(event)) {
+        uint16_t event_idx = supported_event_map[event];
+        env->cp15.c14_pmevcntr_delta[counter] =
+            pm_events[event_idx].get_count(env);
+    }
+}
+
+static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    return pmevtyper_read(env, ri, counter);
+}
+
+static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value)
+{
+    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
+}
+
+static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
+}
+
+static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value, uint8_t counter)
+{
+    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+        /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
+        value &= MAKE_64BIT_MASK(0, 32);
+    }
+    if (counter < pmu_num_counters(env)) {
+        pmevcntr_op_start(env, counter);
+        env->cp15.c14_pmevcntr[counter] = value;
+        pmevcntr_op_finish(env, counter);
+    }
+    /*
+     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
+     * are CONSTRAINED UNPREDICTABLE.
+     */
+}
+
+static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
+                              uint8_t counter)
+{
+    if (counter < pmu_num_counters(env)) {
+        uint64_t ret;
+        pmevcntr_op_start(env, counter);
+        ret = env->cp15.c14_pmevcntr[counter];
+        pmevcntr_op_finish(env, counter);
+        if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+            /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
+            ret &= MAKE_64BIT_MASK(0, 32);
+        }
+        return ret;
+    } else {
+        /*
+         * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
+         * are CONSTRAINED UNPREDICTABLE.
+         */
+        return 0;
+    }
+}
+
+static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    pmevcntr_write(env, ri, value, counter);
+}
+
+static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    return pmevcntr_read(env, ri, counter);
+}
+
+static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    assert(counter < pmu_num_counters(env));
+    env->cp15.c14_pmevcntr[counter] = value;
+    pmevcntr_write(env, ri, value, counter);
+}
+
+static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    assert(counter < pmu_num_counters(env));
+    return env->cp15.c14_pmevcntr[counter];
+}
+
+static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value)
+{
+    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
+}
+
+static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
+}
+
+static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                            uint64_t value)
+{
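+    /* v8 PMUSERENR implements the ER, CR, SW and EN bits; pre-v8 only EN */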
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        env->cp15.c9_pmuserenr = value & 0xf;
+    } else {
+        env->cp15.c9_pmuserenr = value & 1;
+    }
+}
+
+static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value)
+{
+    /* Writes only affect the bits for implemented counters */
+    value &= pmu_counter_mask(env);
+    env->cp15.c9_pminten |= value;
+    pmu_update_irq(env);
+}
+
+static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value)
+{
+    value &= pmu_counter_mask(env);
+    env->cp15.c9_pminten &= ~value;
+    pmu_update_irq(env);
+}
+
+static const ARMCPRegInfo v7_pm_reginfo[] = {
+    /*
+     * Performance monitors are implementation defined in v7,
+     * but with an ARM recommended set of registers, which we
+     * follow.
+     *
+     * Performance registers fall into three categories:
+     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
+     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
+     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
+     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
+     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
+     */
+    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
+      .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
+      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
+      .writefn = pmcntenset_write,
+      .accessfn = pmreg_access,
+      .fgt = FGT_PMCNTEN,
+      .raw_writefn = raw_write },
+    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
+      .access = PL0_RW, .accessfn = pmreg_access,
+      .fgt = FGT_PMCNTEN,
+      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
+      .writefn = pmcntenset_write, .raw_writefn = raw_write },
+    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
+      .access = PL0_RW,
+      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
+      .accessfn = pmreg_access,
+      .fgt = FGT_PMCNTEN,
+      .writefn = pmcntenclr_write, .raw_writefn = raw_write,
+      .type = ARM_CP_ALIAS | ARM_CP_IO },
+    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
+      .access = PL0_RW, .accessfn = pmreg_access,
+      .fgt = FGT_PMCNTEN,
+      .type = ARM_CP_ALIAS | ARM_CP_IO,
+      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
+      .writefn = pmcntenclr_write, .raw_writefn = raw_write },
+    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
+      .access = PL0_RW, .type = ARM_CP_IO,
+      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
+      .accessfn = pmreg_access,
+      .fgt = FGT_PMOVS,
+      .writefn = pmovsr_write,
+      .raw_writefn = raw_write },
+    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
+      .access = PL0_RW, .accessfn = pmreg_access,
+      .fgt = FGT_PMOVS,
+      .type = ARM_CP_ALIAS | ARM_CP_IO,
+      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+      .writefn = pmovsr_write,
+      .raw_writefn = raw_write },
+    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
+      .access = PL0_W, .accessfn = pmreg_access_swinc,
+      .fgt = FGT_PMSWINC_EL0,
+      .type = ARM_CP_NO_RAW | ARM_CP_IO,
+      .writefn = pmswinc_write },
+    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
+      .access = PL0_W, .accessfn = pmreg_access_swinc,
+      .fgt = FGT_PMSWINC_EL0,
+      .type = ARM_CP_NO_RAW | ARM_CP_IO,
+      .writefn = pmswinc_write },
+    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
+      .access = PL0_RW, .type = ARM_CP_ALIAS,
+      .fgt = FGT_PMSELR_EL0,
+      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
+      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
+      .raw_writefn = raw_write},
+    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
+      .access = PL0_RW, .accessfn = pmreg_access_selr,
+      .fgt = FGT_PMSELR_EL0,
+      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
+      .writefn = pmselr_write, .raw_writefn = raw_write, },
+    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
+      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
+      .fgt = FGT_PMCCNTR_EL0,
+      .readfn = pmccntr_read, .writefn = pmccntr_write32,
+      .accessfn = pmreg_access_ccntr },
+    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
+      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
+      .fgt = FGT_PMCCNTR_EL0,
+      .type = ARM_CP_IO,
+      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
+      .readfn = pmccntr_read, .writefn = pmccntr_write,
+      .raw_readfn = raw_read, .raw_writefn = raw_write, },
+    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
+      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
+      .access = PL0_RW, .accessfn = pmreg_access,
+      .fgt = FGT_PMCCFILTR_EL0,
+      .type = ARM_CP_ALIAS | ARM_CP_IO,
+      .resetvalue = 0, },
+    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
+      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
+      .access = PL0_RW, .accessfn = pmreg_access,
+      .fgt = FGT_PMCCFILTR_EL0,
+      .type = ARM_CP_IO,
+      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
+      .resetvalue = 0, },
+    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
+      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+      .accessfn = pmreg_access,
+      .fgt = FGT_PMEVTYPERN_EL0,
+      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
+    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
+      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+      .accessfn = pmreg_access,
+      .fgt = FGT_PMEVTYPERN_EL0,
+      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
+    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
+      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+      .accessfn = pmreg_access_xevcntr,
+      .fgt = FGT_PMEVCNTRN_EL0,
+      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
+    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
+      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+      .accessfn = pmreg_access_xevcntr,
+      .fgt = FGT_PMEVCNTRN_EL0,
+      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
+    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
+      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
+      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
+      .resetvalue = 0,
+      .writefn = pmuserenr_write, .raw_writefn = raw_write },
+    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
+      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
+      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
+      .resetvalue = 0,
+      .writefn = pmuserenr_write, .raw_writefn = raw_write },
+    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
+      .access = PL1_RW, .accessfn = access_tpm,
+      .fgt = FGT_PMINTEN,
+      .type = ARM_CP_ALIAS | ARM_CP_IO,
+      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
+      .resetvalue = 0,
+      .writefn = pmintenset_write, .raw_writefn = raw_write },
+    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
+      .access = PL1_RW, .accessfn = access_tpm,
+      .fgt = FGT_PMINTEN,
+      .type = ARM_CP_IO,
+      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+      .writefn = pmintenset_write, .raw_writefn = raw_write,
+      .resetvalue = 0x0 },
+    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
+      .access = PL1_RW, .accessfn = access_tpm,
+      .fgt = FGT_PMINTEN,
+      .type = ARM_CP_ALIAS | ARM_CP_IO,
+      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+      .writefn = pmintenclr_write, .raw_writefn = raw_write },
+    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
+      .access = PL1_RW, .accessfn = access_tpm,
+      .fgt = FGT_PMINTEN,
+      .type = ARM_CP_ALIAS | ARM_CP_IO,
+      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+      .writefn = pmintenclr_write, .raw_writefn = raw_write },
+};
+
+static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
+    /* PMOVSSET is not implemented in v7 before v7ve */
+    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
+      .access = PL0_RW, .accessfn = pmreg_access,
+      .fgt = FGT_PMOVS,
+      .type = ARM_CP_ALIAS | ARM_CP_IO,
+      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
+      .writefn = pmovsset_write,
+      .raw_writefn = raw_write },
+    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
+      .access = PL0_RW, .accessfn = pmreg_access,
+      .fgt = FGT_PMOVS,
+      .type = ARM_CP_ALIAS | ARM_CP_IO,
+      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+      .writefn = pmovsset_write,
+      .raw_writefn = raw_write },
+};
+
+void define_pm_cpregs(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+
+    if (arm_feature(env, ARM_FEATURE_V7)) {
+        /*
+         * v7 performance monitor control register: same implementor
+         * field as main ID register; the number of event counters
+         * implemented (in addition to the cycle counter) comes from
+         * the reset value of PMCR.N.
+         */
+        static const ARMCPRegInfo pmcr = {
+            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
+            .access = PL0_RW,
+            .fgt = FGT_PMCR_EL0,
+            .type = ARM_CP_IO | ARM_CP_ALIAS,
+            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
+            .accessfn = pmreg_access,
+            .readfn = pmcr_read, .raw_readfn = raw_read,
+            .writefn = pmcr_write, .raw_writefn = raw_write,
+        };
+        const ARMCPRegInfo pmcr64 = {
+            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
+            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
+            .access = PL0_RW, .accessfn = pmreg_access,
+            .fgt = FGT_PMCR_EL0,
+            .type = ARM_CP_IO,
+            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
+            .resetvalue = cpu->isar.reset_pmcr_el0,
+            .readfn = pmcr_read, .raw_readfn = raw_read,
+            .writefn = pmcr_write, .raw_writefn = raw_write,
+        };
+
+        define_one_arm_cp_reg(cpu, &pmcr);
+        define_one_arm_cp_reg(cpu, &pmcr64);
+        define_arm_cp_regs(cpu, v7_pm_reginfo);
+
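+        /* Per-counter PMEVCNTR<n> and PMEVTYPER<n>, AArch32 and AArch64 */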
+        for (unsigned i = 0, pmcrn = pmu_num_counters(env); i < pmcrn; i++) {
+            g_autofree char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
+            g_autofree char *pmevcntr_el0_name =
+                g_strdup_printf("PMEVCNTR%d_EL0", i);
+            g_autofree char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
+            g_autofree char *pmevtyper_el0_name =
+                g_strdup_printf("PMEVTYPER%d_EL0", i);
+
+            ARMCPRegInfo pmev_regs[] = {
+                { .name = pmevcntr_name, .cp = 15, .crn = 14,
+                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
+                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
+                  .fgt = FGT_PMEVCNTRN_EL0,
+                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
+                  .accessfn = pmreg_access_xevcntr },
+                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
+                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
+                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
+                  .type = ARM_CP_IO,
+                  .fgt = FGT_PMEVCNTRN_EL0,
+                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
+                  .raw_readfn = pmevcntr_rawread,
+                  .raw_writefn = pmevcntr_rawwrite },
+                { .name = pmevtyper_name, .cp = 15, .crn = 14,
+                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
+                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
+                  .fgt = FGT_PMEVTYPERN_EL0,
+                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
+                  .accessfn = pmreg_access },
+                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
+                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
+                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
+                  .fgt = FGT_PMEVTYPERN_EL0,
+                  .type = ARM_CP_IO,
+                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
+                  .raw_writefn = pmevtyper_rawwrite },
+            };
+            define_arm_cp_regs(cpu, pmev_regs);
+        }
+    }
+    if (arm_feature(env, ARM_FEATURE_V7VE)) {
+        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
+    }
+
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        const ARMCPRegInfo v8_pm_reginfo[] = {
+            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
+              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
+              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+              .fgt = FGT_PMCEIDN_EL0,
+              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
+            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
+              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
+              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+              .fgt = FGT_PMCEIDN_EL0,
+              .resetvalue = cpu->pmceid0 },
+            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
+              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
+              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+              .fgt = FGT_PMCEIDN_EL0,
+              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
+            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
+              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
+              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+              .fgt = FGT_PMCEIDN_EL0,
+              .resetvalue = cpu->pmceid1 },
+        };
+        define_arm_cp_regs(cpu, v8_pm_reginfo);
+    }
+
+    if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
+        ARMCPRegInfo v81_pmu_regs[] = {
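+            /* PMCEID2/3 expose the top halves of PMCEID0/1 to AArch32 */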
+            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
+              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
+              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+              .fgt = FGT_PMCEIDN_EL0,
+              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
+            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
+              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
+              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+              .fgt = FGT_PMCEIDN_EL0,
+              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
+        };
+        define_arm_cp_regs(cpu, v81_pmu_regs);
+    }
+
+    if (cpu_isar_feature(any_pmuv3p4, cpu)) {
+        static const ARMCPRegInfo v84_pmmir = {
+            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
+            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
+            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+            .fgt = FGT_PMMIR_EL1,
+            .resetvalue = 0
+        };
+        define_one_arm_cp_reg(cpu, &v84_pmmir);
+    }
+}
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index c1a7ae3735..c9506aa6d5 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -1065,6 +1065,9 @@ void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
 /* CPReadFn that can be used for read-as-zero behaviour */
 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);
 
+/* CPReadFn that just reads the value from ri->fieldoffset */
+uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri);
+
 /* CPWriteFn that just writes the value to ri->fieldoffset */
 void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value);
 
diff --git a/target/arm/cpu-sysregs.h.inc b/target/arm/cpu-sysregs.h.inc
index cb99286f70..f48a9daa7c 100644
--- a/target/arm/cpu-sysregs.h.inc
+++ b/target/arm/cpu-sysregs.h.inc
@@ -4,6 +4,8 @@ DEF(ID_AA64PFR1_EL1, 3, 0, 0, 4, 1)
 DEF(ID_AA64SMFR0_EL1, 3, 0, 0, 4, 5)
 DEF(ID_AA64DFR0_EL1, 3, 0, 0, 5, 0)
 DEF(ID_AA64DFR1_EL1, 3, 0, 0, 5, 1)
+DEF(ID_AA64AFR0_EL1, 3, 0, 0, 5, 4)
+DEF(ID_AA64AFR1_EL1, 3, 0, 0, 5, 5)
 DEF(ID_AA64ISAR0_EL1, 3, 0, 0, 6, 0)
 DEF(ID_AA64ISAR1_EL1, 3, 0, 0, 6, 1)
 DEF(ID_AA64ISAR2_EL1, 3, 0, 0, 6, 2)
@@ -14,6 +16,7 @@ DEF(ID_AA64MMFR3_EL1, 3, 0, 0, 7, 3)
 DEF(ID_PFR0_EL1, 3, 0, 0, 1, 0)
 DEF(ID_PFR1_EL1, 3, 0, 0, 1, 1)
 DEF(ID_DFR0_EL1, 3, 0, 0, 1, 2)
+DEF(ID_AFR0_EL1, 3, 0, 0, 1, 3)
 DEF(ID_MMFR0_EL1, 3, 0, 0, 1, 4)
 DEF(ID_MMFR1_EL1, 3, 0, 0, 1, 5)
 DEF(ID_MMFR2_EL1, 3, 0, 0, 1, 6)
@@ -32,5 +35,6 @@ DEF(MVFR2_EL1, 3, 0, 0, 3, 2)
 DEF(ID_PFR2_EL1, 3, 0, 0, 3, 4)
 DEF(ID_DFR1_EL1, 3, 0, 0, 3, 5)
 DEF(ID_MMFR5_EL1, 3, 0, 0, 3, 6)
+DEF(CLIDR_EL1, 3, 1, 0, 0, 1)
 DEF(ID_AA64ZFR0_EL1, 3, 0, 0, 4, 4)
 DEF(CTR_EL0, 3, 3, 0, 0, 1)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index c8cf0ab417..dc9b6dce4c 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1082,10 +1082,6 @@ struct ArchCPU {
     uint32_t reset_sctlr;
     uint64_t pmceid0;
     uint64_t pmceid1;
-    uint32_t id_afr0;
-    uint64_t id_aa64afr0;
-    uint64_t id_aa64afr1;
-    uint64_t clidr;
     uint64_t mp_affinity; /* MP ID without feature bits */
     /* The elements of this array are the CCSIDR values for each cache,
      * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
@@ -2948,7 +2944,7 @@ static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu)
     /* If all the CLIDR.Ctypem bits are 0 there are no caches, and
      * CSSELR is RAZ/WI.
      */
-    return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
+    return (GET_IDREG(&cpu->isar, CLIDR) & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
 }
 
 static inline bool arm_sctlr_b(CPUARMState *env)
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index bd33d6cc6e..26cf7e6dfa 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -663,7 +663,7 @@ static void aarch64_a57_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00000131);
     SET_IDREG(isar, ID_PFR1, 0x00011011);
     SET_IDREG(isar, ID_DFR0, 0x03010066);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x10101105);
     SET_IDREG(isar, ID_MMFR1, 0x40000000);
     SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -683,7 +683,7 @@ static void aarch64_a57_initfn(Object *obj)
     cpu->isar.dbgdevid = 0x01110f13;
     cpu->isar.dbgdevid1 = 0x2;
     cpu->isar.reset_pmcr_el0 = 0x41013000;
-    cpu->clidr = 0x0a200023;
+    SET_IDREG(isar, CLIDR, 0x0a200023);
     /* 32KB L1 dcache */
     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
     /* 48KB L1 icache */
@@ -725,7 +725,7 @@ static void aarch64_a53_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00000131);
     SET_IDREG(isar, ID_PFR1, 0x00011011);
     SET_IDREG(isar, ID_DFR0, 0x03010066);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x10101105);
     SET_IDREG(isar, ID_MMFR1, 0x40000000);
     SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -745,7 +745,7 @@ static void aarch64_a53_initfn(Object *obj)
     cpu->isar.dbgdevid = 0x00110f13;
     cpu->isar.dbgdevid1 = 0x1;
     cpu->isar.reset_pmcr_el0 = 0x41033000;
-    cpu->clidr = 0x0a200023;
+    SET_IDREG(isar, CLIDR, 0x0a200023);
     /* 32KB L1 dcache */
     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
     /* 32KB L1 icache */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index b3f0d6f17a..0c1299ff84 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -38,11 +38,9 @@
 #define HELPER_H "tcg/helper.h"
 #include "exec/helper-proto.h.inc"
 
-#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
-
 static void switch_mode(CPUARMState *env, int mode);
 
-static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
+uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     assert(ri->fieldoffset);
     if (cpreg_field_is_64bit(ri)) {
@@ -270,7 +268,7 @@ void init_cpreg_list(ARMCPU *cpu)
     g_list_free(keys);
 }
 
-static bool arm_pan_enabled(CPUARMState *env)
+bool arm_pan_enabled(CPUARMState *env)
 {
     if (is_a64(env)) {
         if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
@@ -319,25 +317,6 @@ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
     return CP_ACCESS_UNDEFINED;
 }
 
-/*
- * Check for traps to performance monitor registers, which are controlled
- * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
- */
-static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
-                                 bool isread)
-{
-    int el = arm_current_el(env);
-    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
-
-    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
-        return CP_ACCESS_TRAP_EL2;
-    }
-    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
-        return CP_ACCESS_TRAP_EL3;
-    }
-    return CP_ACCESS_OK;
-}
-
 /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
 CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                bool isread)
@@ -681,283 +660,6 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
       .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
 };
 
-typedef struct pm_event {
-    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
-    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
-    bool (*supported)(CPUARMState *);
-    /*
-     * Retrieve the current count of the underlying event. The programmed
-     * counters hold a difference from the return value from this function
-     */
-    uint64_t (*get_count)(CPUARMState *);
-    /*
-     * Return how many nanoseconds it will take (at a minimum) for count events
-     * to occur. A negative value indicates the counter will never overflow, or
-     * that the counter has otherwise arranged for the overflow bit to be set
-     * and the PMU interrupt to be raised on overflow.
-     */
-    int64_t (*ns_per_count)(uint64_t);
-} pm_event;
-
-static bool event_always_supported(CPUARMState *env)
-{
-    return true;
-}
-
-static uint64_t swinc_get_count(CPUARMState *env)
-{
-    /*
-     * SW_INCR events are written directly to the pmevcntr's by writes to
-     * PMSWINC, so there is no underlying count maintained by the PMU itself
-     */
-    return 0;
-}
-
-static int64_t swinc_ns_per(uint64_t ignored)
-{
-    return -1;
-}
-
-/*
- * Return the underlying cycle count for the PMU cycle counters. If we're in
- * usermode, simply return 0.
- */
-static uint64_t cycles_get_count(CPUARMState *env)
-{
-#ifndef CONFIG_USER_ONLY
-    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
-                   ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
-#else
-    return cpu_get_host_ticks();
-#endif
-}
-
-#ifndef CONFIG_USER_ONLY
-static int64_t cycles_ns_per(uint64_t cycles)
-{
-    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
-}
-
-static bool instructions_supported(CPUARMState *env)
-{
-    /* Precise instruction counting */
-    return icount_enabled() == ICOUNT_PRECISE;
-}
-
-static uint64_t instructions_get_count(CPUARMState *env)
-{
-    assert(icount_enabled() == ICOUNT_PRECISE);
-    return (uint64_t)icount_get_raw();
-}
-
-static int64_t instructions_ns_per(uint64_t icount)
-{
-    assert(icount_enabled() == ICOUNT_PRECISE);
-    return icount_to_ns((int64_t)icount);
-}
-#endif
-
-static bool pmuv3p1_events_supported(CPUARMState *env)
-{
-    /* For events which are supported in any v8.1 PMU */
-    return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
-}
-
-static bool pmuv3p4_events_supported(CPUARMState *env)
-{
-    /* For events which are supported in any v8.1 PMU */
-    return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
-}
-
-static uint64_t zero_event_get_count(CPUARMState *env)
-{
-    /* For events which on QEMU never fire, so their count is always zero */
-    return 0;
-}
-
-static int64_t zero_event_ns_per(uint64_t cycles)
-{
-    /* An event which never fires can never overflow */
-    return -1;
-}
-
-static const pm_event pm_events[] = {
-    { .number = 0x000, /* SW_INCR */
-      .supported = event_always_supported,
-      .get_count = swinc_get_count,
-      .ns_per_count = swinc_ns_per,
-    },
-#ifndef CONFIG_USER_ONLY
-    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
-      .supported = instructions_supported,
-      .get_count = instructions_get_count,
-      .ns_per_count = instructions_ns_per,
-    },
-    { .number = 0x011, /* CPU_CYCLES, Cycle */
-      .supported = event_always_supported,
-      .get_count = cycles_get_count,
-      .ns_per_count = cycles_ns_per,
-    },
-#endif
-    { .number = 0x023, /* STALL_FRONTEND */
-      .supported = pmuv3p1_events_supported,
-      .get_count = zero_event_get_count,
-      .ns_per_count = zero_event_ns_per,
-    },
-    { .number = 0x024, /* STALL_BACKEND */
-      .supported = pmuv3p1_events_supported,
-      .get_count = zero_event_get_count,
-      .ns_per_count = zero_event_ns_per,
-    },
-    { .number = 0x03c, /* STALL */
-      .supported = pmuv3p4_events_supported,
-      .get_count = zero_event_get_count,
-      .ns_per_count = zero_event_ns_per,
-    },
-};
-
-/*
- * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
- * events (i.e. the statistical profiling extension), this implementation
- * should first be updated to something sparse instead of the current
- * supported_event_map[] array.
- */
-#define MAX_EVENT_ID 0x3c
-#define UNSUPPORTED_EVENT UINT16_MAX
-static uint16_t supported_event_map[MAX_EVENT_ID + 1];
-
-/*
- * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
- * of ARM event numbers to indices in our pm_events array.
- *
- * Note: Events in the 0x40XX range are not currently supported.
- */
-void pmu_init(ARMCPU *cpu)
-{
-    unsigned int i;
-
-    /*
-     * Empty supported_event_map and cpu->pmceid[01] before adding supported
-     * events to them
-     */
-    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
-        supported_event_map[i] = UNSUPPORTED_EVENT;
-    }
-    cpu->pmceid0 = 0;
-    cpu->pmceid1 = 0;
-
-    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
-        const pm_event *cnt = &pm_events[i];
-        assert(cnt->number <= MAX_EVENT_ID);
-        /* We do not currently support events in the 0x40xx range */
-        assert(cnt->number <= 0x3f);
-
-        if (cnt->supported(&cpu->env)) {
-            supported_event_map[cnt->number] = i;
-            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
-            if (cnt->number & 0x20) {
-                cpu->pmceid1 |= event_mask;
-            } else {
-                cpu->pmceid0 |= event_mask;
-            }
-        }
-    }
-}
-
-/*
- * Check at runtime whether a PMU event is supported for the current machine
- */
-static bool event_supported(uint16_t number)
-{
-    if (number > MAX_EVENT_ID) {
-        return false;
-    }
-    return supported_event_map[number] != UNSUPPORTED_EVENT;
-}
-
-static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
-                                   bool isread)
-{
-    /*
-     * Performance monitor registers user accessibility is controlled
-     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
-     * trapping to EL2 or EL3 for other accesses.
-     */
-    int el = arm_current_el(env);
-    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
-
-    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
-        return CP_ACCESS_TRAP_EL1;
-    }
-    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
-        return CP_ACCESS_TRAP_EL2;
-    }
-    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
-        return CP_ACCESS_TRAP_EL3;
-    }
-
-    return CP_ACCESS_OK;
-}
-
-static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
-                                           const ARMCPRegInfo *ri,
-                                           bool isread)
-{
-    /* ER: event counter read trap control */
-    if (arm_feature(env, ARM_FEATURE_V8)
-        && arm_current_el(env) == 0
-        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
-        && isread) {
-        return CP_ACCESS_OK;
-    }
-
-    return pmreg_access(env, ri, isread);
-}
-
-static CPAccessResult pmreg_access_swinc(CPUARMState *env,
-                                         const ARMCPRegInfo *ri,
-                                         bool isread)
-{
-    /* SW: software increment write trap control */
-    if (arm_feature(env, ARM_FEATURE_V8)
-        && arm_current_el(env) == 0
-        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
-        && !isread) {
-        return CP_ACCESS_OK;
-    }
-
-    return pmreg_access(env, ri, isread);
-}
-
-static CPAccessResult pmreg_access_selr(CPUARMState *env,
-                                        const ARMCPRegInfo *ri,
-                                        bool isread)
-{
-    /* ER: event counter read trap control */
-    if (arm_feature(env, ARM_FEATURE_V8)
-        && arm_current_el(env) == 0
-        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
-        return CP_ACCESS_OK;
-    }
-
-    return pmreg_access(env, ri, isread);
-}
-
-static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
-                                         const ARMCPRegInfo *ri,
-                                         bool isread)
-{
-    /* CR: cycle counter read trap control */
-    if (arm_feature(env, ARM_FEATURE_V8)
-        && arm_current_el(env) == 0
-        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
-        && isread) {
-        return CP_ACCESS_OK;
-    }
-
-    return pmreg_access(env, ri, isread);
-}
-
 /*
  * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
  * We use these to decide whether we need to wrap a write to MDCR_EL2
@@ -967,684 +669,6 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
     (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
 #define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
 
-/*
- * Returns true if the counter (pass 31 for PMCCNTR) should count events using
- * the current EL, security state, and register configuration.
- */
-static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
-{
-    uint64_t filter;
-    bool e, p, u, nsk, nsu, nsh, m;
-    bool enabled, prohibited = false, filtered;
-    bool secure = arm_is_secure(env);
-    int el = arm_current_el(env);
-    uint64_t mdcr_el2;
-    uint8_t hpmn;
-
-    /*
-     * We might be called for M-profile cores where MDCR_EL2 doesn't
-     * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
-     * must be before we read that value.
-     */
-    if (!arm_feature(env, ARM_FEATURE_PMU)) {
-        return false;
-    }
-
-    mdcr_el2 = arm_mdcr_el2_eff(env);
-    hpmn = mdcr_el2 & MDCR_HPMN;
-
-    if (!arm_feature(env, ARM_FEATURE_EL2) ||
-            (counter < hpmn || counter == 31)) {
-        e = env->cp15.c9_pmcr & PMCRE;
-    } else {
-        e = mdcr_el2 & MDCR_HPME;
-    }
-    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
-
-    /* Is event counting prohibited? */
-    if (el == 2 && (counter < hpmn || counter == 31)) {
-        prohibited = mdcr_el2 & MDCR_HPMD;
-    }
-    if (secure) {
-        prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
-    }
-
-    if (counter == 31) {
-        /*
-         * The cycle counter defaults to running. PMCR.DP says "disable
-         * the cycle counter when event counting is prohibited".
-         * Some MDCR bits disable the cycle counter specifically.
-         */
-        prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
-        if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
-            if (secure) {
-                prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
-            }
-            if (el == 2) {
-                prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
-            }
-        }
-    }
-
-    if (counter == 31) {
-        filter = env->cp15.pmccfiltr_el0;
-    } else {
-        filter = env->cp15.c14_pmevtyper[counter];
-    }
-
-    p   = filter & PMXEVTYPER_P;
-    u   = filter & PMXEVTYPER_U;
-    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
-    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
-    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
-    m   = arm_el_is_aa64(env, 1) &&
-              arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
-
-    if (el == 0) {
-        filtered = secure ? u : u != nsu;
-    } else if (el == 1) {
-        filtered = secure ? p : p != nsk;
-    } else if (el == 2) {
-        filtered = !nsh;
-    } else { /* EL3 */
-        filtered = m != p;
-    }
-
-    if (counter != 31) {
-        /*
-         * If not checking PMCCNTR, ensure the counter is setup to an event we
-         * support
-         */
-        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
-        if (!event_supported(event)) {
-            return false;
-        }
-    }
-
-    return enabled && !prohibited && !filtered;
-}
-
-static void pmu_update_irq(CPUARMState *env)
-{
-    ARMCPU *cpu = env_archcpu(env);
-    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
-            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
-}
-
-static bool pmccntr_clockdiv_enabled(CPUARMState *env)
-{
-    /*
-     * Return true if the clock divider is enabled and the cycle counter
-     * is supposed to tick only once every 64 clock cycles. This is
-     * controlled by PMCR.D, but if PMCR.LC is set to enable the long
-     * (64-bit) cycle counter PMCR.D has no effect.
-     */
-    return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
-}
-
-static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
-{
-    /* Return true if the specified event counter is configured to be 64 bit */
-
-    /* This isn't intended to be used with the cycle counter */
-    assert(counter < 31);
-
-    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
-        return false;
-    }
-
-    if (arm_feature(env, ARM_FEATURE_EL2)) {
-        /*
-         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
-         * current security state, so we don't use arm_mdcr_el2_eff() here.
-         */
-        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
-        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
-
-        if (counter >= hpmn) {
-            return hlp;
-        }
-    }
-    return env->cp15.c9_pmcr & PMCRLP;
-}
-
-/*
- * Ensure c15_ccnt is the guest-visible count so that operations such as
- * enabling/disabling the counter or filtering, modifying the count itself,
- * etc. can be done logically. This is essentially a no-op if the counter is
- * not enabled at the time of the call.
- */
-static void pmccntr_op_start(CPUARMState *env)
-{
-    uint64_t cycles = cycles_get_count(env);
-
-    if (pmu_counter_enabled(env, 31)) {
-        uint64_t eff_cycles = cycles;
-        if (pmccntr_clockdiv_enabled(env)) {
-            eff_cycles /= 64;
-        }
-
-        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
-
-        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
-                                 1ull << 63 : 1ull << 31;
-        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
-            env->cp15.c9_pmovsr |= (1ULL << 31);
-            pmu_update_irq(env);
-        }
-
-        env->cp15.c15_ccnt = new_pmccntr;
-    }
-    env->cp15.c15_ccnt_delta = cycles;
-}
-
-/*
- * If PMCCNTR is enabled, recalculate the delta between the clock and the
- * guest-visible count. A call to pmccntr_op_finish should follow every call to
- * pmccntr_op_start.
- */
-static void pmccntr_op_finish(CPUARMState *env)
-{
-    if (pmu_counter_enabled(env, 31)) {
-#ifndef CONFIG_USER_ONLY
-        /* Calculate when the counter will next overflow */
-        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
-        if (!(env->cp15.c9_pmcr & PMCRLC)) {
-            remaining_cycles = (uint32_t)remaining_cycles;
-        }
-        int64_t overflow_in = cycles_ns_per(remaining_cycles);
-
-        if (overflow_in > 0) {
-            int64_t overflow_at;
-
-            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
-                                 overflow_in, &overflow_at)) {
-                ARMCPU *cpu = env_archcpu(env);
-                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
-            }
-        }
-#endif
-
-        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
-        if (pmccntr_clockdiv_enabled(env)) {
-            prev_cycles /= 64;
-        }
-        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
-    }
-}
-
-static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
-{
-
-    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
-    uint64_t count = 0;
-    if (event_supported(event)) {
-        uint16_t event_idx = supported_event_map[event];
-        count = pm_events[event_idx].get_count(env);
-    }
-
-    if (pmu_counter_enabled(env, counter)) {
-        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
-        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
-            1ULL << 63 : 1ULL << 31;
-
-        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
-            env->cp15.c9_pmovsr |= (1 << counter);
-            pmu_update_irq(env);
-        }
-        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
-    }
-    env->cp15.c14_pmevcntr_delta[counter] = count;
-}
-
-static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
-{
-    if (pmu_counter_enabled(env, counter)) {
-#ifndef CONFIG_USER_ONLY
-        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
-        uint16_t event_idx = supported_event_map[event];
-        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
-        int64_t overflow_in;
-
-        if (!pmevcntr_is_64_bit(env, counter)) {
-            delta = (uint32_t)delta;
-        }
-        overflow_in = pm_events[event_idx].ns_per_count(delta);
-
-        if (overflow_in > 0) {
-            int64_t overflow_at;
-
-            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
-                                 overflow_in, &overflow_at)) {
-                ARMCPU *cpu = env_archcpu(env);
-                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
-            }
-        }
-#endif
-
-        env->cp15.c14_pmevcntr_delta[counter] -=
-            env->cp15.c14_pmevcntr[counter];
-    }
-}
-
-void pmu_op_start(CPUARMState *env)
-{
-    unsigned int i;
-    pmccntr_op_start(env);
-    for (i = 0; i < pmu_num_counters(env); i++) {
-        pmevcntr_op_start(env, i);
-    }
-}
-
-void pmu_op_finish(CPUARMState *env)
-{
-    unsigned int i;
-    pmccntr_op_finish(env);
-    for (i = 0; i < pmu_num_counters(env); i++) {
-        pmevcntr_op_finish(env, i);
-    }
-}
-
-void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
-{
-    pmu_op_start(&cpu->env);
-}
-
-void pmu_post_el_change(ARMCPU *cpu, void *ignored)
-{
-    pmu_op_finish(&cpu->env);
-}
-
-void arm_pmu_timer_cb(void *opaque)
-{
-    ARMCPU *cpu = opaque;
-
-    /*
-     * Update all the counter values based on the current underlying counts,
-     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
-     * has the effect of setting the cpu->pmu_timer to the next earliest time a
-     * counter may expire.
-     */
-    pmu_op_start(&cpu->env);
-    pmu_op_finish(&cpu->env);
-}
-
-static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                       uint64_t value)
-{
-    pmu_op_start(env);
-
-    if (value & PMCRC) {
-        /* The counter has been reset */
-        env->cp15.c15_ccnt = 0;
-    }
-
-    if (value & PMCRP) {
-        unsigned int i;
-        for (i = 0; i < pmu_num_counters(env); i++) {
-            env->cp15.c14_pmevcntr[i] = 0;
-        }
-    }
-
-    env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
-    env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);
-
-    pmu_op_finish(env);
-}
-
-static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    uint64_t pmcr = env->cp15.c9_pmcr;
-
-    /*
-     * If EL2 is implemented and enabled for the current security state, reads
-     * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
-     */
-    if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
-        pmcr &= ~PMCRN_MASK;
-        pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
-    }
-
-    return pmcr;
-}
-
-static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                          uint64_t value)
-{
-    unsigned int i;
-    uint64_t overflow_mask, new_pmswinc;
-
-    for (i = 0; i < pmu_num_counters(env); i++) {
-        /* Increment a counter's count iff: */
-        if ((value & (1 << i)) && /* counter's bit is set */
-                /* counter is enabled and not filtered */
-                pmu_counter_enabled(env, i) &&
-                /* counter is SW_INCR */
-                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
-            pmevcntr_op_start(env, i);
-
-            /*
-             * Detect if this write causes an overflow since we can't predict
-             * PMSWINC overflows like we can for other events
-             */
-            new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
-
-            overflow_mask = pmevcntr_is_64_bit(env, i) ?
-                1ULL << 63 : 1ULL << 31;
-
-            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
-                env->cp15.c9_pmovsr |= (1 << i);
-                pmu_update_irq(env);
-            }
-
-            env->cp15.c14_pmevcntr[i] = new_pmswinc;
-
-            pmevcntr_op_finish(env, i);
-        }
-    }
-}
-
-static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    uint64_t ret;
-    pmccntr_op_start(env);
-    ret = env->cp15.c15_ccnt;
-    pmccntr_op_finish(env);
-    return ret;
-}
-
-static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                         uint64_t value)
-{
-    /*
-     * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
-     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
-     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
-     * accessed.
-     */
-    env->cp15.c9_pmselr = value & 0x1f;
-}
-
-static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                        uint64_t value)
-{
-    pmccntr_op_start(env);
-    env->cp15.c15_ccnt = value;
-    pmccntr_op_finish(env);
-}
-
-static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
-                            uint64_t value)
-{
-    uint64_t cur_val = pmccntr_read(env, NULL);
-
-    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
-}
-
-static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                            uint64_t value)
-{
-    pmccntr_op_start(env);
-    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
-    pmccntr_op_finish(env);
-}
-
-static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
-                            uint64_t value)
-{
-    pmccntr_op_start(env);
-    /* M is not accessible from AArch32 */
-    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
-        (value & PMCCFILTR);
-    pmccntr_op_finish(env);
-}
-
-static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    /* M is not visible in AArch32 */
-    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
-}
-
-static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                            uint64_t value)
-{
-    pmu_op_start(env);
-    value &= pmu_counter_mask(env);
-    env->cp15.c9_pmcnten |= value;
-    pmu_op_finish(env);
-}
-
-static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                             uint64_t value)
-{
-    pmu_op_start(env);
-    value &= pmu_counter_mask(env);
-    env->cp15.c9_pmcnten &= ~value;
-    pmu_op_finish(env);
-}
-
-static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                         uint64_t value)
-{
-    value &= pmu_counter_mask(env);
-    env->cp15.c9_pmovsr &= ~value;
-    pmu_update_irq(env);
-}
-
-static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                         uint64_t value)
-{
-    value &= pmu_counter_mask(env);
-    env->cp15.c9_pmovsr |= value;
-    pmu_update_irq(env);
-}
-
-static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                             uint64_t value, const uint8_t counter)
-{
-    if (counter == 31) {
-        pmccfiltr_write(env, ri, value);
-    } else if (counter < pmu_num_counters(env)) {
-        pmevcntr_op_start(env, counter);
-
-        /*
-         * If this counter's event type is changing, store the current
-         * underlying count for the new type in c14_pmevcntr_delta[counter] so
-         * pmevcntr_op_finish has the correct baseline when it converts back to
-         * a delta.
-         */
-        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
-            PMXEVTYPER_EVTCOUNT;
-        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
-        if (old_event != new_event) {
-            uint64_t count = 0;
-            if (event_supported(new_event)) {
-                uint16_t event_idx = supported_event_map[new_event];
-                count = pm_events[event_idx].get_count(env);
-            }
-            env->cp15.c14_pmevcntr_delta[counter] = count;
-        }
-
-        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
-        pmevcntr_op_finish(env, counter);
-    }
-    /*
-     * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
-     * PMSELR value is equal to or greater than the number of implemented
-     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
-     */
-}
-
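The comment in pmevtyper_write() above describes the delta scheme these counters use: the architectural value is the underlying event count minus a stored baseline, so changing the event type has to rebase that baseline against the new event's count. A standalone sketch of the idea (the two-event model and names are illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint64_t underlying[2];   /* free-running counts for two hypothetical event types */
static uint64_t delta;           /* baseline for the currently programmed event */
static int cur_event;

static uint64_t counter_read(void)
{
    return underlying[cur_event] - delta;
}

static void set_event_type(int new_event)
{
    uint64_t value = counter_read();          /* the architectural value must not jump */
    cur_event = new_event;
    delta = underlying[new_event] - value;    /* rebase against the new event's count */
}

int main(void)
{
    underlying[0] = 1000;
    underlying[1] = 5000;
    delta = 990;                              /* counter currently reads 10 */
    set_event_type(1);
    printf("%llu\n", (unsigned long long)counter_read());   /* still 10 */
    return 0;
}
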
-static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
-                               const uint8_t counter)
-{
-    if (counter == 31) {
-        return env->cp15.pmccfiltr_el0;
-    } else if (counter < pmu_num_counters(env)) {
-        return env->cp15.c14_pmevtyper[counter];
-    } else {
-      /*
-       * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
-       * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
-       */
-        return 0;
-    }
-}
-
-static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
-                              uint64_t value)
-{
-    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
-    pmevtyper_write(env, ri, value, counter);
-}
-
-static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
-                               uint64_t value)
-{
-    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
-    env->cp15.c14_pmevtyper[counter] = value;
-
-    /*
-     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
-     * pmu_op_finish calls when loading saved state for a migration. Because
-     * we're potentially updating the type of event here, the value written to
-     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
-     * different counter type. Therefore, we need to set this value to the
-     * current count for the counter type we're writing so that pmu_op_finish
-     * has the correct count for its calculation.
-     */
-    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
-    if (event_supported(event)) {
-        uint16_t event_idx = supported_event_map[event];
-        env->cp15.c14_pmevcntr_delta[counter] =
-            pm_events[event_idx].get_count(env);
-    }
-}
-
-static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
-    return pmevtyper_read(env, ri, counter);
-}
-
-static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                             uint64_t value)
-{
-    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
-}
-
-static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
-}
-
-static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                             uint64_t value, uint8_t counter)
-{
-    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
-        /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
-        value &= MAKE_64BIT_MASK(0, 32);
-    }
-    if (counter < pmu_num_counters(env)) {
-        pmevcntr_op_start(env, counter);
-        env->cp15.c14_pmevcntr[counter] = value;
-        pmevcntr_op_finish(env, counter);
-    }
-    /*
-     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
-     * are CONSTRAINED UNPREDICTABLE.
-     */
-}
-
-static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
-                              uint8_t counter)
-{
-    if (counter < pmu_num_counters(env)) {
-        uint64_t ret;
-        pmevcntr_op_start(env, counter);
-        ret = env->cp15.c14_pmevcntr[counter];
-        pmevcntr_op_finish(env, counter);
-        if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
-            /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
-            ret &= MAKE_64BIT_MASK(0, 32);
-        }
-        return ret;
-    } else {
-      /*
-       * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
-       * are CONSTRAINED UNPREDICTABLE.
-       */
-        return 0;
-    }
-}
-
-static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
-                             uint64_t value)
-{
-    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
-    pmevcntr_write(env, ri, value, counter);
-}
-
-static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
-    return pmevcntr_read(env, ri, counter);
-}
-
-static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
-                             uint64_t value)
-{
-    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
-    assert(counter < pmu_num_counters(env));
-    env->cp15.c14_pmevcntr[counter] = value;
-    pmevcntr_write(env, ri, value, counter);
-}
-
-static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
-    assert(counter < pmu_num_counters(env));
-    return env->cp15.c14_pmevcntr[counter];
-}
-
-static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                             uint64_t value)
-{
-    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
-}
-
-static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
-}
-
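The pmxevtyper_* and pmxevcntr_* functions above route the access through PMSELR.SEL: the value 31 targets the cycle counter's PMCCFILTR/PMCCNTR, and selections at or beyond the implemented counter count are treated as RAZ/WI. A standalone sketch of that selection logic (the four-counter limit is illustrative):

#include <stdint.h>
#include <stdio.h>

#define NUM_COUNTERS 4

static uint64_t evtyper[NUM_COUNTERS];
static uint64_t ccfiltr;

static void pmxevtyper_model_write(unsigned sel, uint64_t value)
{
    if (sel == 31) {
        ccfiltr = value;              /* cycle counter filter */
    } else if (sel < NUM_COUNTERS) {
        evtyper[sel] = value;         /* ordinary event counter */
    }
    /* out-of-range selections: write ignored (the RAZ/WI choice) */
}

int main(void)
{
    pmxevtyper_model_write(31, 0x80000000);
    pmxevtyper_model_write(2, 0x11);
    pmxevtyper_model_write(9, 0x22);  /* silently ignored */
    printf("%llx %llx\n", (unsigned long long)ccfiltr, (unsigned long long)evtyper[2]);
    return 0;
}
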
-static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                            uint64_t value)
-{
-    if (arm_feature(env, ARM_FEATURE_V8)) {
-        env->cp15.c9_pmuserenr = value & 0xf;
-    } else {
-        env->cp15.c9_pmuserenr = value & 1;
-    }
-}
-
-static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                             uint64_t value)
-{
-    /* We have no event counters so only the C bit can be changed */
-    value &= pmu_counter_mask(env);
-    env->cp15.c9_pminten |= value;
-    pmu_update_irq(env);
-}
-
-static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                             uint64_t value)
-{
-    value &= pmu_counter_mask(env);
-    env->cp15.c9_pminten &= ~value;
-    pmu_update_irq(env);
-}
-
 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
 {
@@ -1874,171 +898,6 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
       .access = PL1_W, .type = ARM_CP_NOP },
-    /*
-     * Performance monitors are implementation defined in v7,
-     * but with an ARM recommended set of registers, which we
-     * follow.
-     *
-     * Performance registers fall into three categories:
-     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
-     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
-     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
-     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
-     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
-     */
-    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
-      .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
-      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
-      .writefn = pmcntenset_write,
-      .accessfn = pmreg_access,
-      .fgt = FGT_PMCNTEN,
-      .raw_writefn = raw_write },
-    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
-      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
-      .access = PL0_RW, .accessfn = pmreg_access,
-      .fgt = FGT_PMCNTEN,
-      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
-      .writefn = pmcntenset_write, .raw_writefn = raw_write },
-    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
-      .access = PL0_RW,
-      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
-      .accessfn = pmreg_access,
-      .fgt = FGT_PMCNTEN,
-      .writefn = pmcntenclr_write, .raw_writefn = raw_write,
-      .type = ARM_CP_ALIAS | ARM_CP_IO },
-    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
-      .access = PL0_RW, .accessfn = pmreg_access,
-      .fgt = FGT_PMCNTEN,
-      .type = ARM_CP_ALIAS | ARM_CP_IO,
-      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
-      .writefn = pmcntenclr_write, .raw_writefn = raw_write },
-    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
-      .access = PL0_RW, .type = ARM_CP_IO,
-      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
-      .accessfn = pmreg_access,
-      .fgt = FGT_PMOVS,
-      .writefn = pmovsr_write,
-      .raw_writefn = raw_write },
-    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
-      .access = PL0_RW, .accessfn = pmreg_access,
-      .fgt = FGT_PMOVS,
-      .type = ARM_CP_ALIAS | ARM_CP_IO,
-      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
-      .writefn = pmovsr_write,
-      .raw_writefn = raw_write },
-    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
-      .access = PL0_W, .accessfn = pmreg_access_swinc,
-      .fgt = FGT_PMSWINC_EL0,
-      .type = ARM_CP_NO_RAW | ARM_CP_IO,
-      .writefn = pmswinc_write },
-    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
-      .access = PL0_W, .accessfn = pmreg_access_swinc,
-      .fgt = FGT_PMSWINC_EL0,
-      .type = ARM_CP_NO_RAW | ARM_CP_IO,
-      .writefn = pmswinc_write },
-    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
-      .access = PL0_RW, .type = ARM_CP_ALIAS,
-      .fgt = FGT_PMSELR_EL0,
-      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
-      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
-      .raw_writefn = raw_write},
-    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
-      .access = PL0_RW, .accessfn = pmreg_access_selr,
-      .fgt = FGT_PMSELR_EL0,
-      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
-      .writefn = pmselr_write, .raw_writefn = raw_write, },
-    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
-      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
-      .fgt = FGT_PMCCNTR_EL0,
-      .readfn = pmccntr_read, .writefn = pmccntr_write32,
-      .accessfn = pmreg_access_ccntr },
-    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
-      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
-      .fgt = FGT_PMCCNTR_EL0,
-      .type = ARM_CP_IO,
-      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
-      .readfn = pmccntr_read, .writefn = pmccntr_write,
-      .raw_readfn = raw_read, .raw_writefn = raw_write, },
-    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
-      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
-      .access = PL0_RW, .accessfn = pmreg_access,
-      .fgt = FGT_PMCCFILTR_EL0,
-      .type = ARM_CP_ALIAS | ARM_CP_IO,
-      .resetvalue = 0, },
-    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
-      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
-      .access = PL0_RW, .accessfn = pmreg_access,
-      .fgt = FGT_PMCCFILTR_EL0,
-      .type = ARM_CP_IO,
-      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
-      .resetvalue = 0, },
-    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
-      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
-      .accessfn = pmreg_access,
-      .fgt = FGT_PMEVTYPERN_EL0,
-      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
-    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
-      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
-      .accessfn = pmreg_access,
-      .fgt = FGT_PMEVTYPERN_EL0,
-      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
-    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
-      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
-      .accessfn = pmreg_access_xevcntr,
-      .fgt = FGT_PMEVCNTRN_EL0,
-      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
-    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
-      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
-      .accessfn = pmreg_access_xevcntr,
-      .fgt = FGT_PMEVCNTRN_EL0,
-      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
-    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
-      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
-      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
-      .resetvalue = 0,
-      .writefn = pmuserenr_write, .raw_writefn = raw_write },
-    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
-      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
-      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
-      .resetvalue = 0,
-      .writefn = pmuserenr_write, .raw_writefn = raw_write },
-    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
-      .access = PL1_RW, .accessfn = access_tpm,
-      .fgt = FGT_PMINTEN,
-      .type = ARM_CP_ALIAS | ARM_CP_IO,
-      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
-      .resetvalue = 0,
-      .writefn = pmintenset_write, .raw_writefn = raw_write },
-    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
-      .access = PL1_RW, .accessfn = access_tpm,
-      .fgt = FGT_PMINTEN,
-      .type = ARM_CP_IO,
-      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
-      .writefn = pmintenset_write, .raw_writefn = raw_write,
-      .resetvalue = 0x0 },
-    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
-      .access = PL1_RW, .accessfn = access_tpm,
-      .fgt = FGT_PMINTEN,
-      .type = ARM_CP_ALIAS | ARM_CP_IO,
-      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
-      .writefn = pmintenclr_write, .raw_writefn = raw_write },
-    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
-      .access = PL1_RW, .accessfn = access_tpm,
-      .fgt = FGT_PMINTEN,
-      .type = ARM_CP_ALIAS | ARM_CP_IO,
-      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
-      .writefn = pmintenclr_write, .raw_writefn = raw_write },
     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
       .access = PL1_R,
@@ -2121,25 +980,6 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
       .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
 };
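The category (a)/(b)/(c) comment near the top of the removed PMU entries above hinges on PMUSERENR.EN gating EL0 access. A minimal standalone model of that gate (the real pmreg_access helpers additionally honour the EL2/EL3 performance-monitor trap controls):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Category (c): UNDEF at PL0 unless PMUSERENR.EN is set; PL1 and above always allowed. */
static bool pm_access_ok(int current_el, uint32_t pmuserenr)
{
    return current_el >= 1 || (pmuserenr & 1);
}

int main(void)
{
    printf("%d %d %d\n",
           pm_access_ok(0, 0),    /* 0: EL0, EN clear -> UNDEF */
           pm_access_ok(0, 1),    /* 1: EL0, EN set   -> allowed */
           pm_access_ok(1, 0));   /* 1: EL1           -> allowed */
    return 0;
}
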
 
-static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
-    /* PMOVSSET is not implemented in v7 before v7ve */
-    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
-      .access = PL0_RW, .accessfn = pmreg_access,
-      .fgt = FGT_PMOVS,
-      .type = ARM_CP_ALIAS | ARM_CP_IO,
-      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
-      .writefn = pmovsset_write,
-      .raw_writefn = raw_write },
-    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
-      .access = PL0_RW, .accessfn = pmreg_access,
-      .fgt = FGT_PMOVS,
-      .type = ARM_CP_ALIAS | ARM_CP_IO,
-      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
-      .writefn = pmovsset_write,
-      .raw_writefn = raw_write },
-};
-
 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
 {
@@ -3448,402 +2288,6 @@ static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
     }
 }
 
-#ifndef CONFIG_USER_ONLY
-/* get_phys_addr() isn't present for user-mode-only targets */
-
-static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
-                                 bool isread)
-{
-    if (ri->opc2 & 4) {
-        /*
-         * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
-         * Secure EL1 (which can only happen if EL3 is AArch64).
-         * They are simply UNDEF if executed from NS EL1.
-         * They function normally from EL2 or EL3.
-         */
-        if (arm_current_el(env) == 1) {
-            if (arm_is_secure_below_el3(env)) {
-                if (env->cp15.scr_el3 & SCR_EEL2) {
-                    return CP_ACCESS_TRAP_EL2;
-                }
-                return CP_ACCESS_TRAP_EL3;
-            }
-            return CP_ACCESS_UNDEFINED;
-        }
-    }
-    return CP_ACCESS_OK;
-}
-
-#ifdef CONFIG_TCG
-static int par_el1_shareability(GetPhysAddrResult *res)
-{
-    /*
-     * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
-     * memory -- see pseudocode PAREncodeShareability().
-     */
-    if (((res->cacheattrs.attrs & 0xf0) == 0) ||
-        res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
-        return 2;
-    }
-    return res->cacheattrs.shareability;
-}
-
-static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
-                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                             ARMSecuritySpace ss)
-{
-    bool ret;
-    uint64_t par64;
-    bool format64 = false;
-    ARMMMUFaultInfo fi = {};
-    GetPhysAddrResult res = {};
-
-    /*
-     * I_MXTJT: Granule protection checks are not performed on the final
-     * address of a successful translation.  This is a translation not a
-     * memory reference, so "memop = none = 0".
-     */
-    ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
-                                         mmu_idx, ss, &res, &fi);
-
-    /*
-     * ATS operations only do S1 or S1+S2 translations, so we never
-     * have to deal with the ARMCacheAttrs format for S2 only.
-     */
-    assert(!res.cacheattrs.is_s2_format);
-
-    if (ret) {
-        /*
-         * Some kinds of translation fault must cause exceptions rather
-         * than being reported in the PAR.
-         */
-        int current_el = arm_current_el(env);
-        int target_el;
-        uint32_t syn, fsr, fsc;
-        bool take_exc = false;
-
-        if (fi.s1ptw && current_el == 1
-            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
-            /*
-             * Synchronous stage 2 fault on an access made as part of the
-             * translation table walk for AT S1E0* or AT S1E1* insn
-             * executed from NS EL1. If this is a synchronous external abort
-             * and SCR_EL3.EA == 1, then we take a synchronous external abort
-             * to EL3. Otherwise the fault is taken as an exception to EL2,
-             * and HPFAR_EL2 holds the faulting IPA.
-             */
-            if (fi.type == ARMFault_SyncExternalOnWalk &&
-                (env->cp15.scr_el3 & SCR_EA)) {
-                target_el = 3;
-            } else {
-                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
-                if (arm_is_secure_below_el3(env) && fi.s1ns) {
-                    env->cp15.hpfar_el2 |= HPFAR_NS;
-                }
-                target_el = 2;
-            }
-            take_exc = true;
-        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
-            /*
-             * Synchronous external aborts during a translation table walk
-             * are taken as Data Abort exceptions.
-             */
-            if (fi.stage2) {
-                if (current_el == 3) {
-                    target_el = 3;
-                } else {
-                    target_el = 2;
-                }
-            } else {
-                target_el = exception_target_el(env);
-            }
-            take_exc = true;
-        }
-
-        if (take_exc) {
-            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
-            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
-                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
-                fsr = arm_fi_to_lfsc(&fi);
-                fsc = extract32(fsr, 0, 6);
-            } else {
-                fsr = arm_fi_to_sfsc(&fi);
-                fsc = 0x3f;
-            }
-            /*
-             * Report exception with ESR indicating a fault due to a
-             * translation table walk for a cache maintenance instruction.
-             */
-            syn = syn_data_abort_no_iss(current_el == target_el, 0,
-                                        fi.ea, 1, fi.s1ptw, 1, fsc);
-            env->exception.vaddress = value;
-            env->exception.fsr = fsr;
-            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
-        }
-    }
-
-    if (is_a64(env)) {
-        format64 = true;
-    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
-        /*
-         * ATS1Cxx:
-         * * TTBCR.EAE determines whether the result is returned using the
-         *   32-bit or the 64-bit PAR format
-         * * Instructions executed in Hyp mode always use the 64bit format
-         *
-         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
-         * * The Non-secure TTBCR.EAE bit is set to 1
-         * * The implementation includes EL2, and the value of HCR.VM is 1
-         *
-         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
-         *
-         * ATS1Hx always uses the 64bit format.
-         */
-        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
-
-        if (arm_feature(env, ARM_FEATURE_EL2)) {
-            if (mmu_idx == ARMMMUIdx_E10_0 ||
-                mmu_idx == ARMMMUIdx_E10_1 ||
-                mmu_idx == ARMMMUIdx_E10_1_PAN) {
-                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
-            } else {
-                format64 |= arm_current_el(env) == 2;
-            }
-        }
-    }
-
-    if (format64) {
-        /* Create a 64-bit PAR */
-        par64 = (1 << 11); /* LPAE bit always set */
-        if (!ret) {
-            par64 |= res.f.phys_addr & ~0xfffULL;
-            if (!res.f.attrs.secure) {
-                par64 |= (1 << 9); /* NS */
-            }
-            par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
-            par64 |= par_el1_shareability(&res) << 7; /* SH */
-        } else {
-            uint32_t fsr = arm_fi_to_lfsc(&fi);
-
-            par64 |= 1; /* F */
-            par64 |= (fsr & 0x3f) << 1; /* FS */
-            if (fi.stage2) {
-                par64 |= (1 << 9); /* S */
-            }
-            if (fi.s1ptw) {
-                par64 |= (1 << 8); /* PTW */
-            }
-        }
-    } else {
-        /*
-         * fsr is a DFSR/IFSR value for the short descriptor
-         * translation table format (with WnR always clear).
-         * Convert it to a 32-bit PAR.
-         */
-        if (!ret) {
-            /* We do not set any attribute bits in the PAR */
-            if (res.f.lg_page_size == 24
-                && arm_feature(env, ARM_FEATURE_V7)) {
-                par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
-            } else {
-                par64 = res.f.phys_addr & 0xfffff000;
-            }
-            if (!res.f.attrs.secure) {
-                par64 |= (1 << 9); /* NS */
-            }
-        } else {
-            uint32_t fsr = arm_fi_to_sfsc(&fi);
-
-            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
-                    ((fsr & 0xf) << 1) | 1;
-        }
-    }
-    return par64;
-}
-#endif /* CONFIG_TCG */
-
-static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
-{
-#ifdef CONFIG_TCG
-    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
-    uint64_t par64;
-    ARMMMUIdx mmu_idx;
-    int el = arm_current_el(env);
-    ARMSecuritySpace ss = arm_security_space(env);
-
-    switch (ri->opc2 & 6) {
-    case 0:
-        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
-        switch (el) {
-        case 3:
-            if (ri->crm == 9 && arm_pan_enabled(env)) {
-                mmu_idx = ARMMMUIdx_E30_3_PAN;
-            } else {
-                mmu_idx = ARMMMUIdx_E3;
-            }
-            break;
-        case 2:
-            g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
-            /* fall through */
-        case 1:
-            if (ri->crm == 9 && arm_pan_enabled(env)) {
-                mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
-            } else {
-                mmu_idx = ARMMMUIdx_Stage1_E1;
-            }
-            break;
-        default:
-            g_assert_not_reached();
-        }
-        break;
-    case 2:
-        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
-        switch (el) {
-        case 3:
-            mmu_idx = ARMMMUIdx_E30_0;
-            break;
-        case 2:
-            g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
-            mmu_idx = ARMMMUIdx_Stage1_E0;
-            break;
-        case 1:
-            mmu_idx = ARMMMUIdx_Stage1_E0;
-            break;
-        default:
-            g_assert_not_reached();
-        }
-        break;
-    case 4:
-        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
-        mmu_idx = ARMMMUIdx_E10_1;
-        ss = ARMSS_NonSecure;
-        break;
-    case 6:
-        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
-        mmu_idx = ARMMMUIdx_E10_0;
-        ss = ARMSS_NonSecure;
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    par64 = do_ats_write(env, value, access_type, mmu_idx, ss);
-
-    A32_BANKED_CURRENT_REG_SET(env, par, par64);
-#else
-    /* Handled by hardware accelerator. */
-    g_assert_not_reached();
-#endif /* CONFIG_TCG */
-}
-
-static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                        uint64_t value)
-{
-#ifdef CONFIG_TCG
-    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
-    uint64_t par64;
-
-    /* There is no SecureEL2 for AArch32. */
-    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
-                         ARMSS_NonSecure);
-
-    A32_BANKED_CURRENT_REG_SET(env, par, par64);
-#else
-    /* Handled by hardware accelerator. */
-    g_assert_not_reached();
-#endif /* CONFIG_TCG */
-}
-
-static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
-                                     bool isread)
-{
-    /*
-     * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level
-     * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can
-     * only happen when executing at EL3 because that combination also causes an
-     * illegal exception return. We don't need to check FEAT_RME either, because
-     * scr_write() ensures that the NSE bit is not set otherwise.
-     */
-    if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
-        return CP_ACCESS_UNDEFINED;
-    }
-    return CP_ACCESS_OK;
-}
-
-static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
-                                     bool isread)
-{
-    if (arm_current_el(env) == 3 &&
-        !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
-        return CP_ACCESS_UNDEFINED;
-    }
-    return at_e012_access(env, ri, isread);
-}
-
-static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri,
-                                      bool isread)
-{
-    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) {
-        return CP_ACCESS_TRAP_EL2;
-    }
-    return at_e012_access(env, ri, isread);
-}
-
-static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
-                        uint64_t value)
-{
-#ifdef CONFIG_TCG
-    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
-    ARMMMUIdx mmu_idx;
-    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
-    bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
-    bool for_el3 = false;
-    ARMSecuritySpace ss;
-
-    switch (ri->opc2 & 6) {
-    case 0:
-        switch (ri->opc1) {
-        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
-            if (ri->crm == 9 && arm_pan_enabled(env)) {
-                mmu_idx = regime_e20 ?
-                          ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
-            } else {
-                mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
-            }
-            break;
-        case 4: /* AT S1E2R, AT S1E2W */
-            mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
-            break;
-        case 6: /* AT S1E3R, AT S1E3W */
-            mmu_idx = ARMMMUIdx_E3;
-            for_el3 = true;
-            break;
-        default:
-            g_assert_not_reached();
-        }
-        break;
-    case 2: /* AT S1E0R, AT S1E0W */
-        mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
-        break;
-    case 4: /* AT S12E1R, AT S12E1W */
-        mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
-        break;
-    case 6: /* AT S12E0R, AT S12E0W */
-        mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env);
-    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss);
-#else
-    /* Handled by hardware accelerator. */
-    g_assert_not_reached();
-#endif /* CONFIG_TCG */
-}
-#endif
-
 /* Return basic MPU access permission bits.  */
 static uint32_t simple_mpu_ap_bits(uint32_t val)
 {
@@ -5094,53 +3538,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
       .fgt = FGT_DCCISW,
       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
-#ifndef CONFIG_USER_ONLY
-    /* 64 bit address translation operations */
-    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
-      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .fgt = FGT_ATS1E1R,
-      .accessfn = at_s1e01_access, .writefn = ats_write64 },
-    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
-      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .fgt = FGT_ATS1E1W,
-      .accessfn = at_s1e01_access, .writefn = ats_write64 },
-    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
-      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .fgt = FGT_ATS1E0R,
-      .accessfn = at_s1e01_access, .writefn = ats_write64 },
-    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
-      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .fgt = FGT_ATS1E0W,
-      .accessfn = at_s1e01_access, .writefn = ats_write64 },
-    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
-      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .accessfn = at_e012_access, .writefn = ats_write64 },
-    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
-      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .accessfn = at_e012_access, .writefn = ats_write64 },
-    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
-      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .accessfn = at_e012_access, .writefn = ats_write64 },
-    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
-      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .accessfn = at_e012_access, .writefn = ats_write64 },
-    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
-    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
-      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .writefn = ats_write64 },
-    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
-      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .writefn = ats_write64 },
     { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
       .type = ARM_CP_ALIAS,
       .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
@@ -5148,7 +3545,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
       .fgt = FGT_PAR_EL1,
       .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
       .writefn = par_write },
-#endif
     /* 32 bit cache operations */
     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
@@ -5751,33 +4147,6 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
 #ifndef CONFIG_USER_ONLY
-    /*
-     * Unlike the other EL2-related AT operations, these must
-     * UNDEF from EL3 if EL2 is not implemented, which is why we
-     * define them here rather than with the rest of the AT ops.
-     */
-    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
-      .access = PL2_W, .accessfn = at_s1e2_access,
-      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
-      .writefn = ats_write64 },
-    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
-      .access = PL2_W, .accessfn = at_s1e2_access,
-      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
-      .writefn = ats_write64 },
-    /*
-     * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
-     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
-     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
-     * to behave as if SCR.NS was 1.
-     */
-    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
-      .access = PL2_W,
-      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
-    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
-      .access = PL2_W,
-      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
       /*
@@ -6827,105 +5196,6 @@ static const ARMCPRegInfo nmi_reginfo[] = {
       .resetfn = arm_cp_reset_ignore },
 };
 
-static void define_pmu_regs(ARMCPU *cpu)
-{
-    /*
-     * v7 performance monitor control register: same implementor
-     * field as main ID register, and we implement four counters in
-     * addition to the cycle count register.
-     */
-    unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
-    ARMCPRegInfo pmcr = {
-        .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
-        .access = PL0_RW,
-        .fgt = FGT_PMCR_EL0,
-        .type = ARM_CP_IO | ARM_CP_ALIAS,
-        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
-        .accessfn = pmreg_access,
-        .readfn = pmcr_read, .raw_readfn = raw_read,
-        .writefn = pmcr_write, .raw_writefn = raw_write,
-    };
-    ARMCPRegInfo pmcr64 = {
-        .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
-        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
-        .access = PL0_RW, .accessfn = pmreg_access,
-        .fgt = FGT_PMCR_EL0,
-        .type = ARM_CP_IO,
-        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
-        .resetvalue = cpu->isar.reset_pmcr_el0,
-        .readfn = pmcr_read, .raw_readfn = raw_read,
-        .writefn = pmcr_write, .raw_writefn = raw_write,
-    };
-
-    define_one_arm_cp_reg(cpu, &pmcr);
-    define_one_arm_cp_reg(cpu, &pmcr64);
-    for (i = 0; i < pmcrn; i++) {
-        char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
-        char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
-        char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
-        char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
-        ARMCPRegInfo pmev_regs[] = {
-            { .name = pmevcntr_name, .cp = 15, .crn = 14,
-              .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
-              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
-              .fgt = FGT_PMEVCNTRN_EL0,
-              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
-              .accessfn = pmreg_access_xevcntr },
-            { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
-              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
-              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
-              .type = ARM_CP_IO,
-              .fgt = FGT_PMEVCNTRN_EL0,
-              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
-              .raw_readfn = pmevcntr_rawread,
-              .raw_writefn = pmevcntr_rawwrite },
-            { .name = pmevtyper_name, .cp = 15, .crn = 14,
-              .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
-              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
-              .fgt = FGT_PMEVTYPERN_EL0,
-              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
-              .accessfn = pmreg_access },
-            { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
-              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
-              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
-              .fgt = FGT_PMEVTYPERN_EL0,
-              .type = ARM_CP_IO,
-              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
-              .raw_writefn = pmevtyper_rawwrite },
-        };
-        define_arm_cp_regs(cpu, pmev_regs);
-        g_free(pmevcntr_name);
-        g_free(pmevcntr_el0_name);
-        g_free(pmevtyper_name);
-        g_free(pmevtyper_el0_name);
-    }
-    if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
-        ARMCPRegInfo v81_pmu_regs[] = {
-            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
-              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
-              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
-              .fgt = FGT_PMCEIDN_EL0,
-              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
-            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
-              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
-              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
-              .fgt = FGT_PMCEIDN_EL0,
-              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
-        };
-        define_arm_cp_regs(cpu, v81_pmu_regs);
-    }
-    if (cpu_isar_feature(any_pmuv3p4, cpu)) {
-        static const ARMCPRegInfo v84_pmmir = {
-            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
-            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
-            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
-            .fgt = FGT_PMMIR_EL1,
-            .resetvalue = 0
-        };
-        define_one_arm_cp_reg(cpu, &v84_pmmir);
-    }
-}
-
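The loop in define_pmu_regs() above encodes counter n at crm = 8 | (n >> 3) (or 12 | ... for the TYPER bank) and opc2 = n & 7, while the read/write helpers recover n as ((crm & 3) << 3) | (opc2 & 7). A quick standalone check that the two directions round-trip for every possible counter:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    for (int n = 0; n < 31; n++) {
        int crm = 8 | (3 & (n >> 3));
        int opc2 = n & 7;
        assert((((crm & 3) << 3) | (opc2 & 7)) == n);
    }
    printf("encoding round-trips for counters 0..30\n");
    return 0;
}
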
 #ifndef CONFIG_USER_ONLY
 /*
  * We don't know until after realize whether there's a GICv3
@@ -7704,32 +5974,6 @@ static const ARMCPRegInfo vhe_reginfo[] = {
 #endif
 };
 
-#ifndef CONFIG_USER_ONLY
-static const ARMCPRegInfo ats1e1_reginfo[] = {
-    { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
-      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .fgt = FGT_ATS1E1RP,
-      .accessfn = at_s1e01_access, .writefn = ats_write64 },
-    { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
-      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
-      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .fgt = FGT_ATS1E1WP,
-      .accessfn = at_s1e01_access, .writefn = ats_write64 },
-};
-
-static const ARMCPRegInfo ats1cp_reginfo[] = {
-    { .name = "ATS1CPRP",
-      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
-      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .writefn = ats_write },
-    { .name = "ATS1CPWP",
-      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
-      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
-      .writefn = ats_write },
-};
-#endif
-
 /*
  * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
  * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
@@ -7771,7 +6015,10 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     }
 
 #ifndef CONFIG_USER_ONLY
-    define_tlb_insn_regs(cpu);
+    if (tcg_enabled()) {
+        define_tlb_insn_regs(cpu);
+        define_at_insn_regs(cpu);
+    }
 #endif
 
     if (arm_feature(env, ARM_FEATURE_V6)) {
@@ -7809,7 +6056,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
               .access = PL1_R, .type = ARM_CP_CONST,
               .accessfn = access_aa32_tid3,
-              .resetvalue = cpu->id_afr0 },
+              .resetvalue = GET_IDREG(isar, ID_AFR0)},
             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
               .access = PL1_R, .type = ARM_CP_CONST,
@@ -7879,9 +6126,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     if (arm_feature(env, ARM_FEATURE_V6K)) {
         define_arm_cp_regs(cpu, v6k_cp_reginfo);
     }
-    if (arm_feature(env, ARM_FEATURE_V7VE)) {
-        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
-    }
     if (arm_feature(env, ARM_FEATURE_V7)) {
         ARMCPRegInfo clidr = {
             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
@@ -7889,12 +6133,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
             .access = PL1_R, .type = ARM_CP_CONST,
             .accessfn = access_tid4,
             .fgt = FGT_CLIDR_EL1,
-            .resetvalue = cpu->clidr
+            .resetvalue = GET_IDREG(isar, CLIDR)
         };
         define_one_arm_cp_reg(cpu, &clidr);
         define_arm_cp_regs(cpu, v7_cp_reginfo);
         define_debug_regs(cpu);
-        define_pmu_regs(cpu);
     } else {
         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
     }
@@ -7987,12 +6230,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
               .access = PL1_R, .type = ARM_CP_CONST,
               .accessfn = access_aa64_tid3,
-              .resetvalue = cpu->id_aa64afr0 },
+              .resetvalue = GET_IDREG(isar, ID_AA64AFR0) },
             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
               .access = PL1_R, .type = ARM_CP_CONST,
               .accessfn = access_aa64_tid3,
-              .resetvalue = cpu->id_aa64afr1 },
+              .resetvalue = GET_IDREG(isar, ID_AA64AFR1) },
             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
               .access = PL1_R, .type = ARM_CP_CONST,
@@ -8150,26 +6393,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
               .access = PL1_R, .type = ARM_CP_CONST,
               .accessfn = access_aa64_tid3,
               .resetvalue = 0 },
-            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
-              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
-              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
-              .fgt = FGT_PMCEIDN_EL0,
-              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
-            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
-              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
-              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
-              .fgt = FGT_PMCEIDN_EL0,
-              .resetvalue = cpu->pmceid0 },
-            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
-              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
-              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
-              .fgt = FGT_PMCEIDN_EL0,
-              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
-            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
-              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
-              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
-              .fgt = FGT_PMCEIDN_EL0,
-              .resetvalue = cpu->pmceid1 },
         };
 #ifdef CONFIG_USER_ONLY
         static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
@@ -8504,12 +6727,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
               .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                                      offsetoflow32(CPUARMState, cp15.par_ns) },
               .writefn = par_write},
-#ifndef CONFIG_USER_ONLY
-            /* This underdecoding is safe because the reginfo is NO_RAW. */
-            { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
-              .access = PL1_W, .accessfn = ats_access,
-              .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
-#endif
         };
 
         /*
@@ -8915,14 +7132,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     if (cpu_isar_feature(aa64_pan, cpu)) {
         define_one_arm_cp_reg(cpu, &pan_reginfo);
     }
-#ifndef CONFIG_USER_ONLY
-    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
-        define_arm_cp_regs(cpu, ats1e1_reginfo);
-    }
-    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
-        define_arm_cp_regs(cpu, ats1cp_reginfo);
-    }
-#endif
     if (cpu_isar_feature(aa64_uao, cpu)) {
         define_one_arm_cp_reg(cpu, &uao_reginfo);
     }
@@ -9022,6 +7231,8 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         define_arm_cp_regs(cpu, ccsidr2_reginfo);
     }
 
+    define_pm_cpregs(cpu);
+
 #ifndef CONFIG_USER_ONLY
     /*
      * Register redirections and aliases must be done last,
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 21a8d67edd..c4765e4489 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1871,6 +1871,10 @@ void define_debug_regs(ARMCPU *cpu);
 
 /* Add the cpreg definitions for TLBI instructions */
 void define_tlb_insn_regs(ARMCPU *cpu);
+/* Add the cpreg definitions for AT instructions */
+void define_at_insn_regs(ARMCPU *cpu);
+/* Add the cpreg definitions for PM cpregs */
+void define_pm_cpregs(ARMCPU *cpu);
 
 /* Effective value of MDCR_EL2 */
 static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
@@ -1981,5 +1985,6 @@ void vfp_clear_float_status_exc_flags(CPUARMState *env);
  * specified by mask changing to the values in val.
  */
 void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);
+bool arm_pan_enabled(CPUARMState *env);
 
 #endif
diff --git a/target/arm/kvm-stub.c b/target/arm/kvm-stub.c
index 34e57fab01..c93462c5b9 100644
--- a/target/arm/kvm-stub.c
+++ b/target/arm/kvm-stub.c
@@ -47,6 +47,11 @@ bool kvm_arm_mte_supported(void)
     return false;
 }
 
+bool kvm_arm_el2_supported(void)
+{
+    return false;
+}
+
 /*
  * These functions should never actually be called without KVM support.
  */
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 426f8b159e..6672344855 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -230,7 +230,8 @@ static uint64_t idregs_sysreg_to_kvm_reg(ARMSysRegs sysreg)
 }
 
 /* read a sysreg value and store it in the idregs */
-static int get_host_cpu_reg(int fd, ARMHostCPUFeatures *ahcf, ARMIDRegisterIdx index)
+static int get_host_cpu_reg(int fd, ARMHostCPUFeatures *ahcf,
+                            ARMIDRegisterIdx index)
 {
     uint64_t *reg;
     int ret;
@@ -250,6 +251,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
      */
     int fdarray[3];
     bool sve_supported;
+    bool el2_supported;
     bool pmu_supported = false;
     uint64_t features = 0;
     int err;
@@ -270,6 +272,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
     }
 
     /*
+     * Ask for EL2 if supported.
+     */
+    el2_supported = kvm_arm_el2_supported();
+    if (el2_supported) {
+        init.features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2;
+    }
+
+    /*
      * Ask for Pointer Authentication if supported, so that we get
      * the unsanitized field values for AA64ISAR1_EL1.
      */
@@ -422,6 +432,10 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
     features |= 1ULL << ARM_FEATURE_AARCH64;
     features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;
 
+    if (el2_supported) {
+        features |= 1ULL << ARM_FEATURE_EL2;
+    }
+
     ahcf->features = features;
 
     return true;
@@ -1762,6 +1776,11 @@ bool kvm_arm_aarch32_supported(void)
     return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
 }
 
+bool kvm_arm_el2_supported(void)
+{
+    return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL2);
+}
+
 bool kvm_arm_sve_supported(void)
 {
     return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
@@ -1882,6 +1901,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
         cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                                       1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
     }
+    if (cpu->has_el2 && kvm_arm_el2_supported()) {
+        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2;
+    }
 
     /* Do KVM_ARM_VCPU_INIT ioctl */
     ret = kvm_arm_vcpu_init(cpu);
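The EL2 plumbing above follows the usual two-step pattern for an optional KVM vCPU feature: probe the capability, then set the matching init-feature bit before KVM_ARM_VCPU_INIT. A hedged standalone sketch of that pattern with the same constants (requires a recent linux/kvm.h; fd setup and error handling are omitted, and QEMU itself goes through its kvm_check_extension wrappers rather than raw ioctls):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* kvm_fd: the /dev/kvm system fd; vcpu_fd: an already-created vCPU fd. */
static int vcpu_init_with_el2(int kvm_fd, int vcpu_fd, struct kvm_vcpu_init *init)
{
    if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_EL2) > 0) {
        init->features[0] |= 1u << KVM_ARM_VCPU_HAS_EL2;   /* ask for a virtual EL2 */
    }
    return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, init);
}
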
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index 7dc83caed5..b4cad05155 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -192,6 +192,13 @@ bool kvm_arm_sve_supported(void);
 bool kvm_arm_mte_supported(void);
 
 /**
+ * kvm_arm_el2_supported:
+ *
+ * Returns true if KVM can enable EL2 and false otherwise.
+ */
+bool kvm_arm_el2_supported(void);
+
+/**
  * kvm_arm_get_max_vm_ipa_size:
  * @ms: Machine state handle
  * @fixed_ipa: True when the IPA limit is fixed at 40. This is the case
diff --git a/target/arm/meson.build b/target/arm/meson.build
index 7aa81e30ab..07d9271aa4 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -22,6 +22,7 @@ arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files(
   'cpu32-stubs.c',
 ))
 arm_user_ss.add(files(
+  'cpregs-pmu.c',
   'debug_helper.c',
   'helper.c',
   'vfp_fpscr.c',
@@ -36,6 +37,7 @@ arm_common_system_ss.add(files(
   'arch_dump.c',
   'arm-powerctl.c',
   'cortex-regs.c',
+  'cpregs-pmu.c',
   'debug_helper.c',
   'helper.c',
   'machine.c',
diff --git a/target/arm/tcg-stubs.c b/target/arm/tcg-stubs.c
index 5e5166c049..aac99b2672 100644
--- a/target/arm/tcg-stubs.c
+++ b/target/arm/tcg-stubs.c
@@ -22,11 +22,6 @@ void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
     g_assert_not_reached();
 }
 
-/* TLBI insns are only used by TCG, so we don't need to do anything for KVM */
-void define_tlb_insn_regs(ARMCPU *cpu)
-{
-}
-
 /* With KVM, we never use float_status, so these can be no-ops */
 void arm_set_default_fp_behaviours(float_status *s)
 {
diff --git a/target/arm/tcg/cpregs-at.c b/target/arm/tcg/cpregs-at.c
new file mode 100644
index 0000000000..398a61d398
--- /dev/null
+++ b/target/arm/tcg/cpregs-at.c
@@ -0,0 +1,519 @@
+/*
+ * System instructions for address translation
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "cpu-features.h"
+#include "internals.h"
+#include "cpregs.h"
+
+
+static int par_el1_shareability(GetPhysAddrResult *res)
+{
+    /*
+     * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
+     * memory -- see pseudocode PAREncodeShareability().
+     */
+    if (((res->cacheattrs.attrs & 0xf0) == 0) ||
+        res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
+        return 2;
+    }
+    return res->cacheattrs.shareability;
+}
+
+static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
+                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                             ARMSecuritySpace ss)
+{
+    bool ret;
+    uint64_t par64;
+    bool format64 = false;
+    ARMMMUFaultInfo fi = {};
+    GetPhysAddrResult res = {};
+
+    /*
+     * I_MXTJT: Granule protection checks are not performed on the final
+     * address of a successful translation.  This is a translation not a
+     * memory reference, so "memop = none = 0".
+     */
+    ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
+                                         mmu_idx, ss, &res, &fi);
+
+    /*
+     * ATS operations only do S1 or S1+S2 translations, so we never
+     * have to deal with the ARMCacheAttrs format for S2 only.
+     */
+    assert(!res.cacheattrs.is_s2_format);
+
+    if (ret) {
+        /*
+         * Some kinds of translation fault must cause exceptions rather
+         * than being reported in the PAR.
+         */
+        int current_el = arm_current_el(env);
+        int target_el;
+        uint32_t syn, fsr, fsc;
+        bool take_exc = false;
+
+        if (fi.s1ptw && current_el == 1
+            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
+            /*
+             * Synchronous stage 2 fault on an access made as part of the
+             * translation table walk for AT S1E0* or AT S1E1* insn
+             * executed from NS EL1. If this is a synchronous external abort
+             * and SCR_EL3.EA == 1, then we take a synchronous external abort
+             * to EL3. Otherwise the fault is taken as an exception to EL2,
+             * and HPFAR_EL2 holds the faulting IPA.
+             */
+            if (fi.type == ARMFault_SyncExternalOnWalk &&
+                (env->cp15.scr_el3 & SCR_EA)) {
+                target_el = 3;
+            } else {
+                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
+                if (arm_is_secure_below_el3(env) && fi.s1ns) {
+                    env->cp15.hpfar_el2 |= HPFAR_NS;
+                }
+                target_el = 2;
+            }
+            take_exc = true;
+        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
+            /*
+             * Synchronous external aborts during a translation table walk
+             * are taken as Data Abort exceptions.
+             */
+            if (fi.stage2) {
+                if (current_el == 3) {
+                    target_el = 3;
+                } else {
+                    target_el = 2;
+                }
+            } else {
+                target_el = exception_target_el(env);
+            }
+            take_exc = true;
+        }
+
+        if (take_exc) {
+            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
+            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
+                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
+                fsr = arm_fi_to_lfsc(&fi);
+                fsc = extract32(fsr, 0, 6);
+            } else {
+                fsr = arm_fi_to_sfsc(&fi);
+                fsc = 0x3f;
+            }
+            /*
+             * Report exception with ESR indicating a fault due to a
+             * translation table walk for a cache maintenance instruction.
+             */
+            syn = syn_data_abort_no_iss(current_el == target_el, 0,
+                                        fi.ea, 1, fi.s1ptw, 1, fsc);
+            env->exception.vaddress = value;
+            env->exception.fsr = fsr;
+            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
+        }
+    }
+
+    if (is_a64(env)) {
+        format64 = true;
+    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
+        /*
+         * ATS1Cxx:
+         * * TTBCR.EAE determines whether the result is returned using the
+         *   32-bit or the 64-bit PAR format
+         * * Instructions executed in Hyp mode always use the 64bit format
+         *
+         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
+         * * The Non-secure TTBCR.EAE bit is set to 1
+         * * The implementation includes EL2, and the value of HCR.VM is 1
+         *
+         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
+         *
+         * ATS1Hx always uses the 64bit format.
+         */
+        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
+
+        if (arm_feature(env, ARM_FEATURE_EL2)) {
+            if (mmu_idx == ARMMMUIdx_E10_0 ||
+                mmu_idx == ARMMMUIdx_E10_1 ||
+                mmu_idx == ARMMMUIdx_E10_1_PAN) {
+                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
+            } else {
+                format64 |= arm_current_el(env) == 2;
+            }
+        }
+    }
+
+    if (format64) {
+        /* Create a 64-bit PAR */
+        par64 = (1 << 11); /* LPAE bit always set */
+        if (!ret) {
+            par64 |= res.f.phys_addr & ~0xfffULL;
+            if (!res.f.attrs.secure) {
+                par64 |= (1 << 9); /* NS */
+            }
+            par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
+            par64 |= par_el1_shareability(&res) << 7; /* SH */
+        } else {
+            uint32_t fsr = arm_fi_to_lfsc(&fi);
+
+            par64 |= 1; /* F */
+            par64 |= (fsr & 0x3f) << 1; /* FS */
+            if (fi.stage2) {
+                par64 |= (1 << 9); /* S */
+            }
+            if (fi.s1ptw) {
+                par64 |= (1 << 8); /* PTW */
+            }
+        }
+    } else {
+        /*
+         * fsr is a DFSR/IFSR value for the short descriptor
+         * translation table format (with WnR always clear).
+         * Convert it to a 32-bit PAR.
+         */
+        if (!ret) {
+            /* We do not set any attribute bits in the PAR */
+            if (res.f.lg_page_size == 24
+                && arm_feature(env, ARM_FEATURE_V7)) {
+                par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
+            } else {
+                par64 = res.f.phys_addr & 0xfffff000;
+            }
+            if (!res.f.attrs.secure) {
+                par64 |= (1 << 9); /* NS */
+            }
+        } else {
+            uint32_t fsr = arm_fi_to_sfsc(&fi);
+
+            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
+                    ((fsr & 0xf) << 1) | 1;
+        }
+    }
+    return par64;
+}
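
As a reader-side sketch (not part of this patch), the 64-bit PAR value assembled above can be decoded as follows; the bit positions simply mirror the OR operations in the success and fault paths of do_ats_write():

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: decode a 64-bit PAR value such as do_ats_write() produces. */
static void decode_par64(uint64_t par)
{
    if (par & 1) {                                   /* F: translation failed */
        unsigned fst = (par >> 1) & 0x3f;            /* FS: LPAE fault status */
        bool ptw = par & (1 << 8);                   /* fault during table walk */
        bool s2 = par & (1 << 9);                    /* stage 2 fault */
        printf("fault: fst=0x%02x ptw=%d s2=%d\n", fst, ptw, s2);
    } else {
        uint64_t pa = par & 0x000ffffffffff000ULL;   /* output address bits */
        unsigned sh = (par >> 7) & 3;                /* shareability */
        bool ns = par & (1 << 9);                    /* non-secure output */
        unsigned attr = (unsigned)(par >> 56);       /* MAIR-style attributes */
        printf("pa=0x%" PRIx64 " sh=%u ns=%d attr=0x%02x\n", pa, sh, ns, attr);
    }
}
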
+
+static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
+    uint64_t par64;
+    ARMMMUIdx mmu_idx;
+    int el = arm_current_el(env);
+    ARMSecuritySpace ss = arm_security_space(env);
+
+    switch (ri->opc2 & 6) {
+    case 0:
+        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
+        switch (el) {
+        case 3:
+            if (ri->crm == 9 && arm_pan_enabled(env)) {
+                mmu_idx = ARMMMUIdx_E30_3_PAN;
+            } else {
+                mmu_idx = ARMMMUIdx_E3;
+            }
+            break;
+        case 2:
+            g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
+            /* fall through */
+        case 1:
+            if (ri->crm == 9 && arm_pan_enabled(env)) {
+                mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
+            } else {
+                mmu_idx = ARMMMUIdx_Stage1_E1;
+            }
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case 2:
+        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
+        switch (el) {
+        case 3:
+            mmu_idx = ARMMMUIdx_E30_0;
+            break;
+        case 2:
+            g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
+            mmu_idx = ARMMMUIdx_Stage1_E0;
+            break;
+        case 1:
+            mmu_idx = ARMMMUIdx_Stage1_E0;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case 4:
+        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
+        mmu_idx = ARMMMUIdx_E10_1;
+        ss = ARMSS_NonSecure;
+        break;
+    case 6:
+        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
+        mmu_idx = ARMMMUIdx_E10_0;
+        ss = ARMSS_NonSecure;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    par64 = do_ats_write(env, value, access_type, mmu_idx, ss);
+
+    A32_BANKED_CURRENT_REG_SET(env, par, par64);
+}
+
+static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                        uint64_t value)
+{
+    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
+    uint64_t par64;
+
+    /* There is no SecureEL2 for AArch32. */
+    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
+                         ARMSS_NonSecure);
+
+    A32_BANKED_CURRENT_REG_SET(env, par, par64);
+}
+
+static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
+                                     bool isread)
+{
+    /*
+     * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level
+     * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can
+     * only happen when executing at EL3 because that combination also causes an
+     * illegal exception return. We don't need to check FEAT_RME either, because
+     * scr_write() ensures that the NSE bit is not set otherwise.
+     */
+    if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
+        return CP_ACCESS_UNDEFINED;
+    }
+    return CP_ACCESS_OK;
+}
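
For reference, only the reserved SCR_EL3.{NSE,NS} = {1,0} encoding is rejected above. A minimal illustrative sketch of how the four FEAT_RME encodings map to security spaces (the table is architectural; the helper itself is not QEMU code):

#include <stdbool.h>

typedef enum { SS_SECURE, SS_NONSECURE, SS_REALM, SS_RESERVED } SecSpace;

/* Map SCR_EL3.{NSE,NS} to a security space; {1,0} is the reserved case. */
static SecSpace scr_nse_ns_to_space(bool nse, bool ns)
{
    if (!nse) {
        return ns ? SS_NONSECURE : SS_SECURE;
    }
    return ns ? SS_REALM : SS_RESERVED;
}
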
+
+static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
+                                     bool isread)
+{
+    if (arm_current_el(env) == 3 &&
+        !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
+        return CP_ACCESS_UNDEFINED;
+    }
+    return at_e012_access(env, ri, isread);
+}
+
+static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri,
+                                      bool isread)
+{
+    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+    return at_e012_access(env, ri, isread);
+}
+
+static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
+                        uint64_t value)
+{
+    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
+    ARMMMUIdx mmu_idx;
+    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
+    bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
+    bool for_el3 = false;
+    ARMSecuritySpace ss;
+
+    switch (ri->opc2 & 6) {
+    case 0:
+        switch (ri->opc1) {
+        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
+            if (ri->crm == 9 && arm_pan_enabled(env)) {
+                mmu_idx = regime_e20 ?
+                          ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
+            } else {
+                mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
+            }
+            break;
+        case 4: /* AT S1E2R, AT S1E2W */
+            mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
+            break;
+        case 6: /* AT S1E3R, AT S1E3W */
+            mmu_idx = ARMMMUIdx_E3;
+            for_el3 = true;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case 2: /* AT S1E0R, AT S1E0W */
+        mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
+        break;
+    case 4: /* AT S12E1R, AT S12E1W */
+        mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
+        break;
+    case 6: /* AT S12E0R, AT S12E0W */
+        mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env);
+    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss);
+}
+
+static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
+                                 bool isread)
+{
+    if (ri->opc2 & 4) {
+        /*
+         * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
+         * Secure EL1 (which can only happen if EL3 is AArch64).
+         * They are simply UNDEF if executed from NS EL1.
+         * They function normally from EL2 or EL3.
+         */
+        if (arm_current_el(env) == 1) {
+            if (arm_is_secure_below_el3(env)) {
+                if (env->cp15.scr_el3 & SCR_EEL2) {
+                    return CP_ACCESS_TRAP_EL2;
+                }
+                return CP_ACCESS_TRAP_EL3;
+            }
+            return CP_ACCESS_UNDEFINED;
+        }
+    }
+    return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo vapa_ats_reginfo[] = {
+    /* This underdecoding is safe because the reginfo is NO_RAW. */
+    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
+      .access = PL1_W, .accessfn = ats_access,
+      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
+};
+
+static const ARMCPRegInfo v8_ats_reginfo[] = {
+    /* 64 bit address translation operations */
+    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
+      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .fgt = FGT_ATS1E1R,
+      .accessfn = at_s1e01_access, .writefn = ats_write64 },
+    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
+      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .fgt = FGT_ATS1E1W,
+      .accessfn = at_s1e01_access, .writefn = ats_write64 },
+    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
+      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .fgt = FGT_ATS1E0R,
+      .accessfn = at_s1e01_access, .writefn = ats_write64 },
+    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
+      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .fgt = FGT_ATS1E0W,
+      .accessfn = at_s1e01_access, .writefn = ats_write64 },
+    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
+      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .accessfn = at_e012_access, .writefn = ats_write64 },
+    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
+      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .accessfn = at_e012_access, .writefn = ats_write64 },
+    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
+      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .accessfn = at_e012_access, .writefn = ats_write64 },
+    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
+      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .accessfn = at_e012_access, .writefn = ats_write64 },
+    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
+    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
+      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .writefn = ats_write64 },
+    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
+      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .writefn = ats_write64 },
+};
+
+static const ARMCPRegInfo el2_ats_reginfo[] = {
+    /*
+     * Unlike the other EL2-related AT operations, these must
+     * UNDEF from EL3 if EL2 is not implemented, which is why we
+     * define them here rather than with the rest of the AT ops.
+     */
+    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+      .access = PL2_W, .accessfn = at_s1e2_access,
+      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
+      .writefn = ats_write64 },
+    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+      .access = PL2_W, .accessfn = at_s1e2_access,
+      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
+      .writefn = ats_write64 },
+    /*
+     * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
+     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
+     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
+     * to behave as if SCR.NS was 1.
+     */
+    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+      .access = PL2_W,
+      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
+    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+      .access = PL2_W,
+      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
+};
+
+static const ARMCPRegInfo ats1e1_reginfo[] = {
+    { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
+      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .fgt = FGT_ATS1E1RP,
+      .accessfn = at_s1e01_access, .writefn = ats_write64 },
+    { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
+      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .fgt = FGT_ATS1E1WP,
+      .accessfn = at_s1e01_access, .writefn = ats_write64 },
+};
+
+static const ARMCPRegInfo ats1cp_reginfo[] = {
+    { .name = "ATS1CPRP",
+      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
+      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .writefn = ats_write },
+    { .name = "ATS1CPWP",
+      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
+      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+      .writefn = ats_write },
+};
+
+void define_at_insn_regs(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+
+    if (arm_feature(env, ARM_FEATURE_VAPA)) {
+        define_arm_cp_regs(cpu, vapa_ats_reginfo);
+    }
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        define_arm_cp_regs(cpu, v8_ats_reginfo);
+    }
+    if (arm_feature(env, ARM_FEATURE_EL2)
+        || (arm_feature(env, ARM_FEATURE_EL3)
+            && arm_feature(env, ARM_FEATURE_V8))) {
+        define_arm_cp_regs(cpu, el2_ats_reginfo);
+    }
+    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
+        define_arm_cp_regs(cpu, ats1e1_reginfo);
+    }
+    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
+        define_arm_cp_regs(cpu, ats1cp_reginfo);
+    }
+}
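
The registers defined in this file are exercised when a guest issues an AT instruction and then reads PAR_EL1. A rough guest-side illustration (bare-metal EL1, GCC-style inline assembly, aarch64 only; not part of this patch):

#include <stdint.h>

/* Translate a stage-1 EL1 read access for va and return PAR_EL1. */
static inline uint64_t guest_at_s1e1r(uint64_t va)
{
    uint64_t par;

    asm volatile("at s1e1r, %1\n\t"
                 "isb\n\t"              /* make the PAR_EL1 update visible */
                 "mrs %0, par_el1"
                 : "=r" (par)
                 : "r" (va)
                 : "memory");
    return par;                         /* bit 0 set => translation faulted */
}
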
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index eddd7117d5..dc249ce1f1 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -62,7 +62,7 @@ static void cortex_m0_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00000030);
     SET_IDREG(isar, ID_PFR1, 0x00000200);
     SET_IDREG(isar, ID_DFR0, 0x00100000);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x00000030);
     SET_IDREG(isar, ID_MMFR1, 0x00000000);
     SET_IDREG(isar, ID_MMFR2, 0x00000000);
@@ -88,7 +88,7 @@ static void cortex_m3_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00000030);
     SET_IDREG(isar, ID_PFR1, 0x00000200);
     SET_IDREG(isar, ID_DFR0, 0x00100000);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x00000030);
     SET_IDREG(isar, ID_MMFR1, 0x00000000);
     SET_IDREG(isar, ID_MMFR2, 0x00000000);
@@ -119,7 +119,7 @@ static void cortex_m4_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00000030);
     SET_IDREG(isar, ID_PFR1, 0x00000200);
     SET_IDREG(isar, ID_DFR0, 0x00100000);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x00000030);
     SET_IDREG(isar, ID_MMFR1, 0x00000000);
     SET_IDREG(isar, ID_MMFR2, 0x00000000);
@@ -150,7 +150,7 @@ static void cortex_m7_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00000030);
     SET_IDREG(isar, ID_PFR1, 0x00000200);
     SET_IDREG(isar, ID_DFR0, 0x00100000);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x00100030);
     SET_IDREG(isar, ID_MMFR1, 0x00000000);
     SET_IDREG(isar, ID_MMFR2, 0x01000000);
@@ -183,7 +183,7 @@ static void cortex_m33_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00000030);
     SET_IDREG(isar, ID_PFR1, 0x00000210);
     SET_IDREG(isar, ID_DFR0, 0x00200000);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x00101F40);
     SET_IDREG(isar, ID_MMFR1, 0x00000000);
     SET_IDREG(isar, ID_MMFR2, 0x01000000);
@@ -195,7 +195,7 @@ static void cortex_m33_initfn(Object *obj)
     SET_IDREG(isar, ID_ISAR4, 0x01310132);
     SET_IDREG(isar, ID_ISAR5, 0x00000000);
     SET_IDREG(isar, ID_ISAR6, 0x00000000);
-    cpu->clidr = 0x00000000;
+    SET_IDREG(isar, CLIDR, 0x00000000);
     cpu->ctr = 0x8000c000;
 }
 
@@ -221,7 +221,7 @@ static void cortex_m55_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x20000030);
     SET_IDREG(isar, ID_PFR1, 0x00000230);
     SET_IDREG(isar, ID_DFR0, 0x10200000);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x00111040);
     SET_IDREG(isar, ID_MMFR1, 0x00000000);
     SET_IDREG(isar, ID_MMFR2, 0x01000000);
@@ -233,7 +233,7 @@ static void cortex_m55_initfn(Object *obj)
     SET_IDREG(isar, ID_ISAR4, 0x01310132);
     SET_IDREG(isar, ID_ISAR5, 0x00000000);
     SET_IDREG(isar, ID_ISAR6, 0x00000000);
-    cpu->clidr = 0x00000000; /* caches not implemented */
+    SET_IDREG(isar, CLIDR, 0x00000000); /* caches not implemented */
     cpu->ctr = 0x8303c003;
 }
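
The conversions in this hunk (and in the cpu32.c/cpu64.c hunks below) replace ad-hoc ARMCPU fields such as id_afr0 and clidr with SET_IDREG()/GET_IDREG() accesses. A hypothetical sketch of the idea, with made-up index names; the real macro definitions in the QEMU tree may differ in detail:

#include <stdint.h>

/* Purely illustrative: ID registers kept in one array indexed by name. */
typedef enum { ID_AFR0_IDX, CLIDR_IDX, NUM_ID_IDX } IdRegIndex;

typedef struct { uint64_t regs[NUM_ID_IDX]; } IdRegs;

#define SET_IDREG(s, name, v)  ((s)->regs[name ## _IDX] = (v))
#define GET_IDREG(s, name)     ((s)->regs[name ## _IDX])

/* Usage mirroring the converted lines, e.g. SET_IDREG(&idregs, ID_AFR0, 0x3); */
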
 
diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c
index 942b636aa5..a2a23eae0d 100644
--- a/target/arm/tcg/cpu32.c
+++ b/target/arm/tcg/cpu32.c
@@ -225,7 +225,7 @@ static void arm1136_r2_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x111);
     SET_IDREG(isar, ID_PFR1, 0x1);
     SET_IDREG(isar, ID_DFR0, 0x2);
-    cpu->id_afr0 = 0x3;
+    SET_IDREG(isar, ID_AFR0, 0x3);
     SET_IDREG(isar, ID_MMFR0, 0x01130003);
     SET_IDREG(isar, ID_MMFR1, 0x10030302);
     SET_IDREG(isar, ID_MMFR2, 0x01222110);
@@ -257,7 +257,7 @@ static void arm1136_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x111);
     SET_IDREG(isar, ID_PFR1, 0x1);
     SET_IDREG(isar, ID_DFR0, 0x2);
-    cpu->id_afr0 = 0x3;
+    SET_IDREG(isar, ID_AFR0, 0x3);
     SET_IDREG(isar, ID_MMFR0, 0x01130003);
     SET_IDREG(isar, ID_MMFR1, 0x10030302);
     SET_IDREG(isar, ID_MMFR2, 0x01222110);
@@ -290,7 +290,7 @@ static void arm1176_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x111);
     SET_IDREG(isar, ID_PFR1, 0x11);
     SET_IDREG(isar, ID_DFR0, 0x33);
-    cpu->id_afr0 = 0;
+    SET_IDREG(isar, ID_AFR0, 0);
     SET_IDREG(isar, ID_MMFR0, 0x01130003);
     SET_IDREG(isar, ID_MMFR1, 0x10030302);
     SET_IDREG(isar, ID_MMFR2, 0x01222100);
@@ -320,7 +320,7 @@ static void arm11mpcore_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x111);
     SET_IDREG(isar, ID_PFR1, 0x1);
     SET_IDREG(isar, ID_DFR0, 0);
-    cpu->id_afr0 = 0x2;
+    SET_IDREG(isar, ID_AFR0, 0x2);
     SET_IDREG(isar, ID_MMFR0, 0x01100103);
     SET_IDREG(isar, ID_MMFR1, 0x10020302);
     SET_IDREG(isar, ID_MMFR2, 0x01222000);
@@ -360,7 +360,7 @@ static void cortex_a8_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x1031);
     SET_IDREG(isar, ID_PFR1, 0x11);
     SET_IDREG(isar, ID_DFR0, 0x400);
-    cpu->id_afr0 = 0;
+    SET_IDREG(isar, ID_AFR0, 0);
     SET_IDREG(isar, ID_MMFR0, 0x31100003);
     SET_IDREG(isar, ID_MMFR1, 0x20000000);
     SET_IDREG(isar, ID_MMFR2, 0x01202000);
@@ -371,7 +371,7 @@ static void cortex_a8_initfn(Object *obj)
     SET_IDREG(isar, ID_ISAR3, 0x11112131);
     SET_IDREG(isar, ID_ISAR4, 0x00111142);
     cpu->isar.dbgdidr = 0x15141000;
-    cpu->clidr = (1 << 27) | (2 << 24) | 3;
+    SET_IDREG(isar, CLIDR, (1 << 27) | (2 << 24) | 3);
     cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
     cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
     cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
@@ -436,7 +436,7 @@ static void cortex_a9_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x1031);
     SET_IDREG(isar, ID_PFR1, 0x11);
     SET_IDREG(isar, ID_DFR0, 0x000);
-    cpu->id_afr0 = 0;
+    SET_IDREG(isar, ID_AFR0, 0);
     SET_IDREG(isar, ID_MMFR0, 0x00100103);
     SET_IDREG(isar, ID_MMFR1, 0x20000000);
     SET_IDREG(isar, ID_MMFR2, 0x01230000);
@@ -447,7 +447,7 @@ static void cortex_a9_initfn(Object *obj)
     SET_IDREG(isar, ID_ISAR3, 0x11112131);
     SET_IDREG(isar, ID_ISAR4, 0x00111142);
     cpu->isar.dbgdidr = 0x35141000;
-    cpu->clidr = (1 << 27) | (1 << 24) | 3;
+    SET_IDREG(isar, CLIDR, (1 << 27) | (1 << 24) | 3);
     cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
     cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
     cpu->isar.reset_pmcr_el0 = 0x41093000;
@@ -502,7 +502,7 @@ static void cortex_a7_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00001131);
     SET_IDREG(isar, ID_PFR1, 0x00011011);
     SET_IDREG(isar, ID_DFR0, 0x02010555);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x10101105);
     SET_IDREG(isar, ID_MMFR1, 0x40000000);
     SET_IDREG(isar, ID_MMFR2, 0x01240000);
@@ -519,7 +519,7 @@ static void cortex_a7_initfn(Object *obj)
     cpu->isar.dbgdidr = 0x3515f005;
     cpu->isar.dbgdevid = 0x01110f13;
     cpu->isar.dbgdevid1 = 0x1;
-    cpu->clidr = 0x0a200023;
+    SET_IDREG(isar, CLIDR, 0x0a200023);
     cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
     cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
     cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
@@ -554,7 +554,7 @@ static void cortex_a15_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00001131);
     SET_IDREG(isar, ID_PFR1, 0x00011011);
     SET_IDREG(isar, ID_DFR0, 0x02010555);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x10201105);
     SET_IDREG(isar, ID_MMFR1, 0x20000000);
     SET_IDREG(isar, ID_MMFR2, 0x01240000);
@@ -567,7 +567,7 @@ static void cortex_a15_initfn(Object *obj)
     cpu->isar.dbgdidr = 0x3515f021;
     cpu->isar.dbgdevid = 0x01110f13;
     cpu->isar.dbgdevid1 = 0x0;
-    cpu->clidr = 0x0a200023;
+    SET_IDREG(isar, CLIDR, 0x0a200023);
     cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
     cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
     cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
@@ -598,7 +598,7 @@ static void cortex_r5_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x0131);
     SET_IDREG(isar, ID_PFR1, 0x001);
     SET_IDREG(isar, ID_DFR0, 0x010400);
-    cpu->id_afr0 = 0x0;
+    SET_IDREG(isar, ID_AFR0, 0x0);
     SET_IDREG(isar, ID_MMFR0, 0x0210030);
     SET_IDREG(isar, ID_MMFR1, 0x00000000);
     SET_IDREG(isar, ID_MMFR2, 0x01200000);
@@ -745,7 +745,7 @@ static void cortex_r52_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00000131);
     SET_IDREG(isar, ID_PFR1, 0x10111001);
     SET_IDREG(isar, ID_DFR0, 0x03010006);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x00211040);
     SET_IDREG(isar, ID_MMFR1, 0x40000000);
     SET_IDREG(isar, ID_MMFR2, 0x01200000);
@@ -758,7 +758,7 @@ static void cortex_r52_initfn(Object *obj)
     SET_IDREG(isar, ID_ISAR4, 0x00010142);
     SET_IDREG(isar, ID_ISAR5, 0x00010001);
     cpu->isar.dbgdidr = 0x77168000;
-    cpu->clidr = (1 << 27) | (1 << 24) | 0x3;
+    SET_IDREG(isar, CLIDR, (1 << 27) | (1 << 24) | 0x3);
     cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
     cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
 
@@ -977,7 +977,7 @@ static void arm_max_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00000131);
     SET_IDREG(isar, ID_PFR1, 0x00011011);
     SET_IDREG(isar, ID_DFR0, 0x03010066);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x10101105);
     SET_IDREG(isar, ID_MMFR1, 0x40000000);
     SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -990,7 +990,7 @@ static void arm_max_initfn(Object *obj)
     SET_IDREG(isar, ID_ISAR5, 0x00011121);
     SET_IDREG(isar, ID_ISAR6, 0);
     cpu->isar.reset_pmcr_el0 = 0x41013000;
-    cpu->clidr = 0x0a200023;
+    SET_IDREG(isar, CLIDR, 0x0a200023);
     cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
     cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
     cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index d0df50a2f3..35cddbafa4 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -52,7 +52,7 @@ static void aarch64_a35_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00000131);
     SET_IDREG(isar, ID_PFR1, 0x00011011);
     SET_IDREG(isar, ID_DFR0, 0x03010066);
-    cpu->id_afr0 = 0;
+    SET_IDREG(isar, ID_AFR0, 0);
     SET_IDREG(isar, ID_MMFR0, 0x10201105);
     SET_IDREG(isar, ID_MMFR1, 0x40000000);
     SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -71,7 +71,7 @@ static void aarch64_a35_initfn(Object *obj)
     SET_IDREG(isar, ID_AA64ISAR1, 0);
     SET_IDREG(isar, ID_AA64MMFR0, 0x00101122);
     SET_IDREG(isar, ID_AA64MMFR1, 0);
-    cpu->clidr = 0x0a200023;
+    SET_IDREG(isar, CLIDR, 0x0a200023);
     cpu->dcz_blocksize = 4;
 
     /* From B2.4 AArch64 Virtual Memory control registers */
@@ -216,7 +216,7 @@ static void aarch64_a55_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_PMU);
 
     /* Ordered by B2.4 AArch64 registers by functional group */
-    cpu->clidr = 0x82000023;
+    SET_IDREG(isar, CLIDR, 0x82000023);
     cpu->ctr = 0x84448004; /* L1Ip = VIPT */
     cpu->dcz_blocksize = 4; /* 64 bytes */
     SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull);
@@ -227,7 +227,7 @@ static void aarch64_a55_initfn(Object *obj)
     SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
     SET_IDREG(isar, ID_AA64PFR0, 0x0000000010112222ull);
     SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull);
-    cpu->id_afr0       = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_DFR0, 0x04010088);
     SET_IDREG(isar, ID_ISAR0, 0x02101110);
     SET_IDREG(isar, ID_ISAR1, 0x13112111);
@@ -298,7 +298,7 @@ static void aarch64_a72_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x00000131);
     SET_IDREG(isar, ID_PFR1, 0x00011011);
     SET_IDREG(isar, ID_DFR0, 0x03010066);
-    cpu->id_afr0 = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_MMFR0, 0x10201105);
     SET_IDREG(isar, ID_MMFR1, 0x40000000);
     SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -317,7 +317,7 @@ static void aarch64_a72_initfn(Object *obj)
     cpu->isar.dbgdevid = 0x01110f13;
     cpu->isar.dbgdevid1 = 0x2;
     cpu->isar.reset_pmcr_el0 = 0x41023000;
-    cpu->clidr = 0x0a200023;
+    SET_IDREG(isar, CLIDR, 0x0a200023);
     /* 32KB L1 dcache */
     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
     /* 48KB L1 dcache */
@@ -349,10 +349,10 @@ static void aarch64_a76_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_PMU);
 
     /* Ordered by B2.4 AArch64 registers by functional group */
-    cpu->clidr = 0x82000023;
+    SET_IDREG(isar, CLIDR, 0x82000023);
     cpu->ctr = 0x8444C004;
     cpu->dcz_blocksize = 4;
-    SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull),
+    SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull);
     SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull);
     SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull);
     SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101122ull);
@@ -360,7 +360,7 @@ static void aarch64_a76_initfn(Object *obj)
     SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
     SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */
     SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull);
-    cpu->id_afr0       = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_DFR0, 0x04010088);
     SET_IDREG(isar, ID_ISAR0, 0x02101110);
     SET_IDREG(isar, ID_ISAR1, 0x13112111);
@@ -426,17 +426,17 @@ static void aarch64_a64fx_initfn(Object *obj)
     cpu->reset_sctlr = 0x30000180;
     SET_IDREG(isar, ID_AA64PFR0, 0x0000000101111111); /* No RAS Extensions */
     SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000000);
-    SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408),
-    SET_IDREG(isar, ID_AA64DFR1, 0x0000000000000000),
-    cpu->id_aa64afr0 = 0x0000000000000000;
-    cpu->id_aa64afr1 = 0x0000000000000000;
+    SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408);
+    SET_IDREG(isar, ID_AA64DFR1, 0x0000000000000000);
+    SET_IDREG(isar, ID_AA64AFR0, 0x0000000000000000);
+    SET_IDREG(isar, ID_AA64AFR1, 0x0000000000000000);
     SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000001122);
     SET_IDREG(isar, ID_AA64MMFR1, 0x0000000011212100);
     SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011);
     SET_IDREG(isar, ID_AA64ISAR0, 0x0000000010211120);
     SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000010001);
     SET_IDREG(isar, ID_AA64ZFR0, 0x0000000000000000);
-    cpu->clidr = 0x0000000080000023;
+    SET_IDREG(isar, CLIDR, 0x0000000080000023);
     /* 64KB L1 dcache */
     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 256, 64 * KiB, 7);
     /* 64KB L1 icache */
@@ -597,7 +597,7 @@ static void aarch64_neoverse_n1_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_PMU);
 
     /* Ordered by B2.4 AArch64 registers by functional group */
-    cpu->clidr = 0x82000023;
+    SET_IDREG(isar, CLIDR, 0x82000023);
     cpu->ctr = 0x8444c004;
     cpu->dcz_blocksize = 4;
     SET_IDREG(isar, ID_AA64DFR0, 0x0000000110305408ull);
@@ -608,7 +608,7 @@ static void aarch64_neoverse_n1_initfn(Object *obj)
     SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
     SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */
     SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000020ull);
-    cpu->id_afr0       = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_DFR0, 0x04010088);
     SET_IDREG(isar, ID_ISAR0, 0x02101110);
     SET_IDREG(isar, ID_ISAR1, 0x13112111);
@@ -673,21 +673,21 @@ static void aarch64_neoverse_v1_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_PMU);
 
     /* Ordered by 3.2.4 AArch64 registers by functional group */
-    cpu->clidr = 0x82000023;
+    SET_IDREG(isar, CLIDR, 0x82000023);
     cpu->ctr = 0xb444c004; /* With DIC and IDC set */
     cpu->dcz_blocksize = 4;
-    cpu->id_aa64afr0 = 0x00000000;
-    cpu->id_aa64afr1 = 0x00000000;
-    SET_IDREG(isar, ID_AA64DFR0, 0x000001f210305519ull),
-    SET_IDREG(isar, ID_AA64DFR1, 0x00000000),
+    SET_IDREG(isar, ID_AA64AFR0, 0x00000000);
+    SET_IDREG(isar, ID_AA64AFR1, 0x00000000);
+    SET_IDREG(isar, ID_AA64DFR0, 0x000001f210305519ull);
+    SET_IDREG(isar, ID_AA64DFR1, 0x00000000);
     SET_IDREG(isar, ID_AA64ISAR0, 0x1011111110212120ull); /* with FEAT_RNG */
     SET_IDREG(isar, ID_AA64ISAR1, 0x0011000001211032ull);
     SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101125ull);
-    SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull),
-    SET_IDREG(isar, ID_AA64MMFR2, 0x0220011102101011ull),
+    SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+    SET_IDREG(isar, ID_AA64MMFR2, 0x0220011102101011ull);
     SET_IDREG(isar, ID_AA64PFR0, 0x1101110120111112ull); /* GIC filled in later */
     SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000020ull);
-    cpu->id_afr0       = 0x00000000;
+    SET_IDREG(isar, ID_AFR0, 0x00000000);
     SET_IDREG(isar, ID_DFR0, 0x15011099);
     SET_IDREG(isar, ID_ISAR0, 0x02101110);
     SET_IDREG(isar, ID_ISAR1, 0x13112111);
@@ -905,7 +905,7 @@ static void aarch64_a710_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x21110131);
     SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
     SET_IDREG(isar, ID_DFR0, 0x16011099);
-    cpu->id_afr0       = 0;
+    SET_IDREG(isar, ID_AFR0, 0);
     SET_IDREG(isar, ID_MMFR0, 0x10201105);
     SET_IDREG(isar, ID_MMFR1, 0x40000000);
     SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -927,14 +927,14 @@ static void aarch64_a710_initfn(Object *obj)
     SET_IDREG(isar, ID_AA64ZFR0, 0x0000110100110021ull); /* with Crypto */
     SET_IDREG(isar, ID_AA64DFR0, 0x000011f010305619ull);
     SET_IDREG(isar, ID_AA64DFR1, 0);
-    cpu->id_aa64afr0       = 0;
-    cpu->id_aa64afr1       = 0;
+    SET_IDREG(isar, ID_AA64AFR0, 0);
+    SET_IDREG(isar, ID_AA64AFR1, 0);
     SET_IDREG(isar, ID_AA64ISAR0, 0x0221111110212120ull); /* with Crypto */
     SET_IDREG(isar, ID_AA64ISAR1, 0x0010111101211052ull);
     SET_IDREG(isar, ID_AA64MMFR0, 0x0000022200101122ull);
     SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
     SET_IDREG(isar, ID_AA64MMFR2, 0x1221011110101011ull);
-    cpu->clidr             = 0x0000001482000023ull;
+    SET_IDREG(isar, CLIDR, 0x0000001482000023ull);
     cpu->gm_blocksize      = 4;
     cpu->ctr               = 0x000000049444c004ull;
     cpu->dcz_blocksize     = 4;
@@ -1007,7 +1007,7 @@ static void aarch64_neoverse_n2_initfn(Object *obj)
     SET_IDREG(isar, ID_PFR0, 0x21110131);
     SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
     SET_IDREG(isar, ID_DFR0, 0x16011099);
-    cpu->id_afr0       = 0;
+    SET_IDREG(isar, ID_AFR0, 0);
     SET_IDREG(isar, ID_MMFR0, 0x10201105);
     SET_IDREG(isar, ID_MMFR1, 0x40000000);
     SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -1029,14 +1029,14 @@ static void aarch64_neoverse_n2_initfn(Object *obj)
     SET_IDREG(isar, ID_AA64ZFR0, 0x0000110100110021ull); /* with Crypto */
     SET_IDREG(isar, ID_AA64DFR0, 0x000011f210305619ull);
     SET_IDREG(isar, ID_AA64DFR1, 0);
-    cpu->id_aa64afr0       = 0;
-    cpu->id_aa64afr1       = 0;
+    SET_IDREG(isar, ID_AA64AFR0, 0);
+    SET_IDREG(isar, ID_AA64AFR1, 0);
     SET_IDREG(isar, ID_AA64ISAR0, 0x1221111110212120ull); /* with Crypto and FEAT_RNG */
     SET_IDREG(isar, ID_AA64ISAR1, 0x0011111101211052ull);
     SET_IDREG(isar, ID_AA64MMFR0, 0x0000022200101125ull);
     SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
     SET_IDREG(isar, ID_AA64MMFR2, 0x1221011112101011ull);
-    cpu->clidr             = 0x0000001482000023ull;
+    SET_IDREG(isar, CLIDR, 0x0000001482000023ull);
     cpu->gm_blocksize      = 4;
     cpu->ctr               = 0x00000004b444c004ull;
     cpu->dcz_blocksize     = 4;
@@ -1125,10 +1125,10 @@ void aarch64_max_tcg_initfn(Object *obj)
      * We're going to set FEAT_S2FWB, which mandates that CLIDR_EL1.{LoUU,LoUIS}
      * are zero.
      */
-    u = cpu->clidr;
+    u = GET_IDREG(isar, CLIDR);
     u = FIELD_DP32(u, CLIDR_EL1, LOUIS, 0);
     u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0);
-    cpu->clidr = u;
+    SET_IDREG(isar, CLIDR, u);
 
     /*
      * Set CTR_EL0.DIC and IDC to tell the guest it doesn't need to
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index c66d521278..71c6c44ee8 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -658,15 +658,6 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
         spsr &= ~PSTATE_SS;
     }
 
-    /*
-     * FEAT_RME forbids return from EL3 with an invalid security state.
-     * We don't need an explicit check for FEAT_RME here because we enforce
-     * in scr_write() that you can't set the NSE bit without it.
-     */
-    if (cur_el == 3 && (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) {
-        goto illegal_return;
-    }
-
     new_el = el_from_spsr(spsr);
     if (new_el == -1) {
         goto illegal_return;
@@ -678,6 +669,17 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
         goto illegal_return;
     }
 
+    /*
+     * FEAT_RME forbids return from EL3 to a lower exception level
+     * with an invalid security state.
+     * We don't need an explicit check for FEAT_RME here because we enforce
+     * in scr_write() that you can't set the NSE bit without it.
+     */
+    if (cur_el == 3 && new_el < 3 &&
+        (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) {
+        goto illegal_return;
+    }
+
     if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
         /* Return to an EL which is configured for a different register width */
         goto illegal_return;
diff --git a/target/arm/tcg/helper.h b/target/arm/tcg/helper.h
index d9565c8069..0a006d9514 100644
--- a/target/arm/tcg/helper.h
+++ b/target/arm/tcg/helper.h
@@ -1209,6 +1209,5 @@ DEF_HELPER_FLAGS_4(sme2_luti4_2b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
 DEF_HELPER_FLAGS_4(sme2_luti4_2h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
 DEF_HELPER_FLAGS_4(sme2_luti4_2s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
 
-DEF_HELPER_FLAGS_4(sme2_luti4_4b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
 DEF_HELPER_FLAGS_4(sme2_luti4_4h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
 DEF_HELPER_FLAGS_4(sme2_luti4_4s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build
index c59f0f03a1..895facdc30 100644
--- a/target/arm/tcg/meson.build
+++ b/target/arm/tcg/meson.build
@@ -64,6 +64,7 @@ arm_common_ss.add(files(
 ))
 
 arm_common_system_ss.add(files(
+  'cpregs-at.c',
   'hflags.c',
   'iwmmxt_helper.c',
   'neon_helper.c',
diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c
index 0603db0909..bae6165b50 100644
--- a/target/arm/tcg/vec_helper.c
+++ b/target/arm/tcg/vec_helper.c
@@ -3526,7 +3526,6 @@ DO_SME2_LUT(4,1,s, 4)
 DO_SME2_LUT(4,2,b, 1)
 DO_SME2_LUT(4,2,h, 2)
 DO_SME2_LUT(4,2,s, 4)
-DO_SME2_LUT(4,4,b, 1)
 DO_SME2_LUT(4,4,h, 2)
 DO_SME2_LUT(4,4,s, 4)
 
diff --git a/tests/functional/meson.build b/tests/functional/meson.build
index ae5c52d79f..8bebcd4d94 100644
--- a/tests/functional/meson.build
+++ b/tests/functional/meson.build
@@ -138,6 +138,7 @@ tests_arm_system_thorough = [
   'arm_cubieboard',
   'arm_emcraft_sf2',
   'arm_integratorcp',
+  'arm_max78000fthr',
   'arm_microbit',
   'arm_orangepi',
   'arm_quanta_gsj',
diff --git a/tests/functional/test_arm_max78000fthr.py b/tests/functional/test_arm_max78000fthr.py
new file mode 100755
index 0000000000..a82980b0f7
--- /dev/null
+++ b/tests/functional/test_arm_max78000fthr.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+#
+# Functional test that checks the max78000fthr machine.
+# Tests ICC, GCR, TRNG, AES, and UART
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import QemuSystemTest, Asset, exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+
+
+class Max78000Machine(QemuSystemTest):
+
+    ASSET_FW = Asset(
+        'https://github.com/JacksonDonaldson/max78000Test/raw/main/build/max78000.bin',
+        '86940b4bf60931bc6a8aa5db4b9f7f3cf8f64dbbd7ac534647980e536cf3adf7')
+
+    def test_fthr(self):
+        self.set_machine('max78000fthr')
+        fw_path = self.ASSET_FW.fetch()
+        self.vm.set_console()
+        self.vm.add_args('-kernel', fw_path)
+        self.vm.add_args('-device', "loader,file=" + fw_path + ",addr=0x10000000")
+        self.vm.launch()
+
+        wait_for_console_pattern(self, 'started')
+
+        # i -> prints instruction cache values
+        exec_command_and_wait_for_pattern(self, 'i', 'CTRL: 00010001')
+
+        # r -> gcr resets the machine
+        exec_command_and_wait_for_pattern(self, 'r', 'started')
+
+        # z -> sets some memory, then has gcr zero it
+        exec_command_and_wait_for_pattern(self, 'z', 'initial value: 12345678')
+        wait_for_console_pattern(self, "after memz: 00000000")
+
+        # t -> runs trng
+        exec_command_and_wait_for_pattern(self, 't', 'random data:')
+
+        # a -> runs aes
+        exec_command_and_wait_for_pattern(self, 'a',
+                'encrypted to : a47ca9dd e0df4c86 a070af6e 91710dec')
+        wait_for_console_pattern(self,
+                'encrypted to : cab7a28e bf456751 9049fcea 8960494b')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/qtest/cxl-test.c b/tests/qtest/cxl-test.c
index a600331843..8fb7e58d4f 100644
--- a/tests/qtest/cxl-test.c
+++ b/tests/qtest/cxl-test.c
@@ -19,6 +19,12 @@
     "-device pxb-cxl,id=cxl.1,bus=pcie.0,bus_nr=53 " \
     "-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=4G "
 
+#define QEMU_VIRT_2PXB_CMD \
+    "-machine virt,cxl=on -cpu max " \
+    "-device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 " \
+    "-device pxb-cxl,id=cxl.1,bus=pcie.0,bus_nr=53 " \
+    "-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=4G "
+
 #define QEMU_RP \
     "-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 "
 
@@ -197,25 +203,51 @@ static void cxl_2pxb_4rp_4t3d(void)
     qtest_end();
     rmdir(tmpfs);
 }
+
+static void cxl_virt_2pxb_4rp_4t3d(void)
+{
+    g_autoptr(GString) cmdline = g_string_new(NULL);
+    g_autofree const char *tmpfs = NULL;
+
+    tmpfs = g_dir_make_tmp("cxl-test-XXXXXX", NULL);
+
+    g_string_printf(cmdline, QEMU_VIRT_2PXB_CMD QEMU_4RP QEMU_4T3D,
+                    tmpfs, tmpfs, tmpfs, tmpfs, tmpfs, tmpfs,
+                    tmpfs, tmpfs);
+
+    qtest_start(cmdline->str);
+    qtest_end();
+    rmdir(tmpfs);
+}
 #endif /* CONFIG_POSIX */
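
For orientation, QEMU_VIRT_2PXB_CMD above contributes the command-line prefix shown below; the root-port and type-3 macros (defined earlier in this file) append the remaining devices, with the tmpfs paths filled in by g_string_printf(). Illustration only:

static const char example_virt_cxl_prefix[] =
    "-machine virt,cxl=on -cpu max "
    "-device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 "
    "-device pxb-cxl,id=cxl.1,bus=pcie.0,bus_nr=53 "
    "-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,"
    "cxl-fmw.0.size=4G ";
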
 
 int main(int argc, char **argv)
 {
-    g_test_init(&argc, &argv, NULL);
+    const char *arch = qtest_get_arch();
 
-    qtest_add_func("/pci/cxl/basic_hostbridge", cxl_basic_hb);
-    qtest_add_func("/pci/cxl/basic_pxb", cxl_basic_pxb);
-    qtest_add_func("/pci/cxl/pxb_with_window", cxl_pxb_with_window);
-    qtest_add_func("/pci/cxl/pxb_x2_with_window", cxl_2pxb_with_window);
-    qtest_add_func("/pci/cxl/rp", cxl_root_port);
-    qtest_add_func("/pci/cxl/rp_x2", cxl_2root_port);
+    g_test_init(&argc, &argv, NULL);
+    if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
+        qtest_add_func("/pci/cxl/basic_hostbridge", cxl_basic_hb);
+        qtest_add_func("/pci/cxl/basic_pxb", cxl_basic_pxb);
+        qtest_add_func("/pci/cxl/pxb_with_window", cxl_pxb_with_window);
+        qtest_add_func("/pci/cxl/pxb_x2_with_window", cxl_2pxb_with_window);
+        qtest_add_func("/pci/cxl/rp", cxl_root_port);
+        qtest_add_func("/pci/cxl/rp_x2", cxl_2root_port);
 #ifdef CONFIG_POSIX
-    qtest_add_func("/pci/cxl/type3_device", cxl_t3d_deprecated);
-    qtest_add_func("/pci/cxl/type3_device_pmem", cxl_t3d_persistent);
-    qtest_add_func("/pci/cxl/type3_device_vmem", cxl_t3d_volatile);
-    qtest_add_func("/pci/cxl/type3_device_vmem_lsa", cxl_t3d_volatile_lsa);
-    qtest_add_func("/pci/cxl/rp_x2_type3_x2", cxl_1pxb_2rp_2t3d);
-    qtest_add_func("/pci/cxl/pxb_x2_root_port_x4_type3_x4", cxl_2pxb_4rp_4t3d);
+        qtest_add_func("/pci/cxl/type3_device", cxl_t3d_deprecated);
+        qtest_add_func("/pci/cxl/type3_device_pmem", cxl_t3d_persistent);
+        qtest_add_func("/pci/cxl/type3_device_vmem", cxl_t3d_volatile);
+        qtest_add_func("/pci/cxl/type3_device_vmem_lsa", cxl_t3d_volatile_lsa);
+        qtest_add_func("/pci/cxl/rp_x2_type3_x2", cxl_1pxb_2rp_2t3d);
+        qtest_add_func("/pci/cxl/pxb_x2_root_port_x4_type3_x4",
+                       cxl_2pxb_4rp_4t3d);
 #endif
+    } else if (strcmp(arch, "aarch64") == 0) {
+#ifdef CONFIG_POSIX
+        qtest_add_func("/pci/cxl/virt/pxb_x2_root_port_x4_type3_x4",
+                       cxl_virt_2pxb_4rp_4t3d);
+#endif
+    }
+
     return g_test_run();
 }
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index 91b4a71a18..5ad969f616 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -262,6 +262,7 @@ qtests_aarch64 = \
    config_all_devices.has_key('CONFIG_TPM_TIS_I2C') ? ['tpm-tis-i2c-test'] : []) + \
   (config_all_devices.has_key('CONFIG_ASPEED_SOC') ? qtests_aspeed64 : []) + \
   (config_all_devices.has_key('CONFIG_NPCM8XX') ? qtests_npcm8xx : []) + \
+  qtests_cxl +                                                                                  \
   ['arm-cpu-features',
    'numa-test',
    'boot-serial-test',